-rw-r--r--Documentation/RCU/torture.txt34
-rw-r--r--Documentation/kernel-parameters.txt4
-rw-r--r--Documentation/pi-futex.txt121
-rw-r--r--Documentation/robust-futexes.txt2
-rw-r--r--Documentation/rt-mutex-design.txt781
-rw-r--r--Documentation/rt-mutex.txt79
-rw-r--r--Documentation/video4linux/README.pvrusb2212
-rw-r--r--arch/alpha/kernel/setup.c2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/i386/Kconfig15
-rw-r--r--arch/i386/kernel/asm-offsets.c4
-rw-r--r--arch/i386/kernel/cpu/amd.c6
-rw-r--r--arch/i386/kernel/cpu/common.c25
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c16
-rw-r--r--arch/i386/kernel/cpu/proc.c8
-rw-r--r--arch/i386/kernel/cpuid.c2
-rw-r--r--arch/i386/kernel/entry.S22
-rw-r--r--arch/i386/kernel/irq.c8
-rw-r--r--arch/i386/kernel/msr.c2
-rw-r--r--arch/i386/kernel/scx200.c66
-rw-r--r--arch/i386/kernel/signal.c4
-rw-r--r--arch/i386/kernel/smpboot.c37
-rw-r--r--arch/i386/kernel/sysenter.c128
-rw-r--r--arch/i386/kernel/topology.c28
-rw-r--r--arch/i386/kernel/vsyscall-sysenter.S4
-rw-r--r--arch/i386/kernel/vsyscall.lds.S4
-rw-r--r--arch/i386/mach-voyager/setup.c5
-rw-r--r--arch/i386/mach-voyager/voyager_smp.c2
-rw-r--r--arch/i386/mm/init.c5
-rw-r--r--arch/i386/mm/pageattr.c4
-rw-r--r--arch/ia64/Kconfig4
-rw-r--r--arch/ia64/kernel/palinfo.c4
-rw-r--r--arch/ia64/kernel/salinfo.c6
-rw-r--r--arch/ia64/kernel/topology.c32
-rw-r--r--arch/ia64/mm/discontig.c57
-rw-r--r--arch/ia64/mm/init.c5
-rw-r--r--arch/ia64/sn/kernel/irq.c2
-rw-r--r--arch/m32r/kernel/setup.c2
-rw-r--r--arch/m68knommu/Makefile24
-rw-r--r--arch/m68knommu/defconfig207
-rw-r--r--arch/m68knommu/platform/68328/head-rom.S18
-rw-r--r--arch/m68knommu/platform/68360/head-ram.S19
-rw-r--r--arch/m68knommu/platform/68360/head-rom.S17
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/smtc.c4
-rw-r--r--arch/parisc/kernel/topology.c3
-rw-r--r--arch/powerpc/kernel/setup_32.c2
-rw-r--r--arch/powerpc/kernel/sysfs.c31
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/powerpc/mm/mem.c11
-rw-r--r--arch/powerpc/mm/numa.c11
-rw-r--r--arch/powerpc/platforms/cell/spufs/switch.c2
-rw-r--r--arch/powerpc/platforms/powermac/pfunc_core.c2
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c2
-rw-r--r--arch/powerpc/sysdev/mmio_nvram.c2
-rw-r--r--arch/ppc/kernel/setup.c2
-rw-r--r--arch/s390/appldata/appldata_base.c2
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/sh/kernel/setup.c2
-rw-r--r--arch/sh64/kernel/setup.c2
-rw-r--r--arch/sparc64/kernel/setup.c2
-rw-r--r--arch/sparc64/mm/init.c3
-rw-r--r--arch/x86_64/kernel/entry.S2
-rw-r--r--arch/x86_64/kernel/irq.c4
-rw-r--r--arch/x86_64/kernel/mce.c4
-rw-r--r--arch/x86_64/kernel/smp.c4
-rw-r--r--arch/x86_64/kernel/smpboot.c8
-rw-r--r--arch/x86_64/mm/init.c73
-rw-r--r--arch/xtensa/kernel/time.c2
-rw-r--r--arch/xtensa/kernel/traps.c2
-rw-r--r--block/ll_rw_blk.c6
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/acpi_memhotplug.c146
-rw-r--r--drivers/acpi/numa.c15
-rw-r--r--drivers/atm/firestream.c3
-rw-r--r--drivers/base/cpu.c28
-rw-r--r--drivers/base/dmapool.c3
-rw-r--r--drivers/base/memory.c4
-rw-r--r--drivers/base/node.c61
-rw-r--r--drivers/base/topology.c4
-rw-r--r--drivers/char/Kconfig23
-rw-r--r--drivers/char/Makefile2
-rw-r--r--drivers/char/agp/sgi-agp.c5
-rw-r--r--drivers/char/drm/drm_memory_debug.h2
-rw-r--r--drivers/char/drm/via_dmablit.c2
-rw-r--r--drivers/char/epca.c2
-rw-r--r--drivers/char/hvcs.c11
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c3
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/moxa.c2
-rw-r--r--drivers/char/nsc_gpio.c142
-rw-r--r--drivers/char/pc8736x_gpio.c340
-rw-r--r--drivers/char/scx200_gpio.c162
-rw-r--r--drivers/char/specialix.c2
-rw-r--r--drivers/char/stallion.c208
-rw-r--r--drivers/char/sx.c2
-rw-r--r--drivers/char/tty_io.c7
-rw-r--r--drivers/cpufreq/cpufreq.c8
-rw-r--r--drivers/cpufreq/cpufreq_stats.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c16
-rw-r--r--drivers/ide/pci/amd74xx.c7
-rw-r--r--drivers/input/input.c3
-rw-r--r--drivers/isdn/gigaset/common.c2
-rw-r--r--drivers/isdn/i4l/isdn_tty.c2
-rw-r--r--drivers/leds/led-core.c2
-rw-r--r--drivers/leds/led-triggers.c2
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/Makefile1
-rw-r--r--drivers/media/video/cx2341x.c40
-rw-r--r--drivers/media/video/cx88/cx88-blackbird.c13
-rw-r--r--drivers/media/video/pvrusb2/Kconfig62
-rw-r--r--drivers/media/video/pvrusb2/Makefile18
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.c204
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-audio.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.c230
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-context.h92
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.c593
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ctrl.h123
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c279
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debug.h67
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.c478
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-debugifc.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-demod.c126
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-demod.h38
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-eeprom.c164
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-eeprom.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.c418
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-encoder.h42
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h384
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c3120
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.h335
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c115
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c232
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h47
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.c937
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-i2c-core.h96
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-io.c695
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-io.h102
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.c513
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-ioread.h50
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-main.c172
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-std.c408
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-std.h60
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.c865
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-sysfs.h47
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-tuner.c122
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-tuner.h38
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-util.h63
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.c1126
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-v4l2.h40
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.c253
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-video-v4l.h52
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-wm8775.c170
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-wm8775.h53
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2.h43
-rw-r--r--drivers/media/video/saa7134/saa6752hs.c4
-rw-r--r--drivers/media/video/stradis.c4
-rw-r--r--drivers/media/video/tda9887.c31
-rw-r--r--drivers/media/video/tuner-core.c8
-rw-r--r--drivers/media/video/v4l2-common.c2
-rw-r--r--drivers/message/fusion/mptfc.c6
-rw-r--r--drivers/message/fusion/mptsas.c3
-rw-r--r--drivers/message/i2o/iop.c1
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c1
-rw-r--r--drivers/mtd/chips/jedec.c1
-rw-r--r--drivers/mtd/chips/map_absent.c3
-rw-r--r--drivers/mtd/chips/map_ram.c1
-rw-r--r--drivers/mtd/chips/map_rom.c1
-rw-r--r--drivers/mtd/devices/block2mtd.c1
-rw-r--r--drivers/mtd/devices/ms02-nv.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/phram.c1
-rw-r--r--drivers/mtd/devices/pmc551.c3
-rw-r--r--drivers/mtd/devices/slram.c1
-rw-r--r--drivers/mtd/maps/ixp2000.c2
-rw-r--r--drivers/mtd/maps/physmap.c2
-rw-r--r--drivers/mtd/mtdchar.c2
-rw-r--r--drivers/mtd/nand/nand_base.c16
-rw-r--r--drivers/mtd/nand/ndfc.c6
-rw-r--r--drivers/mtd/nand/s3c2410.c164
-rw-r--r--drivers/mtd/nand/ts7250.c2
-rw-r--r--drivers/net/fec.c310
-rw-r--r--drivers/net/fs_enet/fs_enet-mii.c3
-rw-r--r--drivers/net/wireless/ipw2200.c22
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c4
-rw-r--r--drivers/rapidio/rio-access.c4
-rw-r--r--drivers/rtc/class.c2
-rw-r--r--drivers/rtc/rtc-ds1553.c2
-rw-r--r--drivers/rtc/rtc-sa1100.c2
-rw-r--r--drivers/rtc/rtc-vr41xx.c2
-rw-r--r--drivers/s390/block/dasd_eer.c2
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/ahci.c2
-rw-r--r--drivers/scsi/ata_piix.c2
-rw-r--r--drivers/scsi/libata-core.c153
-rw-r--r--drivers/scsi/libata-eh.c63
-rw-r--r--drivers/scsi/libata-scsi.c4
-rw-r--r--drivers/scsi/libata.h4
-rw-r--r--drivers/scsi/sata_nv.c2
-rw-r--r--drivers/scsi/sata_sil.c31
-rw-r--r--drivers/scsi/sata_sil24.c2
-rw-r--r--drivers/scsi/sata_svw.c2
-rw-r--r--drivers/scsi/sata_uli.c2
-rw-r--r--drivers/scsi/sata_via.c2
-rw-r--r--drivers/scsi/sata_vsc.c12
-rw-r--r--drivers/sn/ioc3.c2
-rw-r--r--drivers/video/au1100fb.c3
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--fs/Kconfig42
-rw-r--r--fs/buffer.c3
-rw-r--r--fs/cifs/CHANGES17
-rw-r--r--fs/cifs/Makefile2
-rw-r--r--fs/cifs/README39
-rw-r--r--fs/cifs/asn1.c10
-rw-r--r--fs/cifs/cifs_debug.c134
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifs_unicode.c1
-rw-r--r--fs/cifs/cifsencrypt.c140
-rw-r--r--fs/cifs/cifsfs.c6
-rw-r--r--fs/cifs/cifsfs.h3
-rw-r--r--fs/cifs/cifsglob.h71
-rw-r--r--fs/cifs/cifspdu.h98
-rw-r--r--fs/cifs/cifsproto.h14
-rw-r--r--fs/cifs/cifssmb.c287
-rw-r--r--fs/cifs/connect.c498
-rw-r--r--fs/cifs/dir.c15
-rw-r--r--fs/cifs/fcntl.c4
-rw-r--r--fs/cifs/file.c52
-rw-r--r--fs/cifs/inode.c39
-rw-r--r--fs/cifs/link.c7
-rw-r--r--fs/cifs/misc.c10
-rw-r--r--fs/cifs/netmisc.c4
-rw-r--r--fs/cifs/ntlmssp.c143
-rw-r--r--fs/cifs/readdir.c184
-rw-r--r--fs/cifs/sess.c538
-rw-r--r--fs/cifs/smbencrypt.c1
-rw-r--r--fs/cifs/transport.c3
-rw-r--r--fs/jbd/journal.c3
-rw-r--r--fs/jffs2/acl.c2
-rw-r--r--fs/jffs2/erase.c19
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jffs2/gc.c6
-rw-r--r--fs/jffs2/jffs2_fs_sb.h3
-rw-r--r--fs/jffs2/malloc.c2
-rw-r--r--fs/jffs2/nodelist.c3
-rw-r--r--fs/jffs2/nodemgmt.c21
-rw-r--r--fs/jffs2/readinode.c1
-rw-r--r--fs/jffs2/scan.c55
-rw-r--r--fs/jffs2/summary.c41
-rw-r--r--fs/jffs2/xattr.c632
-rw-r--r--fs/jffs2/xattr.h21
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/inode.c2
-rw-r--r--fs/nfs/internal.h8
-rw-r--r--fs/nfs/pagelist.c2
-rw-r--r--fs/nfs/read.c2
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/ocfs2/cluster/heartbeat.c2
-rw-r--r--fs/ocfs2/cluster/tcp.c2
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/dlm/dlmlock.c2
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c4
-rw-r--r--fs/ocfs2/dlmglue.c2
-rw-r--r--fs/ocfs2/journal.c2
-rw-r--r--fs/ocfs2/vote.c8
-rw-r--r--fs/proc/task_mmu.c30
-rw-r--r--fs/ufs/inode.c111
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_linux.h2
-rw-r--r--fs/xfs/linux-2.6/xfs_vnode.h2
-rw-r--r--fs/xfs/xfs_behavior.h3
-rw-r--r--fs/xfs/xfs_inode.c4
-rw-r--r--fs/xfs/xfs_log.c4
-rw-r--r--fs/xfs/xfs_log_recover.c2
-rw-r--r--fs/xfs/xfs_mount.c21
-rw-r--r--fs/xfs/xfs_rtalloc.c2
-rw-r--r--fs/xfs/xfs_trans.h4
-rw-r--r--fs/xfs/xfs_vnodeops.c11
-rw-r--r--include/asm-alpha/core_t2.h2
-rw-r--r--include/asm-arm/arch-s3c2410/regs-nand.h48
-rw-r--r--include/asm-generic/bug.h6
-rw-r--r--include/asm-i386/cpu.h2
-rw-r--r--include/asm-i386/elf.h53
-rw-r--r--include/asm-i386/fixmap.h10
-rw-r--r--include/asm-i386/mmu.h1
-rw-r--r--include/asm-i386/node.h29
-rw-r--r--include/asm-i386/page.h3
-rw-r--r--include/asm-i386/processor.h8
-rw-r--r--include/asm-i386/thread_info.h11
-rw-r--r--include/asm-i386/topology.h11
-rw-r--r--include/asm-i386/unwind.h4
-rw-r--r--include/asm-ia64/nodedata.h12
-rw-r--r--include/asm-ia64/topology.h1
-rw-r--r--include/asm-m68knommu/ptrace.h2
-rw-r--r--include/asm-powerpc/topology.h5
-rw-r--r--include/asm-sparc64/topology.h3
-rw-r--r--include/asm-x86_64/hw_irq.h2
-rw-r--r--include/asm-x86_64/topology.h2
-rw-r--r--include/linux/acpi.h6
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/cpu.h14
-rw-r--r--include/linux/dmaengine.h2
-rw-r--r--include/linux/futex.h12
-rw-r--r--include/linux/init_task.h3
-rw-r--r--include/linux/ioport.h3
-rw-r--r--include/linux/ipmi.h4
-rw-r--r--include/linux/jffs2.h1
-rw-r--r--include/linux/libata.h15
-rw-r--r--include/linux/list.h9
-rw-r--r--include/linux/memory_hotplug.h73
-rw-r--r--include/linux/mm.h13
-rw-r--r--include/linux/node.h17
-rw-r--r--include/linux/nsc_gpio.h42
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/plist.h247
-rw-r--r--include/linux/poison.h58
-rw-r--r--include/linux/rcupdate.h1
-rw-r--r--include/linux/rtmutex.h117
-rw-r--r--include/linux/sched.h59
-rw-r--r--include/linux/scx200.h7
-rw-r--r--include/linux/scx200_gpio.h21
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/syscalls.h4
-rw-r--r--include/linux/sysctl.h2
-rw-r--r--include/linux/topology.h3
-rw-r--r--include/media/cx2341x.h4
-rw-r--r--init/Kconfig5
-rw-r--r--init/main.c2
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/acct.c3
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/auditsc.c10
-rw-r--r--kernel/cpu.c8
-rw-r--r--kernel/exit.c9
-rw-r--r--kernel/fork.c19
-rw-r--r--kernel/futex.c1067
-rw-r--r--kernel/futex_compat.c14
-rw-r--r--kernel/hrtimer.c4
-rw-r--r--kernel/mutex-debug.c5
-rw-r--r--kernel/power/Kconfig13
-rw-r--r--kernel/profile.c2
-rw-r--r--kernel/rcupdate.c14
-rw-r--r--kernel/rcutorture.c201
-rw-r--r--kernel/resource.c38
-rw-r--r--kernel/rtmutex-debug.c513
-rw-r--r--kernel/rtmutex-debug.h37
-rw-r--r--kernel/rtmutex-tester.c440
-rw-r--r--kernel/rtmutex.c990
-rw-r--r--kernel/rtmutex.h29
-rw-r--r--kernel/rtmutex_common.h123
-rw-r--r--kernel/sched.c1199
-rw-r--r--kernel/softirq.c4
-rw-r--r--kernel/softlockup.c4
-rw-r--r--kernel/sysctl.c27
-rw-r--r--kernel/timer.c4
-rw-r--r--kernel/workqueue.c2
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Kconfig.debug18
-rw-r--r--lib/Makefile1
-rw-r--r--lib/plist.c118
-rw-r--r--lib/zlib_inflate/inffast.c6
-rw-r--r--lib/zlib_inflate/inftrees.c9
-rw-r--r--mm/Kconfig1
-rw-r--r--mm/filemap.c18
-rw-r--r--mm/memory_hotplug.c126
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/slab.c21
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/vmscan.c39
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_seal.c2
-rw-r--r--net/tipc/bcast.c4
-rw-r--r--net/tipc/bearer.c2
-rw-r--r--net/tipc/config.c2
-rw-r--r--net/tipc/dbg.c2
-rw-r--r--net/tipc/handler.c2
-rw-r--r--net/tipc/name_table.c4
-rw-r--r--net/tipc/net.c2
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/port.c4
-rw-r--r--net/tipc/ref.c4
-rw-r--r--net/tipc/subscr.c2
-rw-r--r--net/tipc/user_reg.c2
-rw-r--r--scripts/Kbuild.include5
-rw-r--r--scripts/Makefile.build2
-rw-r--r--scripts/Makefile.host6
-rw-r--r--scripts/Makefile.lib6
-rw-r--r--scripts/Makefile.modpost2
-rw-r--r--scripts/rt-tester/check-all.sh22
-rw-r--r--scripts/rt-tester/rt-tester.py222
-rw-r--r--scripts/rt-tester/t2-l1-2rt-sameprio.tst99
-rw-r--r--scripts/rt-tester/t2-l1-pi.tst82
-rw-r--r--scripts/rt-tester/t2-l1-signal.tst77
-rw-r--r--scripts/rt-tester/t2-l2-2rt-deadlock.tst89
-rw-r--r--scripts/rt-tester/t3-l1-pi-1rt.tst92
-rw-r--r--scripts/rt-tester/t3-l1-pi-2rt.tst93
-rw-r--r--scripts/rt-tester/t3-l1-pi-3rt.tst92
-rw-r--r--scripts/rt-tester/t3-l1-pi-signal.tst98
-rw-r--r--scripts/rt-tester/t3-l1-pi-steal.tst96
-rw-r--r--scripts/rt-tester/t3-l2-pi.tst92
-rw-r--r--scripts/rt-tester/t4-l2-pi-deboost.tst123
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst183
-rw-r--r--scripts/rt-tester/t5-l4-pi-boost-deboost.tst143
-rw-r--r--security/keys/key.c3
-rw-r--r--security/selinux/hooks.c6
-rw-r--r--sound/oss/via82cxxx_audio.c5
-rw-r--r--sound/ppc/toonie.c0
411 files changed, 25775 insertions, 3134 deletions
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index e4c38152f7f799..a4948591607d0e 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -7,7 +7,7 @@ The CONFIG_RCU_TORTURE_TEST config option is available for all RCU
implementations. It creates an rcutorture kernel module that can
be loaded to run a torture test. The test periodically outputs
status messages via printk(), which can be examined via the dmesg
-command (perhaps grepping for "rcutorture"). The test is started
+command (perhaps grepping for "torture"). The test is started
when the module is loaded, and stops when the module is unloaded.
However, actually setting this config option to "y" results in the system
@@ -35,6 +35,19 @@ stat_interval The number of seconds between output of torture
be printed -only- when the module is unloaded, and this
is the default.
+shuffle_interval
+ The number of seconds to keep the test threads affinitied
+ to a particular subset of the CPUs. Used in conjunction
+ with test_no_idle_hz.
+
+test_no_idle_hz Whether or not to test the ability of RCU to operate in
+ a kernel that disables the scheduling-clock interrupt to
+ idle CPUs. Boolean parameter, "1" to test, "0" otherwise.
+
+torture_type The type of RCU to test: "rcu" for the rcu_read_lock()
+ API, "rcu_bh" for the rcu_read_lock_bh() API, and "srcu"
+ for the "srcu_read_lock()" API.
+
verbose Enable debug printk()s. Default is disabled.
@@ -42,14 +55,14 @@ OUTPUT
The statistics output is as follows:
- rcutorture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
- rcutorture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
- rcutorture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
- rcutorture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
- rcutorture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
- rcutorture: --- End of test
+ rcu-torture: --- Start of test: nreaders=16 stat_interval=0 verbose=0
+ rcu-torture: rtc: 0000000000000000 ver: 1916 tfle: 0 rta: 1916 rtaf: 0 rtf: 1915
+ rcu-torture: Reader Pipe: 1466408 9747 0 0 0 0 0 0 0 0 0
+ rcu-torture: Reader Batch: 1464477 11678 0 0 0 0 0 0 0 0
+ rcu-torture: Free-Block Circulation: 1915 1915 1915 1915 1915 1915 1915 1915 1915 1915 0
+ rcu-torture: --- End of test
-The command "dmesg | grep rcutorture:" will extract this information on
+The command "dmesg | grep torture:" will extract this information on
most systems. On more esoteric configurations, it may be necessary to
use other commands to access the output of the printk()s used by
the RCU torture test. The printk()s use KERN_ALERT, so they should
@@ -115,8 +128,9 @@ The following script may be used to torture RCU:
modprobe rcutorture
sleep 100
rmmod rcutorture
- dmesg | grep rcutorture:
+ dmesg | grep torture:
The output can be manually inspected for the error flag of "!!!".
One could of course create a more elaborate script that automatically
-checked for such errors.
+checked for such errors. The "rmmod" command forces a "SUCCESS" or
+"FAILURE" indication to be printk()ed.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2e352a605fcfef..0d189c93eeaf94 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1669,6 +1669,10 @@ running once the system is up.
usbhid.mousepoll=
[USBHID] The interval which mice are to be polled at.
+ vdso= [IA-32]
+ vdso=1: enable VDSO (default)
+ vdso=0: disable VDSO mapping
+
video= [FB] Frame buffer configuration
See Documentation/fb/modedb.txt.
diff --git a/Documentation/pi-futex.txt b/Documentation/pi-futex.txt
new file mode 100644
index 00000000000000..5d61dacd21f6dc
--- /dev/null
+++ b/Documentation/pi-futex.txt
@@ -0,0 +1,121 @@
+Lightweight PI-futexes
+----------------------
+
+We are calling them lightweight for 3 reasons:
+
+ - in the user-space fastpath a PI-enabled futex involves no kernel work
+ (or any other PI complexity) at all. No registration, no extra kernel
+ calls - just pure fast atomic ops in userspace.
+
+ - even in the slowpath, the system call and scheduling pattern is very
+ similar to normal futexes.
+
+ - the in-kernel PI implementation is streamlined around the mutex
+ abstraction, with strict rules that keep the implementation
+ relatively simple: only a single owner may own a lock (i.e. no
+ read-write lock support), only the owner may unlock a lock, no
+ recursive locking, etc.
+
+Priority Inheritance - why?
+---------------------------
+
+The short reply: user-space PI helps achieve and improve determinism for
+user-space applications. In the best-case, it can help achieve
+determinism and well-bound latencies. Even in the worst-case, PI will
+improve the statistical distribution of locking related application
+delays.
+
+The longer reply:
+-----------------
+
+Firstly, sharing locks between multiple tasks is a common programming
+technique that often cannot be replaced with lockless algorithms. As we
+can see in the kernel [which is a quite complex program in itself],
+lockless structures are rather the exception than the norm - the current
+ratio of lockless vs. lock-based code for shared data structures is somewhere
+between 1:10 and 1:100. Lockless is hard, and the complexity of lockless
+algorithms often endangers the ability to do robust reviews of said code.
+I.e. critical RT apps often choose lock structures to protect critical
+data structures, instead of lockless algorithms. Furthermore, there are
+cases (like shared hardware, or other resource limits) where lockless
+access is mathematically impossible.
+
+Media players (such as Jack) are an example of reasonable application
+design with multiple tasks (with multiple priority levels) sharing
+short-held locks: for example, a highprio audio playback thread is
+combined with medium-prio construct-audio-data threads and low-prio
+display-colory-stuff threads. Add video and decoding to the mix and
+we've got even more priority levels.
+
+So once we accept that synchronization objects (locks) are an
+unavoidable fact of life, and once we accept that multi-task userspace
+apps have a very fair expectation of being able to use locks, we've got
+to think about how to offer the option of a deterministic locking
+implementation to user-space.
+
+Most of the technical counter-arguments against doing priority
+inheritance only apply to kernel-space locks. But user-space locks are
+different, there we cannot disable interrupts or make the task
+non-preemptible in a critical section, so the 'use spinlocks' argument
+does not apply (user-space spinlocks have the same priority inversion
+problems as other user-space locking constructs). Fact is, pretty much
+the only technique that currently enables good determinism for userspace
+locks (such as futex-based pthread mutexes) is priority inheritance:
+
+Currently (without PI), if a high-prio and a low-prio task share a lock
+[this is a quite common scenario for most non-trivial RT applications],
+even if all critical sections are coded carefully to be deterministic
+(i.e. all critical sections are short in duration and only execute a
+limited number of instructions), the kernel cannot guarantee any
+deterministic execution of the high-prio task: any medium-priority task
+could preempt the low-prio task while it holds the shared lock and
+executes the critical section, and could delay it indefinitely.
+
+Implementation:
+---------------
+
+As mentioned before, the userspace fastpath of PI-enabled pthread
+mutexes involves no kernel work at all - they behave quite similarly to
+normal futex-based locks: a 0 value means unlocked, and a value==TID
+means locked. (This is the same method as used by list-based robust
+futexes.) Userspace uses atomic ops to lock/unlock these mutexes without
+entering the kernel.
+
+To handle the slowpath, we have added two new futex ops:
+
+ FUTEX_LOCK_PI
+ FUTEX_UNLOCK_PI
+
+If the lock-acquire fastpath fails, [i.e. an atomic transition from 0 to
+TID fails], then FUTEX_LOCK_PI is called. The kernel does all the
+remaining work: if there is no futex-queue attached to the futex address
+yet then the code looks up the task that owns the futex [it has put its
+own TID into the futex value], and attaches a 'PI state' structure to
+the futex-queue. The pi_state includes an rt-mutex, which is a PI-aware,
+kernel-based synchronization object. The 'other' task is made the owner
+of the rt-mutex, and the FUTEX_WAITERS bit is atomically set in the
+futex value. Then this task tries to lock the rt-mutex, on which it
+blocks. Once it returns, it has the mutex acquired, and it sets the
+futex value to its own TID and returns. Userspace has no other work to
+perform - it now owns the lock, and the futex value contains
+FUTEX_WAITERS|TID.
+
+If the unlock side fastpath succeeds, [i.e. userspace manages to do a
+TID -> 0 atomic transition of the futex value], then no kernel work is
+triggered.
+
+If the unlock fastpath fails (because the FUTEX_WAITERS bit is set),
+then FUTEX_UNLOCK_PI is called, and the kernel unlocks the futex on
+behalf of userspace - and it also unlocks the attached
+pi_state->rt_mutex and thus wakes up any potential waiters.
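+
+As a concrete (and deliberately simplified) illustration of the fastpath /
+slowpath split described above, a user-space PI lock could be built roughly
+like this. This is a sketch only - not glibc's PTHREAD_PRIO_INHERIT
+implementation: it assumes Linux headers that provide FUTEX_LOCK_PI and
+FUTEX_UNLOCK_PI, uses GCC __sync builtins for the atomic ops, and omits all
+error handling (including the FUTEX_OWNER_DIED and retry cases):
+
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#include <stdint.h>
+
+static long sys_futex(uint32_t *uaddr, int op, uint32_t val)
+{
+	return syscall(SYS_futex, uaddr, op, val, NULL, NULL, 0);
+}
+
+static void pi_mutex_lock(uint32_t *futex)
+{
+	uint32_t tid = syscall(SYS_gettid);
+
+	/* fastpath: 0 -> TID transition, no kernel work at all */
+	if (__sync_bool_compare_and_swap(futex, 0, tid))
+		return;
+
+	/* slowpath: the kernel attaches the pi_state / rt-mutex and blocks us;
+	   on return the futex value is FUTEX_WAITERS|TID and we own the lock */
+	sys_futex(futex, FUTEX_LOCK_PI, 0);
+}
+
+static void pi_mutex_unlock(uint32_t *futex)
+{
+	uint32_t tid = syscall(SYS_gettid);
+
+	/* fastpath: TID -> 0 transition, only works when nobody is waiting */
+	if (__sync_bool_compare_and_swap(futex, tid, 0))
+		return;
+
+	/* slowpath: FUTEX_WAITERS is set, let the kernel hand the lock over
+	   and wake up the top waiter */
+	sys_futex(futex, FUTEX_UNLOCK_PI, 0);
+}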
+
+Note that under this approach, contrary to previous PI-futex approaches,
+there is no prior 'registration' of a PI-futex. [which is not quite
+possible anyway, due to existing ABI properties of pthread mutexes.]
+
+Also, under this scheme, 'robustness' and 'PI' are two orthogonal
+properties of futexes, and all four combinations are possible: futex,
+robust-futex, PI-futex, robust+PI-futex.
+
+More details about priority inheritance can be found in
+Documentation/rt-mutex.txt.
diff --git a/Documentation/robust-futexes.txt b/Documentation/robust-futexes.txt
index df82d75245a01b..76e8064b8c3a5c 100644
--- a/Documentation/robust-futexes.txt
+++ b/Documentation/robust-futexes.txt
@@ -95,7 +95,7 @@ comparison. If the thread has registered a list, then normally the list
is empty. If the thread/process crashed or terminated in some incorrect
way then the list might be non-empty: in this case the kernel carefully
walks the list [not trusting it], and marks all locks that are owned by
-this thread with the FUTEX_OWNER_DEAD bit, and wakes up one waiter (if
+this thread with the FUTEX_OWNER_DIED bit, and wakes up one waiter (if
any).
The list is guaranteed to be private and per-thread at do_exit() time,
diff --git a/Documentation/rt-mutex-design.txt b/Documentation/rt-mutex-design.txt
new file mode 100644
index 00000000000000..c472ffacc2f6f6
--- /dev/null
+++ b/Documentation/rt-mutex-design.txt
@@ -0,0 +1,781 @@
+#
+# Copyright (c) 2006 Steven Rostedt
+# Licensed under the GNU Free Documentation License, Version 1.2
+#
+
+RT-mutex implementation design
+------------------------------
+
+This document tries to describe the design of the rtmutex.c implementation.
+It doesn't describe the reasons why rtmutex.c exists. For that please see
+Documentation/rt-mutex.txt. Although this document does explain problems
+that happen without this code, it does so only to provide the context
+needed to understand what the code actually does.
+
+The goal of this document is to help others understand the priority
+inheritance (PI) algorithm that is used, as well as reasons for the
+decisions that were made to implement PI in the manner that was done.
+
+
+Unbounded Priority Inversion
+----------------------------
+
+Priority inversion is when a lower priority process executes while a higher
+priority process wants to run. This happens for several reasons, and
+most of the time it can't be helped. Anytime a high priority process wants
+to use a resource that a lower priority process has (a mutex for example),
+the high priority process must wait until the lower priority process is done
+with the resource. This is a priority inversion. What we want to prevent
+is something called unbounded priority inversion. That is when the high
+priority process is prevented from running by a lower priority process for
+an undetermined amount of time.
+
+The classic example of unbounded priority inversion is when you have three
+processes, let's call them processes A, B, and C, where A is the highest
+priority process, C is the lowest, and B is in between. A tries to grab a lock
+that C owns, so it must wait and let C run to release the lock. But in the
+meantime, B executes, and since B is of a higher priority than C, it preempts C,
+but by doing so, it is in fact preempting A which is a higher priority process.
+Now there's no way of knowing how long A will be sleeping waiting for C
+to release the lock, because for all we know, B is a CPU hog and will
+never give C a chance to release the lock. This is called unbounded priority
+inversion.
+
+Here's a little ASCII art to show the problem.
+
+ grab lock L1 (owned by C)
+ |
+A ---+
+ C preempted by B
+ |
+C +----+
+
+B +-------->
+ B now keeps A from running.
+
+
+Priority Inheritance (PI)
+-------------------------
+
+There are several ways to solve this issue, but other ways are out of scope
+for this document. Here we only discuss PI.
+
+PI is where a process inherits the priority of another process if the other
+process blocks on a lock owned by the current process. To make this easier
+to understand, let's use the previous example, with processes A, B, and C again.
+
+This time, when A blocks on the lock owned by C, C would inherit the priority
+of A. So now if B becomes runnable, it would not preempt C, since C now has
+the high priority of A. As soon as C releases the lock, it loses its
+inherited priority, and A then can continue with the resource that C had.
+
+Terminology
+-----------
+
+Here I explain some terminology that is used in this document to help describe
+the design that is used to implement PI.
+
+PI chain - The PI chain is an ordered series of locks and processes that cause
+ processes to inherit priorities from a previous process that is
+ blocked on one of its locks. This is described in more detail
+ later in this document.
+
+mutex - In this document, to differentiate the locks that implement PI
+ from the spin locks used inside the PI code, the PI locks will
+ from now on be called mutexes.
+
+lock - In this document from now on, I will use the term lock when
+ referring to spin locks that are used to protect parts of the PI
+ algorithm. These locks disable preemption for UP (when
+ CONFIG_PREEMPT is enabled) and on SMP prevent multiple CPUs from
+ entering critical sections simultaneously.
+
+spin lock - Same as lock above.
+
+waiter - A waiter is a struct that is stored on the stack of a blocked
+ process. Since the scope of the waiter is within the code for
+ a process being blocked on the mutex, it is fine to allocate
+ the waiter on the process's stack (local variable). This
+ structure holds a pointer to the task, as well as the mutex that
+ the task is blocked on. It also has the plist node structures to
+ place the task in the waiter_list of a mutex as well as the
+ pi_list of a mutex owner task (described below).
+
+ waiter is sometimes used in reference to the task that is waiting
+ on a mutex. This is the same as waiter->task.
+
+waiters - A list of processes that are blocked on a mutex.
+
+top waiter - The highest priority process waiting on a specific mutex.
+
+top pi waiter - The highest priority process waiting on one of the mutexes
+ that a specific process owns.
+
+Note: task and process are used interchangeably in this document, mostly to
+ differentiate between two processes that are being described together.
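+
+To make the "waiter" term above more concrete, the structure looks roughly
+like the following sketch (based on the description in this document;
+debug-only fields are left out and the exact field names may differ
+between kernel versions):
+
+struct rt_mutex_waiter {
+	struct plist_node	list_entry;	/* node on the mutex's wait_list */
+	struct plist_node	pi_list_entry;	/* node on the owner's pi_list */
+	struct task_struct	*task;		/* the blocked task (the "waiter") */
+	struct rt_mutex		*lock;		/* the mutex the task is blocked on */
+};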
+
+
+PI chain
+--------
+
+The PI chain is a list of processes and mutexes that may cause priority
+inheritance to take place. Multiple chains may converge, but a chain
+would never diverge, since a process can't be blocked on more than one
+mutex at a time.
+
+Example:
+
+ Process: A, B, C, D, E
+ Mutexes: L1, L2, L3, L4
+
+ A owns: L1
+ B blocked on L1
+ B owns L2
+ C blocked on L2
+ C owns L3
+ D blocked on L3
+ D owns L4
+ E blocked on L4
+
+The chain would be:
+
+ E->L4->D->L3->C->L2->B->L1->A
+
+To show where two chains merge, we could add another process F and
+another mutex L5 where B owns L5 and F is blocked on mutex L5.
+
+The chain for F would be:
+
+ F->L5->B->L1->A
+
+Since a process may own more than one mutex, but never be blocked on more than
+one, the chains merge.
+
+Here we show both chains:
+
+ E->L4->D->L3->C->L2-+
+ |
+ +->B->L1->A
+ |
+ F->L5-+
+
+For PI to work, the processes at the right end of these chains (or we may
+also call it the Top of the chain) must be equal to or higher in priority
+than the processes to the left or below in the chain.
+
+Also since a mutex may have more than one process blocked on it, we can
+have multiple chains merge at mutexes. If we add another process G that is
+blocked on mutex L2:
+
+ G->L2->B->L1->A
+
+And once again, to show how this can grow I will show the merging chains
+again.
+
+ E->L4->D->L3->C-+
+ +->L2-+
+ | |
+ G-+ +->B->L1->A
+ |
+ F->L5-+
+
+
+Plist
+-----
+
+Before I go further and talk about how the PI chain is stored through lists
+on both mutexes and processes, I'll explain the plist. This is similar to
+the struct list_head functionality that is already in the kernel.
+The implementation of plist is out of scope for this document, but it is
+very important to understand what it does.
+
+There are a few differences between plist and list, the most important one
+being that plist is a priority sorted linked list. This means that the
+priorities of the plist are sorted, such that it takes O(1) to retrieve the
+highest priority item in the list. Obviously this is useful to store processes
+based on their priorities.
+
+Another difference, which is important for implementation, is that, unlike
+list, the head of the list is a different element than the nodes of a list.
+So the head of the list is declared as struct plist_head and nodes that will
+be added to the list are declared as struct plist_node.
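+
+As a rough usage illustration (a sketch only - the exact plist function
+signatures may differ between kernel versions, and initialization of the
+head is left out):
+
+static void plist_example(struct plist_head *waiters, struct task_struct *task)
+{
+	struct plist_node me;
+	struct plist_node *top;
+
+	plist_node_init(&me, task->prio);	/* node carries the task's priority */
+	plist_add(&me, waiters);		/* insert, kept sorted by priority */
+
+	/* O(1): the first node is always the highest priority entry */
+	top = plist_first(waiters);
+
+	plist_del(&me, waiters);		/* remove when no longer needed */
+}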
+
+
+Mutex Waiter List
+-----------------
+
+Every mutex keeps track of all the waiters that are blocked on itself. The mutex
+has a plist to store these waiters by priority. This list is protected by
+a spin lock that is located in the struct of the mutex. This lock is called
+wait_lock. Since the modification of the waiter list is never done in
+interrupt context, the wait_lock can be taken without disabling interrupts.
+
+
+Task PI List
+------------
+
+To keep track of the PI chains, each process has its own PI list. This is
+a list of all top waiters of the mutexes that are owned by the process.
+Note that this list only holds the top waiters and not all waiters that are
+blocked on mutexes owned by the process.
+
+The top of the task's PI list is always the highest priority task that
+is waiting on a mutex that is owned by the task. So if the task has
+inherited a priority, it will always be the priority of the task that is
+at the top of this list.
+
+This list is stored in the task structure of a process as a plist called
+pi_list. This list is protected by a spin lock also in the task structure,
+called pi_lock. This lock may also be taken in interrupt context, so when
+locking the pi_lock, interrupts must be disabled.
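+
+Putting these pieces together, the PI-related parts of the task structure
+can be pictured roughly as follows (field names follow this document; the
+actual names in the kernel source may differ, and all unrelated fields are
+left out):
+
+struct task_struct {
+	/* many unrelated fields left out */
+
+	spinlock_t		pi_lock;	/* protects the fields below; may
+						   be taken in interrupt context */
+	struct plist_head	pi_list;	/* top waiters of all mutexes that
+						   this task owns */
+	struct rt_mutex_waiter	*pi_blocked_on;	/* our waiter struct, if this task
+						   is blocked on a mutex */
+};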
+
+
+Depth of the PI Chain
+---------------------
+
+The maximum depth of the PI chain is not dynamic, and could actually be
+defined. But it is very complex to figure out, since it depends on all
+the nesting of mutexes. Let's look at the example where we have 3 mutexes,
+L1, L2, and L3, and four separate functions func1, func2, func3 and func4.
+The following shows a locking order of L1->L2->L3, but may not actually
+be directly nested that way.
+
+void func1(void)
+{
+ mutex_lock(L1);
+
+ /* do anything */
+
+ mutex_unlock(L1);
+}
+
+void func2(void)
+{
+ mutex_lock(L1);
+ mutex_lock(L2);
+
+ /* do something */
+
+ mutex_unlock(L2);
+ mutex_unlock(L1);
+}
+
+void func3(void)
+{
+ mutex_lock(L2);
+ mutex_lock(L3);
+
+ /* do something else */
+
+ mutex_unlock(L3);
+ mutex_unlock(L2);
+}
+
+void func4(void)
+{
+ mutex_lock(L3);
+
+ /* do something again */
+
+ mutex_unlock(L3);
+}
+
+Now we add 4 processes that run each of these functions separately.
+Processes A, B, C, and D which run functions func1, func2, func3 and func4
+respectively, and such that D runs first and A last. With D being preempted
+in func4 in the "do something again" area, we end up with the following locking:
+
+D owns L3
+ C blocked on L3
+ C owns L2
+ B blocked on L2
+ B owns L1
+ A blocked on L1
+
+And thus we have the chain A->L1->B->L2->C->L3->D.
+
+This gives us a PI depth of 4 (four processes), but looking at any of the
+functions individually, it seems as though they only have at most a locking
+depth of two. So, although the locking depth is defined at compile time,
+it is still very difficult to determine all the possible depths that can occur.
+
+Now since mutexes can be defined by user-land applications, we don't want a DoS
+type of application that nests a large number of mutexes to create a large
+PI chain, and have the code holding spin locks while looking at a large
+amount of data. So to prevent this, the implementation not only implements
+a maximum lock depth, but also only holds at most two different locks at a
+time, as it walks the PI chain. More about this below.
+
+
+Mutex owner and flags
+---------------------
+
+The mutex structure contains a pointer to the owner of the mutex. If the
+mutex is not owned, this owner is set to NULL. Since all architectures
+have the task structure on at least a four byte alignment (and if this is
+not true, the rtmutex.c code will be broken!), this allows for the two
+least significant bits to be used as flags. This part is also described
+in Documentation/rt-mutex.txt, but will also be briefly described here.
+
+Bit 0 is used as the "Pending Owner" flag. This is described later.
+Bit 1 is used as the "Has Waiters" flag. This is also described later
+ in more detail, but is set whenever there are waiters on a mutex.
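+
+Since the two flag bits live in the low bits of an otherwise aligned owner
+pointer, extracting the real owner and the flags is plain bit masking. A
+small sketch (the macro and helper names here are illustrative, not
+necessarily the ones used in rtmutex.c):
+
+#define RT_MUTEX_PENDING_BIT	0x1UL	/* bit 0: "Pending Owner" */
+#define RT_MUTEX_WAITERS_BIT	0x2UL	/* bit 1: "Has Waiters" */
+#define RT_MUTEX_FLAG_MASK	0x3UL
+
+/* the owner field holds a task pointer with the two low bits used as flags */
+static inline struct task_struct *owner_task(unsigned long owner)
+{
+	return (struct task_struct *)(owner & ~RT_MUTEX_FLAG_MASK);
+}
+
+static inline int owner_is_pending(unsigned long owner)
+{
+	return !!(owner & RT_MUTEX_PENDING_BIT);
+}
+
+static inline int owner_has_waiters(unsigned long owner)
+{
+	return !!(owner & RT_MUTEX_WAITERS_BIT);
+}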
+
+
+cmpxchg Tricks
+--------------
+
+Some architectures implement an atomic cmpxchg (Compare and Exchange). This
+is used (when applicable) to keep the fast path of grabbing and releasing
+mutexes short.
+
+cmpxchg is basically the following function performed atomically:
+
+unsigned long _cmpxchg(unsigned long *A, unsigned long *B, unsigned long *C)
+{
+ unsigned long T = *A;
+ if (*A == *B) {
+ *A = *C;
+ }
+ return T;
+}
+#define cmpxchg(a,b,c) _cmpxchg(&a,&b,&c)
+
+This is really nice to have, since it allows you to only update a variable
+if the variable is what you expect it to be. You know it succeeded if
+the return value (the old value of A) is equal to B.
+
+The macro rt_mutex_cmpxchg is used to try to lock and unlock mutexes. If
+the architecture does not support CMPXCHG, then this macro is simply set
+to fail every time. But if CMPXCHG is supported, then this helps
+enormously in keeping the fast path short.
+
+The use of rt_mutex_cmpxchg with the flags in the owner field helps optimize
+the system for architectures that support it. This will also be explained
+later in this document.
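+
+Building on such a cmpxchg, the lock and unlock fast paths boil down to a
+single compare-and-exchange on the owner field. The following is a sketch
+of the idea, not the exact rtmutex.c code:
+
+/* succeeds only if the owner field still holds the expected old value */
+#define rt_mutex_cmpxchg(l, c, n)	(cmpxchg(&(l)->owner, (c), (n)) == (c))
+
+/* fast lock: grab the mutex only if it currently has no owner at all */
+static inline int fast_trylock(struct rt_mutex *lock)
+{
+	return rt_mutex_cmpxchg(lock, NULL, current);
+}
+
+/* fast unlock: release only if we own it and no flag bits are set,
+   i.e. the owner field is exactly 'current' ("Has Waiters" is clear) */
+static inline int fast_tryunlock(struct rt_mutex *lock)
+{
+	return rt_mutex_cmpxchg(lock, current, NULL);
+}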
+
+
+Priority adjustments
+--------------------
+
+The implementation of the PI code in rtmutex.c has several places where a
+process must adjust its priority. With the help of the pi_list of a
+process, it is rather easy to know what needs to be adjusted.
+
+The functions implementing the task adjustments are rt_mutex_adjust_prio,
+__rt_mutex_adjust_prio (same as the former, but expects the task pi_lock
+to already be taken), rt_mutex_getprio, and rt_mutex_setprio.
+
+rt_mutex_getprio and rt_mutex_setprio are only used in __rt_mutex_adjust_prio.
+
+rt_mutex_getprio returns the priority that the task should have. Either the
+task's own normal priority, or if a process of a higher priority is waiting on
+a mutex owned by the task, then that higher priority should be returned.
+Since the pi_list of a task holds a priority-ordered list of all the top
+waiters of all the mutexes that the task owns, rt_mutex_getprio simply needs
+to compare the top pi waiter to its own normal priority, and return the higher
+priority back.
+
+(Note: if looking at the code, you will notice that the lower number of
+ prio is returned. This is because the prio field in the task structure
+ is an inverse order of the actual priority. So a "prio" of 5 is
+ of higher priority than a "prio" of 10.)
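+
+In code form, rt_mutex_getprio boils down to something like the sketch
+below (the task_has_pi_waiters()/task_top_pi_waiter() helpers and the
+normal_prio field are as used in the kernel of this era; treat this as an
+illustration rather than a verbatim copy). Note the min(): a numerically
+lower prio means a higher priority:
+
+int rt_mutex_getprio(struct task_struct *task)
+{
+	/* nobody waits on any mutex we own: keep our normal priority */
+	if (likely(!task_has_pi_waiters(task)))
+		return task->normal_prio;
+
+	/* otherwise return the "higher" (numerically lower) of the two */
+	return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+		   task->normal_prio);
+}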
+
+__rt_mutex_adjust_prio examines the result of rt_mutex_getprio, and if the
+result does not equal the task's current priority, then rt_mutex_setprio
+is called to adjust the priority of the task to the new priority.
+Note that rt_mutex_setprio is defined in kernel/sched.c to implement the
+actual change in priority.
+
+It is interesting to note that __rt_mutex_adjust_prio can either increase
+or decrease the priority of the task. In the case that a higher priority
+process has just blocked on a mutex owned by the task, __rt_mutex_adjust_prio
+would increase/boost the task's priority. But if a higher priority task
+were for some reason to leave the mutex (timeout or signal), this same function
+would decrease/unboost the priority of the task. That is because the pi_list
+always contains the highest priority task that is waiting on a mutex owned
+by the task, so we only need to compare the priority of that top pi waiter
+to the normal priority of the given task.
+
+
+High level overview of the PI chain walk
+----------------------------------------
+
+The PI chain walk is implemented by the function rt_mutex_adjust_prio_chain.
+
+The implementation has gone through several iterations, and has ended up
+with what we believe is the best. It walks the PI chain by only grabbing
+at most two locks at a time, and is very efficient.
+
+The rt_mutex_adjust_prio_chain can be used either to boost or lower process
+priorities.
+
+rt_mutex_adjust_prio_chain is called with a task to be checked for PI
+(de)boosting (the owner of a mutex that a process is blocking on), a flag to
+check for deadlocking, the mutex that the task owns, and a pointer to a waiter
+that is the process's waiter struct that is blocked on the mutex (although this
+parameter may be NULL for deboosting).
+
+For this explanation, I will not mention deadlock detection. This explanation
+will try to stay at a high level.
+
+When this function is called, there are no locks held. That also means
+that the state of the owner and lock can change while we are in this function.
+
+Before this function is called, the task has already had rt_mutex_adjust_prio
+performed on it. This means that the task is set to the priority that it
+should be at, but the plist nodes of the task's waiter have not been updated
+with the new priorities, and that this task may not be in the proper locations
+in the pi_lists and wait_lists that the task is blocked on. This function
+solves all that.
+
+A loop is entered, where task is the owner to be checked for PI changes that
+was passed by parameter (for the first iteration). The pi_lock of this task is
+taken to prevent any more changes to the pi_list of the task. This also
+prevents new tasks from completing the blocking on a mutex that is owned by this
+task.
+
+If the task is not blocked on a mutex then the loop is exited. We are at
+the top of the PI chain.
+
+A check is now done to see if the original waiter (the process that is blocked
+on the current mutex) is the top pi waiter of the task. That is, is this
+waiter on the top of the task's pi_list. If it is not, it either means that
+there is another process higher in priority that is blocked on one of the
+mutexes that the task owns, or that the waiter has just woken up via a signal
+or timeout and has left the PI chain. In either case, the loop is exited, since
+we don't need to do any more changes to the priority of the current task, or any
+task that owns a mutex that this current task is waiting on. A priority chain
+walk is only needed when a new top pi waiter is made to a task.
+
+The next check sees if the task's waiter plist node has the priority equal to
+the priority the task is set at. If they are equal, then we are done with
+the loop. Remember that the function started with the priority of the
+task adjusted, but the plist nodes that hold the task in other processes'
+pi_lists have not been adjusted.
+
+Next, we look at the mutex that the task is blocked on. The mutex's wait_lock
+is taken. This is done by a spin_trylock, because the locking order of the
+pi_lock and wait_lock goes in the opposite direction. If we fail to grab the
+lock, the pi_lock is released, and we restart the loop.
+
+Now that we have both the pi_lock of the task as well as the wait_lock of
+the mutex the task is blocked on, we update the task's waiter's plist node
+that is located on the mutex's wait_list.
+
+Now we release the pi_lock of the task.
+
+Next the owner of the mutex has its pi_lock taken, so we can update the
+task's entry in the owner's pi_list. If the task is the highest priority
+process on the mutex's wait_list, then we remove the previous top waiter
+from the owner's pi_list, and replace it with the task.
+
+Note: It is possible that the task was the current top waiter on the mutex,
+ in which case the task is not yet on the pi_list of the waiter. This
+ is OK, since plist_del does nothing if the plist node is not on any
+ list.
+
+If the task was not the top waiter of the mutex, but it was before we
+did the priority updates, that means we are deboosting/lowering the
+task. In this case, the task is removed from the pi_list of the owner,
+and the new top waiter is added.
+
+Lastly, we unlock both the pi_lock of the task, as well as the mutex's
+wait_lock, and continue the loop again. On the next iteration of the
+loop, the previous owner of the mutex will be the task that will be
+processed.
+
+Note: One might think that the owner of this mutex might have changed
+ since we just grabbed the mutex's wait_lock. And one could be right.
+ The important thing to remember is that the owner could not have
+ become the task that is being processed in the PI chain, since
+ we have taken that task's pi_lock at the beginning of the loop.
+ So as long as there is an owner of this mutex that is not the same
+ process as the task being worked on, we are OK.
+
+ Looking closely at the code, one might be confused. The check for the
+ end of the PI chain is when the task isn't blocked on anything or the
+ task's waiter structure "task" element is NULL. This check is
+ protected only by the task's pi_lock. But the code to unlock the mutex
+ sets the task's waiter structure "task" element to NULL with only
+ the protection of the mutex's wait_lock, which was not taken yet.
+ Isn't this a race condition if the task becomes the new owner?
+
+ The answer is No! The trick is the spin_trylock of the mutex's
+ wait_lock. If we fail that lock, we release the pi_lock of the
+ task and continue the loop, doing the end of PI chain check again.
+
+ In the code to release the lock, the wait_lock of the mutex is held
+ the entire time, and it is not let go when we grab the pi_lock of the
+ new owner of the mutex. So if the switch of a new owner were to happen
+ after the check for end of the PI chain and the grabbing of the
+ wait_lock, the unlocking code would spin on the new owner's pi_lock
+ but never give up the wait_lock. So the PI chain loop is guaranteed to
+ fail the spin_trylock on the wait_lock, release the pi_lock, and
+ try again.
+
+ If you don't quite understand the above, that's OK. You don't have to,
+ unless you really want to make a proof out of it ;)
+
+
+Pending Owners and Lock stealing
+--------------------------------
+
+One of the flags in the owner field of the mutex structure is "Pending Owner".
+What this means is that an owner was chosen by the process releasing the
+mutex, but that owner has yet to wake up and actually take the mutex.
+
+Why is this important? Why can't we just give the mutex to another process
+and be done with it?
+
+The PI code is to help with real-time processes, and to let the highest
+priority process run as long as possible with little latencies and delays.
+If a high priority process owns a mutex that a lower priority process is
+blocked on, when the mutex is released it would be given to the lower priority
+process. What if the higher priority process wants to take that mutex again?
+The high priority process would fail to take the mutex that it just gave up,
+and it would need to boost the lower priority process and wait for the full
+latency of that critical section (since the low priority process just entered
+it).
+
+There's no reason a high priority process that gives up a mutex should be
+penalized if it tries to take that mutex again. If the new owner of the
+mutex has not woken up yet, there's no reason that the higher priority process
+could not take that mutex away.
+
+To solve this, we introduced Pending Ownership and Lock Stealing. When a
+new process is given a mutex that it was blocked on, it is only given
+pending ownership. This means that it's the new owner, unless a higher
+priority process comes in and tries to grab that mutex. If a higher priority
+process does come along and wants that mutex, we let the higher priority
+process "steal" the mutex from the pending owner (only if it is still pending)
+and continue with the mutex.
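+
+The stealing rule can be summarized with a small sketch that reuses the
+owner-field helpers from the "Mutex owner and flags" section above (again
+an illustration, not the actual rtmutex.c code; remember that a lower prio
+number means a higher priority):
+
+static int can_take_or_steal(unsigned long owner, struct task_struct *me)
+{
+	if (!owner_is_pending(owner))		/* a real owner: cannot steal */
+		return 0;
+	if (owner_task(owner) == me)		/* we are the pending owner */
+		return 1;
+	/* steal only if we are of strictly higher priority */
+	return me->prio < owner_task(owner)->prio;
+}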
+
+
+Taking of a mutex (The walk through)
+------------------------------------
+
+OK, now let's take a look at the detailed walk through of what happens when
+taking a mutex.
+
+The first thing that is tried is the fast taking of the mutex. This is
+done when we have CMPXCHG enabled (otherwise the fast taking automatically
+fails). Only when the owner field of the mutex is NULL can the lock be
+taken with the CMPXCHG and nothing else needs to be done.
+
+If there is contention on the lock, whether it is owned or has a pending
+owner, we take the slow path (rt_mutex_slowlock).
+
+The slow path function is where the task's waiter structure is created on
+the stack. This is because the waiter structure is only needed for the
+scope of this function. The waiter structure holds the nodes to store
+the task on the wait_list of the mutex, and if need be, the pi_list of
+the owner.
+
+The wait_lock of the mutex is taken since the slow path of unlocking the
+mutex also takes this lock.
+
+We then call try_to_take_rt_mutex. This is where the architecture that
+does not implement CMPXCHG would always grab the lock (if there's no
+contention).
+
+try_to_take_rt_mutex is used every time the task tries to grab a mutex in the
+slow path. The first thing that is done here is an atomic setting of
+the "Has Waiters" flag of the mutex's owner field. Yes, this could really
+be false, because if the mutex has no owner, there are no waiters and
+the current task also won't have any waiters. But we don't have the lock
+yet, so we assume we are going to be a waiter. The reason for this is to
+play nice for those architectures that do have CMPXCHG. By setting this flag
+now, the owner of the mutex can't release the mutex without going into the
+slow unlock path, and it would then need to grab the wait_lock, which this
+code currently holds. So setting the "Has Waiters" flag forces the owner
+to synchronize with this code.
+
+Now that we know that we can't have any races with the owner releasing the
+mutex, we check to see if we can take the ownership. This is done if the
+mutex doesn't have an owner, or if we can steal the mutex from a pending
+owner. Let's look at the situations we have here.
+
+ 1) Has owner that is pending
+ ----------------------------
+
+ The mutex has an owner, but it hasn't woken up and the mutex flag
+ "Pending Owner" is set. The first check is to see if the owner isn't the
+ current task. This is because this function is also used for the pending
+ owner to grab the mutex. When a pending owner wakes up, it checks to see
+ if it can take the mutex, and this is done if the owner is already set to
+ itself. If so, we succeed and leave the function, clearing the "Pending
+ Owner" bit.
+
+ If the pending owner is not current, we check to see if the current priority is
+ higher than that of the pending owner. If not, we fail the function and return.
+
+ There's also something special about a pending owner: a pending owner
+ is never blocked on a mutex. So there is no PI chain to worry about. It also
+ means that if the mutex doesn't have any waiters, there's no accounting needed
+ to update the pending owner's pi_list, since we only worry about processes
+ blocked on the current mutex.
+
+ If there are waiters on this mutex, and we just stole the ownership, we need
+ to take the top waiter, remove it from the pi_list of the pending owner, and
+ add it to the current pi_list. Note that at this moment, the pending owner
+ is no longer on the list of waiters. This is fine, since the pending owner
+ would add itself back when it realizes that it had the ownership stolen
+ from itself. When the pending owner tries to grab the mutex, it will fail
+ in try_to_take_rt_mutex if the owner field points to another process.
+
+ 2) No owner
+ -----------
+
+ If there is no owner (or we successfully stole the lock), we set the owner
+ of the mutex to current, and set the flag of "Has Waiters" if the current
+ mutex actually has waiters, or we clear the flag if it doesn't. See, it was
+ OK that we set that flag early, since now it is cleared.
+
+ 3) Failed to grab ownership
+ ---------------------------
+
+ The most interesting case is when we fail to take ownership. This means that
+ there exists an owner, or there's a pending owner with equal or higher
+ priority than the current task.
+
+We'll continue on the failed case.
+
+If the mutex has a timeout, we set up a timer to go off to break us out
+of this mutex if we failed to get it after a specified amount of time.
+
+Now we enter a loop that will continue to try to take ownership of the mutex, or
+fail from a timeout or signal.
+
+Once again we try to take the mutex. This will usually fail the first time
+in the loop, since it had just failed to get the mutex. But the second time
+in the loop, this would likely succeed, since the task would likely be
+the pending owner.
+
+If the task's state is TASK_INTERRUPTIBLE, a check for signals and timeout
+is done here.
+
+The waiter structure has a "task" field that points to the task that is blocked
+on the mutex. This field can be NULL the first time it goes through the loop
+or if the task is a pending owner and had its mutex stolen. If the "task"
+field is NULL then we need to set up the accounting for it.
+
+Task blocks on mutex
+--------------------
+
+The accounting of a mutex and process is done with the waiter structure of
+the process. The "task" field is set to the process, and the "lock" field
+to the mutex. The plist nodes are initialized to the process's current
+priority.
+
+Since the wait_lock was taken at the entry of the slow lock, we can safely
+add the waiter to the wait_list. If the current process is the highest
+priority process currently waiting on this mutex, then we remove the
+previous top waiter process (if it exists) from the pi_list of the owner,
+and add the current process to that list. Since the pi_list of the owner
+has changed, we call rt_mutex_adjust_prio on the owner to see if the owner
+should adjust its priority accordingly.
+
+If the owner is also blocked on a lock, and had its pi_list changed
+(or deadlock checking is on), we unlock the wait_lock of the mutex and go ahead
+and run rt_mutex_adjust_prio_chain on the owner, as described earlier.
+
+Now all locks are released, and if the current process is still blocked on a
+mutex (waiter "task" field is not NULL), then we go to sleep (call schedule).
+
+Waking up in the loop
+---------------------
+
+The task can then wake up from schedule() for a few reasons:
+ 1) we were given pending ownership of the mutex.
+ 2) we received a signal and were TASK_INTERRUPTIBLE
+ 3) we had a timeout and were TASK_INTERRUPTIBLE
+
+In any of these cases, we continue the loop and once again try to grab the
+ownership of the mutex. If we succeed, we exit the loop. On a signal or
+timeout we will also exit the loop; but if the mutex was merely stolen from
+us, we simply add ourselves back on the lists and go back to sleep.
+
+Note: For various reasons, because of timeout and signals, the steal mutex
+ algorithm needs to be careful. This is because the current process is
+ still on the wait_list. And because of dynamic changing of priorities,
+ especially on SCHED_OTHER tasks, the current process can be the
+ highest priority task on the wait_list.
+
+Failed to get mutex on Timeout or Signal
+----------------------------------------
+
+If a timeout or signal occurred, the waiter's "task" field will not be
+NULL and the task needs to be taken off the wait_list of the mutex and
+perhaps off the pi_list of the owner. If this process was a high priority
+process, then rt_mutex_adjust_prio_chain needs to be executed again on the
+owner, but this time it will be lowering the priorities.
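+
+Roughly, this cleanup looks as follows (a simplified sketch of what
+remove_waiter() does; pi_lock handling and the chain walk are omitted):
+
+  was_top = (waiter == rt_mutex_top_waiter(lock));
+
+  plist_del(&waiter->list_entry, &lock->wait_list);
+  waiter->task = NULL;
+  current->pi_blocked_on = NULL;
+
+  if (was_top && owner != current) {
+      /* we were the top waiter: fix up the owner's pi_list */
+      plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+      if (rt_mutex_has_waiters(lock))
+          plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry,
+                    &owner->pi_waiters);
+      rt_mutex_adjust_prio(owner);
+      /* rt_mutex_adjust_prio_chain may then walk down the chain to
+       * lower priorities further, as described above */
+  }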
+
+
+Unlocking the Mutex
+-------------------
+
+The unlocking of a mutex also has a fast path for those architectures with
+CMPXCHG. Since the taking of a mutex on contention always sets the
+"Has Waiters" flag of the mutex's owner field, we use this to know if we need
+to take the slow path when unlocking the mutex. If the mutex doesn't have any
+waiters, the owner field of the mutex would equal the current process and
+the mutex can be unlocked by just replacing the owner field with NULL.
+
+If the owner field has the "Has Waiters" bit set (or CMPXCHG is not available),
+the slow unlock path is taken.
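+
+Expressed as code, the fast/slow decision is essentially one compare-and-swap
+on the owner field (a simplified sketch; the real code goes through the
+rt_mutex_cmpxchg()/rt_mutex_fastunlock() helpers):
+
+  /* fast path: only succeeds if owner == current with no flag bits set */
+  if (cmpxchg(&lock->owner, current, NULL) == current)
+      return;                 /* no waiters, the mutex is now free */
+
+  /* "Has Waiters" was set (or the arch has no CMPXCHG): slow path */
+  rt_mutex_slowunlock(lock);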
+
+The first thing done in the slow unlock path is to take the wait_lock of the
+mutex. This synchronizes the locking and unlocking of the mutex.
+
+A check is made to see if the mutex has waiters or not. On architectures that
+do not have CMPXCHG, this is the location that the owner of the mutex will
+determine if a waiter needs to be awoken or not. On architectures that
+do have CMPXCHG, that check is done in the fast path, but it is still needed
+in the slow path too. If a waiter of a mutex woke up because of a signal
+or timeout between the time the owner failed the fast path CMPXCHG check and
+the grabbing of the wait_lock, the mutex may not have any waiters, thus the
+owner still needs to make this check. If there are no waiters then the mutex
+owner field is set to NULL, the wait_lock is released, and nothing more is
+needed.
+
+If there are waiters, then we need to wake one up and give that waiter
+pending ownership.
+
+In the wake up code, the pi_lock of the current owner is taken. The top
+waiter of the lock is found and removed from the wait_list of the mutex
+as well as the pi_list of the current owner. The task field of the new
+pending owner's waiter structure is set to NULL, and the owner field of the
+mutex is set to the new owner with the "Pending Owner" bit set, as well
+as the "Has Waiters" bit if there still are other processes blocked on the
+mutex.
+
+The pi_lock of the previous owner is released, and the new pending owner's
+pi_lock is taken. Remember that this is the trick to prevent the race
+condition in rt_mutex_adjust_prio_chain from adding itself as a waiter
+on the mutex.
+
+We now clear the "pi_blocked_on" field of the new pending owner, and if
+the mutex still has waiters pending, we add the new top waiter to the pi_list
+of the pending owner.
+
+Finally we unlock the pi_lock of the pending owner and wake it up.
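+
+Putting the above together, the hand-over looks roughly like this (a
+simplified sketch of wakeup_next_waiter(); the pi_locks are really taken
+with interrupts disabled):
+
+  spin_lock(&current->pi_lock);
+
+  waiter = rt_mutex_top_waiter(lock);
+  pendowner = waiter->task;
+
+  plist_del(&waiter->list_entry, &lock->wait_list);
+  plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+  waiter->task = NULL;
+
+  /* sets the "Pending Owner" bit, and keeps "Has Waiters" set if the
+   * wait_list is still non-empty */
+  rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
+
+  spin_unlock(&current->pi_lock);
+
+  /* taking the new owner's pi_lock closes the race with
+   * rt_mutex_adjust_prio_chain mentioned above */
+  spin_lock(&pendowner->pi_lock);
+  pendowner->pi_blocked_on = NULL;
+  if (rt_mutex_has_waiters(lock))
+      plist_add(&rt_mutex_top_waiter(lock)->pi_list_entry,
+                &pendowner->pi_waiters);
+  spin_unlock(&pendowner->pi_lock);
+
+  wake_up_process(pendowner);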
+
+
+Contact
+-------
+
+For updates on this document, please email Steven Rostedt <rostedt@goodmis.org>
+
+
+Credits
+-------
+
+Author: Steven Rostedt <rostedt@goodmis.org>
+
+Reviewers: Ingo Molnar, Thomas Gleixner, Thomas Duetsch, and Randy Dunlap
+
+Updates
+-------
+
+This document was originally written for 2.6.17-rc3-mm1
diff --git a/Documentation/rt-mutex.txt b/Documentation/rt-mutex.txt
new file mode 100644
index 00000000000000..243393d882ee7e
--- /dev/null
+++ b/Documentation/rt-mutex.txt
@@ -0,0 +1,79 @@
+RT-mutex subsystem with PI support
+----------------------------------
+
+RT-mutexes with priority inheritance are used to support PI-futexes,
+which enable pthread_mutex_t priority inheritance attributes
+(PTHREAD_PRIO_INHERIT). [See Documentation/pi-futex.txt for more details
+about PI-futexes.]
+
+This technology was developed in the -rt tree and streamlined for
+pthread_mutex support.
+
+Basic principles:
+-----------------
+
+RT-mutexes extend the semantics of simple mutexes by the priority
+inheritance protocol.
+
+A low priority owner of a rt-mutex inherits the priority of a higher
+priority waiter until the rt-mutex is released. If the temporarily
+boosted owner blocks on a rt-mutex itself it propagates the priority
+boosting to the owner of the other rt_mutex it gets blocked on. The
+priority boosting is immediately removed once the rt_mutex has been
+unlocked.
+
+This approach allows us to shorten the time high-prio tasks are blocked
+on mutexes which protect shared resources. Priority inheritance is not a
+magic bullet for poorly designed applications, but it allows
+well-designed applications to use userspace locks in critical parts of
+a high-priority thread, without losing determinism.
+
+The enqueueing of the waiters into the rtmutex waiter list is done in
+priority order. For same priorities FIFO order is chosen. For each
+rtmutex, only the top priority waiter is enqueued into the owner's
+priority waiters list. This list too queues in priority order. Whenever
+the top priority waiter of a task changes (for example it timed out or
+got a signal), the priority of the owner task is readjusted. [The
+priority enqueueing is handled by "plists", see include/linux/plist.h
+for more details.]
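+
+The readjustment itself is simple: a task runs at the higher of its own
+normal priority and the priority of the top entry on its priority waiters
+list. Roughly (a simplified sketch of rt_mutex_getprio() in
+kernel/rtmutex.c):
+
+  int rt_mutex_getprio(struct task_struct *task)
+  {
+      if (likely(!task_has_pi_waiters(task)))
+          return task->normal_prio;
+
+      /* a lower prio value means a higher priority */
+      return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+                 task->normal_prio);
+  }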
+
+RT-mutexes are optimized for fastpath operations and have no internal
+locking overhead when locking an uncontended mutex or unlocking a mutex
+without waiters. The optimized fastpath operations require cmpxchg
+support. [If that is not available then the rt-mutex internal spinlock
+is used]
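+
+The lock-side fast path is, in essence, a single cmpxchg on the owner field
+(a simplified sketch; the real code wraps this in the rt_mutex_fastlock()/
+rt_mutex_cmpxchg() helpers):
+
+  /* uncontended: owner is NULL, take the mutex with one atomic operation */
+  if (likely(cmpxchg(&lock->owner, NULL, current) == NULL))
+      return 0;
+
+  /* otherwise fall back to the slow path, which uses lock->wait_lock */
+  return rt_mutex_slowlock(lock, TASK_UNINTERRUPTIBLE, NULL, 0);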
+
+The state of the rt-mutex is tracked via the owner field of the rt-mutex
+structure:
+
+rt_mutex->owner holds the task_struct pointer of the owner. Bits 0 and 1
+are used to keep track of the "owner is pending" and "rtmutex has
+waiters" state.
+
+ owner        bit1    bit0
+ NULL         0       0       mutex is free (fast acquire possible)
+ NULL         0       1       invalid state
+ NULL         1       0       Transitional state*
+ NULL         1       1       invalid state
+ taskpointer  0       0       mutex is held (fast release possible)
+ taskpointer  0       1       task is pending owner
+ taskpointer  1       0       mutex is held and has waiters
+ taskpointer  1       1       task is pending owner and mutex has waiters
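+
+In the code this encoding is handled with bit masks on the owner pointer,
+along the lines of (a simplified sketch of the helpers in
+kernel/rtmutex_common.h):
+
+  #define RT_MUTEX_OWNER_PENDING  1UL     /* bit 0 */
+  #define RT_MUTEX_HAS_WAITERS    2UL     /* bit 1 */
+  #define RT_MUTEX_OWNER_MASKALL  3UL
+
+  static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+  {
+      return (struct task_struct *)
+          ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+  }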
+
+Pending-ownership handling is a performance optimization:
+pending-ownership is assigned to the first (highest priority) waiter of
+the mutex, when the mutex is released. The thread is woken up and once
+it starts executing it can acquire the mutex. Until the mutex is taken
+by it (bit 0 is cleared) a competing higher priority thread can "steal"
+the mutex which puts the woken up thread back on the waiters list.
+
+The pending-ownership optimization is especially important for the
+uninterrupted workflow of high-prio tasks which repeatedly
+take/release locks that have lower-prio waiters. Without this
+optimization the higher-prio thread would ping-pong to the lower-prio
+task [because at unlock time we always assign a new owner].
+
+(*) The "mutex has waiters" bit gets set to take the lock. If the lock
+doesn't already have an owner, this bit is quickly cleared if there are
+no waiters. So this is a transitional state used to synchronize between
+looking at the owner field of the mutex and the mutex owner releasing
+the lock.
diff --git a/Documentation/video4linux/README.pvrusb2 b/Documentation/video4linux/README.pvrusb2
new file mode 100644
index 00000000000000..c73a32c34528d8
--- /dev/null
+++ b/Documentation/video4linux/README.pvrusb2
@@ -0,0 +1,212 @@
+
+$Id$
+Mike Isely <isely@pobox.com>
+
+ pvrusb2 driver
+
+Background:
+
+ This driver is intended for the "Hauppauge WinTV PVR USB 2.0", which
+ is a USB 2.0 hosted TV Tuner. This driver is a work in progress.
+ Its history started with the reverse-engineering effort by Björn
+ Danielsson <pvrusb2@dax.nu> whose web page can be found here:
+
+ http://pvrusb2.dax.nu/
+
+ From there Aurelien Alleaume <slts@free.fr> began an effort to
+ create a video4linux compatible driver. I began with Aurelien's
+ last known snapshot and evolved the driver to the state it is in
+ here.
+
+ More information on this driver can be found at:
+
+ http://www.isely.net/pvrusb2.html
+
+
+ This driver has a strong separation of layers. They are very
+ roughly:
+
+ 1a. Low level wire-protocol implementation with the device.
+
+ 1b. I2C adaptor implementation and corresponding I2C client drivers
+ implemented elsewhere in V4L.
+
+ 1c. High level hardware driver implementation which coordinates all
+ activities that ensure correct operation of the device.
+
+ 2. A "context" layer which manages instancing of driver, setup,
+ tear-down, arbitration, and interaction with high level
+ interfaces appropriately as devices are hotplugged in the
+ system.
+
+ 3. High level interfaces which glue the driver to various published
+ Linux APIs (V4L, sysfs, maybe DVB in the future).
+
+ The most important shearing layer is between the top 2 layers. A
+ lot of work went into the driver to ensure that any kind of
+ conceivable API can be laid on top of the core driver. (Yes, the
+ driver internally leverages V4L to do its work but that really has
+ nothing to do with the API published by the driver to the outside
+ world.) The architecture allows for different APIs to
+ simultaneously access the driver. I have a strong sense of fairness
+ about APIs and also feel that it is a good design principle to keep
+ implementation and interface isolated from each other. Thus while
+ right now the V4L high level interface is the most complete, the
+ sysfs high level interface will work equally well for similar
+ functions, and there's no reason I see right now why it shouldn't be
+ possible to produce a DVB high level interface that can sit right
+ alongside V4L.
+
+ NOTE: Complete documentation on the pvrusb2 driver is contained in
+ the html files within the doc directory; these are exactly the same
+ as what is on the web site at the time. Browse those files
+ (especially the FAQ) before asking questions.
+
+
+Building
+
+ Building these modules essentially amounts to just running "make",
+ but you need the kernel source tree nearby and you will likely also
+ want to set a few controlling environment variables first in order
+ to link things up with that source tree. Please see the Makefile
+ here for comments that explain how to do that.
+
+
+Source file list / functional overview:
+
+ (Note: The term "module" used below generally refers to loosely
+ defined functional units within the pvrusb2 driver and bears no
+ relation to the Linux kernel's concept of a loadable module.)
+
+ pvrusb2-audio.[ch] - This is glue logic that resides between this
+ driver and the msp3400.ko I2C client driver (which is found
+ elsewhere in V4L).
+
+ pvrusb2-context.[ch] - This module implements the context for an
+ instance of the driver. Everything else eventually ties back to
+ or is otherwise instanced within the data structures implemented
+ here. Hotplugging is ultimately coordinated here. All high level
+ interfaces tie into the driver through this module. This module
+ helps arbitrate each interface's access to the actual driver core,
+ and is designed to allow concurrent access through multiple
+ instances of multiple interfaces (thus you can for example change
+ the tuner's frequency through sysfs while simultaneously streaming
+ video through V4L out to an instance of mplayer).
+
+ pvrusb2-debug.h - This header defines a printk() wrapper and a mask
+ of debugging bit definitions for the various kinds of debug
+ messages that can be enabled within the driver.
+
+ pvrusb2-debugifc.[ch] - This module implements a crude command line
+ oriented debug interface into the driver. Aside from being part
+ of the process for implementing manual firmware extraction (see
+ the pvrusb2 web site mentioned earlier), probably I'm the only one
+ who has ever used this. It is mainly a debugging aid.
+
+ pvrusb2-eeprom.[ch] - This is glue logic that resides between this
+ driver and the tveeprom.ko module, which is itself implemented
+ elsewhere in V4L.
+
+ pvrusb2-encoder.[ch] - This module implements all protocol needed to
+ interact with the Conexant mpeg2 encoder chip within the pvrusb2
+ device. It is a crude echo of corresponding logic in ivtv,
+ however the design goals (strict isolation) and physical layer
+ (proxy through USB instead of PCI) are different enough that this
+ implementation had to be completely different.
+
+ pvrusb2-hdw-internal.h - This header defines the core data structure
+ in the driver used to track ALL internal state related to control
+ of the hardware. Nobody outside of the core hardware-handling
+ modules should have any business using this header. All external
+ access to the driver should be through one of the high level
+ interfaces (e.g. V4L, sysfs, etc), and in fact even those high
+ level interfaces are restricted to the API defined in
+ pvrusb2-hdw.h and NOT this header.
+
+ pvrusb2-hdw.h - This header defines the full internal API for
+ controlling the hardware. High level interfaces (e.g. V4L, sysfs)
+ will work through here.
+
+ pvrusb2-hdw.c - This module implements all the various bits of logic
+ that handle overall control of a specific pvrusb2 device.
+ (Policy, instantiation, and arbitration of pvrusb2 devices fall
+ within the jurisdiction of pvrusb2-context, not here).
+
+ pvrusb2-i2c-chips-*.c - These modules implement the glue logic to
+ tie together and configure various I2C modules as they attach to
+ the I2C bus. There are two versions of this file. The "v4l2"
+ version is intended to be used in-tree alongside V4L, where we
+ implement just the logic that makes sense for a pure V4L
+ environment. The "all" version is intended for use outside of
+ V4L, where we might encounter other possibly "challenging" modules
+ from ivtv or older kernel snapshots (or even the support modules
+ in the standalone snapshot).
+
+ pvrusb2-i2c-cmd-v4l1.[ch] - This module implements generic V4L1
+ compatible commands to the I2C modules. It is here where state
+ changes inside the pvrusb2 driver are translated into V4L1
+ commands that are in turn sent to the various I2C modules.
+
+ pvrusb2-i2c-cmd-v4l2.[ch] - This module implements generic V4L2
+ compatible commands to the I2C modules. It is here where state
+ changes inside the pvrusb2 driver are translated into V4L2
+ commands that are in turn sent to the various I2C modules.
+
+ pvrusb2-i2c-core.[ch] - This module provides an implementation of a
+ kernel-friendly I2C adaptor driver, through which other external
+ I2C client drivers (e.g. msp3400, tuner, lirc) may connect and
+ operate corresponding chips within the pvrusb2 device. It is
+ through here that other V4L modules can reach into this driver to
+ operate specific pieces (and those modules are in turn driven by
+ glue logic which is coordinated by pvrusb2-hdw, doled out by
+ pvrusb2-context, and then ultimately made available to users
+ through one of the high level interfaces).
+
+ pvrusb2-io.[ch] - This module implements a very low level ring of
+ transfer buffers, required in order to stream data from the
+ device. This module is *very* low level. It only operates the
+ buffers and makes no attempt to define any policy or mechanism for
+ how such buffers might be used.
+
+ pvrusb2-ioread.[ch] - This module layers on top of pvrusb2-io.[ch]
+ to provide a streaming API usable by a read() system call style of
+ I/O. Right now this is the only layer on top of pvrusb2-io.[ch],
+ however the underlying architecture here was intended to allow for
+ other styles of I/O to be implemented with additional modules, like
+ mmap()'ed buffers or something even more exotic.
+
+ pvrusb2-main.c - This is the top level of the driver. Module level
+ and USB core entry points are here. This is our "main".
+
+ pvrusb2-sysfs.[ch] - This is the high level interface which ties the
+ pvrusb2 driver into sysfs. Through this interface you can do
+ everything with the driver except actually stream data.
+
+ pvrusb2-tuner.[ch] - This is glue logic that resides between this
+ driver and the tuner.ko I2C client driver (which is found
+ elsewhere in V4L).
+
+ pvrusb2-util.h - This header defines some common macros used
+ throughout the driver. These macros are not really specific to
+ the driver, but they had to go somewhere.
+
+ pvrusb2-v4l2.[ch] - This is the high level interface which ties the
+ pvrusb2 driver into video4linux. It is through here that V4L
+ applications can open and operate the driver in the usual V4L
+ ways. Note that **ALL** V4L functionality is published only
+ through here and nowhere else.
+
+ pvrusb2-video-*.[ch] - This is glue logic that resides between this
+ driver and the saa711x.ko I2C client driver (which is found
+ elsewhere in V4L). Note that saa711x.ko used to be known as
+ saa7115.ko in ivtv. There are two versions of this; one is
+ selected depending on the particular saa711[5x].ko that is found.
+
+ pvrusb2.h - This header contains compile time tunable parameters
+ (and at the moment the driver has very little that needs to be
+ tuned).
+
+
+ -Mike Isely
+ isely@pobox.com
+
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
index 558b8336855939..254c507a608c07 100644
--- a/arch/alpha/kernel/setup.c
+++ b/arch/alpha/kernel/setup.c
@@ -481,7 +481,7 @@ register_cpus(void)
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return -ENOMEM;
- register_cpu(p, i, NULL);
+ register_cpu(p, i);
}
return 0;
}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 9fc9af88c60c72..093ccba0503c9a 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -808,7 +808,7 @@ static int __init topology_init(void)
int cpu;
for_each_possible_cpu(cpu)
- register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu, NULL);
+ register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
return 0;
}
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index 47c08bcd9b24f3..3bb221db164a65 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -233,7 +233,7 @@ config NR_CPUS
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
- depends on SMP
+ depends on X86_HT
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with Intel Pentium 4 chips with HyperThreading at a
@@ -242,7 +242,7 @@ config SCHED_SMT
config SCHED_MC
bool "Multi-core scheduler support"
- depends on SMP
+ depends on X86_HT
default y
help
Multi-core scheduler support improves the CPU scheduler's decision
@@ -780,6 +780,17 @@ config HOTPLUG_CPU
enable suspend on SMP systems. CPUs can be controlled through
/sys/devices/system/cpu.
+config COMPAT_VDSO
+ bool "Compat VDSO support"
+ default y
+ help
+ Map the VDSO to the predictable old-style address too.
+
+ Say N here if you are running a sufficiently recent glibc
+ version (2.3.3 or later), to remove the high-mapped
+ VDSO mapping and to exclusively use the randomized VDSO.
+
+ If unsure, say Y.
endmenu
diff --git a/arch/i386/kernel/asm-offsets.c b/arch/i386/kernel/asm-offsets.c
index 1c3a809e642172..c80271f8f084c0 100644
--- a/arch/i386/kernel/asm-offsets.c
+++ b/arch/i386/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
+#include <asm/elf.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
@@ -54,6 +55,7 @@ void foo(void)
OFFSET(TI_preempt_count, thread_info, preempt_count);
OFFSET(TI_addr_limit, thread_info, addr_limit);
OFFSET(TI_restart_block, thread_info, restart_block);
+ OFFSET(TI_sysenter_return, thread_info, sysenter_return);
BLANK();
OFFSET(EXEC_DOMAIN_handler, exec_domain, handler);
@@ -69,7 +71,7 @@ void foo(void)
sizeof(struct tss_struct));
DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
- DEFINE(VSYSCALL_BASE, __fix_to_virt(FIX_VSYSCALL));
+ DEFINE(VDSO_PRELINK, VDSO_PRELINK);
OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
}
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index fd0457c9c827f4..e6a2d6b80cdae8 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -235,10 +235,10 @@ static void __init init_amd(struct cpuinfo_x86 *c)
while ((1 << bits) < c->x86_max_cores)
bits++;
}
- cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
- phys_proc_id[cpu] >>= bits;
+ c->cpu_core_id = c->phys_proc_id & ((1<<bits)-1);
+ c->phys_proc_id >>= bits;
printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
- cpu, c->x86_max_cores, cpu_core_id[cpu]);
+ cpu, c->x86_max_cores, c->cpu_core_id);
}
#endif
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 44f2c5f2dda16a..70c87de582c7a7 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -294,7 +294,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
c->x86_mask = tfms & 15;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
#else
c->apicid = (ebx >> 24) & 0xFF;
@@ -319,7 +319,7 @@ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
early_intel_workaround(c);
#ifdef CONFIG_X86_HT
- phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
+ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}
@@ -477,11 +477,9 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
int index_msb, core_bits;
- int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
-
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
@@ -492,16 +490,17 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
} else if (smp_num_siblings > 1 ) {
if (smp_num_siblings > NR_CPUS) {
- printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
+ printk(KERN_WARNING "CPU: Unsupported number of the "
+ "siblings %d", smp_num_siblings);
smp_num_siblings = 1;
return;
}
index_msb = get_count_order(smp_num_siblings);
- phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ c->phys_proc_id);
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
@@ -509,12 +508,12 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
core_bits = get_count_order(c->x86_max_cores);
- cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
- cpu_core_id[cpu]);
+ c->cpu_core_id);
}
}
#endif
@@ -613,6 +612,12 @@ void __cpuinit cpu_init(void)
set_in_cr4(X86_CR4_TSD);
}
+ /* The CPU hotplug case */
+ if (cpu_gdt_descr->address) {
+ gdt = (struct desc_struct *)cpu_gdt_descr->address;
+ memset(gdt, 0, PAGE_SIZE);
+ goto old_gdt;
+ }
/*
* This is a horrible hack to allocate the GDT. The problem
* is that cpu_init() is called really early for the boot CPU
@@ -631,7 +636,7 @@ void __cpuinit cpu_init(void)
local_irq_enable();
}
}
-
+old_gdt:
/*
* Initialize the per-CPU GDT with the boot GDT,
* and set up the GDT descriptor:
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 6c37b4fd8ce285..e9f0b928b0a992 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -159,13 +159,13 @@ union l2_cache {
unsigned val;
};
-static unsigned short assocs[] = {
+static const unsigned short assocs[] = {
[1] = 1, [2] = 2, [4] = 4, [6] = 8,
[8] = 16,
[0xf] = 0xffff // ??
};
-static unsigned char levels[] = { 1, 1, 2 };
-static unsigned char types[] = { 1, 2, 3 };
+static const unsigned char levels[] = { 1, 1, 2 };
+static const unsigned char types[] = { 1, 2, 3 };
static void __cpuinit amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
union _cpuid4_leaf_ebx *ebx,
@@ -261,7 +261,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
#endif
@@ -383,14 +383,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
if (new_l2) {
l2 = new_l2;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l2_id;
#endif
}
if (new_l3) {
l3 = new_l3;
-#ifdef CONFIG_SMP
+#ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l3_id;
#endif
}
@@ -729,7 +729,7 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
return;
}
-static int cacheinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -747,7 +747,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cacheinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier =
{
.notifier_call = cacheinfo_cpu_callback,
};
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index a19fcb262dbb64..f54a15268ed730 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -18,7 +18,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
* applications want to get the raw CPUID data, they should access
* /dev/cpu/<cpu_nr>/cpuid instead.
*/
- static char *x86_cap_flags[] = {
+ static const char * const x86_cap_flags[] = {
/* Intel-defined */
"fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
"cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
@@ -62,7 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
};
- static char *x86_power_flags[] = {
+ static const char * const x86_power_flags[] = {
"ts", /* temperature sensor */
"fid", /* frequency id control */
"vid", /* voltage id control */
@@ -109,9 +109,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_X86_HT
if (c->x86_max_cores * smp_num_siblings > 1) {
- seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
+ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
- seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
+ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif
diff --git a/arch/i386/kernel/cpuid.c b/arch/i386/kernel/cpuid.c
index 1d9a4abcdfc71f..f6dfa9fb675c1b 100644
--- a/arch/i386/kernel/cpuid.c
+++ b/arch/i386/kernel/cpuid.c
@@ -183,7 +183,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb, unsigned long ac
return NOTIFY_OK;
}
-static struct notifier_block cpuid_class_cpu_notifier =
+static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
{
.notifier_call = cpuid_class_cpu_callback,
};
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
index e6e4506e749acb..fbdb933251b643 100644
--- a/arch/i386/kernel/entry.S
+++ b/arch/i386/kernel/entry.S
@@ -83,6 +83,12 @@ VM_MASK = 0x00020000
#define resume_kernel restore_nocheck
#endif
+#ifdef CONFIG_VM86
+#define resume_userspace_sig check_userspace
+#else
+#define resume_userspace_sig resume_userspace
+#endif
+
#define SAVE_ALL \
cld; \
pushl %es; \
@@ -211,6 +217,7 @@ ret_from_exception:
preempt_stop
ret_from_intr:
GET_THREAD_INFO(%ebp)
+check_userspace:
movl EFLAGS(%esp), %eax # mix EFLAGS and CS
movb CS(%esp), %al
testl $(VM_MASK | 3), %eax
@@ -263,7 +270,12 @@ sysenter_past_esp:
pushl $(__USER_CS)
CFI_ADJUST_CFA_OFFSET 4
/*CFI_REL_OFFSET cs, 0*/
- pushl $SYSENTER_RETURN
+ /*
+ * Push current_thread_info()->sysenter_return to the stack.
+ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
+ * pushed above; +8 corresponds to copy_thread's esp0 setting.
+ */
+ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
CFI_ADJUST_CFA_OFFSET 4
CFI_REL_OFFSET eip, 0
@@ -415,7 +427,7 @@ work_notifysig: # deal with pending signals and
# vm86-space
xorl %edx, %edx
call do_notify_resume
- jmp resume_userspace
+ jmp resume_userspace_sig
ALIGN
work_notifysig_v86:
@@ -428,7 +440,7 @@ work_notifysig_v86:
movl %eax, %esp
xorl %edx, %edx
call do_notify_resume
- jmp resume_userspace
+ jmp resume_userspace_sig
#endif
# perform syscall exit tracing
@@ -515,7 +527,7 @@ ENTRY(irq_entries_start)
.if vector
CFI_ADJUST_CFA_OFFSET -4
.endif
-1: pushl $vector-256
+1: pushl $~(vector)
CFI_ADJUST_CFA_OFFSET 4
jmp common_interrupt
.data
@@ -535,7 +547,7 @@ common_interrupt:
#define BUILD_INTERRUPT(name, nr) \
ENTRY(name) \
RING0_INT_FRAME; \
- pushl $nr-256; \
+ pushl $~(nr); \
CFI_ADJUST_CFA_OFFSET 4; \
SAVE_ALL; \
movl %esp,%eax; \
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 061533e0cb5e8e..c703bc7b08800c 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -53,8 +53,8 @@ static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
*/
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
- /* high bits used in ret_from_ code */
- int irq = regs->orig_eax & 0xff;
+ /* high bit used in ret_from_ code */
+ int irq = ~regs->orig_eax;
#ifdef CONFIG_4KSTACKS
union irq_ctx *curctx, *irqctx;
u32 *isp;
@@ -100,8 +100,8 @@ fastcall unsigned int do_IRQ(struct pt_regs *regs)
* softirq checks work in the hardirq context.
*/
irqctx->tinfo.preempt_count =
- irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK |
- curctx->tinfo.preempt_count & SOFTIRQ_MASK;
+ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
+ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
asm volatile(
" xchgl %%ebx,%%esp \n"
diff --git a/arch/i386/kernel/msr.c b/arch/i386/kernel/msr.c
index 7a328230e540f8..d022cb8fd7251c 100644
--- a/arch/i386/kernel/msr.c
+++ b/arch/i386/kernel/msr.c
@@ -266,7 +266,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb, unsigned long acti
return NOTIFY_OK;
}
-static struct notifier_block msr_class_cpu_notifier =
+static struct notifier_block __cpuinitdata msr_class_cpu_notifier =
{
.notifier_call = msr_class_cpu_callback,
};
diff --git a/arch/i386/kernel/scx200.c b/arch/i386/kernel/scx200.c
index 321f5fd26e7506..9bf590cefc7d4d 100644
--- a/arch/i386/kernel/scx200.c
+++ b/arch/i386/kernel/scx200.c
@@ -9,6 +9,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/scx200.h>
@@ -45,11 +46,19 @@ static struct pci_driver scx200_pci_driver = {
.probe = scx200_probe,
};
-static DEFINE_SPINLOCK(scx200_gpio_config_lock);
+static DEFINE_MUTEX(scx200_gpio_config_lock);
-static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static void __devinit scx200_init_shadow(void)
{
int bank;
+
+ /* read the current values driven on the GPIO signals */
+ for (bank = 0; bank < 2; ++bank)
+ scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+}
+
+static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
unsigned base;
if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
@@ -63,10 +72,7 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
}
scx200_gpio_base = base;
-
- /* read the current values driven on the GPIO signals */
- for (bank = 0; bank < 2; ++bank)
- scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+ scx200_init_shadow();
} else {
/* find the base of the Configuration Block */
@@ -87,12 +93,11 @@ static int __devinit scx200_probe(struct pci_dev *pdev, const struct pci_device_
return 0;
}
-u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
+u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
{
u32 config, new_config;
- unsigned long flags;
- spin_lock_irqsave(&scx200_gpio_config_lock, flags);
+ mutex_lock(&scx200_gpio_config_lock);
outl(index, scx200_gpio_base + 0x20);
config = inl(scx200_gpio_base + 0x24);
@@ -100,45 +105,11 @@ u32 scx200_gpio_configure(int index, u32 mask, u32 bits)
new_config = (config & mask) | bits;
outl(new_config, scx200_gpio_base + 0x24);
- spin_unlock_irqrestore(&scx200_gpio_config_lock, flags);
+ mutex_unlock(&scx200_gpio_config_lock);
return config;
}
-#if 0
-void scx200_gpio_dump(unsigned index)
-{
- u32 config = scx200_gpio_configure(index, ~0, 0);
- printk(KERN_DEBUG "GPIO%02u: 0x%08lx", index, (unsigned long)config);
-
- if (config & 1)
- printk(" OE"); /* output enabled */
- else
- printk(" TS"); /* tristate */
- if (config & 2)
- printk(" PP"); /* push pull */
- else
- printk(" OD"); /* open drain */
- if (config & 4)
- printk(" PUE"); /* pull up enabled */
- else
- printk(" PUD"); /* pull up disabled */
- if (config & 8)
- printk(" LOCKED"); /* locked */
- if (config & 16)
- printk(" LEVEL"); /* level input */
- else
- printk(" EDGE"); /* edge input */
- if (config & 32)
- printk(" HI"); /* trigger on rising edge */
- else
- printk(" LO"); /* trigger on falling edge */
- if (config & 64)
- printk(" DEBOUNCE"); /* debounce */
- printk("\n");
-}
-#endif /* 0 */
-
static int __init scx200_init(void)
{
printk(KERN_INFO NAME ": NatSemi SCx200 Driver\n");
@@ -159,10 +130,3 @@ EXPORT_SYMBOL(scx200_gpio_base);
EXPORT_SYMBOL(scx200_gpio_shadow);
EXPORT_SYMBOL(scx200_gpio_configure);
EXPORT_SYMBOL(scx200_cb_base);
-
-/*
- Local variables:
- compile-command: "make -k -C ../../.. SUBDIRS=arch/i386/kernel modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/arch/i386/kernel/signal.c b/arch/i386/kernel/signal.c
index 5c352c3a9e7fa0..43002cfb40c4e2 100644
--- a/arch/i386/kernel/signal.c
+++ b/arch/i386/kernel/signal.c
@@ -351,7 +351,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
goto give_sigsegv;
}
- restorer = &__kernel_sigreturn;
+ restorer = (void *)VDSO_SYM(&__kernel_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
@@ -447,7 +447,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
goto give_sigsegv;
/* Set up to return from userspace. */
- restorer = &__kernel_rt_sigreturn;
+ restorer = (void *)VDSO_SYM(&__kernel_rt_sigreturn);
if (ka->sa.sa_flags & SA_RESTORER)
restorer = ka->sa.sa_restorer;
err |= __put_user(restorer, &frame->pretcode);
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bce5470ecb42e6..89e7315e539c49 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -67,12 +67,6 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);
#endif
-/* Package ID of each logical CPU */
-int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-
-/* Core ID of each logical CPU */
-int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
-
/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
@@ -454,10 +448,12 @@ cpumask_t cpu_coregroup_map(int cpu)
struct cpuinfo_x86 *c = cpu_data + cpu;
/*
* For perf, we return last level cache shared map.
- * TBD: when power saving sched policy is added, we will return
- * cpu_core_map when power saving policy is enabled
+ * And for power savings, we return cpu_core_map
*/
- return c->llc_shared_map;
+ if (sched_mc_power_savings || sched_smt_power_savings)
+ return cpu_core_map[cpu];
+ else
+ return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
@@ -473,8 +469,8 @@ set_cpu_sibling_map(int cpu)
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
- if (phys_proc_id[cpu] == phys_proc_id[i] &&
- cpu_core_id[cpu] == cpu_core_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
+ c[cpu].cpu_core_id == c[i].cpu_core_id) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
@@ -501,7 +497,7 @@ set_cpu_sibling_map(int cpu)
cpu_set(i, c[cpu].llc_shared_map);
cpu_set(cpu, c[i].llc_shared_map);
}
- if (phys_proc_id[cpu] == phys_proc_id[i]) {
+ if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
@@ -1056,6 +1052,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
struct warm_boot_cpu_info info;
struct work_struct task;
int apicid, ret;
+ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
apicid = x86_cpu_to_apicid[cpu];
if (apicid == BAD_APICID) {
@@ -1063,6 +1060,18 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
goto exit;
}
+ /*
+ * the CPU isn't initialized at boot time, allocate gdt table here.
+ * cpu_init will initialize it
+ */
+ if (!cpu_gdt_descr->address) {
+ cpu_gdt_descr->address = get_zeroed_page(GFP_KERNEL);
+		if (!cpu_gdt_descr->address) {
+			printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
+			ret = -ENOMEM;
+			goto exit;
+		}
+	}
+
info.complete = &done;
info.apicid = apicid;
info.cpu = cpu;
@@ -1340,8 +1349,8 @@ remove_siblinginfo(int cpu)
cpu_clear(cpu, cpu_sibling_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
- phys_proc_id[cpu] = BAD_APICID;
- cpu_core_id[cpu] = BAD_APICID;
+ c[cpu].phys_proc_id = 0;
+ c[cpu].cpu_core_id = 0;
cpu_clear(cpu, cpu_sibling_setup_map);
}
diff --git a/arch/i386/kernel/sysenter.c b/arch/i386/kernel/sysenter.c
index 0bada1870bdf56..c60419dee0180b 100644
--- a/arch/i386/kernel/sysenter.c
+++ b/arch/i386/kernel/sysenter.c
@@ -2,6 +2,8 @@
* linux/arch/i386/kernel/sysenter.c
*
* (C) Copyright 2002 Linus Torvalds
+ * Portions based on the vdso-randomization code from exec-shield:
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
*
* This file contains the needed initializations to support sysenter.
*/
@@ -13,12 +15,31 @@
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/module.h>
#include <asm/cpufeature.h>
#include <asm/msr.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso_enabled = 1;
+
+EXPORT_SYMBOL_GPL(vdso_enabled);
+
+static int __init vdso_setup(char *s)
+{
+ vdso_enabled = simple_strtoul(s, NULL, 0);
+
+ return 1;
+}
+
+__setup("vdso=", vdso_setup);
+
extern asmlinkage void sysenter_entry(void);
void enable_sep_cpu(void)
@@ -45,23 +66,122 @@ void enable_sep_cpu(void)
*/
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+static void *syscall_page;
int __init sysenter_setup(void)
{
- void *page = (void *)get_zeroed_page(GFP_ATOMIC);
+ syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
- __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+#ifdef CONFIG_COMPAT_VDSO
+ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY);
+ printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
+#else
+ /*
+ * In the non-compat case the ELF coredumping code needs the fixmap:
+ */
+ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+#endif
if (!boot_cpu_has(X86_FEATURE_SEP)) {
- memcpy(page,
+ memcpy(syscall_page,
&vsyscall_int80_start,
&vsyscall_int80_end - &vsyscall_int80_start);
return 0;
}
- memcpy(page,
+ memcpy(syscall_page,
&vsyscall_sysenter_start,
&vsyscall_sysenter_end - &vsyscall_sysenter_start);
return 0;
}
+
+static struct page *syscall_nopage(struct vm_area_struct *vma,
+ unsigned long adr, int *type)
+{
+ struct page *p = virt_to_page(adr - vma->vm_start + syscall_page);
+ get_page(p);
+ return p;
+}
+
+/* Prevent VMA merging */
+static void syscall_vma_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct syscall_vm_ops = {
+ .close = syscall_vma_close,
+ .nopage = syscall_nopage,
+};
+
+/* Defined in vsyscall-sysenter.S */
+extern void SYSENTER_RETURN;
+
+/* Setup a VMA at program startup for the vsyscall page */
+int arch_setup_additional_pages(struct linux_binprm *bprm, int exstack)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ vma = kmem_cache_zalloc(vm_area_cachep, SLAB_KERNEL);
+ if (!vma) {
+ ret = -ENOMEM;
+ goto up_fail;
+ }
+
+ vma->vm_start = addr;
+ vma->vm_end = addr + PAGE_SIZE;
+ /* MAYWRITE to allow gdb to COW and set breakpoints */
+ vma->vm_flags = VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC|VM_MAYWRITE;
+ vma->vm_flags |= mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_ops = &syscall_vm_ops;
+ vma->vm_mm = mm;
+
+ ret = insert_vm_struct(mm, vma);
+ if (ret)
+ goto free_vma;
+
+ current->mm->context.vdso = (void *)addr;
+ current_thread_info()->sysenter_return =
+ (void *)VDSO_SYM(&SYSENTER_RETURN);
+ mm->total_vm++;
+up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
+
+free_vma:
+ kmem_cache_free(vm_area_cachep, vma);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
+ return "[vdso]";
+ return NULL;
+}
+
+struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+{
+ return NULL;
+}
+
+int in_gate_area(struct task_struct *task, unsigned long addr)
+{
+ return 0;
+}
+
+int in_gate_area_no_task(unsigned long addr)
+{
+ return 0;
+}
diff --git a/arch/i386/kernel/topology.c b/arch/i386/kernel/topology.c
index 296355292c7c56..e2e281d4bcc80e 100644
--- a/arch/i386/kernel/topology.c
+++ b/arch/i386/kernel/topology.c
@@ -32,15 +32,8 @@
static struct i386_cpu cpu_devices[NR_CPUS];
-int arch_register_cpu(int num){
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- if (node_online(node))
- parent = &node_devices[node].node;
-#endif /* CONFIG_NUMA */
-
+int arch_register_cpu(int num)
+{
/*
* CPU0 cannot be offlined due to several
* restrictions and assumptions in kernel. This basically
@@ -50,21 +43,13 @@ int arch_register_cpu(int num){
if (!num)
cpu_devices[num].cpu.no_control = 1;
- return register_cpu(&cpu_devices[num].cpu, num, parent);
+ return register_cpu(&cpu_devices[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_unregister_cpu(int num) {
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- if (node_online(node))
- parent = &node_devices[node].node;
-#endif /* CONFIG_NUMA */
-
- return unregister_cpu(&cpu_devices[num].cpu, parent);
+ return unregister_cpu(&cpu_devices[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,16 +59,13 @@ EXPORT_SYMBOL(arch_unregister_cpu);
#ifdef CONFIG_NUMA
#include <linux/mmzone.h>
-#include <asm/node.h>
-
-struct i386_node node_devices[MAX_NUMNODES];
static int __init topology_init(void)
{
int i;
for_each_online_node(i)
- arch_register_node(i);
+ register_one_node(i);
for_each_present_cpu(i)
arch_register_cpu(i);
diff --git a/arch/i386/kernel/vsyscall-sysenter.S b/arch/i386/kernel/vsyscall-sysenter.S
index 3b62baa6a371de..1a36d26e15eb0c 100644
--- a/arch/i386/kernel/vsyscall-sysenter.S
+++ b/arch/i386/kernel/vsyscall-sysenter.S
@@ -42,10 +42,10 @@ __kernel_vsyscall:
/* 7: align return point with nop's to make disassembly easier */
.space 7,0x90
- /* 14: System call restart point is here! (SYSENTER_RETURN - 2) */
+ /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
jmp .Lenter_kernel
/* 16: System call normal return point is here! */
- .globl SYSENTER_RETURN /* Symbol used by entry.S. */
+ .globl SYSENTER_RETURN /* Symbol used by sysenter.c */
SYSENTER_RETURN:
pop %ebp
.Lpop_ebp:
diff --git a/arch/i386/kernel/vsyscall.lds.S b/arch/i386/kernel/vsyscall.lds.S
index 98699ca6e52d7b..e26975fc68b650 100644
--- a/arch/i386/kernel/vsyscall.lds.S
+++ b/arch/i386/kernel/vsyscall.lds.S
@@ -7,7 +7,7 @@
SECTIONS
{
- . = VSYSCALL_BASE + SIZEOF_HEADERS;
+ . = VDSO_PRELINK + SIZEOF_HEADERS;
.hash : { *(.hash) } :text
.dynsym : { *(.dynsym) }
@@ -20,7 +20,7 @@ SECTIONS
For the layouts to match, we need to skip more than enough
space for the dynamic symbol table et al. If this amount
is insufficient, ld -shared will barf. Just increase it here. */
- . = VSYSCALL_BASE + 0x400;
+ . = VDSO_PRELINK + 0x400;
.text : { *(.text) } :text =0x90909090
.note : { *(.note.*) } :text :note
diff --git a/arch/i386/mach-voyager/setup.c b/arch/i386/mach-voyager/setup.c
index 0e225054e2229c..defc6ebbd56517 100644
--- a/arch/i386/mach-voyager/setup.c
+++ b/arch/i386/mach-voyager/setup.c
@@ -5,10 +5,10 @@
#include <linux/config.h>
#include <linux/init.h>
#include <linux/interrupt.h>
-#include <asm/acpi.h>
#include <asm/arch_hooks.h>
#include <asm/voyager.h>
#include <asm/e820.h>
+#include <asm/io.h>
#include <asm/setup.h>
void __init pre_intr_init_hook(void)
@@ -27,8 +27,7 @@ void __init intr_init_hook(void)
smp_intr_init();
#endif
- if (!acpi_ioapic)
- setup_irq(2, &irq2);
+ setup_irq(2, &irq2);
}
void __init pre_setup_arch_hook(void)
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 70e560a1b79ad1..8242af9ebc6f99 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -661,6 +661,7 @@ do_boot_cpu(__u8 cpu)
print_cpu_info(&cpu_data[cpu]);
wmb();
cpu_set(cpu, cpu_callout_map);
+ cpu_set(cpu, cpu_present_map);
}
else {
printk("CPU%d FAILED TO BOOT: ", cpu);
@@ -1912,6 +1913,7 @@ void __devinit smp_prepare_boot_cpu(void)
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_callout_map);
cpu_set(smp_processor_id(), cpu_possible_map);
+ cpu_set(smp_processor_id(), cpu_present_map);
}
int __devinit
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index bf19513f0cea22..f84b16e007ff86 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
@@ -654,7 +655,7 @@ void __init mem_init(void)
*/
#ifdef CONFIG_MEMORY_HOTPLUG
#ifndef CONFIG_NEED_MULTIPLE_NODES
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdata = &contig_page_data;
struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
@@ -753,7 +754,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
diff --git a/arch/i386/mm/pageattr.c b/arch/i386/mm/pageattr.c
index 0887b34bc59b98..353a836ed63c03 100644
--- a/arch/i386/mm/pageattr.c
+++ b/arch/i386/mm/pageattr.c
@@ -229,8 +229,8 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
if (PageHighMem(page))
return;
if (!enable)
- mutex_debug_check_no_locks_freed(page_address(page),
- numpages * PAGE_SIZE);
+ debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
/* the return value is ignored - the calls cannot fail,
* large pages are disabled at boot time.
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 18318749884b01..a56df7bf022da1 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -374,6 +374,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
+config HAVE_ARCH_NODEDATA_EXTENSION
+ def_bool y
+ depends on NUMA
+
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 859fb37ff49b68..303a9afcf2a1ce 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -959,7 +959,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -978,7 +978,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
{
.notifier_call = palinfo_cpu_callback,
.priority = 0,
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 663a186ad194a1..9065f0f01ba3e7 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
};
#ifdef CONFIG_HOTPLUG_CPU
-static int
+static int __devinit
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
@@ -673,9 +673,7 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
-#ifdef CONFIG_HOTPLUG_CPU
- register_cpu_notifier(&salinfo_cpu_notifier);
-#endif
+ register_hotcpu_notifier(&salinfo_cpu_notifier);
return 0;
}
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 879edb51d1e0e1..5511d9c6c70152 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -26,19 +26,10 @@
#include <asm/numa.h>
#include <asm/cpu.h>
-#ifdef CONFIG_NUMA
-static struct node *sysfs_nodes;
-#endif
static struct ia64_cpu *sysfs_cpus;
int arch_register_cpu(int num)
{
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- parent = &sysfs_nodes[cpu_to_node(num)];
-#endif /* CONFIG_NUMA */
-
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
/*
* If CPEI cannot be re-targetted, and this is
@@ -48,21 +39,14 @@ int arch_register_cpu(int num)
sysfs_cpus[num].cpu.no_control = 1;
#endif
- return register_cpu(&sysfs_cpus[num].cpu, num, parent);
+ return register_cpu(&sysfs_cpus[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_unregister_cpu(int num)
{
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- parent = &sysfs_nodes[node];
-#endif /* CONFIG_NUMA */
-
- return unregister_cpu(&sysfs_cpus[num].cpu, parent);
+ return unregister_cpu(&sysfs_cpus[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,17 +58,11 @@ static int __init topology_init(void)
int i, err = 0;
#ifdef CONFIG_NUMA
- sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
- if (!sysfs_nodes) {
- err = -ENOMEM;
- goto out;
- }
-
/*
* MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
*/
for_each_online_node(i) {
- if ((err = register_node(&sysfs_nodes[i], i, 0)))
+ if ((err = register_one_node(i)))
goto out;
}
#endif
@@ -426,7 +404,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
-static int cache_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -444,7 +422,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cache_cpu_notifier =
+static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
.notifier_call = cache_cpu_callback
};
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa360306..525b082eb66198 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
*/
struct early_node_data {
struct ia64_node_data *node_data;
- pg_data_t *pgdat;
unsigned long pernode_addr;
unsigned long pernode_size;
struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;
+static pg_data_t *pgdat_list[MAX_NUMNODES];
+
/*
* To prevent cache aliasing effects, align per-node structures so that they
* start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
* acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
* called yet. Note that node 0 will also count all non-existent cpus.
*/
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
{
int cpu, n = 0;
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
pernode += PERCPU_PAGE_SIZE * cpus;
pernode += node * L1_CACHE_BYTES;
- mem_data[node].pgdat = __va(pernode);
+ pgdat_list[node] = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
mem_data[node].node_data = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
- mem_data[node].pgdat->bdata = bdp;
+ pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
static int __init free_node_bootmem(unsigned long start, unsigned long len,
int node)
{
- free_bootmem_node(mem_data[node].pgdat, start, len);
+ free_bootmem_node(pgdat_list[node], start, len);
return 0;
}
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
int node;
for_each_online_node(node) {
- pg_data_t *pdp = mem_data[node].pgdat;
+ pg_data_t *pdp = pgdat_list[node];
if (node_isset(node, memory_less_mask))
continue;
@@ -307,6 +308,17 @@ static void __init reserve_pernode_space(void)
}
}
+static void __meminit scatter_node_data(void)
+{
+ pg_data_t **dst;
+ int node;
+
+ for_each_online_node(node) {
+ dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+ memcpy(dst, pgdat_list, sizeof(pgdat_list));
+ }
+}
+
/**
* initialize_pernode_data - fixup per-cpu & per-node pointers
*
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void)
*/
static void __init initialize_pernode_data(void)
{
- pg_data_t *pgdat_list[MAX_NUMNODES];
int cpu, node;
- for_each_online_node(node)
- pgdat_list[node] = mem_data[node].pgdat;
+ scatter_node_data();
- /* Copy the pg_data_t list to each node and init the node field */
- for_each_online_node(node) {
- memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
- sizeof(pgdat_list));
- }
#ifdef CONFIG_SMP
/* Set the node_data pointer for each per-cpu struct */
for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
if (bestnode == -1)
bestnode = anynode;
- ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+ ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
pernodesize = mem_data[node].pernode_size;
map = pernode + pernodesize;
- init_bootmem_node(mem_data[node].pgdat,
+ init_bootmem_node(pgdat_list[node],
map>>PAGE_SHIFT,
bdp->node_boot_start>>PAGE_SHIFT,
bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
+
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+ unsigned long size = compute_pernodesize(nid);
+
+ return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+ kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+ pgdat_list[update_node] = update_pgdat;
+ scatter_node_data();
+}
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 11f08001f8c26e..38306e98f04b28 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -652,7 +652,7 @@ void online_page(struct page *page)
num_physpages++;
}
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
struct zone *zone;
@@ -660,7 +660,7 @@ int add_memory(u64 start, u64 size)
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(0);
+ pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
ret = __add_pages(zone, start_pfn, nr_pages);
@@ -671,7 +671,6 @@ int add_memory(u64 start, u64 size)
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory);
int remove_memory(u64 start, u64 size)
{
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index dc8e2b6967135f..677c6c0fd66188 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -27,7 +27,7 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
struct list_head **sn_irq_lh;
-static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info,
diff --git a/arch/m32r/kernel/setup.c b/arch/m32r/kernel/setup.c
index 3cd3c2988a4875..1ff483c8a4c974 100644
--- a/arch/m32r/kernel/setup.c
+++ b/arch/m32r/kernel/setup.c
@@ -275,7 +275,7 @@ static int __init topology_init(void)
int i;
for_each_present_cpu(i)
- register_cpu(&cpu_devices[i], i, NULL);
+ register_cpu(&cpu_devices[i], i);
return 0;
}
diff --git a/arch/m68knommu/Makefile b/arch/m68knommu/Makefile
index 6f880cbff1c88d..8951793fd8d4aa 100644
--- a/arch/m68knommu/Makefile
+++ b/arch/m68knommu/Makefile
@@ -21,6 +21,7 @@ platform-$(CONFIG_M527x) := 527x
platform-$(CONFIG_M5272) := 5272
platform-$(CONFIG_M528x) := 528x
platform-$(CONFIG_M5307) := 5307
+platform-$(CONFIG_M532x) := 532x
platform-$(CONFIG_M5407) := 5407
PLATFORM := $(platform-y)
@@ -44,6 +45,7 @@ board-$(CONFIG_senTec) := senTec
board-$(CONFIG_SNEHA) := SNEHA
board-$(CONFIG_M5208EVB) := M5208EVB
board-$(CONFIG_MOD5272) := MOD5272
+board-$(CONFIG_AVNET) := AVNET
BOARD := $(board-y)
model-$(CONFIG_RAMKERNEL) := ram
@@ -65,6 +67,7 @@ cpuclass-$(CONFIG_M527x) := 5307
cpuclass-$(CONFIG_M5272) := 5307
cpuclass-$(CONFIG_M528x) := 5307
cpuclass-$(CONFIG_M5307) := 5307
+cpuclass-$(CONFIG_M532x) := 5307
cpuclass-$(CONFIG_M5407) := 5307
cpuclass-$(CONFIG_M68328) := 68328
cpuclass-$(CONFIG_M68EZ328) := 68328
@@ -81,16 +84,17 @@ export PLATFORM BOARD MODEL CPUCLASS
#
# Some CFLAG additions based on specific CPU type.
#
-cflags-$(CONFIG_M5206) := -m5200 -Wa,-S -Wa,-m5200
-cflags-$(CONFIG_M5206e) := -m5200 -Wa,-S -Wa,-m5200
-cflags-$(CONFIG_M520x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M523x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5249) := -m5200 -Wa,-S -Wa,-m5200
-cflags-$(CONFIG_M527x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5272) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M528x) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5307) := -m5307 -Wa,-S -Wa,-m5307
-cflags-$(CONFIG_M5407) := -m5200 -Wa,-S -Wa,-m5200
+cflags-$(CONFIG_M5206) := -m5200
+cflags-$(CONFIG_M5206e) := -m5200
+cflags-$(CONFIG_M520x) := -m5307
+cflags-$(CONFIG_M523x) := -m5307
+cflags-$(CONFIG_M5249) := -m5200
+cflags-$(CONFIG_M527x) := -m5307
+cflags-$(CONFIG_M5272) := -m5307
+cflags-$(CONFIG_M528x) := -m5307
+cflags-$(CONFIG_M5307) := -m5307
+cflags-$(CONFIG_M532x) := -m5307
+cflags-$(CONFIG_M5407) := -m5200
cflags-$(CONFIG_M68328) := -m68000
cflags-$(CONFIG_M68EZ328) := -m68000
cflags-$(CONFIG_M68VZ328) := -m68000
diff --git a/arch/m68knommu/defconfig b/arch/m68knommu/defconfig
index 2d59ba1a79ba4e..3891de09ac2372 100644
--- a/arch/m68knommu/defconfig
+++ b/arch/m68knommu/defconfig
@@ -1,21 +1,22 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13-uc0
-# Wed Aug 31 15:03:26 2005
+# Linux kernel version: 2.6.17
+# Tue Jun 27 12:57:06 2006
#
-CONFIG_M68KNOMMU=y
+CONFIG_M68K=y
# CONFIG_MMU is not set
# CONFIG_FPU is not set
-CONFIG_UID16=y
CONFIG_RWSEM_GENERIC_SPINLOCK=y
# CONFIG_RWSEM_XCHGADD_ALGORITHM is not set
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_TIME_LOW_RES=y
#
# Code maturity level options
#
CONFIG_EXPERIMENTAL=y
-CONFIG_CLEAN_COMPILE=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_INIT_ENV_ARG_LIMIT=32
@@ -23,26 +24,30 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
# General setup
#
CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SYSVIPC is not set
# CONFIG_POSIX_MQUEUE is not set
# CONFIG_BSD_PROCESS_ACCT is not set
# CONFIG_SYSCTL is not set
# CONFIG_AUDIT is not set
-# CONFIG_HOTPLUG is not set
-# CONFIG_KOBJECT_UEVENT is not set
# CONFIG_IKCONFIG is not set
+# CONFIG_RELAY is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_UID16=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EMBEDDED=y
# CONFIG_KALLSYMS is not set
+# CONFIG_HOTPLUG is not set
CONFIG_PRINTK=y
CONFIG_BUG=y
+CONFIG_ELF_CORE=y
CONFIG_BASE_FULL=y
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_CC_ALIGN_FUNCTIONS=0
-CONFIG_CC_ALIGN_LABELS=0
-CONFIG_CC_ALIGN_LOOPS=0
-CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
+CONFIG_TINY_SHMEM=y
CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
#
# Loadable module support
@@ -50,6 +55,24 @@ CONFIG_BASE_SMALL=0
# CONFIG_MODULES is not set
#
+# Block layer
+#
+# CONFIG_BLK_DEV_IO_TRACE is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+
+#
# Processor type and features
#
# CONFIG_M68328 is not set
@@ -58,6 +81,7 @@ CONFIG_BASE_SMALL=0
# CONFIG_M68360 is not set
# CONFIG_M5206 is not set
# CONFIG_M5206e is not set
+# CONFIG_M520x is not set
# CONFIG_M523x is not set
# CONFIG_M5249 is not set
# CONFIG_M5271 is not set
@@ -65,29 +89,12 @@ CONFIG_M5272=y
# CONFIG_M5275 is not set
# CONFIG_M528x is not set
# CONFIG_M5307 is not set
+# CONFIG_M532x is not set
# CONFIG_M5407 is not set
CONFIG_COLDFIRE=y
-# CONFIG_CLOCK_AUTO is not set
-# CONFIG_CLOCK_11MHz is not set
-# CONFIG_CLOCK_16MHz is not set
-# CONFIG_CLOCK_20MHz is not set
-# CONFIG_CLOCK_24MHz is not set
-# CONFIG_CLOCK_25MHz is not set
-# CONFIG_CLOCK_33MHz is not set
-# CONFIG_CLOCK_40MHz is not set
-# CONFIG_CLOCK_45MHz is not set
-# CONFIG_CLOCK_48MHz is not set
-# CONFIG_CLOCK_50MHz is not set
-# CONFIG_CLOCK_54MHz is not set
-# CONFIG_CLOCK_60MHz is not set
-# CONFIG_CLOCK_62_5MHz is not set
-# CONFIG_CLOCK_64MHz is not set
-CONFIG_CLOCK_66MHz=y
-# CONFIG_CLOCK_70MHz is not set
-# CONFIG_CLOCK_100MHz is not set
-# CONFIG_CLOCK_140MHz is not set
-# CONFIG_CLOCK_150MHz is not set
-# CONFIG_CLOCK_166MHz is not set
+CONFIG_CLOCK_SET=y
+CONFIG_CLOCK_FREQ=66666666
+CONFIG_CLOCK_DIV=1
#
# Platform
@@ -102,11 +109,14 @@ CONFIG_M5272C3=y
CONFIG_FREESCALE=y
# CONFIG_LARGE_ALLOCS is not set
CONFIG_4KSTACKS=y
-CONFIG_RAMAUTO=y
-# CONFIG_RAM4MB is not set
-# CONFIG_RAM8MB is not set
-# CONFIG_RAM16MB is not set
-# CONFIG_RAM32MB is not set
+
+#
+# RAM configuration
+#
+CONFIG_RAMBASE=0x0
+CONFIG_RAMSIZE=0x800000
+CONFIG_VECTORBASE=0x0
+CONFIG_KERNELBASE=0x20000
CONFIG_RAMAUTOBIT=y
# CONFIG_RAM8BIT is not set
# CONFIG_RAM16BIT is not set
@@ -119,6 +129,8 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
#
# Bus options (PCI, PCMCIA, EISA, MCA, ISA)
@@ -140,6 +152,7 @@ CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_BINFMT_FLAT=y
# CONFIG_BINFMT_ZFLAT is not set
# CONFIG_BINFMT_SHARED_FLAT is not set
+# CONFIG_BINFMT_AOUT is not set
# CONFIG_BINFMT_MISC is not set
#
@@ -155,6 +168,7 @@ CONFIG_NET=y
#
# Networking options
#
+# CONFIG_NETDEBUG is not set
CONFIG_PACKET=y
# CONFIG_PACKET_MMAP is not set
CONFIG_UNIX=y
@@ -171,18 +185,30 @@ CONFIG_IP_FIB_HASH=y
# CONFIG_INET_AH is not set
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
# CONFIG_INET_TUNNEL is not set
-# CONFIG_IP_TCPDIAG is not set
-# CONFIG_IP_TCPDIAG_IPV6 is not set
+# CONFIG_INET_DIAG is not set
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
+# CONFIG_INET6_XFRM_TUNNEL is not set
+# CONFIG_INET6_TUNNEL is not set
# CONFIG_NETFILTER is not set
#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
+
+#
+# TIPC Configuration (EXPERIMENTAL)
+#
+# CONFIG_TIPC is not set
# CONFIG_ATM is not set
# CONFIG_BRIDGE is not set
# CONFIG_VLAN_8021Q is not set
@@ -195,8 +221,11 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
-# CONFIG_NET_CLS_ROUTE is not set
#
# Network testing
@@ -205,6 +234,7 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
#
# Device Drivers
@@ -218,6 +248,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
# Memory Technology Devices (MTD)
#
CONFIG_MTD=y
@@ -235,6 +270,7 @@ CONFIG_MTD_BLOCK=y
# CONFIG_FTL is not set
# CONFIG_NFTL is not set
# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
#
# RAM/ROM/Flash chip drivers
@@ -254,13 +290,13 @@ CONFIG_MTD_CFI_I2=y
CONFIG_MTD_RAM=y
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_OBSOLETE_CHIPS is not set
#
# Mapping drivers for chip access
#
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
CONFIG_MTD_UCLINUX=y
-# CONFIG_MTD_SNAPGEARuC is not set
# CONFIG_MTD_PLATRAM is not set
#
@@ -269,7 +305,6 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_SLRAM is not set
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_MTDRAM is not set
-# CONFIG_MTD_BLKMTD is not set
# CONFIG_MTD_BLOCK2MTD is not set
#
@@ -285,6 +320,11 @@ CONFIG_MTD_UCLINUX=y
# CONFIG_MTD_NAND is not set
#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
# Parallel port support
#
# CONFIG_PARPORT is not set
@@ -296,7 +336,6 @@ CONFIG_MTD_UCLINUX=y
#
# Block devices
#
-# CONFIG_BLK_DEV_FD is not set
# CONFIG_BLK_DEV_COW_COMMON is not set
# CONFIG_BLK_DEV_LOOP is not set
# CONFIG_BLK_DEV_NBD is not set
@@ -304,16 +343,7 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=4096
# CONFIG_BLK_DEV_INITRD is not set
-CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_AS is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
# CONFIG_ATA_OVER_ETH is not set
#
@@ -324,6 +354,7 @@ CONFIG_IOSCHED_NOOP=y
#
# SCSI device support
#
+# CONFIG_RAID_ATTRS is not set
# CONFIG_SCSI is not set
#
@@ -354,13 +385,15 @@ CONFIG_NETDEVICES=y
# CONFIG_TUN is not set
#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
# CONFIG_MII is not set
-# CONFIG_NET_VENDOR_SMC is not set
-# CONFIG_NE2000 is not set
-# CONFIG_NET_PCI is not set
CONFIG_FEC=y
# CONFIG_FEC2 is not set
@@ -392,6 +425,7 @@ CONFIG_PPP=y
# CONFIG_PPP_SYNC_TTY is not set
# CONFIG_PPP_DEFLATE is not set
# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
# CONFIG_PPPOE is not set
# CONFIG_SLIP is not set
# CONFIG_SHAPER is not set
@@ -425,8 +459,6 @@ CONFIG_PPP=y
#
# CONFIG_VT is not set
# CONFIG_SERIAL_NONSTANDARD is not set
-# CONFIG_LEDMAN is not set
-# CONFIG_RESETSWITCH is not set
#
# Serial drivers
@@ -450,8 +482,6 @@ CONFIG_LEGACY_PTY_COUNT=256
# Watchdog Cards
#
# CONFIG_WATCHDOG is not set
-# CONFIG_MCFWATCHDOG is not set
-# CONFIG_RTC is not set
# CONFIG_GEN_RTC is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
@@ -464,14 +494,19 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# TPM devices
#
-# CONFIG_MCF_QSPI is not set
-# CONFIG_M41T11M6 is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
-# CONFIG_I2C_SENSOR is not set
+
+#
+# SPI support
+#
+# CONFIG_SPI is not set
+# CONFIG_SPI_MASTER is not set
#
# Dallas's 1-wire bus
@@ -482,6 +517,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# Hardware Monitoring support
#
# CONFIG_HWMON is not set
+# CONFIG_HWMON_VID is not set
#
# Misc devices
@@ -491,6 +527,7 @@ CONFIG_LEGACY_PTY_COUNT=256
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
+CONFIG_VIDEO_V4L2=y
#
# Digital Video Broadcasting Devices
@@ -503,11 +540,6 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_FB is not set
#
-# SPI support
-#
-# CONFIG_SPI is not set
-
-#
# Sound
#
# CONFIG_SOUND is not set
@@ -517,6 +549,11 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# CONFIG_USB_ARCH_HAS_HCD is not set
# CONFIG_USB_ARCH_HAS_OHCI is not set
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
#
# USB Gadget Support
@@ -529,29 +566,43 @@ CONFIG_LEGACY_PTY_COUNT=256
# CONFIG_MMC is not set
#
+# LED devices
+#
+# CONFIG_NEW_LEDS is not set
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+
+#
# InfiniBand support
#
#
-# SN Devices
+# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
#
#
+# Real Time Clock
+#
+# CONFIG_RTC_CLASS is not set
+
+#
# File systems
#
CONFIG_EXT2_FS=y
# CONFIG_EXT2_FS_XATTR is not set
# CONFIG_EXT2_FS_XIP is not set
# CONFIG_EXT3_FS is not set
-# CONFIG_JBD is not set
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-
-#
-# XFS support
-#
# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
# CONFIG_MINIX_FS is not set
CONFIG_ROMFS_FS=y
# CONFIG_INOTIFY is not set
@@ -559,6 +610,7 @@ CONFIG_ROMFS_FS=y
# CONFIG_DNOTIFY is not set
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
@@ -581,6 +633,7 @@ CONFIG_SYSFS=y
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
+# CONFIG_CONFIGFS_FS is not set
#
# Miscellaneous filesystems
@@ -611,6 +664,7 @@ CONFIG_RAMFS=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
#
# Partition Types
@@ -627,8 +681,12 @@ CONFIG_MSDOS_PARTITION=y
# Kernel hacking
#
# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
# CONFIG_DEBUG_KERNEL is not set
CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_UNWIND_INFO is not set
# CONFIG_FULLDEBUG is not set
# CONFIG_HIGHPROFILE is not set
# CONFIG_BOOTPARAM is not set
@@ -655,5 +713,6 @@ CONFIG_LOG_BUF_SHIFT=14
# Library routines
#
# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
# CONFIG_CRC32 is not set
# CONFIG_LIBCRC32C is not set
diff --git a/arch/m68knommu/platform/68328/head-rom.S b/arch/m68knommu/platform/68328/head-rom.S
index 2b448a297011f4..234430b9551c96 100644
--- a/arch/m68knommu/platform/68328/head-rom.S
+++ b/arch/m68knommu/platform/68328/head-rom.S
@@ -28,6 +28,8 @@ _ramstart:
_ramend:
.long 0
+#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+
#ifdef CONFIG_INIT_LCD
splash_bits:
#include "bootlogo.rh"
@@ -48,7 +50,7 @@ _stext: movew #0x2700,%sr
moveb #0x81, 0xfffffA27 /* LCKCON */
movew #0xff00, 0xfffff412 /* LCD pins */
#endif
- moveal #__ramend-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
+ moveal #RAMEND-CONFIG_MEMORY_RESERVE*0x100000 - 0x10, %sp
movew #32767, %d0 /* PLL settle wait loop */
1: subq #1, %d0
bne 1b
@@ -73,13 +75,13 @@ _stext: movew #0x2700,%sr
bhi 1b
movel #_sdata, %d0
- movel %d0, _rambase
- movel #_ebss, %d0
- movel %d0, _ramstart
- movel #__ramend-CONFIG_MEMORY_RESERVE*0x100000, %d0
- movel %d0, _ramend
- movel #__ramvec, %d0
- movel %d0, _ramvec
+ movel %d0, _rambase
+ movel #_ebss, %d0
+ movel %d0, _ramstart
+ movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0
+ movel %d0, _ramend
+ movel #CONFIG_VECTORBASE, %d0
+ movel %d0, _ramvec
/*
* load the current task pointer and stack
diff --git a/arch/m68knommu/platform/68360/head-ram.S b/arch/m68knommu/platform/68360/head-ram.S
index a5c639a51eefd2..f497713a4ec7ea 100644
--- a/arch/m68knommu/platform/68360/head-ram.S
+++ b/arch/m68knommu/platform/68360/head-ram.S
@@ -18,7 +18,6 @@
.global _start
.global _rambase
-.global __ramvec
.global _ramvec
.global _ramstart
.global _ramend
@@ -26,6 +25,8 @@
.global _quicc_base
.global _periph_base
+#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+
#define REGB 0x1000
#define PEPAR (_dprbase + REGB + 0x0016)
#define GMR (_dprbase + REGB + 0x0040)
@@ -103,7 +104,7 @@ _stext:
nop
ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
/* We should not need to setup the boot stack the reset should do it. */
- movea.l #__ramend, %sp /*set up stack at the end of DRAM:*/
+ movea.l #RAMEND, %sp /*set up stack at the end of DRAM:*/
set_mbar_register:
moveq.l #0x07, %d1 /* Setup MBAR */
@@ -163,7 +164,7 @@ configure_memory_controller:
move.l %d0, GMR
configure_chip_select_0:
- move.l #__ramend, %d0
+ move.l #RAMEND, %d0
subi.l #__ramstart, %d0
subq.l #0x01, %d0
eori.l #SIM_OR_MASK, %d0
@@ -234,16 +235,10 @@ store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
move.l #_ebss, _ramstart
- move.l #__ramend, %d0
+ move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
- move.l %d0, _ramend /* Different from __ramend.*/
+ move.l %d0, _ramend /* Different from RAMEND.*/
-store_flash_size:
- /* Set rom size information */
- move.l #__rom_end, %d0
- sub.l #__rom_start, %d0
- move.l %d0, rom_length
-
pea 0
pea env
pea %sp@(4)
@@ -286,7 +281,7 @@ _dprbase:
*/
.section ".data.initvect","awx"
- .long __ramend /* Reset: Initial Stack Pointer - 0. */
+ .long RAMEND /* Reset: Initial Stack Pointer - 0. */
.long _start /* Reset: Initial Program Counter - 1. */
.long buserr /* Bus Error - 2. */
.long trap /* Address Error - 3. */
diff --git a/arch/m68knommu/platform/68360/head-rom.S b/arch/m68knommu/platform/68360/head-rom.S
index 0da357a4cfee40..2d28c3e19a8818 100644
--- a/arch/m68knommu/platform/68360/head-rom.S
+++ b/arch/m68knommu/platform/68360/head-rom.S
@@ -18,7 +18,6 @@
.global _start
.global _rambase
-.global __ramvec
.global _ramvec
.global _ramstart
.global _ramend
@@ -26,6 +25,8 @@
.global _quicc_base
.global _periph_base
+#define RAMEND (CONFIG_RAMBASE + CONFIG_RAMSIZE)
+
#define REGB 0x1000
#define PEPAR (_dprbase + REGB + 0x0016)
#define GMR (_dprbase + REGB + 0x0040)
@@ -115,7 +116,7 @@ _stext:
nop
ori.w #MCU_DISABLE_INTRPTS, %sr /* disable interrupts: */
/* We should not need to setup the boot stack the reset should do it. */
- movea.l #__ramend, %sp /* set up stack at the end of DRAM:*/
+ movea.l #RAMEND, %sp /* set up stack at the end of DRAM:*/
set_mbar_register:
@@ -245,16 +246,10 @@ store_ram_size:
/* Set ram size information */
move.l #_sdata, _rambase
move.l #_ebss, _ramstart
- move.l #__ramend, %d0
+ move.l #RAMEND, %d0
sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/
- move.l %d0, _ramend /* Different from __ramend.*/
+ move.l %d0, _ramend /* Different from RAMEND.*/
-store_flash_size:
- /* Set rom size information */
- move.l #__rom_end, %d0
- sub.l #__rom_start, %d0
- move.l %d0, rom_length
-
pea 0
pea env
pea %sp@(4)
@@ -298,7 +293,7 @@ _dprbase:
*/
.section ".data.initvect","awx"
- .long __ramend /* Reset: Initial Stack Pointer - 0. */
+ .long RAMEND /* Reset: Initial Stack Pointer - 0. */
.long _start /* Reset: Initial Program Counter - 1. */
.long buserr /* Bus Error - 2. */
.long trap /* Address Error - 3. */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 298f82fe8440a4..9096a5ea42298d 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -446,7 +446,7 @@ static int __init topology_init(void)
int ret;
for_each_present_cpu(cpu) {
- ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", cpu, ret);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 2e8e52c135e6ed..70cf09afdf565c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -367,7 +367,7 @@ void mipsmt_prepare_cpus(void)
dvpe();
dmt();
- freeIPIq.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&freeIPIq.lock);
/*
* We probably don't have as many VPEs as we do SMP "CPUs",
@@ -375,7 +375,7 @@ void mipsmt_prepare_cpus(void)
*/
for (i=0; i<NR_CPUS; i++) {
IPIQ[i].head = IPIQ[i].tail = NULL;
- IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
ipi_timer_latch[i] = 0;
}
diff --git a/arch/parisc/kernel/topology.c b/arch/parisc/kernel/topology.c
index 3ba040050e4ca1..068b20d822e7b9 100644
--- a/arch/parisc/kernel/topology.c
+++ b/arch/parisc/kernel/topology.c
@@ -26,11 +26,10 @@ static struct cpu cpu_devices[NR_CPUS] __read_mostly;
static int __init topology_init(void)
{
- struct node *parent = NULL;
int num;
for_each_present_cpu(num) {
- register_cpu(&cpu_devices[num], num, parent);
+ register_cpu(&cpu_devices[num], num);
}
return 0;
}
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index e5a44812441ac1..0932a62a1c9637 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -215,7 +215,7 @@ int __init ppc_init(void)
/* register CPU devices */
for_each_possible_cpu(i)
- register_cpu(&cpu_devices[i], i, NULL);
+ register_cpu(&cpu_devices[i], i);
/* call platform init */
if (ppc_md.init != NULL) {
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 5bc2585c8036d9..4662b580efa164 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -279,7 +279,7 @@ static void unregister_cpu_online(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int sysfs_cpu_notify(struct notifier_block *self,
+static int __devinit sysfs_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned int)(long)hcpu;
@@ -297,30 +297,19 @@ static int sysfs_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block sysfs_cpu_nb = {
+static struct notifier_block __devinitdata sysfs_cpu_nb = {
.notifier_call = sysfs_cpu_notify,
};
/* NUMA stuff */
#ifdef CONFIG_NUMA
-static struct node node_devices[MAX_NUMNODES];
-
static void register_nodes(void)
{
int i;
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (node_online(i)) {
- int p_node = parent_node(i);
- struct node *parent = NULL;
-
- if (p_node != i)
- parent = &node_devices[p_node];
-
- register_node(&node_devices[i], i, parent);
- }
- }
+ for (i = 0; i < MAX_NUMNODES; i++)
+ register_one_node(i);
}
int sysfs_add_device_to_node(struct sys_device *dev, int nid)
@@ -359,23 +348,13 @@ static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
static int __init topology_init(void)
{
int cpu;
- struct node *parent = NULL;
register_nodes();
-
register_cpu_notifier(&sysfs_cpu_nb);
for_each_possible_cpu(cpu) {
struct cpu *c = &per_cpu(cpu_devices, cpu);
-#ifdef CONFIG_NUMA
- /* The node to which a cpu belongs can't be known
- * until the cpu is made present.
- */
- parent = NULL;
- if (cpu_present(cpu))
- parent = &node_devices[cpu_to_node(cpu)];
-#endif
/*
* For now, we just see if the system supports making
* the RTAS calls for CPU hotplug. But, there may be a
@@ -387,7 +366,7 @@ static int __init topology_init(void)
c->no_control = 1;
if (cpu_online(cpu) || (c->no_control == 0)) {
- register_cpu(c, cpu, parent);
+ register_cpu(c, cpu);
sysdev_create_file(&c->sysdev, &attr_physical_id);
}
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 9e30f968c184af..d454caada265d5 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
+#include <linux/poison.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -90,7 +91,7 @@ void free_initmem(void)
addr = (unsigned long)__init_begin;
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
free_page(addr);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 69f3b9a20beb76..089d939a0b3e98 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -114,15 +114,20 @@ void online_page(struct page *page)
num_physpages++;
}
-int __devinit add_memory(u64 start, u64 size)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return hot_add_scn_to_nid(start);
+}
+#endif
+
+int __devinit arch_add_memory(int nid, u64 start, u64 size)
{
struct pglist_data *pgdata;
struct zone *zone;
- int nid;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
- nid = hot_add_scn_to_nid(start);
pgdata = NODE_DATA(nid);
start = (unsigned long)__va(start);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index aa98cb3b59d82a..fbe23933f73192 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -334,7 +334,7 @@ out:
return nid;
}
-static int cpu_numa_callback(struct notifier_block *nfb,
+static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -609,14 +609,15 @@ static void __init *careful_allocation(int nid, unsigned long size,
return (void *)ret;
}
+static struct notifier_block __cpuinitdata ppc64_numa_nb = {
+ .notifier_call = cpu_numa_callback,
+ .priority = 1 /* Must run before sched domains notifier. */
+};
+
void __init do_init_bootmem(void)
{
int nid;
unsigned int i;
- static struct notifier_block ppc64_numa_nb = {
- .notifier_call = cpu_numa_callback,
- .priority = 1 /* Must run before sched domains notifier. */
- };
min_low_pfn = 0;
max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 3068b429b03114..a656d810a44aa6 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -2203,7 +2203,7 @@ void spu_init_csa(struct spu_state *csa)
memset(lscsa, 0, sizeof(struct spu_lscsa));
csa->lscsa = lscsa;
- csa->register_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&csa->register_lock);
/* Set LS pages reserved to allow for user-space mapping. */
for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE)
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 047f954a89eb93..93e7505debc598 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -546,7 +546,7 @@ struct pmf_device {
};
static LIST_HEAD(pmf_devices);
-static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pmf_lock);
static DEFINE_MUTEX(pmf_irq_mutex);
static void pmf_release_device(struct kref *kref)
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index 8f2d12935b99fe..45ccc687e57cbe 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -35,7 +35,7 @@
*/
/* EEH event workqueue setup. */
-static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(eeh_eventlist_lock);
LIST_HEAD(eeh_eventlist);
static void eeh_thread_launcher(void *);
DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c
index 74e0d31a3559cf..615350d46b5261 100644
--- a/arch/powerpc/sysdev/mmio_nvram.c
+++ b/arch/powerpc/sysdev/mmio_nvram.c
@@ -32,7 +32,7 @@
static void __iomem *mmio_nvram_start;
static long mmio_nvram_len;
-static spinlock_t mmio_nvram_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(mmio_nvram_lock);
static ssize_t mmio_nvram_read(char *buf, size_t count, loff_t *index)
{
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 1f79e84ab464dd..4b4607d89bfa6e 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -475,7 +475,7 @@ int __init ppc_init(void)
/* register CPU devices */
for_each_possible_cpu(i)
- register_cpu(&cpu_devices[i], i, NULL);
+ register_cpu(&cpu_devices[i], i);
/* call platform init */
if (ppc_md.init != NULL) {
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 9a22434a580c52..54d35c13090798 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -652,7 +652,7 @@ appldata_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block appldata_nb = {
+static struct notifier_block __devinitdata appldata_nb = {
.notifier_call = appldata_cpu_notify,
};
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 343120c9223d79..8e03219eea7605 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -869,7 +869,7 @@ static int __init topology_init(void)
int ret;
for_each_possible_cpu(cpu) {
- ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+ ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu);
if (ret)
printk(KERN_WARNING "topology_init: register_cpu %d "
"failed (%d)\n", cpu, ret);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index bb229ef030f3cb..9af22116c9a2b3 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -402,7 +402,7 @@ static int __init topology_init(void)
int cpu_id;
for_each_possible_cpu(cpu_id)
- register_cpu(&cpu[cpu_id], cpu_id, NULL);
+ register_cpu(&cpu[cpu_id], cpu_id);
return 0;
}
diff --git a/arch/sh64/kernel/setup.c b/arch/sh64/kernel/setup.c
index d2711c9c9d1395..da98d8dbcf9532 100644
--- a/arch/sh64/kernel/setup.c
+++ b/arch/sh64/kernel/setup.c
@@ -309,7 +309,7 @@ static struct cpu cpu[1];
static int __init topology_init(void)
{
- return register_cpu(cpu, 0, NULL);
+ return register_cpu(cpu, 0);
}
subsys_initcall(topology_init);
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index a6a7d8168346c6..116d9632002def 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -537,7 +537,7 @@ static int __init topology_init(void)
for_each_possible_cpu(i) {
struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p) {
- register_cpu(p, i, NULL);
+ register_cpu(p, i);
err = 0;
}
}
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 5c2bcf354ce64a..cb75a27adb517b 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -18,6 +18,7 @@
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
+#include <linux/poison.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
@@ -1520,7 +1521,7 @@ void free_initmem(void)
page = (addr +
((unsigned long) __va(kern_base)) -
((unsigned long) KERNBASE));
- memset((void *)addr, 0xcc, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
p = virt_to_page(page);
ClearPageReserved(p);
diff --git a/arch/x86_64/kernel/entry.S b/arch/x86_64/kernel/entry.S
index 7290e72b9a34d2..22cac4487b57fd 100644
--- a/arch/x86_64/kernel/entry.S
+++ b/arch/x86_64/kernel/entry.S
@@ -588,7 +588,7 @@ END(common_interrupt)
*/
.macro apicinterrupt num,func
INTR_FRAME
- pushq $\num-256
+ pushq $~(\num)
CFI_ADJUST_CFA_OFFSET 8
interrupt \func
jmp ret_from_intr
diff --git a/arch/x86_64/kernel/irq.c b/arch/x86_64/kernel/irq.c
index 59518d4d43589a..3be0a7e4bf08d1 100644
--- a/arch/x86_64/kernel/irq.c
+++ b/arch/x86_64/kernel/irq.c
@@ -115,8 +115,8 @@ skip:
*/
asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
{
- /* high bits used in ret_from_ code */
- unsigned irq = regs->orig_rax & 0xff;
+ /* high bit used in ret_from_ code */
+ unsigned irq = ~regs->orig_rax;
exit_idle();
irq_enter();
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index acd5816b1a6f21..88845674c661a3 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -629,7 +629,7 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
#endif
/* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int
+static __cpuinit int
mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -647,7 +647,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block mce_cpu_notifier = {
+static struct notifier_block __cpuinitdata mce_cpu_notifier = {
.notifier_call = mce_cpu_callback,
};
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index acee4bc3f6fa94..5a1c0a3bf87262 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -135,10 +135,10 @@ asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
cpu = smp_processor_id();
/*
- * orig_rax contains the interrupt vector - 256.
+ * orig_rax contains the negated interrupt vector.
* Use that to determine where the sender put the data.
*/
- sender = regs->orig_rax + 256 - INVALIDATE_TLB_VECTOR_START;
+ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
f = &per_cpu(flush_state, sender);
if (!cpu_isset(cpu, f->flush_cpumask))
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 4e9755179ecf57..540c0ccbcccc89 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -455,10 +455,12 @@ cpumask_t cpu_coregroup_map(int cpu)
struct cpuinfo_x86 *c = cpu_data + cpu;
/*
* For perf, we return last level cache shared map.
- * TBD: when power saving sched policy is added, we will return
- * cpu_core_map when power saving policy is enabled
+ * And for power savings, we return cpu_core_map
*/
- return c->llc_shared_map;
+ if (sched_mc_power_savings || sched_smt_power_savings)
+ return cpu_core_map[cpu];
+ else
+ return c->llc_shared_map;
}
/* representing cpus for which sibling maps can be computed */
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 02add1d1dfa88a..95bd232ff0cf3d 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -23,6 +23,7 @@
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory_hotplug.h>
@@ -506,8 +507,6 @@ void __init clear_kernel_mapping(unsigned long address, unsigned long size)
/*
* Memory hotplug specific functions
*/
-#if defined(CONFIG_ACPI_HOTPLUG_MEMORY) || defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)
-
void online_page(struct page *page)
{
ClearPageReserved(page);
@@ -517,31 +516,17 @@ void online_page(struct page *page)
num_physpages++;
}
-#ifndef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_MEMORY_HOTPLUG
/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
+ * XXX: memory_add_physaddr_to_nid() is meant to find the node id for a
+ * physical address handed in through the sysfs probe interface. An ACPI
+ * hot-add event can determine the node id by searching the DSDT, but the
+ * probe interface carries no node id, so return node 0 for now.
*/
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
{
- int err = -EIO;
- unsigned long pfn;
- unsigned long total = 0, mem = 0;
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (pfn_valid(pfn)) {
- online_page(pfn_to_page(pfn));
- err = 0;
- mem++;
- }
- total++;
- }
- if (!err) {
- z->spanned_pages += total;
- z->present_pages += mem;
- z->zone_pgdat->node_spanned_pages += total;
- z->zone_pgdat->node_present_pages += mem;
- }
- return err;
+ return 0;
}
#endif
@@ -549,9 +534,9 @@ int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
* Memory is added always to NORMAL zone. This means you will never get
* additional DMA/DMA32 memory.
*/
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
- struct pglist_data *pgdat = NODE_DATA(0);
+ struct pglist_data *pgdat = NODE_DATA(nid);
struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -568,7 +553,7 @@ error:
printk("%s: Problem encountered in __add_pages!\n", __func__);
return ret;
}
-EXPORT_SYMBOL_GPL(add_memory);
+EXPORT_SYMBOL_GPL(arch_add_memory);
int remove_memory(u64 start, u64 size)
{
@@ -576,7 +561,33 @@ int remove_memory(u64 start, u64 size)
}
EXPORT_SYMBOL_GPL(remove_memory);
-#endif
+#else /* CONFIG_MEMORY_HOTPLUG */
+/*
+ * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
+ * just online the pages.
+ */
+int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
+{
+ int err = -EIO;
+ unsigned long pfn;
+ unsigned long total = 0, mem = 0;
+ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
+ if (pfn_valid(pfn)) {
+ online_page(pfn_to_page(pfn));
+ err = 0;
+ mem++;
+ }
+ total++;
+ }
+ if (!err) {
+ z->spanned_pages += total;
+ z->present_pages += mem;
+ z->zone_pgdat->node_spanned_pages += total;
+ z->zone_pgdat->node_present_pages += mem;
+ }
+ return err;
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
kcore_vsyscall;
@@ -650,7 +661,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)(addr & ~(PAGE_SIZE-1)), 0xcc, PAGE_SIZE);
+ memset((void *)(addr & ~(PAGE_SIZE-1)),
+ POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
@@ -658,7 +670,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
void free_initmem(void)
{
- memset(__initdata_begin, 0xba, __initdata_end - __initdata_begin);
+ memset(__initdata_begin, POISON_FREE_INITDATA,
+ __initdata_end - __initdata_begin);
free_init_pages("unused kernel memory",
(unsigned long)(&__init_begin),
(unsigned long)(&__init_end));
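
Together with the ia64 hunk earlier, this turns add_memory() into a generic, node-aware entry point that calls a per-arch arch_add_memory() hook. The shape of such a hook, condensed from the x86_64 code above (zone selection and error handling simplified, function name illustrative):

#include <linux/mm.h>
#include <linux/memory_hotplug.h>

int example_arch_add_memory(int nid, u64 start, u64 size)
{
	/* The node is chosen by the caller now, not hard-coded to 0. */
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	return __add_pages(zone, start_pfn, nr_pages);
}
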
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 937d81f62f43e7..fe14909f45e057 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -29,7 +29,7 @@
extern volatile unsigned long wall_jiffies;
-spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 225d64d73f04c8..27e409089a7b98 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -461,7 +461,7 @@ void show_code(unsigned int *pc)
}
}
-spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c04422a502da8e..eee03a3876a3f2 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3403,7 +3403,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
}
-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
.notifier_call = blk_cpu_notify,
};
@@ -3541,9 +3541,7 @@ int __init blk_dev_init(void)
INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
- register_cpu_notifier(&blk_cpu_notifier);
-#endif
+ register_hotcpu_notifier(&blk_cpu_notifier);
blk_max_low_pfn = max_low_pfn;
blk_max_pfn = max_pfn;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 94b8d820c51229..610d2cc02cf8f5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -328,7 +328,7 @@ config ACPI_CONTAINER
config ACPI_HOTPLUG_MEMORY
tristate "Memory Hotplug"
depends on ACPI
- depends on MEMORY_HOTPLUG || X86_64
+ depends on MEMORY_HOTPLUG
default n
help
This driver adds supports for ACPI Memory Hotplug. This driver
diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c
index e0a95ba7237156..1012284ff4f7eb 100644
--- a/drivers/acpi/acpi_memhotplug.c
+++ b/drivers/acpi/acpi_memhotplug.c
@@ -57,6 +57,7 @@ MODULE_LICENSE("GPL");
static int acpi_memory_device_add(struct acpi_device *device);
static int acpi_memory_device_remove(struct acpi_device *device, int type);
+static int acpi_memory_device_start(struct acpi_device *device);
static struct acpi_driver acpi_memory_device_driver = {
.name = ACPI_MEMORY_DEVICE_DRIVER_NAME,
@@ -65,48 +66,79 @@ static struct acpi_driver acpi_memory_device_driver = {
.ops = {
.add = acpi_memory_device_add,
.remove = acpi_memory_device_remove,
+ .start = acpi_memory_device_start,
},
};
+struct acpi_memory_info {
+ struct list_head list;
+ u64 start_addr; /* Memory Range start physical addr */
+ u64 length; /* Memory Range length */
+ unsigned short caching; /* memory cache attribute */
+ unsigned short write_protect; /* memory read/write attribute */
+ unsigned int enabled:1;
+};
+
struct acpi_memory_device {
acpi_handle handle;
unsigned int state; /* State of the memory device */
- unsigned short caching; /* memory cache attribute */
- unsigned short write_protect; /* memory read/write attribute */
- u64 start_addr; /* Memory Range start physical addr */
- u64 length; /* Memory Range length */
+ struct list_head res_list;
};
+static acpi_status
+acpi_memory_get_resource(struct acpi_resource *resource, void *context)
+{
+ struct acpi_memory_device *mem_device = context;
+ struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *new;
+ acpi_status status;
+
+ status = acpi_resource_to_address64(resource, &address64);
+ if (ACPI_FAILURE(status) ||
+ (address64.resource_type != ACPI_MEMORY_RANGE))
+ return AE_OK;
+
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ /* Can we combine the resource range information? */
+ if ((info->caching == address64.info.mem.caching) &&
+ (info->write_protect == address64.info.mem.write_protect) &&
+ (info->start_addr + info->length == address64.minimum)) {
+ info->length += address64.address_length;
+ return AE_OK;
+ }
+ }
+
+ new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
+ if (!new)
+ return AE_ERROR;
+
+ INIT_LIST_HEAD(&new->list);
+ new->caching = address64.info.mem.caching;
+ new->write_protect = address64.info.mem.write_protect;
+ new->start_addr = address64.minimum;
+ new->length = address64.address_length;
+ list_add_tail(&new->list, &mem_device->res_list);
+
+ return AE_OK;
+}
+
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
acpi_status status;
- struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- struct acpi_resource *resource = NULL;
- struct acpi_resource_address64 address64;
+ struct acpi_memory_info *info, *n;
ACPI_FUNCTION_TRACE("acpi_memory_get_device_resources");
- /* Get the range from the _CRS */
- status = acpi_get_current_resources(mem_device->handle, &buffer);
- if (ACPI_FAILURE(status))
- return_VALUE(-EINVAL);
-
- resource = (struct acpi_resource *)buffer.pointer;
- status = acpi_resource_to_address64(resource, &address64);
- if (ACPI_SUCCESS(status)) {
- if (address64.resource_type == ACPI_MEMORY_RANGE) {
- /* Populate the structure */
- mem_device->caching = address64.info.mem.caching;
- mem_device->write_protect =
- address64.info.mem.write_protect;
- mem_device->start_addr = address64.minimum;
- mem_device->length = address64.address_length;
- }
+ status = acpi_walk_resources(mem_device->handle, METHOD_NAME__CRS,
+ acpi_memory_get_resource, mem_device);
+ if (ACPI_FAILURE(status)) {
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list)
+ kfree(info);
+ return -EINVAL;
}
- acpi_os_free(buffer.pointer);
- return_VALUE(0);
+ return 0;
}
static int
@@ -181,7 +213,9 @@ static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
- int result;
+ int result, num_enabled = 0;
+ struct acpi_memory_info *info;
+ int node;
ACPI_FUNCTION_TRACE("acpi_memory_enable_device");
@@ -194,15 +228,35 @@ static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
return result;
}
+ node = acpi_get_node(mem_device->handle);
/*
* Tell the VM there is more memory here...
* Note: Assume that this function returns zero on success
+ * We don't have a memory hot-add rollback function yet
+ * (i.e. a memory hot-remove function).

*/
- result = add_memory(mem_device->start_addr, mem_device->length);
- if (result) {
+ list_for_each_entry(info, &mem_device->res_list, list) {
+ u64 start_pfn, end_pfn;
+
+ start_pfn = info->start_addr >> PAGE_SHIFT;
+ end_pfn = (info->start_addr + info->length - 1) >> PAGE_SHIFT;
+
+ if (pfn_valid(start_pfn) || pfn_valid(end_pfn)) {
+ /* already enabled. try next area */
+ num_enabled++;
+ continue;
+ }
+
+ result = add_memory(node, info->start_addr, info->length);
+ if (result)
+ continue;
+ info->enabled = 1;
+ num_enabled++;
+ }
+ if (!num_enabled) {
ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "\nadd_memory failed\n"));
mem_device->state = MEMORY_INVALID_STATE;
- return result;
+ return -EINVAL;
}
return result;
@@ -246,8 +300,7 @@ static int acpi_memory_powerdown_device(struct acpi_memory_device *mem_device)
static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
{
int result;
- u64 start = mem_device->start_addr;
- u64 len = mem_device->length;
+ struct acpi_memory_info *info, *n;
ACPI_FUNCTION_TRACE("acpi_memory_disable_device");
@@ -255,10 +308,13 @@ static int acpi_memory_disable_device(struct acpi_memory_device *mem_device)
* Ask the VM to offline this memory range.
* Note: Assume that this function returns zero on success
*/
- result = remove_memory(start, len);
- if (result) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Hot-Remove failed.\n"));
- return_VALUE(result);
+ list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
+ if (info->enabled) {
+ result = remove_memory(info->start_addr, info->length);
+ if (result)
+ return result;
+ }
+ kfree(info);
}
/* Power-off and eject the device */
@@ -356,6 +412,7 @@ static int acpi_memory_device_add(struct acpi_device *device)
return_VALUE(-ENOMEM);
memset(mem_device, 0, sizeof(struct acpi_memory_device));
+ INIT_LIST_HEAD(&mem_device->res_list);
mem_device->handle = device->handle;
sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
@@ -391,6 +448,25 @@ static int acpi_memory_device_remove(struct acpi_device *device, int type)
return_VALUE(0);
}
+static int acpi_memory_device_start (struct acpi_device *device)
+{
+ struct acpi_memory_device *mem_device;
+ int result = 0;
+
+ ACPI_FUNCTION_TRACE("acpi_memory_device_start");
+
+ mem_device = acpi_driver_data(device);
+
+ if (!acpi_memory_check_device(mem_device)) {
+ /* call add_memory func */
+ result = acpi_memory_enable_device(mem_device);
+ if (result)
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
+ "Error in acpi_memory_enable_device\n"));
+ }
+ return_VALUE(result);
+}
+
/*
* Helper function to check for memory device
*/
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index e2c1a16078c990..13d6d5bdea264f 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -254,5 +254,18 @@ int acpi_get_pxm(acpi_handle h)
} while (ACPI_SUCCESS(status));
return -1;
}
-
EXPORT_SYMBOL(acpi_get_pxm);
+
+int acpi_get_node(acpi_handle *handle)
+{
+ int pxm, node = -1;
+
+ ACPI_FUNCTION_TRACE("acpi_get_node");
+
+ pxm = acpi_get_pxm(handle);
+ if (pxm >= 0)
+ node = acpi_map_pxm_to_node(pxm);
+
+ return_VALUE(node);
+}
+EXPORT_SYMBOL(acpi_get_node);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index f2eeaf9dc56a4f..1bca86edf5708b 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -33,6 +33,7 @@
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/errno.h>
#include <linux/atm.h>
#include <linux/atmdev.h>
@@ -754,7 +755,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
fs_kfree_skb (skb);
fs_dprintk (FS_DEBUG_ALLOC, "Free trans-d: %p\n", td);
- memset (td, 0x12, sizeof (struct FS_BPENTRY));
+ memset (td, ATM_POISON_FREE, sizeof(struct FS_BPENTRY));
kfree (td);
break;
default:
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index dd712b24ec9190..4bef76a2f3f2a7 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -8,6 +8,7 @@
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
+#include <linux/node.h>
#include "base.h"
@@ -57,13 +58,12 @@ static void __devinit register_cpu_control(struct cpu *cpu)
{
sysdev_create_file(&cpu->sysdev, &attr_online);
}
-void unregister_cpu(struct cpu *cpu, struct node *root)
+void unregister_cpu(struct cpu *cpu)
{
int logical_cpu = cpu->sysdev.id;
- if (root)
- sysfs_remove_link(&root->sysdev.kobj,
- kobject_name(&cpu->sysdev.kobj));
+ unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
+
sysdev_remove_file(&cpu->sysdev, &attr_online);
sysdev_unregister(&cpu->sysdev);
@@ -109,23 +109,21 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*
* Initialize and register the CPU device.
*/
-int __devinit register_cpu(struct cpu *cpu, int num, struct node *root)
+int __devinit register_cpu(struct cpu *cpu, int num)
{
int error;
-
cpu->node_id = cpu_to_node(num);
cpu->sysdev.id = num;
cpu->sysdev.cls = &cpu_sysdev_class;
error = sysdev_register(&cpu->sysdev);
- if (!error && root)
- error = sysfs_create_link(&root->sysdev.kobj,
- &cpu->sysdev.kobj,
- kobject_name(&cpu->sysdev.kobj));
+
if (!error && !cpu->no_control)
register_cpu_control(cpu);
if (!error)
cpu_sys_devices[num] = &cpu->sysdev;
+ if (!error)
+ register_cpu_under_node(num, cpu_to_node(num));
#ifdef CONFIG_KEXEC
if (!error)
@@ -145,5 +143,13 @@ EXPORT_SYMBOL_GPL(get_cpu_sysdev);
int __init cpu_dev_init(void)
{
- return sysdev_class_register(&cpu_sysdev_class);
+ int err;
+
+ err = sysdev_class_register(&cpu_sysdev_class);
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ if (!err)
+ err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
+#endif
+
+ return err;
}
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index e2f64f91ed0558..33c5cce1560b26 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -7,6 +7,7 @@
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/poison.h>
/*
* Pool allocator ... wraps the dma_alloc_coherent page allocator, so
@@ -35,8 +36,6 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
};
#define POOL_TIMEOUT_JIFFIES ((100 /* msec */ * HZ) / 1000)
-#define POOL_POISON_FREED 0xa7 /* !inuse */
-#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
static DECLARE_MUTEX (pools_lock);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dd547af4681a50..c6b7d9c4b65115 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -306,11 +306,13 @@ static ssize_t
memory_probe_store(struct class *class, const char *buf, size_t count)
{
u64 phys_addr;
+ int nid;
int ret;
phys_addr = simple_strtoull(buf, NULL, 0);
- ret = add_memory(phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
+ nid = memory_add_physaddr_to_nid(phys_addr);
+ ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
if (ret)
count = ret;
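
This is where the new memory_add_physaddr_to_nid() helper and the node-aware add_memory() meet: the sysfs probe interface only supplies a physical address, so the node is derived from it (x86_64 currently just returns node 0). The flow, reduced to its essentials with an illustrative wrapper name:

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

static int example_probe_section(u64 phys_addr)
{
	/* Resolve the node from the address, then hot-add one section. */
	int nid = memory_add_physaddr_to_nid(phys_addr);

	return add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT);
}
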
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c80c3aeed004a5..eae2bdc183bb77 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -11,6 +11,7 @@
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/nodemask.h>
+#include <linux/cpu.h>
static struct sysdev_class node_class = {
set_kset_name("node"),
@@ -190,6 +191,66 @@ void unregister_node(struct node *node)
sysdev_unregister(&node->sysdev);
}
+struct node node_devices[MAX_NUMNODES];
+
+/*
+ * register cpu under node
+ */
+int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ if (node_online(nid)) {
+ struct sys_device *obj = get_cpu_sysdev(cpu);
+ if (!obj)
+ return 0;
+ return sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ &obj->kobj,
+ kobject_name(&obj->kobj));
+ }
+
+ return 0;
+}
+
+int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ if (node_online(nid)) {
+ struct sys_device *obj = get_cpu_sysdev(cpu);
+ if (obj)
+ sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ kobject_name(&obj->kobj));
+ }
+ return 0;
+}
+
+int register_one_node(int nid)
+{
+ int error = 0;
+ int cpu;
+
+ if (node_online(nid)) {
+ int p_node = parent_node(nid);
+ struct node *parent = NULL;
+
+ if (p_node != nid)
+ parent = &node_devices[p_node];
+
+ error = register_node(&node_devices[nid], nid, parent);
+
+ /* link cpu under this node */
+ for_each_present_cpu(cpu) {
+ if (cpu_to_node(cpu) == nid)
+ register_cpu_under_node(cpu, nid);
+ }
+ }
+
+ return error;
+
+}
+
+void unregister_one_node(int nid)
+{
+ unregister_node(&node_devices[nid]);
+}
+
static int __init register_node_type(void)
{
return sysdev_class_register(&node_class);
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 8c52421cbc545b..c2d62163238330 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -107,7 +107,7 @@ static int __cpuinit topology_remove_dev(struct sys_device * sys_dev)
return 0;
}
-static int topology_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -125,7 +125,7 @@ static int topology_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block topology_cpu_notifier =
+static struct notifier_block __cpuinitdata topology_cpu_notifier =
{
.notifier_call = topology_cpu_callback,
};
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 3610c57295533c..410d70cb76fbee 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -939,12 +939,35 @@ config MWAVE
config SCx200_GPIO
tristate "NatSemi SCx200 GPIO Support"
depends on SCx200
+ select NSC_GPIO
help
Give userspace access to the GPIO pins on the National
Semiconductor SCx200 processors.
If compiled as a module, it will be called scx200_gpio.
+config PC8736x_GPIO
+ tristate "NatSemi PC8736x GPIO Support"
+ depends on X86
+ default SCx200_GPIO # mostly N
+ select NSC_GPIO # needed for support routines
+ help
+ Give userspace access to the GPIO pins on the National
+ Semiconductor PC-8736x (x=[03456]) SuperIO chip. The chip
+ has multiple functional units, including several managed by
+ the hwmon/pc87360 driver. Tested with the PC-87366.
+
+ If compiled as a module, it will be called pc8736x_gpio.
+
+config NSC_GPIO
+ tristate "NatSemi Base GPIO Support"
+ # selected by SCx200_GPIO and PC8736x_GPIO
+ # what about 2 selectors differing: m != y
+ help
+ Common support used (and needed) by the scx200_gpio and
+ pc8736x_gpio drivers. If those drivers are built as
+ modules, this one will be too, named nsc_gpio.
+
config CS5535_GPIO
tristate "AMD CS5535/CS5536 GPIO (Geode Companion Device)"
depends on X86_32
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 524105597ea7d7..6e0f4469d8bbdb 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -82,6 +82,8 @@ obj-$(CONFIG_PPDEV) += ppdev.o
obj-$(CONFIG_NWBUTTON) += nwbutton.o
obj-$(CONFIG_NWFLASH) += nwflash.o
obj-$(CONFIG_SCx200_GPIO) += scx200_gpio.o
+obj-$(CONFIG_PC8736x_GPIO) += pc8736x_gpio.o
+obj-$(CONFIG_NSC_GPIO) += nsc_gpio.o
obj-$(CONFIG_CS5535_GPIO) += cs5535_gpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_TANBAC_TB0219) += tb0219.o
diff --git a/drivers/char/agp/sgi-agp.c b/drivers/char/agp/sgi-agp.c
index cfa7922cb431f5..d73be4c2db8a9c 100644
--- a/drivers/char/agp/sgi-agp.c
+++ b/drivers/char/agp/sgi-agp.c
@@ -329,9 +329,8 @@ static int __devinit agp_sgi_init(void)
static void __devexit agp_sgi_cleanup(void)
{
- if (sgi_tioca_agp_bridges)
- kfree(sgi_tioca_agp_bridges);
- sgi_tioca_agp_bridges=NULL;
+ kfree(sgi_tioca_agp_bridges);
+ sgi_tioca_agp_bridges = NULL;
}
module_init(agp_sgi_init);
diff --git a/drivers/char/drm/drm_memory_debug.h b/drivers/char/drm/drm_memory_debug.h
index 6543b9a14c42e6..d117cc9971922f 100644
--- a/drivers/char/drm/drm_memory_debug.h
+++ b/drivers/char/drm/drm_memory_debug.h
@@ -43,7 +43,7 @@ typedef struct drm_mem_stats {
unsigned long bytes_freed;
} drm_mem_stats_t;
-static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(drm_mem_lock);
static unsigned long drm_ram_available = 0; /* In pages */
static unsigned long drm_ram_used = 0;
static drm_mem_stats_t drm_mem_stats[] =
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
index b7f17457b4243b..78a81a4a99c5c6 100644
--- a/drivers/char/drm/via_dmablit.c
+++ b/drivers/char/drm/via_dmablit.c
@@ -557,7 +557,7 @@ via_init_dmablit(drm_device_t *dev)
blitq->num_outstanding = 0;
blitq->is_active = 0;
blitq->aborting = 0;
- blitq->blit_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&blitq->blit_lock);
for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
}
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index 9cad8501d62c9f..dc0602ae850304 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -80,7 +80,7 @@ static int invalid_lilo_config;
/* The ISA boards do window flipping into the same spaces so its only sane
with a single lock. It's still pretty efficient */
-static spinlock_t epca_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(epca_lock);
/* -----------------------------------------------------------------------
MAXBOARDS is typically 12, but ISA and EISA cards are restricted to
diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
index 8d97b3911293bf..afa26b65dac3b7 100644
--- a/drivers/char/hvcs.c
+++ b/drivers/char/hvcs.c
@@ -1320,11 +1320,12 @@ static struct tty_operations hvcs_ops = {
static int hvcs_alloc_index_list(int n)
{
int i;
+
hvcs_index_list = kmalloc(n * sizeof(hvcs_index_count),GFP_KERNEL);
if (!hvcs_index_list)
return -ENOMEM;
hvcs_index_count = n;
- for(i = 0; i < hvcs_index_count; i++)
+ for (i = 0; i < hvcs_index_count; i++)
hvcs_index_list[i] = -1;
return 0;
}
@@ -1332,11 +1333,9 @@ static int hvcs_alloc_index_list(int n)
static void hvcs_free_index_list(void)
{
/* Paranoia check to be thorough. */
- if (hvcs_index_list) {
- kfree(hvcs_index_list);
- hvcs_index_list = NULL;
- hvcs_index_count = 0;
- }
+ kfree(hvcs_index_list);
+ hvcs_index_list = NULL;
+ hvcs_index_count = 0;
}
static int __init hvcs_module_init(void)
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index b03ddab1bef57b..83ed6ae466a56c 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -57,8 +57,7 @@ static int ipmi_init_msghandler(void);
static int initialized = 0;
#ifdef CONFIG_PROC_FS
-struct proc_dir_entry *proc_ipmi_root = NULL;
-EXPORT_SYMBOL(proc_ipmi_root);
+static struct proc_dir_entry *proc_ipmi_root = NULL;
#endif /* CONFIG_PROC_FS */
#define MAX_EVENTS_IN_QUEUE 25
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 02a7dd7a8a5557..101c14b9b26de4 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -809,7 +809,7 @@ static int ipmi_thread(void *data)
/* do nothing */
}
else if (smi_result == SI_SM_CALL_WITH_DELAY)
- udelay(1);
+ schedule();
else
schedule_timeout_interruptible(1);
}
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index f43c2e04eadd38..01247cccb89f26 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -301,7 +301,7 @@ static struct tty_operations moxa_ops = {
.tiocmset = moxa_tiocmset,
};
-static spinlock_t moxa_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(moxa_lock);
#ifdef CONFIG_PCI
static int moxa_get_PCI_conf(struct pci_dev *p, int board_type, moxa_board_conf * board)
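
The SPIN_LOCK_UNLOCKED conversions in this series follow two idioms: a lock at file scope is switched to DEFINE_SPINLOCK(), while a lock embedded in a dynamically allocated structure (as in via_dmablit.c above and sx.c below) is initialized at run time with spin_lock_init(). A minimal sketch of both, where struct foo and alloc_foo() are hypothetical placeholders:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	/* file-scope lock: compile-time initialization */
	static DEFINE_SPINLOCK(static_lock);		/* replaces "= SPIN_LOCK_UNLOCKED" */

	struct foo {
		spinlock_t lock;			/* lock inside an allocated object */
	};

	static struct foo *alloc_foo(void)
	{
		struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

		if (f)
			spin_lock_init(&f->lock);	/* run-time initialization */
		return f;
	}
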
diff --git a/drivers/char/nsc_gpio.c b/drivers/char/nsc_gpio.c
new file mode 100644
index 00000000000000..5b91e4e25641d2
--- /dev/null
+++ b/drivers/char/nsc_gpio.c
@@ -0,0 +1,142 @@
+/* linux/drivers/char/nsc_gpio.c
+
+ National Semiconductor common GPIO device-file/VFS methods.
+ Allows a user space process to control the GPIO pins.
+
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+*/
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#define NAME "nsc_gpio"
+
+void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index)
+{
+ /* retrieve current config w/o changing it */
+ u32 config = amp->gpio_config(index, ~0, 0);
+
+ /* user requested via 'v' command, so log at INFO level */
+ dev_info(amp->dev, "io%02u: 0x%04x %s %s %s %s %s %s %s\tio:%d/%d\n",
+ index, config,
+ (config & 1) ? "OE" : "TS", /* output-enabled/tristate */
+ (config & 2) ? "PP" : "OD", /* push pull / open drain */
+ (config & 4) ? "PUE" : "PUD", /* pull up enabled/disabled */
+ (config & 8) ? "LOCKED" : "", /* locked / unlocked */
+ (config & 16) ? "LEVEL" : "EDGE",/* level/edge input */
+ (config & 32) ? "HI" : "LO", /* trigger on rise/fall edge */
+ (config & 64) ? "DEBOUNCE" : "", /* debounce */
+
+ amp->gpio_get(index), amp->gpio_current(index));
+}
+
+ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos)
+{
+ unsigned m = iminor(file->f_dentry->d_inode);
+ struct nsc_gpio_ops *amp = file->private_data;
+ struct device *dev = amp->dev;
+ size_t i;
+ int err = 0;
+
+ for (i = 0; i < len; ++i) {
+ char c;
+ if (get_user(c, data + i))
+ return -EFAULT;
+ switch (c) {
+ case '0':
+ amp->gpio_set(m, 0);
+ break;
+ case '1':
+ amp->gpio_set(m, 1);
+ break;
+ case 'O':
+ dev_dbg(dev, "GPIO%d output enabled\n", m);
+ amp->gpio_config(m, ~1, 1);
+ break;
+ case 'o':
+ dev_dbg(dev, "GPIO%d output disabled\n", m);
+ amp->gpio_config(m, ~1, 0);
+ break;
+ case 'T':
+ dev_dbg(dev, "GPIO%d output is push pull\n",
+ m);
+ amp->gpio_config(m, ~2, 2);
+ break;
+ case 't':
+ dev_dbg(dev, "GPIO%d output is open drain\n",
+ m);
+ amp->gpio_config(m, ~2, 0);
+ break;
+ case 'P':
+ dev_dbg(dev, "GPIO%d pull up enabled\n", m);
+ amp->gpio_config(m, ~4, 4);
+ break;
+ case 'p':
+ dev_dbg(dev, "GPIO%d pull up disabled\n", m);
+ amp->gpio_config(m, ~4, 0);
+ break;
+ case 'v':
+ /* View Current pin settings */
+ amp->gpio_dump(amp, m);
+ break;
+ case '\n':
+ /* end of settings string, do nothing */
+ break;
+ default:
+ dev_err(dev, "io%2d bad setting: chr<0x%2x>\n",
+ m, (int)c);
+ err++;
+ }
+ }
+ if (err)
+ return -EINVAL; /* full string handled, report error */
+
+ return len;
+}
+
+ssize_t nsc_gpio_read(struct file *file, char __user * buf,
+ size_t len, loff_t * ppos)
+{
+ unsigned m = iminor(file->f_dentry->d_inode);
+ int value;
+ struct nsc_gpio_ops *amp = file->private_data;
+
+ value = amp->gpio_get(m);
+ if (put_user(value ? '1' : '0', buf))
+ return -EFAULT;
+
+ return 1;
+}
+
+/* common file-ops routines for both scx200_gpio and pc87360_gpio */
+EXPORT_SYMBOL(nsc_gpio_write);
+EXPORT_SYMBOL(nsc_gpio_read);
+EXPORT_SYMBOL(nsc_gpio_dump);
+
+static int __init nsc_gpio_init(void)
+{
+ printk(KERN_DEBUG NAME " initializing\n");
+ return 0;
+}
+
+static void __exit nsc_gpio_cleanup(void)
+{
+ printk(KERN_DEBUG NAME " cleanup\n");
+}
+
+module_init(nsc_gpio_init);
+module_exit(nsc_gpio_cleanup);
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi GPIO Common Methods");
+MODULE_LICENSE("GPL");
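
For illustration, a hypothetical user-space sketch of the single-character command protocol that nsc_gpio_write() and nsc_gpio_read() implement. The /dev/gpio07 path and its minor-to-pin mapping are assumptions; the node has to be created by hand for the driver's (possibly dynamically assigned) major number, with the minor selecting the pin:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char val;
		int fd = open("/dev/gpio07", O_RDWR);	/* hypothetical node, minor 7 */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		write(fd, "O1", 2);	/* 'O' enables output, '1' drives the pin high */
		write(fd, "v", 1);	/* 'v' dumps the pin config to the kernel log */
		read(fd, &val, 1);	/* returns '0' or '1' for the current level */
		printf("pin reads %c\n", val);
		close(fd);
		return 0;
	}
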
diff --git a/drivers/char/pc8736x_gpio.c b/drivers/char/pc8736x_gpio.c
new file mode 100644
index 00000000000000..1c706ccfdbb362
--- /dev/null
+++ b/drivers/char/pc8736x_gpio.c
@@ -0,0 +1,340 @@
+/* linux/drivers/char/pc8736x_gpio.c
+
+ National Semiconductor PC8736x GPIO driver. Allows a user space
+ process to play with the GPIO pins.
+
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+
+ adapted from linux/drivers/char/scx200_gpio.c
+ Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
+*/
+
+#include <linux/config.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/mutex.h>
+#include <linux/nsc_gpio.h>
+#include <linux/platform_device.h>
+#include <asm/uaccess.h>
+
+#define DEVNAME "pc8736x_gpio"
+
+MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
+MODULE_DESCRIPTION("NatSemi PC-8736x GPIO Pin Driver");
+MODULE_LICENSE("GPL");
+
+static int major; /* default to dynamic major */
+module_param(major, int, 0);
+MODULE_PARM_DESC(major, "Major device number");
+
+static DEFINE_MUTEX(pc8736x_gpio_config_lock);
+static unsigned pc8736x_gpio_base;
+static u8 pc8736x_gpio_shadow[4];
+
+#define SIO_BASE1 0x2E /* 1st command-reg to check */
+#define SIO_BASE2 0x4E /* alt command-reg to check */
+#define SIO_BASE_OFFSET 0x20
+
+#define SIO_SID 0x20 /* SuperI/O ID Register */
+#define SIO_SID_VALUE 0xe9 /* Expected value in SuperI/O ID Register */
+
+#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
+
+#define PC8736X_GPIO_SIZE 16
+
+#define SIO_UNIT_SEL 0x7 /* unit select reg */
+#define SIO_UNIT_ACT 0x30 /* unit enable */
+#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
+#define SIO_VLM_UNIT 0x0D
+#define SIO_TMS_UNIT 0x0E
+
+/* config-space addrs to read/write each unit's runtime addr */
+#define SIO_BASE_HADDR 0x60
+#define SIO_BASE_LADDR 0x61
+
+/* GPIO config-space pin-control addresses */
+#define SIO_GPIO_PIN_SELECT 0xF0
+#define SIO_GPIO_PIN_CONFIG 0xF1
+#define SIO_GPIO_PIN_EVENT 0xF2
+
+static unsigned char superio_cmd = 0;
+static unsigned char selected_device = 0xFF; /* bogus start val */
+
+/* GPIO port runtime access, functionality */
+static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
+/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
+
+#define PORT_OUT 0
+#define PORT_IN 1
+#define PORT_EVT_EN 2
+#define PORT_EVT_STST 3
+
+static struct platform_device *pdev; /* use in dev_*() */
+
+static inline void superio_outb(int addr, int val)
+{
+ outb_p(addr, superio_cmd);
+ outb_p(val, superio_cmd + 1);
+}
+
+static inline int superio_inb(int addr)
+{
+ outb_p(addr, superio_cmd);
+ return inb_p(superio_cmd + 1);
+}
+
+static int pc8736x_superio_present(void)
+{
+ /* try the 2 possible values, read a hardware reg to verify */
+ superio_cmd = SIO_BASE1;
+ if (superio_inb(SIO_SID) == SIO_SID_VALUE)
+ return superio_cmd;
+
+ superio_cmd = SIO_BASE2;
+ if (superio_inb(SIO_SID) == SIO_SID_VALUE)
+ return superio_cmd;
+
+ return 0;
+}
+
+static void device_select(unsigned devldn)
+{
+ superio_outb(SIO_UNIT_SEL, devldn);
+ selected_device = devldn;
+}
+
+static void select_pin(unsigned iminor)
+{
+ /* select GPIO port/pin from device minor number */
+ device_select(SIO_GPIO_UNIT);
+ superio_outb(SIO_GPIO_PIN_SELECT,
+ ((iminor << 1) & 0xF0) | (iminor & 0x7));
+}
+
+static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
+ u32 func_slct)
+{
+ u32 config, new_config;
+
+ mutex_lock(&pc8736x_gpio_config_lock);
+
+ device_select(SIO_GPIO_UNIT);
+ select_pin(index);
+
+ /* read current config value */
+ config = superio_inb(func_slct);
+
+ /* set new config */
+ new_config = (config & mask) | bits;
+ superio_outb(func_slct, new_config);
+
+ mutex_unlock(&pc8736x_gpio_config_lock);
+
+ return config;
+}
+
+static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
+{
+ return pc8736x_gpio_configure_fn(index, mask, bits,
+ SIO_GPIO_PIN_CONFIG);
+}
+
+static int pc8736x_gpio_get(unsigned minor)
+{
+ int port, bit, val;
+
+ port = minor >> 3;
+ bit = minor & 7;
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+ val >>= bit;
+ val &= 1;
+
+ dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
+ minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
+ val);
+
+ return val;
+}
+
+static void pc8736x_gpio_set(unsigned minor, int val)
+{
+ int port, bit, curval;
+
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
+ pc8736x_gpio_base + port_offset[port] + PORT_OUT,
+ curval, bit, (curval & ~(1 << bit)), val, (val << bit));
+
+ val = (curval & ~(1 << bit)) | (val << bit);
+
+ dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
+ " %2x -> %2x\n", minor, port, bit, curval, val);
+
+ outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+
+ curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
+ val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
+
+ dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
+ pc8736x_gpio_shadow[port] = val;
+}
+
+static void pc8736x_gpio_set_high(unsigned index)
+{
+ pc8736x_gpio_set(index, 1);
+}
+
+static void pc8736x_gpio_set_low(unsigned index)
+{
+ pc8736x_gpio_set(index, 0);
+}
+
+static int pc8736x_gpio_current(unsigned minor)
+{
+ int port, bit;
+ minor &= 0x1f;
+ port = minor >> 3;
+ bit = minor & 7;
+ return ((pc8736x_gpio_shadow[port] >> bit) & 0x01);
+}
+
+static void pc8736x_gpio_change(unsigned index)
+{
+ pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
+}
+
+static struct nsc_gpio_ops pc8736x_access = {
+ .owner = THIS_MODULE,
+ .gpio_config = pc8736x_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = pc8736x_gpio_get,
+ .gpio_set = pc8736x_gpio_set,
+ .gpio_set_high = pc8736x_gpio_set_high,
+ .gpio_set_low = pc8736x_gpio_set_low,
+ .gpio_change = pc8736x_gpio_change,
+ .gpio_current = pc8736x_gpio_current
+};
+
+static int pc8736x_gpio_open(struct inode *inode, struct file *file)
+{
+ unsigned m = iminor(inode);
+ file->private_data = &pc8736x_access;
+
+ dev_dbg(&pdev->dev, "open %d\n", m);
+
+ if (m > 63)
+ return -EINVAL;
+ return nonseekable_open(inode, file);
+}
+
+static struct file_operations pc8736x_gpio_fops = {
+ .owner = THIS_MODULE,
+ .open = pc8736x_gpio_open,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
+};
+
+static void __init pc8736x_init_shadow(void)
+{
+ int port;
+
+ /* read the current values driven on the GPIO signals */
+ for (port = 0; port < 4; ++port)
+ pc8736x_gpio_shadow[port]
+ = inb_p(pc8736x_gpio_base + port_offset[port]
+ + PORT_OUT);
+
+}
+
+static int __init pc8736x_gpio_init(void)
+{
+ int rc = 0;
+
+ pdev = platform_device_alloc(DEVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc) {
+ rc = -ENODEV;
+ goto undo_platform_dev_alloc;
+ }
+ dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");
+
+ if (!pc8736x_superio_present()) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "no device found\n");
+ goto undo_platform_dev_add;
+ }
+ pc8736x_access.dev = &pdev->dev;
+
+ /* Verify that the chip and its GPIO unit are both enabled.
+ My BIOS already does this, so only minimal checks are done here.
+ */
+ rc = superio_inb(SIO_CF1);
+ if (!(rc & 0x01)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "device not enabled\n");
+ goto undo_platform_dev_add;
+ }
+ device_select(SIO_GPIO_UNIT);
+ if (!superio_inb(SIO_UNIT_ACT)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO unit not enabled\n");
+ goto undo_platform_dev_add;
+ }
+
+ /* read the GPIO unit base addr that chip responds to */
+ pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
+ | superio_inb(SIO_BASE_LADDR));
+
+ if (!request_region(pc8736x_gpio_base, 16, DEVNAME)) {
+ rc = -ENODEV;
+ dev_err(&pdev->dev, "GPIO ioport %x busy\n",
+ pc8736x_gpio_base);
+ goto undo_platform_dev_add;
+ }
+ dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);
+
+ rc = register_chrdev(major, DEVNAME, &pc8736x_gpio_fops);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
+ goto undo_platform_dev_add;
+ }
+ if (!major) {
+ major = rc;
+ dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
+ }
+
+ pc8736x_init_shadow();
+ return 0;
+
+undo_platform_dev_add:
+ platform_device_del(pdev);
+undo_platform_dev_alloc:
+ platform_device_put(pdev); /* refcounted device: put, don't kfree */
+ return rc;
+}
+
+static void __exit pc8736x_gpio_cleanup(void)
+{
+ dev_dbg(&pdev->dev, " cleanup\n");
+
+ release_region(pc8736x_gpio_base, 16);
+ unregister_chrdev(major, DEVNAME);
+ platform_device_unregister(pdev);
+}
+
+EXPORT_SYMBOL(pc8736x_access);
+
+module_init(pc8736x_gpio_init);
+module_exit(pc8736x_gpio_cleanup);
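
A worked example of the minor-number decoding that pc8736x_gpio_get() and pc8736x_gpio_set() perform; it compiles stand-alone in user space, and the 0x6600 base address is a made-up value, since the real one is read from the SuperI/O config registers at init time:

	#include <stdio.h>

	#define PORT_IN 1
	static const int port_offset[] = { 0, 4, 8, 10 };	/* same table as the driver */

	int main(void)
	{
		unsigned gpio_base = 0x6600;	/* hypothetical runtime base address */
		unsigned minor = 13;
		unsigned port = minor >> 3;	/* 13 / 8 -> port 1 */
		unsigned bit = minor & 7;	/* 13 % 8 -> bit 5 */

		printf("minor %u -> port %u, bit %u, input reg 0x%x\n",
		       minor, port, bit, gpio_base + port_offset[port] + PORT_IN);
		return 0;
	}
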
diff --git a/drivers/char/scx200_gpio.c b/drivers/char/scx200_gpio.c
index 664a6e97eb1a80..5a280a330401f7 100644
--- a/drivers/char/scx200_gpio.c
+++ b/drivers/char/scx200_gpio.c
@@ -1,4 +1,4 @@
-/* linux/drivers/char/scx200_gpio.c
+/* linux/drivers/char/scx200_gpio.c
National Semiconductor SCx200 GPIO driver. Allows a user space
process to play with the GPIO pins.
@@ -6,17 +6,26 @@
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com> */
#include <linux/config.h>
+#include <linux/device.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
+#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+
#include <linux/scx200_gpio.h>
+#include <linux/nsc_gpio.h>
#define NAME "scx200_gpio"
+#define DEVNAME NAME
+
+static struct platform_device *pdev;
MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
MODULE_DESCRIPTION("NatSemi SCx200 GPIO Pin Driver");
@@ -26,70 +35,23 @@ static int major = 0; /* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");
-static ssize_t scx200_gpio_write(struct file *file, const char __user *data,
- size_t len, loff_t *ppos)
-{
- unsigned m = iminor(file->f_dentry->d_inode);
- size_t i;
-
- for (i = 0; i < len; ++i) {
- char c;
- if (get_user(c, data+i))
- return -EFAULT;
- switch (c)
- {
- case '0':
- scx200_gpio_set(m, 0);
- break;
- case '1':
- scx200_gpio_set(m, 1);
- break;
- case 'O':
- printk(KERN_INFO NAME ": GPIO%d output enabled\n", m);
- scx200_gpio_configure(m, ~1, 1);
- break;
- case 'o':
- printk(KERN_INFO NAME ": GPIO%d output disabled\n", m);
- scx200_gpio_configure(m, ~1, 0);
- break;
- case 'T':
- printk(KERN_INFO NAME ": GPIO%d output is push pull\n", m);
- scx200_gpio_configure(m, ~2, 2);
- break;
- case 't':
- printk(KERN_INFO NAME ": GPIO%d output is open drain\n", m);
- scx200_gpio_configure(m, ~2, 0);
- break;
- case 'P':
- printk(KERN_INFO NAME ": GPIO%d pull up enabled\n", m);
- scx200_gpio_configure(m, ~4, 4);
- break;
- case 'p':
- printk(KERN_INFO NAME ": GPIO%d pull up disabled\n", m);
- scx200_gpio_configure(m, ~4, 0);
- break;
- }
- }
-
- return len;
-}
-
-static ssize_t scx200_gpio_read(struct file *file, char __user *buf,
- size_t len, loff_t *ppos)
-{
- unsigned m = iminor(file->f_dentry->d_inode);
- int value;
-
- value = scx200_gpio_get(m);
- if (put_user(value ? '1' : '0', buf))
- return -EFAULT;
-
- return 1;
-}
+struct nsc_gpio_ops scx200_access = {
+ .owner = THIS_MODULE,
+ .gpio_config = scx200_gpio_configure,
+ .gpio_dump = nsc_gpio_dump,
+ .gpio_get = scx200_gpio_get,
+ .gpio_set = scx200_gpio_set,
+ .gpio_set_high = scx200_gpio_set_high,
+ .gpio_set_low = scx200_gpio_set_low,
+ .gpio_change = scx200_gpio_change,
+ .gpio_current = scx200_gpio_current
+};
static int scx200_gpio_open(struct inode *inode, struct file *file)
{
unsigned m = iminor(inode);
+ file->private_data = &scx200_access;
+
if (m > 63)
return -EINVAL;
return nonseekable_open(inode, file);
@@ -103,47 +65,81 @@ static int scx200_gpio_release(struct inode *inode, struct file *file)
static struct file_operations scx200_gpio_fops = {
.owner = THIS_MODULE,
- .write = scx200_gpio_write,
- .read = scx200_gpio_read,
+ .write = nsc_gpio_write,
+ .read = nsc_gpio_read,
.open = scx200_gpio_open,
.release = scx200_gpio_release,
};
+struct cdev *scx200_devices;
+static int num_pins = 32;
+
static int __init scx200_gpio_init(void)
{
- int r;
-
- printk(KERN_DEBUG NAME ": NatSemi SCx200 GPIO Driver\n");
+ int rc, i;
+ dev_t dev = MKDEV(major, 0);
if (!scx200_gpio_present()) {
- printk(KERN_ERR NAME ": no SCx200 gpio pins available\n");
+ printk(KERN_ERR NAME ": no SCx200 gpio present\n");
return -ENODEV;
}
- r = register_chrdev(major, NAME, &scx200_gpio_fops);
- if (r < 0) {
- printk(KERN_ERR NAME ": unable to register character device\n");
- return r;
+ /* support dev_dbg() with pdev->dev */
+ pdev = platform_device_alloc(DEVNAME, 0);
+ if (!pdev)
+ return -ENOMEM;
+
+ rc = platform_device_add(pdev);
+ if (rc)
+ goto undo_malloc;
+
+ /* nsc_gpio uses dev_dbg(), so needs this */
+ scx200_access.dev = &pdev->dev;
+
+ if (major)
+ rc = register_chrdev_region(dev, num_pins, "scx200_gpio");
+ else {
+ rc = alloc_chrdev_region(&dev, 0, num_pins, "scx200_gpio");
+ major = MAJOR(dev);
}
- if (!major) {
- major = r;
- printk(KERN_DEBUG NAME ": got dynamic major %d\n", major);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "SCx200 chrdev_region err: %d\n", rc);
+ goto undo_platform_device_add;
+ }
+ scx200_devices = kzalloc(num_pins * sizeof(struct cdev), GFP_KERNEL);
+ if (!scx200_devices) {
+ rc = -ENOMEM;
+ goto undo_chrdev_region;
+ }
+ for (i = 0; i < num_pins; i++) {
+ struct cdev *cdev = &scx200_devices[i];
+ cdev_init(cdev, &scx200_gpio_fops);
+ cdev->owner = THIS_MODULE;
+ rc = cdev_add(cdev, MKDEV(major, i), 1);
+ /* tolerate 'minor' errors */
+ if (rc)
+ dev_err(&pdev->dev, "Error %d on minor %d", rc, i);
}
- return 0;
+ return 0; /* succeed */
+
+undo_chrdev_region:
+ unregister_chrdev_region(dev, num_pins);
+undo_platform_device_add:
+ platform_device_del(pdev);
+undo_malloc:
+ platform_device_put(pdev); /* refcounted device: put, don't kfree */
+ return rc;
}
static void __exit scx200_gpio_cleanup(void)
{
- unregister_chrdev(major, NAME);
+ kfree(scx200_devices);
+ unregister_chrdev_region(MKDEV(major, 0), num_pins);
+ platform_device_unregister(pdev);
+ /* unregister drops the last reference; no explicit
+ platform_device_put() or kfree() is needed */
}
module_init(scx200_gpio_init);
module_exit(scx200_gpio_cleanup);
-
-/*
- Local variables:
- compile-command: "make -k -C ../.. SUBDIRS=drivers/char modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 1b5330299e30fa..d2d6b01dcd05a1 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -2477,7 +2477,7 @@ static int __init specialix_init(void)
#endif
for (i = 0; i < SX_NBOARD; i++)
- sx_board[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&sx_board[i].lock);
if (sx_init_drivers()) {
func_exit();
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
index a9c5a7230f8958..bf361a5ba70d66 100644
--- a/drivers/char/stallion.c
+++ b/drivers/char/stallion.c
@@ -141,15 +141,6 @@ static char *stl_drvversion = "5.6.0";
static struct tty_driver *stl_serial;
/*
- * We will need to allocate a temporary write buffer for chars that
- * come direct from user space. The problem is that a copy from user
- * space might cause a page fault (typically on a system that is
- * swapping!). All ports will share one buffer - since if the system
- * is already swapping a shared buffer won't make things any worse.
- */
-static char *stl_tmpwritebuf;
-
-/*
* Define a local default termios struct. All ports will be created
* with this termios initially. Basically all it defines is a raw port
* at 9600, 8 data bits, 1 stop bit.
@@ -363,6 +354,14 @@ static unsigned char stl_vecmap[] = {
};
/*
+ * Lock ordering: stallion_lock must never be taken while
+ * brd_lock is held.
+ */
+
+static DEFINE_SPINLOCK(brd_lock); /* Guard the board mapping */
+static DEFINE_SPINLOCK(stallion_lock); /* Guard the tty driver */
+
+/*
* Set up enable and disable macros for the ECH boards. They require
* the secondary io address space to be activated and deactivated.
* This way all ECH boards can share their secondary io region.
@@ -725,17 +724,7 @@ static struct class *stallion_class;
static int __init stallion_module_init(void)
{
- unsigned long flags;
-
-#ifdef DEBUG
- printk("init_module()\n");
-#endif
-
- save_flags(flags);
- cli();
stl_init();
- restore_flags(flags);
-
return 0;
}
@@ -746,7 +735,6 @@ static void __exit stallion_module_exit(void)
stlbrd_t *brdp;
stlpanel_t *panelp;
stlport_t *portp;
- unsigned long flags;
int i, j, k;
#ifdef DEBUG
@@ -756,9 +744,6 @@ static void __exit stallion_module_exit(void)
printk(KERN_INFO "Unloading %s: version %s\n", stl_drvtitle,
stl_drvversion);
- save_flags(flags);
- cli();
-
/*
* Free up all allocated resources used by the ports. This includes
* memory and interrupts. As part of this process we will also do
@@ -770,7 +755,6 @@ static void __exit stallion_module_exit(void)
if (i) {
printk("STALLION: failed to un-register tty driver, "
"errno=%d\n", -i);
- restore_flags(flags);
return;
}
for (i = 0; i < 4; i++) {
@@ -783,8 +767,6 @@ static void __exit stallion_module_exit(void)
"errno=%d\n", -i);
class_destroy(stallion_class);
- kfree(stl_tmpwritebuf);
-
for (i = 0; (i < stl_nrbrds); i++) {
if ((brdp = stl_brds[i]) == (stlbrd_t *) NULL)
continue;
@@ -814,8 +796,6 @@ static void __exit stallion_module_exit(void)
kfree(brdp);
stl_brds[i] = (stlbrd_t *) NULL;
}
-
- restore_flags(flags);
}
module_init(stallion_module_init);
@@ -948,7 +928,7 @@ static stlbrd_t *stl_allocbrd(void)
brdp = kzalloc(sizeof(stlbrd_t), GFP_KERNEL);
if (!brdp) {
- printk("STALLION: failed to allocate memory (size=%d)\n",
+ printk("STALLION: failed to allocate memory (size=%Zd)\n",
sizeof(stlbrd_t));
return NULL;
}
@@ -1066,16 +1046,17 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp)
rc = 0;
doclocal = 0;
+ spin_lock_irqsave(&stallion_lock, flags);
+
if (portp->tty->termios->c_cflag & CLOCAL)
doclocal++;
- save_flags(flags);
- cli();
portp->openwaitcnt++;
if (! tty_hung_up_p(filp))
portp->refcount--;
for (;;) {
+ /* Takes brd_lock internally */
stl_setsignals(portp, 1, 1);
if (tty_hung_up_p(filp) ||
((portp->flags & ASYNC_INITIALIZED) == 0)) {
@@ -1093,13 +1074,14 @@ static int stl_waitcarrier(stlport_t *portp, struct file *filp)
rc = -ERESTARTSYS;
break;
}
+ /* FIXME */
interruptible_sleep_on(&portp->open_wait);
}
if (! tty_hung_up_p(filp))
portp->refcount++;
portp->openwaitcnt--;
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
return rc;
}
@@ -1119,16 +1101,15 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
if (portp == (stlport_t *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&stallion_lock, flags);
if (tty_hung_up_p(filp)) {
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
return;
}
if ((tty->count == 1) && (portp->refcount != 1))
portp->refcount = 1;
if (portp->refcount-- > 1) {
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
return;
}
@@ -1142,11 +1123,18 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
* (The sc26198 has no "end-of-data" interrupt only empty FIFO)
*/
tty->closing = 1;
+
+ spin_unlock_irqrestore(&stallion_lock, flags);
+
if (portp->closing_wait != ASYNC_CLOSING_WAIT_NONE)
tty_wait_until_sent(tty, portp->closing_wait);
stl_waituntilsent(tty, (HZ / 2));
+
+ spin_lock_irqsave(&stallion_lock, flags);
portp->flags &= ~ASYNC_INITIALIZED;
+ spin_unlock_irqrestore(&stallion_lock, flags);
+
stl_disableintrs(portp);
if (tty->termios->c_cflag & HUPCL)
stl_setsignals(portp, 0, 0);
@@ -1173,7 +1161,6 @@ static void stl_close(struct tty_struct *tty, struct file *filp)
portp->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
wake_up_interruptible(&portp->close_wait);
- restore_flags(flags);
}
/*****************************************************************************/
@@ -1195,9 +1182,6 @@ static int stl_write(struct tty_struct *tty, const unsigned char *buf, int count
(int) tty, (int) buf, count);
#endif
- if ((tty == (struct tty_struct *) NULL) ||
- (stl_tmpwritebuf == (char *) NULL))
- return 0;
portp = tty->driver_data;
if (portp == (stlport_t *) NULL)
return 0;
@@ -1302,11 +1286,6 @@ static void stl_flushchars(struct tty_struct *tty)
if (portp->tx.buf == (char *) NULL)
return;
-#if 0
- if (tty->stopped || tty->hw_stopped ||
- (portp->tx.head == portp->tx.tail))
- return;
-#endif
stl_startrxtx(portp, -1, 1);
}
@@ -1977,12 +1956,14 @@ static int stl_eiointr(stlbrd_t *brdp)
unsigned int iobase;
int handled = 0;
+ spin_lock(&brd_lock);
panelp = brdp->panels[0];
iobase = panelp->iobase;
while (inb(brdp->iostatus) & EIO_INTRPEND) {
handled = 1;
(* panelp->isr)(panelp, iobase);
}
+ spin_unlock(&brd_lock);
return handled;
}
@@ -2168,7 +2149,7 @@ static int __init stl_initports(stlbrd_t *brdp, stlpanel_t *panelp)
portp = kzalloc(sizeof(stlport_t), GFP_KERNEL);
if (!portp) {
printk("STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlport_t));
+ "(size=%Zd)\n", sizeof(stlport_t));
break;
}
@@ -2304,7 +2285,7 @@ static inline int stl_initeio(stlbrd_t *brdp)
panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
if (!panelp) {
printk(KERN_WARNING "STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlpanel_t));
+ "(size=%Zd)\n", sizeof(stlpanel_t));
return -ENOMEM;
}
@@ -2478,7 +2459,7 @@ static inline int stl_initech(stlbrd_t *brdp)
panelp = kzalloc(sizeof(stlpanel_t), GFP_KERNEL);
if (!panelp) {
printk("STALLION: failed to allocate memory "
- "(size=%d)\n", sizeof(stlpanel_t));
+ "(size=%Zd)\n", sizeof(stlpanel_t));
break;
}
panelp->magic = STL_PANELMAGIC;
@@ -2879,8 +2860,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
portp->stats.lflags = 0;
portp->stats.rxbuffered = 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&stallion_lock, flags);
if (portp->tty != (struct tty_struct *) NULL) {
if (portp->tty->driver_data == portp) {
portp->stats.ttystate = portp->tty->flags;
@@ -2894,7 +2874,7 @@ static int stl_getportstats(stlport_t *portp, comstats_t __user *cp)
}
}
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&stallion_lock, flags);
head = portp->tx.head;
tail = portp->tx.tail;
@@ -3056,14 +3036,6 @@ static int __init stl_init(void)
return -1;
/*
- * Allocate a temporary write buffer.
- */
- stl_tmpwritebuf = kmalloc(STL_TXBUFSIZE, GFP_KERNEL);
- if (!stl_tmpwritebuf)
- printk("STALLION: failed to allocate memory (size=%d)\n",
- STL_TXBUFSIZE);
-
-/*
* Set up a character driver for per board stuff. This is mainly used
* to do stats ioctls on the ports.
*/
@@ -3147,11 +3119,13 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
unsigned int gfrcr;
int chipmask, i, j;
int nrchips, uartaddr, ioaddr;
+ unsigned long flags;
#ifdef DEBUG
printk("stl_panelinit(brdp=%x,panelp=%x)\n", (int) brdp, (int) panelp);
#endif
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(panelp->brdnr, panelp->pagenr);
/*
@@ -3189,6 +3163,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
}
BRDDISABLE(panelp->brdnr);
+ spin_unlock_irqrestore(&brd_lock, flags);
return chipmask;
}
@@ -3200,6 +3175,7 @@ static int stl_cd1400panelinit(stlbrd_t *brdp, stlpanel_t *panelp)
static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *portp)
{
+ unsigned long flags;
#ifdef DEBUG
printk("stl_cd1400portinit(brdp=%x,panelp=%x,portp=%x)\n",
(int) brdp, (int) panelp, (int) portp);
@@ -3209,6 +3185,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po
(portp == (stlport_t *) NULL))
return;
+ spin_lock_irqsave(&brd_lock, flags);
portp->ioaddr = panelp->iobase + (((brdp->brdtype == BRD_ECHPCI) ||
(portp->portnr < 8)) ? 0 : EREG_BANKSIZE);
portp->uartaddr = (portp->portnr & 0x04) << 5;
@@ -3219,6 +3196,7 @@ static void stl_cd1400portinit(stlbrd_t *brdp, stlpanel_t *panelp, stlport_t *po
stl_cd1400setreg(portp, LIVR, (portp->portnr << 3));
portp->hwid = stl_cd1400getreg(portp, GFRCR);
BRDDISABLE(portp->brdnr);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3428,8 +3406,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp)
tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x3));
srer = stl_cd1400getreg(portp, SRER);
@@ -3466,7 +3443,7 @@ static void stl_cd1400setport(stlport_t *portp, struct termios *tiosp)
portp->sigs &= ~TIOCM_CD;
stl_cd1400setreg(portp, SRER, ((srer & ~sreroff) | sreron));
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3492,8 +3469,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts)
if (rts > 0)
msvr2 = MSVR2_RTS;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
if (rts >= 0)
@@ -3501,7 +3477,7 @@ static void stl_cd1400setsignals(stlport_t *portp, int dtr, int rts)
if (dtr >= 0)
stl_cd1400setreg(portp, MSVR1, msvr1);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3520,14 +3496,13 @@ static int stl_cd1400getsignals(stlport_t *portp)
printk("stl_cd1400getsignals(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
msvr1 = stl_cd1400getreg(portp, MSVR1);
msvr2 = stl_cd1400getreg(portp, MSVR2);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
sigs = 0;
sigs |= (msvr1 & MSVR1_DCD) ? TIOCM_CD : 0;
@@ -3569,15 +3544,14 @@ static void stl_cd1400enablerxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
ccr |= CCR_RXENABLE;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400ccrwait(portp);
stl_cd1400setreg(portp, CCR, ccr);
stl_cd1400ccrwait(portp);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3609,8 +3583,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
sreron |= SRER_RXDATA;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400setreg(portp, SRER,
@@ -3618,7 +3591,7 @@ static void stl_cd1400startrxtx(stlport_t *portp, int rx, int tx)
BRDDISABLE(portp->brdnr);
if (tx > 0)
set_bit(ASYI_TXBUSY, &portp->istate);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3634,13 +3607,12 @@ static void stl_cd1400disableintrs(stlport_t *portp)
#ifdef DEBUG
printk("stl_cd1400disableintrs(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400setreg(portp, SRER, 0);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3653,8 +3625,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len)
printk("stl_cd1400sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400setreg(portp, SRER,
@@ -3664,7 +3635,7 @@ static void stl_cd1400sendbreak(stlport_t *portp, int len)
portp->brklen = len;
if (len == 1)
portp->stats.txbreaks++;
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3688,8 +3659,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
@@ -3729,7 +3699,7 @@ static void stl_cd1400flowctrl(stlport_t *portp, int state)
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3753,8 +3723,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
if (state) {
@@ -3769,7 +3738,7 @@ static void stl_cd1400sendflow(stlport_t *portp, int state)
stl_cd1400ccrwait(portp);
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3785,8 +3754,7 @@ static void stl_cd1400flush(stlport_t *portp)
if (portp == (stlport_t *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_cd1400setreg(portp, CAR, (portp->portnr & 0x03));
stl_cd1400ccrwait(portp);
@@ -3794,7 +3762,7 @@ static void stl_cd1400flush(stlport_t *portp)
stl_cd1400ccrwait(portp);
portp->tx.tail = portp->tx.head;
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -3833,6 +3801,7 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase)
(int) panelp, iobase);
#endif
+ spin_lock(&brd_lock);
outb(SVRR, iobase);
svrtype = inb(iobase + EREG_DATA);
if (panelp->nrports > 4) {
@@ -3846,6 +3815,8 @@ static void stl_cd1400eiointr(stlpanel_t *panelp, unsigned int iobase)
stl_cd1400txisr(panelp, iobase);
else if (svrtype & SVRR_MDM)
stl_cd1400mdmisr(panelp, iobase);
+
+ spin_unlock(&brd_lock);
}
/*****************************************************************************/
@@ -4433,8 +4404,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp)
tiosp->c_cc[VSTART], tiosp->c_cc[VSTOP]);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, IMR, 0);
stl_sc26198updatereg(portp, MR0, mr0);
@@ -4461,7 +4431,7 @@ static void stl_sc26198setport(stlport_t *portp, struct termios *tiosp)
portp->imr = (portp->imr & ~imroff) | imron;
stl_sc26198setreg(portp, IMR, portp->imr);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4491,13 +4461,12 @@ static void stl_sc26198setsignals(stlport_t *portp, int dtr, int rts)
else if (rts > 0)
iopioron |= IPR_RTS;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, IOPIOR,
((stl_sc26198getreg(portp, IOPIOR) & ~iopioroff) | iopioron));
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4516,12 +4485,11 @@ static int stl_sc26198getsignals(stlport_t *portp)
printk("stl_sc26198getsignals(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
ipr = stl_sc26198getreg(portp, IPR);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
sigs = 0;
sigs |= (ipr & IPR_DCD) ? 0 : TIOCM_CD;
@@ -4558,13 +4526,12 @@ static void stl_sc26198enablerxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
ccr |= CR_RXENABLE;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, SCCR, ccr);
BRDDISABLE(portp->brdnr);
portp->crenable = ccr;
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4593,15 +4560,14 @@ static void stl_sc26198startrxtx(stlport_t *portp, int rx, int tx)
else if (rx > 0)
imr |= IR_RXRDY | IR_RXBREAK | IR_RXWATCHDOG;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, IMR, imr);
BRDDISABLE(portp->brdnr);
portp->imr = imr;
if (tx > 0)
set_bit(ASYI_TXBUSY, &portp->istate);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4618,13 +4584,12 @@ static void stl_sc26198disableintrs(stlport_t *portp)
printk("stl_sc26198disableintrs(portp=%x)\n", (int) portp);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
portp->imr = 0;
stl_sc26198setreg(portp, IMR, 0);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4637,8 +4602,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len)
printk("stl_sc26198sendbreak(portp=%x,len=%d)\n", (int) portp, len);
#endif
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
if (len == 1) {
stl_sc26198setreg(portp, SCCR, CR_TXSTARTBREAK);
@@ -4647,7 +4611,7 @@ static void stl_sc26198sendbreak(stlport_t *portp, int len)
stl_sc26198setreg(portp, SCCR, CR_TXSTOPBREAK);
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4672,8 +4636,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
if (state) {
@@ -4719,7 +4682,7 @@ static void stl_sc26198flowctrl(stlport_t *portp, int state)
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4744,8 +4707,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state)
if (tty == (struct tty_struct *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
if (state) {
mr0 = stl_sc26198getreg(portp, MR0);
@@ -4765,7 +4727,7 @@ static void stl_sc26198sendflow(stlport_t *portp, int state)
stl_sc26198setreg(portp, MR0, mr0);
}
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4781,14 +4743,13 @@ static void stl_sc26198flush(stlport_t *portp)
if (portp == (stlport_t *) NULL)
return;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
stl_sc26198setreg(portp, SCCR, CR_TXRESET);
stl_sc26198setreg(portp, SCCR, portp->crenable);
BRDDISABLE(portp->brdnr);
portp->tx.tail = portp->tx.head;
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
}
/*****************************************************************************/
@@ -4815,12 +4776,11 @@ static int stl_sc26198datastate(stlport_t *portp)
if (test_bit(ASYI_TXBUSY, &portp->istate))
return 1;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&brd_lock, flags);
BRDENABLE(portp->brdnr, portp->pagenr);
sr = stl_sc26198getreg(portp, SR);
BRDDISABLE(portp->brdnr);
- restore_flags(flags);
+ spin_unlock_irqrestore(&brd_lock, flags);
return (sr & SR_TXEMPTY) ? 0 : 1;
}
@@ -4878,6 +4838,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase)
stlport_t *portp;
unsigned int iack;
+ spin_lock(&brd_lock);
+
/*
* Work around bug in sc26198 chip... Cannot have A6 address
* line of UART high, else iack will be returned as 0.
@@ -4893,6 +4855,8 @@ static void stl_sc26198intr(stlpanel_t *panelp, unsigned int iobase)
stl_sc26198txisr(portp);
else
stl_sc26198otherisr(portp, iack);
+
+ spin_unlock(&brd_lock);
}
/*****************************************************************************/
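
The stallion.c hunks above repeatedly apply one conversion pattern. A minimal sketch of it, with do_hw_access() standing in for the register accesses done between BRDENABLE() and BRDDISABLE():

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(brd_lock);

	static void do_hw_access(void)
	{
		/* placeholder for the real inb()/outb() sequence */
	}

	/*
	 * Replaces the old save_flags(flags); cli(); ... restore_flags(flags);
	 * sequence, which only masked interrupts on the local CPU and offered
	 * no serialization at all between CPUs on SMP.
	 */
	static void guarded_hw_access(void)
	{
		unsigned long flags;

		spin_lock_irqsave(&brd_lock, flags);
		do_hw_access();
		spin_unlock_irqrestore(&brd_lock, flags);
	}
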
diff --git a/drivers/char/sx.c b/drivers/char/sx.c
index 3b4747230270a3..76b9107f7f814b 100644
--- a/drivers/char/sx.c
+++ b/drivers/char/sx.c
@@ -2320,7 +2320,7 @@ static int sx_init_portstructs (int nboards, int nports)
#ifdef NEW_WRITE_LOCKING
port->gs.port_write_mutex = MUTEX;
#endif
- port->gs.driver_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&port->gs.driver_lock);
/*
* Initializing wait queue
*/
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index 8b2a5996986819..bd74e82d8a72a4 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -2621,10 +2621,9 @@ int tty_ioctl(struct inode * inode, struct file * file,
tty->driver->break_ctl(tty, 0);
return 0;
case TCSBRK: /* SVID version: non-zero arg --> no break */
- /*
- * XXX is the above comment correct, or the
- * code below correct? Is this ioctl used at
- * all by anyone?
+ /* non-zero arg means wait for all output data
+ * to be sent (performed above) but don't send break.
+ * This is used by the tcdrain() termios function.
*/
if (!arg)
return send_break(tty, 250);
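
A user-space view of the semantics the new comment describes, as a small sketch; /dev/ttyS0 is just an example device, and glibc's tcdrain() is implemented on Linux as TCSBRK with a non-zero argument:

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <termios.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

		if (fd < 0)
			return 1;
		ioctl(fd, TCSBRK, 1);	/* non-zero arg: wait for output to drain, no break */
		tcdrain(fd);		/* the portable equivalent of the line above */
		ioctl(fd, TCSBRK, 0);	/* zero arg: send an actual break (~250 ms) */
		close(fd);
		return 0;
	}
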
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 44d1eca83a7250..35e0b9ceecf7fa 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1497,6 +1497,7 @@ int cpufreq_update_policy(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_update_policy);
+#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -1532,10 +1533,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cpufreq_cpu_notifier =
+static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
{
.notifier_call = cpufreq_cpu_callback,
};
+#endif /* CONFIG_HOTPLUG_CPU */
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
@@ -1596,7 +1598,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
}
if (!ret) {
- register_cpu_notifier(&cpufreq_cpu_notifier);
+ register_hotcpu_notifier(&cpufreq_cpu_notifier);
dprintk("driver %s up and running\n", driver_data->name);
cpufreq_debug_enable_ratelimit();
}
@@ -1628,7 +1630,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
dprintk("unregistering driver %s\n", driver->name);
sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
- unregister_cpu_notifier(&cpufreq_cpu_notifier);
+ unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c576c0b3f45262..145061b8472a81 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -350,7 +350,7 @@ __init cpufreq_stats_init(void)
return ret;
}
- register_cpu_notifier(&cpufreq_stat_cpu_notifier);
+ register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_ONLINE,
@@ -368,7 +368,7 @@ __exit cpufreq_stats_exit(void)
CPUFREQ_POLICY_NOTIFIER);
cpufreq_unregister_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
- unregister_cpu_notifier(&cpufreq_stat_cpu_notifier);
+ unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
lock_cpu_hotplug();
for_each_online_cpu(cpu) {
cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier, CPU_DEAD,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 3e0d04d5a800b5..8b46ef7d9ff836 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -488,7 +488,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
dev_err(&dev->dev, "SMBus base address uninitialized, "
"upgrade BIOS\n");
err = -ENODEV;
- goto exit_disable;
+ goto exit;
}
err = pci_request_region(dev, SMBBAR, i801_driver.name);
@@ -496,7 +496,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
dev_err(&dev->dev, "Failed to request SMBus region "
"0x%lx-0x%lx\n", i801_smba,
pci_resource_end(dev, SMBBAR));
- goto exit_disable;
+ goto exit;
}
pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
@@ -520,11 +520,12 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
err = i2c_add_adapter(&i801_adapter);
if (err) {
dev_err(&dev->dev, "Failed to add SMBus adapter\n");
- goto exit_disable;
+ goto exit_release;
}
+ return 0;
-exit_disable:
- pci_disable_device(dev);
+exit_release:
+ pci_release_region(dev, SMBBAR);
exit:
return err;
}
@@ -533,7 +534,10 @@ static void __devexit i801_remove(struct pci_dev *dev)
{
i2c_del_adapter(&i801_adapter);
pci_release_region(dev, SMBBAR);
- pci_disable_device(dev);
+ /*
+ * do not call pci_disable_device(dev) since it can cause hard hangs on
+ * some systems during power-off (e.g. Fujitsu-Siemens Lifebook E8010)
+ */
}
static struct pci_driver i801_driver = {
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 6e9dbf4d80775b..85007cb12c5235 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -75,6 +75,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 },
+ { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 },
{ 0 }
};
@@ -490,7 +491,8 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
/* 17 */ DECLARE_NV_DEV("NFORCE-MCP61"),
- /* 18 */ DECLARE_AMD_DEV("AMD5536"),
+ /* 18 */ DECLARE_NV_DEV("NFORCE-MCP65"),
+ /* 19 */ DECLARE_AMD_DEV("AMD5536"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -528,7 +530,8 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
- { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
+ { PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18 },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 19 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index de2e7546b491a8..a90486f5e49127 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -998,12 +998,13 @@ void input_unregister_device(struct input_dev *dev)
sysfs_remove_group(&dev->cdev.kobj, &input_dev_caps_attr_group);
sysfs_remove_group(&dev->cdev.kobj, &input_dev_id_attr_group);
sysfs_remove_group(&dev->cdev.kobj, &input_dev_attr_group);
- class_device_unregister(&dev->cdev);
mutex_lock(&dev->mutex);
dev->name = dev->phys = dev->uniq = NULL;
mutex_unlock(&dev->mutex);
+ class_device_unregister(&dev->cdev);
+
input_wakeup_procfs_readers();
}
EXPORT_SYMBOL(input_unregister_device);
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index acb7e265678027..2a56bf33a6738a 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -981,7 +981,7 @@ exit:
EXPORT_SYMBOL_GPL(gigaset_stop);
static LIST_HEAD(drivers);
-static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(driver_lock);
struct cardstate *gigaset_get_cs_by_id(int id)
{
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index 2ac90242d26371..433389daedb2ab 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -82,7 +82,7 @@ isdn_tty_try_read(modem_info * info, struct sk_buff *skb)
int l = skb->len;
unsigned char *dp = skb->data;
while (--l) {
- if (*skb->data == DLE)
+ if (*dp == DLE)
tty_insert_flip_char(tty, DLE, 0);
tty_insert_flip_char(tty, *dp++, 0);
}
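
The fix matters because the loop is meant to double every DLE (0x10) byte before pushing it to the tty, but the old test read *skb->data, the first byte of the buffer, on every pass, so DLE bytes later in the packet were never escaped. A stand-alone illustration of the intended escaping (not driver code):

	#include <stdio.h>

	#define DLE 0x10

	static int escape_dle(const unsigned char *in, int len, unsigned char *out)
	{
		int n = 0;

		while (len--) {
			if (*in == DLE)		/* test the byte being copied ... */
				out[n++] = DLE;	/* ... and double it */
			out[n++] = *in++;
		}
		return n;
	}

	int main(void)
	{
		unsigned char in[] = { 'a', DLE, 'b' };
		unsigned char out[8];

		printf("%d bytes after escaping\n", escape_dle(in, sizeof(in), out));	/* 4 */
		return 0;
	}
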
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index fe6541326c717f..9b015f9af351e4 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -18,7 +18,7 @@
#include <linux/leds.h>
#include "leds.h"
-rwlock_t leds_list_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(leds_list_lock);
LIST_HEAD(leds_list);
EXPORT_SYMBOL_GPL(leds_list);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 5e2cd8be1191b9..1b1ce6523960cb 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -26,7 +26,7 @@
/*
* Nests outside led_cdev->trigger_lock
*/
-static rwlock_t triggers_list_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(triggers_list_lock);
static LIST_HEAD(trigger_list);
ssize_t led_trigger_store(struct class_device *dev, const char *buf,
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index e4290491fa9e15..6d532f170ce5c8 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -445,6 +445,8 @@ endmenu # encoder / decoder chips
menu "V4L USB devices"
depends on USB && VIDEO_DEV
+source "drivers/media/video/pvrusb2/Kconfig"
+
source "drivers/media/video/em28xx/Kconfig"
config USB_DSBR
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 6c401b46398a11..353d61cfac1b21 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -47,6 +47,7 @@ obj-$(CONFIG_VIDEO_SAA7134) += ir-kbd-i2c.o saa7134/
obj-$(CONFIG_VIDEO_CX88) += cx88/
obj-$(CONFIG_VIDEO_EM28XX) += em28xx/
obj-$(CONFIG_VIDEO_EM28XX) += tvp5150.o
+obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2/
obj-$(CONFIG_VIDEO_MSP3400) += msp3400.o
obj-$(CONFIG_VIDEO_CS53L32A) += cs53l32a.o
obj-$(CONFIG_VIDEO_TLV320AIC23B) += tlv320aic23b.o
diff --git a/drivers/media/video/cx2341x.c b/drivers/media/video/cx2341x.c
index 01b22eab572574..65f00fc08fa961 100644
--- a/drivers/media/video/cx2341x.c
+++ b/drivers/media/video/cx2341x.c
@@ -601,7 +601,7 @@ static void cx2341x_calc_audio_properties(struct cx2341x_mpeg_params *params)
}
int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
- struct v4l2_ext_controls *ctrls, int cmd)
+ struct v4l2_ext_controls *ctrls, unsigned int cmd)
{
int err = 0;
int i;
@@ -847,22 +847,22 @@ invalid:
return "<invalid>";
}
-void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
+void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix)
{
int is_mpeg1 = p->video_encoding == V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
/* Stream */
- printk(KERN_INFO "cx2341x-%d: Stream: %s\n",
- card_id,
+ printk(KERN_INFO "%s: Stream: %s\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_STREAM_TYPE));
/* Video */
- printk(KERN_INFO "cx2341x-%d: Video: %dx%d, %d fps\n",
- card_id,
+ printk(KERN_INFO "%s: Video: %dx%d, %d fps\n",
+ prefix,
p->width / (is_mpeg1 ? 2 : 1), p->height / (is_mpeg1 ? 2 : 1),
p->is_50hz ? 25 : 30);
- printk(KERN_INFO "cx2341x-%d: Video: %s, %s, %s, %d",
- card_id,
+ printk(KERN_INFO "%s: Video: %s, %s, %s, %d",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ENCODING),
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_ASPECT),
cx2341x_menu_item(p, V4L2_CID_MPEG_VIDEO_BITRATE_MODE),
@@ -871,19 +871,19 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
printk(", Peak %d", p->video_bitrate_peak);
}
printk("\n");
- printk(KERN_INFO "cx2341x-%d: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n",
- card_id,
+ printk(KERN_INFO "%s: Video: GOP Size %d, %d B-Frames, %sGOP Closure, %s3:2 Pulldown\n",
+ prefix,
p->video_gop_size, p->video_b_frames,
p->video_gop_closure ? "" : "No ",
p->video_pulldown ? "" : "No ");
if (p->video_temporal_decimation) {
- printk(KERN_INFO "cx2341x-%d: Video: Temporal Decimation %d\n",
- card_id, p->video_temporal_decimation);
+ printk(KERN_INFO "%s: Video: Temporal Decimation %d\n",
+ prefix, p->video_temporal_decimation);
}
/* Audio */
- printk(KERN_INFO "cx2341x-%d: Audio: %s, %s, %s, %s",
- card_id,
+ printk(KERN_INFO "%s: Audio: %s, %s, %s, %s",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ),
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_ENCODING),
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_L2_BITRATE),
@@ -897,18 +897,18 @@ void cx2341x_log_status(struct cx2341x_mpeg_params *p, int card_id)
cx2341x_menu_item(p, V4L2_CID_MPEG_AUDIO_CRC));
/* Encoding filters */
- printk(KERN_INFO "cx2341x-%d: Spatial Filter: %s, Luma %s, Chroma %s, %d\n",
- card_id,
+ printk(KERN_INFO "%s: Spatial Filter: %s, Luma %s, Chroma %s, %d\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE),
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE),
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE),
p->video_spatial_filter);
- printk(KERN_INFO "cx2341x-%d: Temporal Filter: %s, %d\n",
- card_id,
+ printk(KERN_INFO "%s: Temporal Filter: %s, %d\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE),
p->video_temporal_filter);
- printk(KERN_INFO "cx2341x-%d: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n",
- card_id,
+ printk(KERN_INFO "%s: Median Filter: %s, Luma [%d, %d], Chroma [%d, %d]\n",
+ prefix,
cx2341x_menu_item(p, V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE),
p->video_luma_median_filter_bottom,
p->video_luma_median_filter_top,
diff --git a/drivers/media/video/cx88/cx88-blackbird.c b/drivers/media/video/cx88/cx88-blackbird.c
index 78df66671ea2f3..4ff81582ec56ee 100644
--- a/drivers/media/video/cx88/cx88-blackbird.c
+++ b/drivers/media/video/cx88/cx88-blackbird.c
@@ -853,6 +853,19 @@ static int mpeg_do_ioctl(struct inode *inode, struct file *file,
fh->mpegq.field);
return 0;
}
+ case VIDIOC_LOG_STATUS:
+ {
+ char name[32 + 2];
+
+ snprintf(name, sizeof(name), "%s/2", core->name);
+ printk("%s/2: ============ START LOG STATUS ============\n",
+ core->name);
+ cx88_call_i2c_clients(core, VIDIOC_LOG_STATUS, 0);
+ cx2341x_log_status(&dev->params, name);
+ printk("%s/2: ============= END LOG STATUS =============\n",
+ core->name);
+ return 0;
+ }
default:
return cx88_do_ioctl(inode, file, 0, dev->core, cmd, arg, mpeg_do_ioctl);
diff --git a/drivers/media/video/pvrusb2/Kconfig b/drivers/media/video/pvrusb2/Kconfig
new file mode 100644
index 00000000000000..7e727fe14b3228
--- /dev/null
+++ b/drivers/media/video/pvrusb2/Kconfig
@@ -0,0 +1,62 @@
+config VIDEO_PVRUSB2
+ tristate "Hauppauge WinTV-PVR USB2 support"
+ depends on VIDEO_V4L2 && USB && I2C && EXPERIMENTAL
+ select FW_LOADER
+ select VIDEO_TUNER
+ select VIDEO_TVEEPROM
+ select VIDEO_CX2341X
+ select VIDEO_SAA711X
+ select VIDEO_MSP3400
+ ---help---
+ This is a video4linux driver for Conexant 23416-based
+ USB 2.0 personal video recorder devices.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pvrusb2.
+
+config VIDEO_PVRUSB2_24XXX
+ bool "Hauppauge WinTV-PVR USB2 support for 24xxx model series"
+ depends on VIDEO_PVRUSB2 && EXPERIMENTAL
+ select VIDEO_CX25840
+ select VIDEO_WM8775
+ ---help---
+ This option enables inclusion of additional logic to operate
+ newer WinTV-PVR USB2 devices whose model number is of the
+ form "24xxx" (leading prefix of "24" followed by 3 digits).
+ To see if you may need this option, examine the white
+ sticker on the underside of your device. Enabling this
+ option will not harm support for older devices, however it
+ is a separate option because of the experimental nature of
+ this new feature.
+
+ If you are in doubt, say N.
+
+ Note: This feature is _very_ experimental. You have been
+ warned.
+
+config VIDEO_PVRUSB2_SYSFS
+ bool "pvrusb2 sysfs support (EXPERIMENTAL)"
+ default y
+ depends on VIDEO_PVRUSB2 && SYSFS && EXPERIMENTAL
+ ---help---
+ This option enables the operation of a sysfs-based
+ interface for query and control of the pvrusb2 driver.
+
+ This is not generally needed for v4l applications,
+ although certain applications are optimized to take
+ advantage of this feature.
+
+ If you are in doubt, say Y.
+
+ Note: This feature is experimental and subject to change.
+
+config VIDEO_PVRUSB2_DEBUGIFC
+ bool "pvrusb2 debug interface"
+ depends on VIDEO_PVRUSB2_SYSFS
+ ---help---
+ This option enables the inclusion of a debug interface
+ in the pvrusb2 driver, hosted through sysfs.
+
+ You do not need to select this option unless you plan
+ on debugging the driver or performing a manual firmware
+ extraction.
diff --git a/drivers/media/video/pvrusb2/Makefile b/drivers/media/video/pvrusb2/Makefile
new file mode 100644
index 00000000000000..fed603ad0a6721
--- /dev/null
+++ b/drivers/media/video/pvrusb2/Makefile
@@ -0,0 +1,18 @@
+obj-pvrusb2-sysfs-$(CONFIG_VIDEO_PVRUSB2_SYSFS) := pvrusb2-sysfs.o
+obj-pvrusb2-debugifc-$(CONFIG_VIDEO_PVRUSB2_DEBUGIFC) := pvrusb2-debugifc.o
+
+obj-pvrusb2-24xxx-$(CONFIG_VIDEO_PVRUSB2_24XXX) := \
+ pvrusb2-cx2584x-v4l.o \
+ pvrusb2-wm8775.o
+
+pvrusb2-objs := pvrusb2-i2c-core.o pvrusb2-i2c-cmd-v4l2.o \
+ pvrusb2-audio.o pvrusb2-i2c-chips-v4l2.o \
+ pvrusb2-encoder.o pvrusb2-video-v4l.o \
+ pvrusb2-eeprom.o pvrusb2-tuner.o pvrusb2-demod.o \
+ pvrusb2-main.o pvrusb2-hdw.o pvrusb2-v4l2.o \
+ pvrusb2-ctrl.o pvrusb2-std.o \
+ pvrusb2-context.o pvrusb2-io.o pvrusb2-ioread.o \
+ $(obj-pvrusb2-24xxx-y) \
+ $(obj-pvrusb2-sysfs-y) $(obj-pvrusb2-debugifc-y)
+
+obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.c b/drivers/media/video/pvrusb2/pvrusb2-audio.c
new file mode 100644
index 00000000000000..313d2dcf9e4bfb
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.c
@@ -0,0 +1,204 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-audio.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/msp3400.h>
+#include <media/v4l2-common.h>
+
+struct pvr2_msp3400_handler {
+ struct pvr2_hdw *hdw;
+ struct pvr2_i2c_client *client;
+ struct pvr2_i2c_handler i2c_handler;
+ struct pvr2_audio_stat astat;
+ unsigned long stale_mask;
+};
+
+
+/* This function selects the correct audio input source */
+static void set_stereo(struct pvr2_msp3400_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct v4l2_routing route;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo");
+
+ if (hdw->input_val == PVR2_CVAL_INPUT_TV) {
+ struct v4l2_tuner vt;
+ memset(&vt,0,sizeof(vt));
+ vt.audmode = hdw->audiomode_val;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_S_TUNER,&vt);
+ }
+
+ route.input = MSP_INPUT_DEFAULT;
+ route.output = MSP_OUTPUT(MSP_SC_IN_DSP_SCART1);
+ switch (hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ break;
+ case PVR2_CVAL_INPUT_RADIO:
+ /* Assume that msp34xx also handle FM decoding, in which case
+ we're still using the tuner. */
+ /* HV: actually it is more likely to be the SCART2 input if
+ the ivtv experience is any indication. */
+ route.input = MSP_INPUT(MSP_IN_SCART2, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ /* SCART 1 input */
+ route.input = MSP_INPUT(MSP_IN_SCART1, MSP_IN_TUNER1,
+ MSP_DSP_IN_SCART, MSP_DSP_IN_SCART);
+ break;
+ }
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
+}
+
+
+static int check_stereo(struct pvr2_msp3400_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return (hdw->input_dirty ||
+ hdw->audiomode_dirty);
+}
+
+
+struct pvr2_msp3400_ops {
+ void (*update)(struct pvr2_msp3400_handler *);
+ int (*check)(struct pvr2_msp3400_handler *);
+};
+
+
+static const struct pvr2_msp3400_ops msp3400_ops[] = {
+ { .update = set_stereo, .check = check_stereo},
+};
+
+
+static int msp3400_check(struct pvr2_msp3400_handler *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (msp3400_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void msp3400_update(struct pvr2_msp3400_handler *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(msp3400_ops)/sizeof(msp3400_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ msp3400_ops[idx].update(ctxt);
+ }
+}
+
+
+/* This reads back the current signal type */
+static int get_audio_status(struct pvr2_msp3400_handler *ctxt)
+{
+ struct v4l2_tuner vt;
+ int stat;
+
+ memset(&vt,0,sizeof(vt));
+ stat = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
+ if (stat < 0) return stat;
+
+ ctxt->hdw->flag_stereo = (vt.audmode & V4L2_TUNER_MODE_STEREO) != 0;
+ ctxt->hdw->flag_bilingual =
+ (vt.audmode & V4L2_TUNER_MODE_LANG2) != 0;
+ return 0;
+}
+
+
+static void pvr2_msp3400_detach(struct pvr2_msp3400_handler *ctxt)
+{
+ ctxt->client->handler = 0;
+ ctxt->hdw->audio_stat = 0;
+ kfree(ctxt);
+}
+
+
+static unsigned int pvr2_msp3400_describe(struct pvr2_msp3400_handler *ctxt,
+ char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-audio v4l2");
+}
+
+
+static const struct pvr2_i2c_handler_functions msp3400_funcs = {
+ .detach = (void (*)(void *))pvr2_msp3400_detach,
+ .check = (int (*)(void *))msp3400_check,
+ .update = (void (*)(void *))msp3400_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_msp3400_describe,
+};
+
+
+int pvr2_i2c_msp3400_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_msp3400_handler *ctxt;
+ if (hdw->audio_stat) return 0;
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->i2c_handler.func_data = ctxt;
+ ctxt->i2c_handler.func_table = &msp3400_funcs;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->astat.ctxt = ctxt;
+ ctxt->astat.status = (int (*)(void *))get_audio_status;
+ ctxt->astat.detach = (void (*)(void *))pvr2_msp3400_detach;
+ ctxt->stale_mask = (1 << (sizeof(msp3400_ops)/
+ sizeof(msp3400_ops[0]))) - 1;
+ cp->handler = &ctxt->i2c_handler;
+ hdw->audio_stat = &ctxt->astat;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x msp3400 V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-audio.h b/drivers/media/video/pvrusb2/pvrusb2-audio.h
new file mode 100644
index 00000000000000..536339b688432e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-audio.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_AUDIO_H
+#define __PVRUSB2_AUDIO_H
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_msp3400_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+#endif /* __PVRUSB2_AUDIO_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.c b/drivers/media/video/pvrusb2/pvrusb2-context.c
new file mode 100644
index 00000000000000..40dc59871a458e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.c
@@ -0,0 +1,230 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-context.h"
+#include "pvrusb2-io.h"
+#include "pvrusb2-ioread.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-debug.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+
+
+static void pvr2_context_destroy(struct pvr2_context *mp)
+{
+ if (mp->hdw) pvr2_hdw_destroy(mp->hdw);
+ pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr_main id=%p",mp);
+ flush_workqueue(mp->workqueue);
+ destroy_workqueue(mp->workqueue);
+ kfree(mp);
+}
+
+
+static void pvr2_context_trigger_poll(struct pvr2_context *mp)
+{
+ queue_work(mp->workqueue,&mp->workpoll);
+}
+
+
+static void pvr2_context_poll(struct pvr2_context *mp)
+{
+ pvr2_context_enter(mp); do {
+ pvr2_hdw_poll(mp->hdw);
+ } while (0); pvr2_context_exit(mp);
+}
+
+
+static void pvr2_context_setup(struct pvr2_context *mp)
+{
+ pvr2_context_enter(mp); do {
+ if (!pvr2_hdw_dev_ok(mp->hdw)) break;
+ pvr2_hdw_setup(mp->hdw);
+ pvr2_hdw_setup_poll_trigger(
+ mp->hdw,
+ (void (*)(void *))pvr2_context_trigger_poll,
+ mp);
+ if (!pvr2_hdw_dev_ok(mp->hdw)) break;
+ if (!pvr2_hdw_init_ok(mp->hdw)) break;
+ mp->video_stream.stream = pvr2_hdw_get_video_stream(mp->hdw);
+ if (mp->setup_func) {
+ mp->setup_func(mp);
+ }
+ } while (0); pvr2_context_exit(mp);
+}
+
+
+struct pvr2_context *pvr2_context_create(
+ struct usb_interface *intf,
+ const struct usb_device_id *devid,
+ void (*setup_func)(struct pvr2_context *))
+{
+ struct pvr2_context *mp = 0;
+ mp = kmalloc(sizeof(*mp),GFP_KERNEL);
+ if (!mp) goto done;
+ memset(mp,0,sizeof(*mp));
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_main id=%p",mp);
+ mp->setup_func = setup_func;
+ mutex_init(&mp->mutex);
+ mp->hdw = pvr2_hdw_create(intf,devid);
+ if (!mp->hdw) {
+ pvr2_context_destroy(mp);
+ mp = 0;
+ goto done;
+ }
+
+ mp->workqueue = create_singlethread_workqueue("pvrusb2");
+ INIT_WORK(&mp->workinit,(void (*)(void*))pvr2_context_setup,mp);
+ INIT_WORK(&mp->workpoll,(void (*)(void*))pvr2_context_poll,mp);
+ queue_work(mp->workqueue,&mp->workinit);
+ done:
+ return mp;
+}
+
+
+void pvr2_context_enter(struct pvr2_context *mp)
+{
+ mutex_lock(&mp->mutex);
+ pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_enter(id=%p)",mp);
+}
+
+
+void pvr2_context_exit(struct pvr2_context *mp)
+{
+ int destroy_flag = 0;
+ if (!mp->mc_first && mp->disconnect_flag) {
+ destroy_flag = !0;
+ }
+ pvr2_trace(PVR2_TRACE_CREG,"pvr2_context_exit(id=%p) outside",mp);
+ mutex_unlock(&mp->mutex);
+ if (destroy_flag) pvr2_context_destroy(mp);
+}
+
+
+static void pvr2_context_run_checks(struct pvr2_context *mp)
+{
+ struct pvr2_channel *ch1,*ch2;
+ for (ch1 = mp->mc_first; ch1; ch1 = ch2) {
+ ch2 = ch1->mc_next;
+ if (ch1->check_func) {
+ ch1->check_func(ch1);
+ }
+ }
+}
+
+
+void pvr2_context_disconnect(struct pvr2_context *mp)
+{
+ pvr2_context_enter(mp); do {
+ pvr2_hdw_disconnect(mp->hdw);
+ mp->disconnect_flag = !0;
+ pvr2_context_run_checks(mp);
+ } while (0); pvr2_context_exit(mp);
+}
+
+
+void pvr2_channel_init(struct pvr2_channel *cp,struct pvr2_context *mp)
+{
+ cp->hdw = mp->hdw;
+ cp->mc_head = mp;
+ cp->mc_next = 0;
+ cp->mc_prev = mp->mc_last;
+ if (mp->mc_last) {
+ mp->mc_last->mc_next = cp;
+ } else {
+ mp->mc_first = cp;
+ }
+ mp->mc_last = cp;
+}
+
+
+static void pvr2_channel_disclaim_stream(struct pvr2_channel *cp)
+{
+ if (!cp->stream) return;
+ pvr2_stream_kill(cp->stream->stream);
+ cp->stream->user = 0;
+ cp->stream = 0;
+}
+
+
+void pvr2_channel_done(struct pvr2_channel *cp)
+{
+ struct pvr2_context *mp = cp->mc_head;
+ pvr2_channel_disclaim_stream(cp);
+ if (cp->mc_next) {
+ cp->mc_next->mc_prev = cp->mc_prev;
+ } else {
+ mp->mc_last = cp->mc_prev;
+ }
+ if (cp->mc_prev) {
+ cp->mc_prev->mc_next = cp->mc_next;
+ } else {
+ mp->mc_first = cp->mc_next;
+ }
+ cp->hdw = 0;
+}
+
+
+int pvr2_channel_claim_stream(struct pvr2_channel *cp,
+ struct pvr2_context_stream *sp)
+{
+ int code = 0;
+ pvr2_context_enter(cp->mc_head); do {
+ if (sp == cp->stream) break;
+ if (sp && sp->user) {
+ code = -EBUSY;
+ break;
+ }
+ pvr2_channel_disclaim_stream(cp);
+ if (!sp) break;
+ sp->user = cp;
+ cp->stream = sp;
+ } while (0); pvr2_context_exit(cp->mc_head);
+ return code;
+}
+
+
+// This is the marker for the real beginning of a legitimate mpeg2 stream.
+static char stream_sync_key[] = {
+ 0x00, 0x00, 0x01, 0xba,
+};
+
+struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
+ struct pvr2_context_stream *sp)
+{
+ struct pvr2_ioread *cp;
+ cp = pvr2_ioread_create();
+ if (!cp) return 0;
+ pvr2_ioread_setup(cp,sp->stream);
+ pvr2_ioread_set_sync_key(cp,stream_sync_key,sizeof(stream_sync_key));
+ return cp;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-context.h b/drivers/media/video/pvrusb2/pvrusb2-context.h
new file mode 100644
index 00000000000000..6327fa1f7e4f24
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-context.h
@@ -0,0 +1,92 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_CONTEXT_H
+#define __PVRUSB2_CONTEXT_H
+
+#include <linux/mutex.h>
+#include <linux/usb.h>
+#include <linux/workqueue.h>
+
+struct pvr2_hdw; /* hardware interface - defined elsewhere */
+struct pvr2_stream; /* stream interface - defined elsewhere */
+
+struct pvr2_context; /* All central state */
+struct pvr2_channel; /* One I/O pathway to a user */
+struct pvr2_context_stream; /* Wrapper for a stream */
+struct pvr2_crit_reg; /* Critical region pointer */
+struct pvr2_ioread; /* Low level stream structure */
+
+struct pvr2_context_stream {
+ struct pvr2_channel *user;
+ struct pvr2_stream *stream;
+};
+
+struct pvr2_context {
+ struct pvr2_channel *mc_first;
+ struct pvr2_channel *mc_last;
+ struct pvr2_hdw *hdw;
+ struct pvr2_context_stream video_stream;
+ struct mutex mutex;
+ int disconnect_flag;
+
+ /* Called after pvr2_context initialization is complete */
+ void (*setup_func)(struct pvr2_context *);
+
+ /* Work queue overhead for out-of-line processing */
+ struct workqueue_struct *workqueue;
+ struct work_struct workinit;
+ struct work_struct workpoll;
+};
+
+struct pvr2_channel {
+ struct pvr2_context *mc_head;
+ struct pvr2_channel *mc_next;
+ struct pvr2_channel *mc_prev;
+ struct pvr2_context_stream *stream;
+ struct pvr2_hdw *hdw;
+ void (*check_func)(struct pvr2_channel *);
+};
+
+void pvr2_context_enter(struct pvr2_context *);
+void pvr2_context_exit(struct pvr2_context *);
+
+struct pvr2_context *pvr2_context_create(struct usb_interface *intf,
+ const struct usb_device_id *devid,
+ void (*setup_func)(struct pvr2_context *));
+void pvr2_context_disconnect(struct pvr2_context *);
+
+void pvr2_channel_init(struct pvr2_channel *,struct pvr2_context *);
+void pvr2_channel_done(struct pvr2_channel *);
+int pvr2_channel_claim_stream(struct pvr2_channel *,
+ struct pvr2_context_stream *);
+struct pvr2_ioread *pvr2_channel_create_mpeg_stream(
+ struct pvr2_context_stream *);
+
+
+#endif /* __PVRUSB2_CONTEXT_H */
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
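A minimal usage sketch (illustrative only, not part of the patch) of the channel API declared above, following the declarations in pvrusb2-context.h and the behavior shown in pvrusb2-context.c; the wrapper function itself is hypothetical and error handling is omitted:

	/* Illustrative sketch -- not part of the patch. */
	static void example_channel_use(struct pvr2_context *mp)
	{
		struct pvr2_channel ch;
		struct pvr2_ioread *rd;

		pvr2_channel_init(&ch, mp);   /* link the channel into the context */
		/* Take ownership of the context's video stream, if nobody has it. */
		if (pvr2_channel_claim_stream(&ch, &mp->video_stream) == 0) {
			/* Wrap the claimed stream for buffered MPEG read access. */
			rd = pvr2_channel_create_mpeg_stream(ch.stream);
			if (rd) {
				/* ... read MPEG data through rd, then release it
				   via the pvrusb2-ioread interface ... */
			}
		}
		pvr2_channel_done(&ch);   /* disclaims the stream and unlinks */
	}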
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.c b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
new file mode 100644
index 00000000000000..d5df9fbeba2ff4
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.c
@@ -0,0 +1,593 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-ctrl.h"
+#include "pvrusb2-hdw-internal.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mutex.h>
+
+
+/* Set the given control. */
+int pvr2_ctrl_set_value(struct pvr2_ctrl *cptr,int val)
+{
+ return pvr2_ctrl_set_mask_value(cptr,~0,val);
+}
+
+
+/* Set/clear specific bits of the given control. */
+int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *cptr,int mask,int val)
+{
+ int ret = 0;
+ if (!cptr) return -EINVAL;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->set_value != 0) {
+ if (cptr->info->type == pvr2_ctl_bitmask) {
+ mask &= cptr->info->def.type_bitmask.valid_bits;
+ } else if (cptr->info->type == pvr2_ctl_int) {
+ if (val < cptr->info->def.type_int.min_value) {
+ break;
+ }
+ if (val > cptr->info->def.type_int.max_value) {
+ break;
+ }
+ } else if (cptr->info->type == pvr2_ctl_enum) {
+ if (val >= cptr->info->def.type_enum.count) {
+ break;
+ }
+ } else if (cptr->info->type != pvr2_ctl_bool) {
+ break;
+ }
+ ret = cptr->info->set_value(cptr,mask,val);
+ } else {
+ ret = -EPERM;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Get the current value of the given control. */
+int pvr2_ctrl_get_value(struct pvr2_ctrl *cptr,int *valptr)
+{
+ int ret = 0;
+ if (!cptr) return -EINVAL;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ ret = cptr->info->get_value(cptr,valptr);
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's type */
+enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return pvr2_ctl_int;
+ return cptr->info->type;
+}
+
+
+/* Retrieve control's maximum value (int type) */
+int pvr2_ctrl_get_max(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = cptr->info->def.type_int.max_value;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's minimum value (int type) */
+int pvr2_ctrl_get_min(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = cptr->info->def.type_int.min_value;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's default value (any type) */
+int pvr2_ctrl_get_def(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = cptr->info->default_value;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's enumeration count (enum only) */
+int pvr2_ctrl_get_cnt(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_enum) {
+ ret = cptr->info->def.type_enum.count;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve control's valid mask bits (bit mask only) */
+int pvr2_ctrl_get_mask(struct pvr2_ctrl *cptr)
+{
+ int ret = 0;
+ if (!cptr) return 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_bitmask) {
+ ret = cptr->info->def.type_bitmask.valid_bits;
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Retrieve the control's name */
+const char *pvr2_ctrl_get_name(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->name;
+}
+
+
+/* Retrieve the control's desc */
+const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->desc;
+}
+
+
+/* Retrieve a control enumeration or bit mask value */
+int pvr2_ctrl_get_valname(struct pvr2_ctrl *cptr,int val,
+ char *bptr,unsigned int bmax,
+ unsigned int *blen)
+{
+ int ret = -EINVAL;
+ if (!cptr) return 0;
+ *blen = 0;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_enum) {
+ const char **names;
+ names = cptr->info->def.type_enum.value_names;
+ if ((val >= 0) &&
+ (val < cptr->info->def.type_enum.count)) {
+ if (names[val]) {
+ *blen = scnprintf(
+ bptr,bmax,"%s",
+ names[val]);
+ } else {
+ *blen = 0;
+ }
+ ret = 0;
+ }
+ } else if (cptr->info->type == pvr2_ctl_bitmask) {
+ const char **names;
+ unsigned int idx;
+ int msk;
+ names = cptr->info->def.type_bitmask.bit_names;
+ val &= cptr->info->def.type_bitmask.valid_bits;
+ for (idx = 0, msk = 1; val; idx++, msk <<= 1) {
+ if (val & msk) {
+ *blen = scnprintf(bptr,bmax,"%s",
+ names[idx]);
+ ret = 0;
+ break;
+ }
+ }
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Return V4L ID for this control or zero if none */
+int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->v4l_id;
+}
+
+
+unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *cptr)
+{
+ unsigned int flags = 0;
+
+ if (cptr->info->get_v4lflags) {
+ flags = cptr->info->get_v4lflags(cptr);
+ }
+
+ if (cptr->info->set_value) {
+ flags &= ~V4L2_CTRL_FLAG_READ_ONLY;
+ } else {
+ flags |= V4L2_CTRL_FLAG_READ_ONLY;
+ }
+
+ return flags;
+}
+
+
+/* Return true if control is writable */
+int pvr2_ctrl_is_writable(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ return cptr->info->set_value != 0;
+}
+
+
+/* Return true if control has custom symbolic representation */
+int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *cptr)
+{
+ if (!cptr) return 0;
+ if (!cptr->info->val_to_sym) return 0;
+ if (!cptr->info->sym_to_val) return 0;
+ return !0;
+}
+
+
+/* Convert a given mask/val to a custom symbolic value */
+int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *cptr,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len)
+{
+ if (!cptr) return -EINVAL;
+ if (!cptr->info->val_to_sym) return -EINVAL;
+ return cptr->info->val_to_sym(cptr,mask,val,buf,maxlen,len);
+}
+
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *cptr,
+ const char *buf,unsigned int len,
+ int *maskptr,int *valptr)
+{
+ if (!cptr) return -EINVAL;
+ if (!cptr->info->sym_to_val) return -EINVAL;
+ return cptr->info->sym_to_val(cptr,buf,len,maskptr,valptr);
+}
+
+
+static unsigned int gen_bitmask_string(int msk,int val,int msk_only,
+ const char **names,
+ char *ptr,unsigned int len)
+{
+ unsigned int idx;
+ long sm,um;
+ int spcFl;
+ unsigned int uc,cnt;
+ const char *idStr;
+
+ spcFl = 0;
+ uc = 0;
+ um = 0;
+ for (idx = 0, sm = 1; msk; idx++, sm <<= 1) {
+ if (sm & msk) {
+ msk &= ~sm;
+ idStr = names[idx];
+ if (idStr) {
+ cnt = scnprintf(ptr,len,"%s%s%s",
+ (spcFl ? " " : ""),
+ (msk_only ? "" :
+ ((val & sm) ? "+" : "-")),
+ idStr);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ } else {
+ um |= sm;
+ }
+ }
+ }
+ if (um) {
+ if (msk_only) {
+ cnt = scnprintf(ptr,len,"%s0x%lx",
+ (spcFl ? " " : ""),
+ um);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ } else if (um & val) {
+ cnt = scnprintf(ptr,len,"%s+0x%lx",
+ (spcFl ? " " : ""),
+ um & val);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ } else if (um & ~val) {
+ cnt = scnprintf(ptr,len,"%s+0x%lx",
+ (spcFl ? " " : ""),
+ um & ~val);
+ ptr += cnt; len -= cnt; uc += cnt;
+ spcFl = !0;
+ }
+ }
+ return uc;
+}
+
+
+static const char *boolNames[] = {
+ "false",
+ "true",
+ "no",
+ "yes",
+};
+
+
+static int parse_token(const char *ptr,unsigned int len,
+ int *valptr,
+ const char **names,unsigned int namecnt)
+{
+ char buf[33];
+ unsigned int slen;
+ unsigned int idx;
+ int negfl;
+ char *p2;
+ *valptr = 0;
+ if (!names) namecnt = 0;
+ for (idx = 0; idx < namecnt; idx++) {
+ if (!names[idx]) continue;
+ slen = strlen(names[idx]);
+ if (slen != len) continue;
+ if (memcmp(names[idx],ptr,slen)) continue;
+ *valptr = idx;
+ return 0;
+ }
+ negfl = 0;
+ if ((*ptr == '-') || (*ptr == '+')) {
+ negfl = (*ptr == '-');
+ ptr++; len--;
+ }
+ if (len >= sizeof(buf)) return -EINVAL;
+ memcpy(buf,ptr,len);
+ buf[len] = 0;
+ *valptr = simple_strtol(buf,&p2,0);
+ if (negfl) *valptr = -(*valptr);
+ if (*p2) return -EINVAL;
+ return 1;
+}
+
+
+static int parse_mtoken(const char *ptr,unsigned int len,
+ int *valptr,
+ const char **names,int valid_bits)
+{
+ char buf[33];
+ unsigned int slen;
+ unsigned int idx;
+ char *p2;
+ int msk;
+ *valptr = 0;
+ for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
+ if (!(msk & valid_bits)) continue;
+ valid_bits &= ~msk;
+ if (!names[idx]) continue;
+ slen = strlen(names[idx]);
+ if (slen != len) continue;
+ if (memcmp(names[idx],ptr,slen)) continue;
+ *valptr = msk;
+ return 0;
+ }
+ if (len >= sizeof(buf)) return -EINVAL;
+ memcpy(buf,ptr,len);
+ buf[len] = 0;
+ *valptr = simple_strtol(buf,&p2,0);
+ if (*p2) return -EINVAL;
+ return 0;
+}
+
+
+static int parse_tlist(const char *ptr,unsigned int len,
+ int *maskptr,int *valptr,
+ const char **names,int valid_bits)
+{
+ unsigned int cnt;
+ int mask,val,kv,mode,ret;
+ mask = 0;
+ val = 0;
+ ret = 0;
+ while (len) {
+ cnt = 0;
+ while ((cnt < len) &&
+ ((ptr[cnt] <= 32) ||
+ (ptr[cnt] >= 127))) cnt++;
+ ptr += cnt;
+ len -= cnt;
+ mode = 0;
+ if ((*ptr == '-') || (*ptr == '+')) {
+ mode = (*ptr == '-') ? -1 : 1;
+ ptr++;
+ len--;
+ }
+ cnt = 0;
+ while (cnt < len) {
+ if (ptr[cnt] <= 32) break;
+ if (ptr[cnt] >= 127) break;
+ cnt++;
+ }
+ if (!cnt) break;
+ if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) {
+ ret = -EINVAL;
+ break;
+ }
+ ptr += cnt;
+ len -= cnt;
+ switch (mode) {
+ case 0:
+ mask = valid_bits;
+ val |= kv;
+ break;
+ case -1:
+ mask |= kv;
+ val &= ~kv;
+ break;
+ case 1:
+ mask |= kv;
+ val |= kv;
+ break;
+ default:
+ break;
+ }
+ }
+ *maskptr = mask;
+ *valptr = val;
+ return ret;
+}
+
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *cptr,
+ const char *ptr,unsigned int len,
+ int *maskptr,int *valptr)
+{
+ int ret = -EINVAL;
+ unsigned int cnt;
+
+ *maskptr = 0;
+ *valptr = 0;
+
+ cnt = 0;
+ while ((cnt < len) && ((ptr[cnt] <= 32) || (ptr[cnt] >= 127))) cnt++;
+ len -= cnt; ptr += cnt;
+ cnt = 0;
+ while ((cnt < len) && ((ptr[len-(cnt+1)] <= 32) ||
+ (ptr[len-(cnt+1)] >= 127))) cnt++;
+ len -= cnt;
+
+ if (!len) return -EINVAL;
+
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ if (cptr->info->type == pvr2_ctl_int) {
+ ret = parse_token(ptr,len,valptr,0,0);
+ if ((ret >= 0) &&
+ ((*valptr < cptr->info->def.type_int.min_value) ||
+ (*valptr > cptr->info->def.type_int.max_value))) {
+ ret = -ERANGE;
+ }
+ if (maskptr) *maskptr = ~0;
+ } else if (cptr->info->type == pvr2_ctl_bool) {
+ ret = parse_token(
+ ptr,len,valptr,boolNames,
+ sizeof(boolNames)/sizeof(boolNames[0]));
+ if (ret == 1) {
+ *valptr = *valptr ? !0 : 0;
+ } else if (ret == 0) {
+ *valptr = (*valptr & 1) ? !0 : 0;
+ }
+ if (maskptr) *maskptr = 1;
+ } else if (cptr->info->type == pvr2_ctl_enum) {
+ ret = parse_token(
+ ptr,len,valptr,
+ cptr->info->def.type_enum.value_names,
+ cptr->info->def.type_enum.count);
+ if ((ret >= 0) &&
+ ((*valptr < 0) ||
+ (*valptr >= cptr->info->def.type_enum.count))) {
+ ret = -ERANGE;
+ }
+ if (maskptr) *maskptr = ~0;
+ } else if (cptr->info->type == pvr2_ctl_bitmask) {
+ ret = parse_tlist(
+ ptr,len,maskptr,valptr,
+ cptr->info->def.type_bitmask.bit_names,
+ cptr->info->def.type_bitmask.valid_bits);
+ }
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/* Convert a given mask/val to a symbolic value */
+int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *cptr,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len)
+{
+ int ret = -EINVAL;
+
+ *len = 0;
+ if (cptr->info->type == pvr2_ctl_int) {
+ *len = scnprintf(buf,maxlen,"%d",val);
+ ret = 0;
+ } else if (cptr->info->type == pvr2_ctl_bool) {
+ *len = scnprintf(buf,maxlen,"%s",val ? "true" : "false");
+ ret = 0;
+ } else if (cptr->info->type == pvr2_ctl_enum) {
+ const char **names;
+ names = cptr->info->def.type_enum.value_names;
+ if ((val >= 0) &&
+ (val < cptr->info->def.type_enum.count)) {
+ if (names[val]) {
+ *len = scnprintf(
+ buf,maxlen,"%s",
+ names[val]);
+ } else {
+ *len = 0;
+ }
+ ret = 0;
+ }
+ } else if (cptr->info->type == pvr2_ctl_bitmask) {
+ *len = gen_bitmask_string(
+ val & mask & cptr->info->def.type_bitmask.valid_bits,
+ ~0,!0,
+ cptr->info->def.type_bitmask.bit_names,
+ buf,maxlen);
+ }
+ return ret;
+}
+
+
+/* Convert a given mask/val to a symbolic value */
+int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *cptr,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len)
+{
+ int ret;
+ LOCK_TAKE(cptr->hdw->big_lock); do {
+ ret = pvr2_ctrl_value_to_sym_internal(cptr,mask,val,
+ buf,maxlen,len);
+ } while(0); LOCK_GIVE(cptr->hdw->big_lock);
+ return ret;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ctrl.h b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h
new file mode 100644
index 00000000000000..c1680053cd644f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ctrl.h
@@ -0,0 +1,123 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_CTRL_H
+#define __PVRUSB2_CTRL_H
+
+struct pvr2_ctrl;
+
+enum pvr2_ctl_type {
+ pvr2_ctl_int = 0,
+ pvr2_ctl_enum = 1,
+ pvr2_ctl_bitmask = 2,
+ pvr2_ctl_bool = 3,
+};
+
+
+/* Set the given control. */
+int pvr2_ctrl_set_value(struct pvr2_ctrl *,int val);
+
+/* Set/clear specific bits of the given control. */
+int pvr2_ctrl_set_mask_value(struct pvr2_ctrl *,int mask,int val);
+
+/* Get the current value of the given control. */
+int pvr2_ctrl_get_value(struct pvr2_ctrl *,int *valptr);
+
+/* Retrieve control's type */
+enum pvr2_ctl_type pvr2_ctrl_get_type(struct pvr2_ctrl *);
+
+/* Retrieve control's maximum value (int type) */
+int pvr2_ctrl_get_max(struct pvr2_ctrl *);
+
+/* Retrieve control's minimum value (int type) */
+int pvr2_ctrl_get_min(struct pvr2_ctrl *);
+
+/* Retrieve control's default value (any type) */
+int pvr2_ctrl_get_def(struct pvr2_ctrl *);
+
+/* Retrieve control's enumeration count (enum only) */
+int pvr2_ctrl_get_cnt(struct pvr2_ctrl *);
+
+/* Retrieve control's valid mask bits (bit mask only) */
+int pvr2_ctrl_get_mask(struct pvr2_ctrl *);
+
+/* Retrieve the control's name */
+const char *pvr2_ctrl_get_name(struct pvr2_ctrl *);
+
+/* Retrieve the control's desc */
+const char *pvr2_ctrl_get_desc(struct pvr2_ctrl *);
+
+/* Retrieve a control enumeration or bit mask value */
+int pvr2_ctrl_get_valname(struct pvr2_ctrl *,int,char *,unsigned int,
+ unsigned int *);
+
+/* Return true if control is writable */
+int pvr2_ctrl_is_writable(struct pvr2_ctrl *);
+
+/* Return V4L flags value for control (or zero if there is no v4l control
+ actually under this control) */
+unsigned int pvr2_ctrl_get_v4lflags(struct pvr2_ctrl *);
+
+/* Return V4L ID for this control or zero if none */
+int pvr2_ctrl_get_v4lid(struct pvr2_ctrl *);
+
+/* Return true if control has custom symbolic representation */
+int pvr2_ctrl_has_custom_symbols(struct pvr2_ctrl *);
+
+/* Convert a given mask/val to a custom symbolic value */
+int pvr2_ctrl_custom_value_to_sym(struct pvr2_ctrl *,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len);
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_custom_sym_to_value(struct pvr2_ctrl *,
+ const char *buf,unsigned int len,
+ int *maskptr,int *valptr);
+
+/* Convert a given mask/val to a symbolic value */
+int pvr2_ctrl_value_to_sym(struct pvr2_ctrl *,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len);
+
+/* Convert a symbolic value to a mask/value pair */
+int pvr2_ctrl_sym_to_value(struct pvr2_ctrl *,
+ const char *buf,unsigned int len,
+ int *maskptr,int *valptr);
+
+/* Convert a given mask/val to a symbolic value - must already be
+ inside of critical region. */
+int pvr2_ctrl_value_to_sym_internal(struct pvr2_ctrl *,
+ int mask,int val,
+ char *buf,unsigned int maxlen,
+ unsigned int *len);
+
+#endif /* __PVRUSB2_CTRL_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
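For orientation, a short sketch (illustrative only, not part of the patch) of how the accessors declared above might be used once a control pointer has been obtained from the hdw layer; the dump helper itself is hypothetical:

	/* Illustrative sketch -- not part of the patch. */
	static void example_dump_ctrl(struct pvr2_ctrl *cptr)
	{
		int val = 0;

		if (pvr2_ctrl_get_value(cptr, &val) != 0) return;
		printk(KERN_INFO "pvrusb2: control \"%s\" = %d\n",
		       pvr2_ctrl_get_name(cptr), val);
		if (pvr2_ctrl_get_type(cptr) == pvr2_ctl_int) {
			printk(KERN_INFO "pvrusb2:   range [%d..%d]\n",
			       pvr2_ctrl_get_min(cptr),
			       pvr2_ctrl_get_max(cptr));
		}
		if (pvr2_ctrl_is_writable(cptr)) {
			/* Writing back the current value is a harmless update. */
			pvr2_ctrl_set_value(cptr, val);
		}
	}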
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
new file mode 100644
index 00000000000000..27eadaff75a0ee
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
@@ -0,0 +1,279 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+
+ This source file is specifically designed to interface with the
+ cx2584x, in kernels 2.6.16 or newer.
+
+*/
+
+#include "pvrusb2-cx2584x-v4l.h"
+#include "pvrusb2-video-v4l.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+
+
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <media/cx25840.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+struct pvr2_v4l_cx2584x {
+ struct pvr2_i2c_handler handler;
+ struct pvr2_decoder_ctrl ctrl;
+ struct pvr2_i2c_client *client;
+ struct pvr2_hdw *hdw;
+ unsigned long stale_mask;
+};
+
+
+static void set_input(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct v4l2_routing route;
+ enum cx25840_video_input vid_input;
+ enum cx25840_audio_input aud_input;
+
+ memset(&route,0,sizeof(route));
+
+ switch(hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ vid_input = CX25840_COMPOSITE7;
+ aud_input = CX25840_AUDIO8;
+ break;
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ vid_input = CX25840_COMPOSITE3;
+ aud_input = CX25840_AUDIO_SERIAL;
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ vid_input = CX25840_SVIDEO1;
+ aud_input = CX25840_AUDIO_SERIAL;
+ break;
+ case PVR2_CVAL_INPUT_RADIO:
+ default:
+ // Just set it to be composite input for now...
+ vid_input = CX25840_COMPOSITE3;
+ aud_input = CX25840_AUDIO_SERIAL;
+ break;
+ }
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_input vid=0x%x aud=0x%x",
+ vid_input,aud_input);
+ route.input = (u32)vid_input;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route);
+ route.input = (u32)aud_input;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
+}
+
+
+static int check_input(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->input_dirty != 0;
+}
+
+
+static void set_audio(struct pvr2_v4l_cx2584x *ctxt)
+{
+ u32 val;
+ struct pvr2_hdw *hdw = ctxt->hdw;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx2584x set_audio %d",
+ hdw->srate_val);
+ switch (hdw->srate_val) {
+ default:
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000:
+ val = 48000;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100:
+ val = 44100;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000:
+ val = 32000;
+ break;
+ }
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val);
+}
+
+
+static int check_audio(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->srate_dirty != 0;
+}
+
+
+struct pvr2_v4l_cx2584x_ops {
+ void (*update)(struct pvr2_v4l_cx2584x *);
+ int (*check)(struct pvr2_v4l_cx2584x *);
+};
+
+
+static const struct pvr2_v4l_cx2584x_ops decoder_ops[] = {
+ { .update = set_input, .check = check_input},
+ { .update = set_audio, .check = check_audio},
+};
+
+
+static void decoder_detach(struct pvr2_v4l_cx2584x *ctxt)
+{
+ ctxt->client->handler = 0;
+ ctxt->hdw->decoder_ctrl = 0;
+ kfree(ctxt);
+}
+
+
+static int decoder_check(struct pvr2_v4l_cx2584x *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (decoder_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void decoder_update(struct pvr2_v4l_cx2584x *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ decoder_ops[idx].update(ctxt);
+ }
+}
+
+
+static void decoder_enable(struct pvr2_v4l_cx2584x *ctxt,int fl)
+{
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_enable(%d)",fl);
+ pvr2_v4l2_cmd_stream(ctxt->client,fl);
+}
+
+
+static int decoder_detect(struct pvr2_i2c_client *cp)
+{
+ int ret;
+ /* Attempt to query the decoder - let's see if it will answer */
+ struct v4l2_queryctrl qc;
+
+ memset(&qc,0,sizeof(qc));
+
+ qc.id = V4L2_CID_BRIGHTNESS;
+
+ ret = pvr2_i2c_client_cmd(cp,VIDIOC_QUERYCTRL,&qc);
+ return ret == 0; /* Return true if it answered */
+}
+
+
+static int decoder_is_tuned(struct pvr2_v4l_cx2584x *ctxt)
+{
+ struct v4l2_tuner vt;
+ int ret;
+
+ memset(&vt,0,sizeof(vt));
+ ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
+ if (ret < 0) return -EINVAL;
+ return vt.signal ? 1 : 0;
+}
+
+
+static unsigned int decoder_describe(struct pvr2_v4l_cx2584x *ctxt,
+ char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-cx2584x-v4l");
+}
+
+
+static void decoder_reset(struct pvr2_v4l_cx2584x *ctxt)
+{
+ int ret;
+ ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_RESET,0);
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c cx25840 decoder_reset (ret=%d)",ret);
+}
+
+
+static const struct pvr2_i2c_handler_functions hfuncs = {
+ .detach = (void (*)(void *))decoder_detach,
+ .check = (int (*)(void *))decoder_check,
+ .update = (void (*)(void *))decoder_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe,
+};
+
+
+int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *hdw,
+ struct pvr2_i2c_client *cp)
+{
+ struct pvr2_v4l_cx2584x *ctxt;
+
+ if (hdw->decoder_ctrl) return 0;
+ if (cp->handler) return 0;
+ if (!decoder_detect(cp)) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->handler.func_data = ctxt;
+ ctxt->handler.func_table = &hfuncs;
+ ctxt->ctrl.ctxt = ctxt;
+ ctxt->ctrl.detach = (void (*)(void *))decoder_detach;
+ ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable;
+ ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned;
+ ctxt->ctrl.force_reset = (void (*)(void*))decoder_reset;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->stale_mask = (1 << (sizeof(decoder_ops)/
+ sizeof(decoder_ops[0]))) - 1;
+ hdw->decoder_ctrl = &ctxt->ctrl;
+ cp->handler = &ctxt->handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x cx2584x V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h
new file mode 100644
index 00000000000000..54b2844e7a716f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_CX2584X_V4L_H
+#define __PVRUSB2_CX2584X_V4L_H
+
+/*
+
+ This module connects the pvrusb2 driver to the I2C chip level
+ driver which handles combined device audio & video processing.
+ This interface is used internally by the driver; higher level code
+ should only interact through the interface provided by
+ pvrusb2-hdw.h.
+
+*/
+
+
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_cx2584x_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+
+#endif /* __PVRUSB2_CX2584X_V4L_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debug.h b/drivers/media/video/pvrusb2/pvrusb2-debug.h
new file mode 100644
index 00000000000000..d95a8588e4f840
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debug.h
@@ -0,0 +1,67 @@
+/*
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_DEBUG_H
+#define __PVRUSB2_DEBUG_H
+
+extern int pvrusb2_debug;
+
+#define pvr2_trace(msk, fmt, arg...) do { if ((msk) & pvrusb2_debug) printk(KERN_INFO "pvrusb2: " fmt "\n", ##arg); } while (0)
+
+/* These are listed in *rough* order of decreasing usefulness and
+ increasing noise level. */
+#define PVR2_TRACE_INFO (1 << 0) // Normal messages
+#define PVR2_TRACE_ERROR_LEGS (1 << 1) // error messages
+#define PVR2_TRACE_TOLERANCE (1 << 2) // track tolerance-affected errors
+#define PVR2_TRACE_TRAP (1 << 3) // Trap & report misbehavior from app
+#define PVR2_TRACE_INIT (1 << 4) // misc initialization steps
+#define PVR2_TRACE_START_STOP (1 << 5) // Streaming start / stop
+#define PVR2_TRACE_CTL (1 << 6) // commit of control changes
+#define PVR2_TRACE_DEBUG (1 << 7) // Temporary debug code
+#define PVR2_TRACE_EEPROM (1 << 8) // eeprom parsing / report
+#define PVR2_TRACE_STRUCT (1 << 9) // internal struct creation
+#define PVR2_TRACE_OPEN_CLOSE (1 << 10) // application open / close
+#define PVR2_TRACE_CREG (1 << 11) // Main critical region entry / exit
+#define PVR2_TRACE_SYSFS (1 << 12) // Sysfs driven I/O
+#define PVR2_TRACE_FIRMWARE (1 << 13) // firmware upload actions
+#define PVR2_TRACE_CHIPS (1 << 14) // chip broadcast operation
+#define PVR2_TRACE_I2C (1 << 15) // I2C related stuff
+#define PVR2_TRACE_I2C_CMD (1 << 16) // Software commands to I2C modules
+#define PVR2_TRACE_I2C_CORE (1 << 17) // I2C core debugging
+#define PVR2_TRACE_I2C_TRAF (1 << 18) // I2C traffic through the adapter
+#define PVR2_TRACE_V4LIOCTL (1 << 19) // v4l ioctl details
+#define PVR2_TRACE_ENCODER (1 << 20) // mpeg2 encoder operation
+#define PVR2_TRACE_BUF_POOL (1 << 21) // Track buffer pool management
+#define PVR2_TRACE_BUF_FLOW (1 << 22) // Track buffer flow in system
+#define PVR2_TRACE_DATA_FLOW (1 << 23) // Track data flow
+#define PVR2_TRACE_DEBUGIFC (1 << 24) // Debug interface actions
+#define PVR2_TRACE_GPIO (1 << 25) // GPIO state bit changes
+
+
+#endif /* __PVRUSB2_DEBUG_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
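A small illustration (not part of the patch) of the tracing scheme above: each pvr2_trace() call names one category bit, and the message is printed only when that bit is also set in the module-wide pvrusb2_debug mask (presumably configured elsewhere, e.g. at module load time):

	/* Illustrative only -- not part of the patch. */
	pvrusb2_debug |= PVR2_TRACE_INIT | PVR2_TRACE_ERROR_LEGS;

	pvr2_trace(PVR2_TRACE_INIT, "device setup step %d complete", 3);  /* emitted */
	pvr2_trace(PVR2_TRACE_I2C_TRAF, "i2c transfer of %d bytes", 4);   /* suppressed */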
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.c b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
new file mode 100644
index 00000000000000..586900e365ff2e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.c
@@ -0,0 +1,478 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/slab.h>
+#include "pvrusb2-debugifc.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-i2c-core.h"
+
+struct debugifc_mask_item {
+ const char *name;
+ unsigned long msk;
+};
+
+static struct debugifc_mask_item mask_items[] = {
+ {"ENC_FIRMWARE",(1<<PVR2_SUBSYS_B_ENC_FIRMWARE)},
+ {"ENC_CFG",(1<<PVR2_SUBSYS_B_ENC_CFG)},
+ {"DIG_RUN",(1<<PVR2_SUBSYS_B_DIGITIZER_RUN)},
+ {"USB_RUN",(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)},
+ {"ENC_RUN",(1<<PVR2_SUBSYS_B_ENC_RUN)},
+};
+
+
+static unsigned int debugifc_count_whitespace(const char *buf,
+ unsigned int count)
+{
+ unsigned int scnt;
+ char ch;
+
+ for (scnt = 0; scnt < count; scnt++) {
+ ch = buf[scnt];
+ if (ch == ' ') continue;
+ if (ch == '\t') continue;
+ if (ch == '\n') continue;
+ break;
+ }
+ return scnt;
+}
+
+
+static unsigned int debugifc_count_nonwhitespace(const char *buf,
+ unsigned int count)
+{
+ unsigned int scnt;
+ char ch;
+
+ for (scnt = 0; scnt < count; scnt++) {
+ ch = buf[scnt];
+ if (ch == ' ') break;
+ if (ch == '\t') break;
+ if (ch == '\n') break;
+ }
+ return scnt;
+}
+
+
+static unsigned int debugifc_isolate_word(const char *buf,unsigned int count,
+ const char **wstrPtr,
+ unsigned int *wlenPtr)
+{
+ const char *wptr;
+ unsigned int consume_cnt = 0;
+ unsigned int wlen;
+ unsigned int scnt;
+
+ wptr = 0;
+ wlen = 0;
+ scnt = debugifc_count_whitespace(buf,count);
+ consume_cnt += scnt; count -= scnt; buf += scnt;
+ if (!count) goto done;
+
+ scnt = debugifc_count_nonwhitespace(buf,count);
+ if (!scnt) goto done;
+ wptr = buf;
+ wlen = scnt;
+ consume_cnt += scnt; count -= scnt; buf += scnt;
+
+ done:
+ *wstrPtr = wptr;
+ *wlenPtr = wlen;
+ return consume_cnt;
+}
+
+
+static int debugifc_parse_unsigned_number(const char *buf,unsigned int count,
+ u32 *num_ptr)
+{
+ u32 result = 0;
+ u32 val;
+ int ch;
+ int radix = 10;
+ if ((count >= 2) && (buf[0] == '0') &&
+ ((buf[1] == 'x') || (buf[1] == 'X'))) {
+ radix = 16;
+ count -= 2;
+ buf += 2;
+ } else if ((count >= 1) && (buf[0] == '0')) {
+ radix = 8;
+ }
+
+ while (count--) {
+ ch = *buf++;
+ if ((ch >= '0') && (ch <= '9')) {
+ val = ch - '0';
+ } else if ((ch >= 'a') && (ch <= 'f')) {
+ val = ch - 'a' + 10;
+ } else if ((ch >= 'A') && (ch <= 'F')) {
+ val = ch - 'A' + 10;
+ } else {
+ return -EINVAL;
+ }
+ if (val >= radix) return -EINVAL;
+ result *= radix;
+ result += val;
+ }
+ *num_ptr = result;
+ return 0;
+}
+
+
+static int debugifc_match_keyword(const char *buf,unsigned int count,
+ const char *keyword)
+{
+ unsigned int kl;
+ if (!keyword) return 0;
+ kl = strlen(keyword);
+ if (kl != count) return 0;
+ return !memcmp(buf,keyword,kl);
+}
+
+
+static unsigned long debugifc_find_mask(const char *buf,unsigned int count)
+{
+ struct debugifc_mask_item *mip;
+ unsigned int idx;
+ for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) {
+ mip = mask_items + idx;
+ if (debugifc_match_keyword(buf,count,mip->name)) {
+ return mip->msk;
+ }
+ }
+ return 0;
+}
+
+
+static int debugifc_print_mask(char *buf,unsigned int sz,
+ unsigned long msk,unsigned long val)
+{
+ struct debugifc_mask_item *mip;
+ unsigned int idx;
+ int bcnt = 0;
+ int ccnt;
+ for (idx = 0; idx < sizeof(mask_items)/sizeof(mask_items[0]); idx++) {
+ mip = mask_items + idx;
+ if (!(mip->msk & msk)) continue;
+ ccnt = scnprintf(buf,sz,"%s%c%s",
+ (bcnt ? " " : ""),
+ ((mip->msk & val) ? '+' : '-'),
+ mip->name);
+ sz -= ccnt;
+ buf += ccnt;
+ bcnt += ccnt;
+ }
+ return bcnt;
+}
+
+static unsigned int debugifc_parse_subsys_mask(const char *buf,
+ unsigned int count,
+ unsigned long *mskPtr,
+ unsigned long *valPtr)
+{
+ const char *wptr;
+ unsigned int consume_cnt = 0;
+ unsigned int scnt;
+ unsigned int wlen;
+ int mode;
+ unsigned long m1,msk,val;
+
+ msk = 0;
+ val = 0;
+
+ while (count) {
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) break;
+ consume_cnt += scnt; count -= scnt; buf += scnt;
+ if (!wptr) break;
+
+ mode = 0;
+ if (wlen) switch (wptr[0]) {
+ case '+':
+ wptr++;
+ wlen--;
+ break;
+ case '-':
+ mode = 1;
+ wptr++;
+ wlen--;
+ break;
+ }
+ if (!wlen) continue;
+ m1 = debugifc_find_mask(wptr,wlen);
+ if (!m1) break;
+ msk |= m1;
+ if (!mode) val |= m1;
+ }
+ *mskPtr = msk;
+ *valPtr = val;
+ return consume_cnt;
+}
+
+
+int pvr2_debugifc_print_info(struct pvr2_hdw *hdw,char *buf,unsigned int acnt)
+{
+ int bcnt = 0;
+ int ccnt;
+ struct pvr2_hdw_debug_info dbg;
+
+ pvr2_hdw_get_debug_info(hdw,&dbg);
+
+ ccnt = scnprintf(buf,acnt,"big lock %s; ctl lock %s",
+ (dbg.big_lock_held ? "held" : "free"),
+ (dbg.ctl_lock_held ? "held" : "free"));
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ if (dbg.ctl_lock_held) {
+ ccnt = scnprintf(buf,acnt,"; cmd_state=%d cmd_code=%d"
+ " cmd_wlen=%d cmd_rlen=%d"
+ " wpend=%d rpend=%d tmout=%d rstatus=%d"
+ " wstatus=%d",
+ dbg.cmd_debug_state,dbg.cmd_code,
+ dbg.cmd_debug_write_len,
+ dbg.cmd_debug_read_len,
+ dbg.cmd_debug_write_pend,
+ dbg.cmd_debug_read_pend,
+ dbg.cmd_debug_timeout,
+ dbg.cmd_debug_rstatus,
+ dbg.cmd_debug_wstatus);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ }
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(
+ buf,acnt,"driver flags: %s %s %s\n",
+ (dbg.flag_init_ok ? "initialized" : "uninitialized"),
+ (dbg.flag_ok ? "ok" : "fail"),
+ (dbg.flag_disconnected ? "disconnected" : "connected"));
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,dbg.subsys_flags,dbg.subsys_flags);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,~dbg.subsys_flags,dbg.subsys_flags);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ ccnt = scnprintf(buf,acnt,"Attached I2C modules:\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = pvr2_i2c_report(hdw,buf,acnt);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ return bcnt;
+}
+
+
+int pvr2_debugifc_print_status(struct pvr2_hdw *hdw,
+ char *buf,unsigned int acnt)
+{
+ int bcnt = 0;
+ int ccnt;
+ unsigned long msk;
+ int ret;
+ u32 gpio_dir,gpio_in,gpio_out;
+
+ ret = pvr2_hdw_is_hsm(hdw);
+ ccnt = scnprintf(buf,acnt,"USB link speed: %s\n",
+ (ret < 0 ? "FAIL" : (ret ? "high" : "full")));
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ gpio_dir = 0; gpio_in = 0; gpio_out = 0;
+ pvr2_hdw_gpio_get_dir(hdw,&gpio_dir);
+ pvr2_hdw_gpio_get_out(hdw,&gpio_out);
+ pvr2_hdw_gpio_get_in(hdw,&gpio_in);
+ ccnt = scnprintf(buf,acnt,"GPIO state: dir=0x%x in=0x%x out=0x%x\n",
+ gpio_dir,gpio_in,gpio_out);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ ccnt = scnprintf(buf,acnt,"Streaming is %s\n",
+ pvr2_hdw_get_streaming(hdw) ? "on" : "off");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ msk = pvr2_hdw_subsys_get(hdw);
+ ccnt = scnprintf(buf,acnt,"Subsystems enabled / configured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,msk,msk);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"Subsystems disabled / unconfigured: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,~msk,msk);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ msk = pvr2_hdw_subsys_stream_get(hdw);
+ ccnt = scnprintf(buf,acnt,"Subsystems stopped on stream shutdown: ");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = debugifc_print_mask(buf,acnt,msk,msk);
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+ ccnt = scnprintf(buf,acnt,"\n");
+ bcnt += ccnt; acnt -= ccnt; buf += ccnt;
+
+ return bcnt;
+}
+
+
+int pvr2_debugifc_do1cmd(struct pvr2_hdw *hdw,const char *buf,
+ unsigned int count)
+{
+ const char *wptr;
+ unsigned int wlen;
+ unsigned int scnt;
+
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return 0;
+ count -= scnt; buf += scnt;
+ if (!wptr) return 0;
+
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,"debugifc cmd: \"%.*s\"",wlen,wptr);
+ if (debugifc_match_keyword(wptr,wlen,"reset")) {
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ if (debugifc_match_keyword(wptr,wlen,"cpu")) {
+ pvr2_hdw_cpureset_assert(hdw,!0);
+ pvr2_hdw_cpureset_assert(hdw,0);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"bus")) {
+ pvr2_hdw_device_reset(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"soft")) {
+ return pvr2_hdw_cmd_powerup(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"deep")) {
+ return pvr2_hdw_cmd_deep_reset(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"firmware")) {
+ return pvr2_upload_firmware2(hdw);
+ } else if (debugifc_match_keyword(wptr,wlen,"decoder")) {
+ return pvr2_hdw_cmd_decoder_reset(hdw);
+ }
+ return -EINVAL;
+ } else if (debugifc_match_keyword(wptr,wlen,"subsys_flags")) {
+ unsigned long msk = 0;
+ unsigned long val = 0;
+ if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) {
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,
+ "debugifc parse error on subsys mask");
+ return -EINVAL;
+ }
+ pvr2_hdw_subsys_bit_chg(hdw,msk,val);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"stream_flags")) {
+ unsigned long msk = 0;
+ unsigned long val = 0;
+ if (debugifc_parse_subsys_mask(buf,count,&msk,&val) != count) {
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,
+ "debugifc parse error on stream mask");
+ return -EINVAL;
+ }
+ pvr2_hdw_subsys_stream_bit_chg(hdw,msk,val);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"cpufw")) {
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ if (debugifc_match_keyword(wptr,wlen,"fetch")) {
+ pvr2_hdw_cpufw_set_enabled(hdw,!0);
+ return 0;
+ } else if (debugifc_match_keyword(wptr,wlen,"done")) {
+ pvr2_hdw_cpufw_set_enabled(hdw,0);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+ } else if (debugifc_match_keyword(wptr,wlen,"gpio")) {
+ int dir_fl = 0;
+ int ret;
+ u32 msk,val;
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ if (debugifc_match_keyword(wptr,wlen,"dir")) {
+ dir_fl = !0;
+ } else if (!debugifc_match_keyword(wptr,wlen,"out")) {
+ return -EINVAL;
+ }
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (!scnt) return -EINVAL;
+ count -= scnt; buf += scnt;
+ if (!wptr) return -EINVAL;
+ ret = debugifc_parse_unsigned_number(wptr,wlen,&msk);
+ if (ret) return ret;
+ scnt = debugifc_isolate_word(buf,count,&wptr,&wlen);
+ if (wptr) {
+ ret = debugifc_parse_unsigned_number(wptr,wlen,&val);
+ if (ret) return ret;
+ } else {
+ val = msk;
+ msk = 0xffffffff;
+ }
+ if (dir_fl) {
+ ret = pvr2_hdw_gpio_chg_dir(hdw,msk,val);
+ } else {
+ ret = pvr2_hdw_gpio_chg_out(hdw,msk,val);
+ }
+ return ret;
+ }
+ pvr2_trace(PVR2_TRACE_DEBUGIFC,
+ "debugifc failed to recognize cmd: \"%.*s\"",wlen,wptr);
+ return -EINVAL;
+}
+
+
+int pvr2_debugifc_docmd(struct pvr2_hdw *hdw,const char *buf,
+ unsigned int count)
+{
+ unsigned int bcnt = 0;
+ int ret;
+
+ while (count) {
+ for (bcnt = 0; bcnt < count; bcnt++) {
+ if (buf[bcnt] == '\n') break;
+ }
+
+ ret = pvr2_debugifc_do1cmd(hdw,buf,bcnt);
+ if (ret < 0) return ret;
+ if (bcnt < count) bcnt++;
+ buf += bcnt;
+ count -= bcnt;
+ }
+
+ return 0;
+}
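
pvr2_debugifc_docmd() simply splits the incoming buffer on newlines and hands each line to pvr2_debugifc_do1cmd(). Here is a small standalone sketch of that dispatch loop (illustrative only, not part of the patch), using command strings built solely from keywords that do1cmd() above recognizes.

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for pvr2_debugifc_do1cmd(); just echoes the command line. */
    static int demo_do1cmd(const char *buf, unsigned int count)
    {
            printf("dispatch: \"%.*s\"\n", (int)count, buf);
            return 0;
    }

    int main(void)
    {
            const char *cmds = "reset decoder\ncpufw fetch\nreset cpu\n";
            unsigned int count = strlen(cmds);
            unsigned int bcnt;

            while (count) {
                    for (bcnt = 0; bcnt < count; bcnt++) {
                            if (cmds[bcnt] == '\n') break;
                    }
                    if (demo_do1cmd(cmds, bcnt) < 0) return 1;
                    if (bcnt < count) bcnt++;  /* consume the newline too */
                    cmds += bcnt;
                    count -= bcnt;
            }
            return 0;
    }
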
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-debugifc.h b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h
new file mode 100644
index 00000000000000..990b02d35d3690
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-debugifc.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_DEBUGIFC_H
+#define __PVRUSB2_DEBUGIFC_H
+
+struct pvr2_hdw;
+
+/* Non-intrusively print some useful debugging info from inside the
+ driver. This should work even if the driver appears to be
+ wedged. */
+int pvr2_debugifc_print_info(struct pvr2_hdw *,
+ char *buf_ptr,unsigned int buf_size);
+
+/* Print general status of driver. This will also trigger a probe of
+ the USB link. Unlike print_info(), this one synchronizes with the
+ driver so the information should be self-consistent (but it will
+ hang if the driver is wedged). */
+int pvr2_debugifc_print_status(struct pvr2_hdw *,
+ char *buf_ptr,unsigned int buf_size);
+
+/* Parse a string command into a driver action. */
+int pvr2_debugifc_docmd(struct pvr2_hdw *,
+ const char *buf_ptr,unsigned int buf_size);
+
+#endif /* __PVRUSB2_DEBUGIFC_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.c b/drivers/media/video/pvrusb2/pvrusb2-demod.c
new file mode 100644
index 00000000000000..9686569a11f61d
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-demod.c
@@ -0,0 +1,126 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2.h"
+#include "pvrusb2-util.h"
+#include "pvrusb2-demod.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/tuner.h>
+#include <media/v4l2-common.h>
+
+
+struct pvr2_demod_handler {
+ struct pvr2_hdw *hdw;
+ struct pvr2_i2c_client *client;
+ struct pvr2_i2c_handler i2c_handler;
+ int type_update_fl;
+};
+
+
+static void set_config(struct pvr2_demod_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ int cfg = 0;
+
+ switch (hdw->tuner_type) {
+ case TUNER_PHILIPS_FM1216ME_MK3:
+ case TUNER_PHILIPS_FM1236_MK3:
+ cfg = TDA9887_PORT1_ACTIVE|TDA9887_PORT2_ACTIVE;
+ break;
+ default:
+ break;
+ }
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c demod set_config(0x%x)",cfg);
+ pvr2_i2c_client_cmd(ctxt->client,TDA9887_SET_CONFIG,&cfg);
+ ctxt->type_update_fl = 0;
+}
+
+
+static int demod_check(struct pvr2_demod_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ if (hdw->tuner_updated) ctxt->type_update_fl = !0;
+ return ctxt->type_update_fl != 0;
+}
+
+
+static void demod_update(struct pvr2_demod_handler *ctxt)
+{
+ if (ctxt->type_update_fl) set_config(ctxt);
+}
+
+
+static void demod_detach(struct pvr2_demod_handler *ctxt)
+{
+ ctxt->client->handler = 0;
+ kfree(ctxt);
+}
+
+
+static unsigned int demod_describe(struct pvr2_demod_handler *ctxt,char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-demod");
+}
+
+
+static const struct pvr2_i2c_handler_functions tuner_funcs = {
+ .detach = (void (*)(void *))demod_detach,
+ .check = (int (*)(void *))demod_check,
+ .update = (void (*)(void *))demod_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))demod_describe,
+};
+
+
+int pvr2_i2c_demod_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_demod_handler *ctxt;
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->i2c_handler.func_data = ctxt;
+ ctxt->i2c_handler.func_table = &tuner_funcs;
+ ctxt->type_update_fl = !0;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ cp->handler = &ctxt->i2c_handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tda9887 V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-demod.h b/drivers/media/video/pvrusb2/pvrusb2-demod.h
new file mode 100644
index 00000000000000..4c4e40ffbf0399
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-demod.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_DEMOD_H
+#define __PVRUSB2_DEMOD_H
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_demod_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+#endif /* __PVRUSB2_DEMOD_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
new file mode 100644
index 00000000000000..94d383ff98897f
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
@@ -0,0 +1,164 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-eeprom.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+
+#define trace_eeprom(...) pvr2_trace(PVR2_TRACE_EEPROM,__VA_ARGS__)
+
+
+
+/*
+
+ Read and analyze data in the eeprom. Use tveeprom to figure out
+ the packet structure, since this is another Hauppauge device and
+ internally it has a family resemblance to ivtv-type devices.
+
+*/
+
+#include <media/tveeprom.h>
+
+/* We seem to only be interested in the last 128 bytes of the EEPROM */
+#define EEPROM_SIZE 128
+
+/* Grab EEPROM contents, needed for direct method. */
+static u8 *pvr2_eeprom_fetch(struct pvr2_hdw *hdw)
+{
+ struct i2c_msg msg[2];
+ u8 *eeprom;
+ u8 iadd[2];
+ u8 addr;
+ u16 eepromSize;
+ unsigned int offs;
+ int ret;
+ int mode16 = 0;
+ unsigned pcnt,tcnt;
+ eeprom = kmalloc(EEPROM_SIZE,GFP_KERNEL);
+ if (!eeprom) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to allocate memory"
+ " required to read eeprom");
+ return 0;
+ }
+
+ trace_eeprom("Value for eeprom addr from controller was 0x%x",
+ hdw->eeprom_addr);
+ addr = hdw->eeprom_addr;
+ /* Seems that if the high bit is set, then the *real* eeprom
+ address is shifted right one bit position (noticed this in
+ newer PVR USB2 hardware) */
+ if (addr & 0x80) addr >>= 1;
+
+ /* FX2 documentation states that a 16bit-addressed eeprom is
+ expected if the I2C address is an odd number (yeah, this is
+ strange but it's what they do) */
+ mode16 = (addr & 1);
+ eepromSize = (mode16 ? 4096 : 256);
+ trace_eeprom("Examining %d byte eeprom at location 0x%x"
+ " using %d bit addressing",eepromSize,addr,
+ mode16 ? 16 : 8);
+
+ msg[0].addr = addr;
+ msg[0].flags = 0;
+ msg[0].len = mode16 ? 2 : 1;
+ msg[0].buf = iadd;
+ msg[1].addr = addr;
+ msg[1].flags = I2C_M_RD;
+
+ /* We have to do the actual eeprom data fetch ourselves, because
+ (1) we're only fetching part of the eeprom, and (2) if we were
+ getting the whole thing, our I2C driver couldn't grab it in one
+ pass - which is what tveeprom would otherwise attempt */
+ memset(eeprom,0,EEPROM_SIZE);
+ for (tcnt = 0; tcnt < EEPROM_SIZE; tcnt += pcnt) {
+ pcnt = 16;
+ if (pcnt + tcnt > EEPROM_SIZE) pcnt = EEPROM_SIZE-tcnt;
+ offs = tcnt + (eepromSize - EEPROM_SIZE);
+ if (mode16) {
+ iadd[0] = offs >> 8;
+ iadd[1] = offs;
+ } else {
+ iadd[0] = offs;
+ }
+ msg[1].len = pcnt;
+ msg[1].buf = eeprom+tcnt;
+ if ((ret = i2c_transfer(
+ &hdw->i2c_adap,
+ msg,sizeof(msg)/sizeof(msg[0]))) != 2) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "eeprom fetch set offs err=%d",ret);
+ kfree(eeprom);
+ return 0;
+ }
+ }
+ return eeprom;
+}
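
To make the address decoding above concrete, here is a small standalone sketch of the same arithmetic (not part of the patch); 0xa2 is an invented example value for hdw->eeprom_addr, not a reading from real hardware.

    #include <stdio.h>

    #define EEPROM_SIZE 128    /* only the last 128 bytes are wanted */

    int main(void)
    {
            unsigned int addr = 0xa2;  /* invented example controller value */
            int mode16;
            unsigned int eepromSize;

            if (addr & 0x80) addr >>= 1;   /* high bit set: 0xa2 becomes 0x51 */
            mode16 = (addr & 1);           /* odd address -> 16 bit addressing */
            eepromSize = (mode16 ? 4096 : 256);
            printf("addr=0x%x mode16=%d size=%u first_offs=%u\n",
                   addr, mode16, eepromSize, eepromSize - EEPROM_SIZE);
            /* prints: addr=0x51 mode16=1 size=4096 first_offs=3968 */
            return 0;
    }
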
+
+
+/* Directly call eeprom analysis function within tveeprom. */
+int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
+{
+ u8 *eeprom;
+ struct tveeprom tvdata;
+
+ memset(&tvdata,0,sizeof(tvdata));
+
+ eeprom = pvr2_eeprom_fetch(hdw);
+ if (!eeprom) return -EINVAL;
+
+ {
+ struct i2c_client fake_client;
+ /* Newer version expects a useless client interface */
+ fake_client.addr = hdw->eeprom_addr;
+ fake_client.adapter = &hdw->i2c_adap;
+ tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
+ }
+
+ trace_eeprom("eeprom assumed v4l tveeprom module");
+ trace_eeprom("eeprom direct call results:");
+ trace_eeprom("has_radio=%d",tvdata.has_radio);
+ trace_eeprom("tuner_type=%d",tvdata.tuner_type);
+ trace_eeprom("tuner_formats=0x%x",tvdata.tuner_formats);
+ trace_eeprom("audio_processor=%d",tvdata.audio_processor);
+ trace_eeprom("model=%d",tvdata.model);
+ trace_eeprom("revision=%d",tvdata.revision);
+ trace_eeprom("serial_number=%d",tvdata.serial_number);
+ trace_eeprom("rev_str=%s",tvdata.rev_str);
+ hdw->tuner_type = tvdata.tuner_type;
+ hdw->serial_number = tvdata.serial_number;
+ hdw->std_mask_eeprom = tvdata.tuner_formats;
+
+ kfree(eeprom);
+
+ return 0;
+}
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.h b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h
new file mode 100644
index 00000000000000..84242975dea7f7
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_EEPROM_H
+#define __PVRUSB2_EEPROM_H
+
+struct pvr2_hdw;
+
+int pvr2_eeprom_analyze(struct pvr2_hdw *);
+
+#endif /* __PVRUSB2_EEPROM_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.c b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
new file mode 100644
index 00000000000000..2cc31695b435ca
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.c
@@ -0,0 +1,418 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/device.h> // for linux/firmware.h
+#include <linux/firmware.h>
+#include "pvrusb2-util.h"
+#include "pvrusb2-encoder.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+
+
+
+/* Firmware mailbox flags - definitions found from ivtv */
+#define IVTV_MBOX_FIRMWARE_DONE 0x00000004
+#define IVTV_MBOX_DRIVER_DONE 0x00000002
+#define IVTV_MBOX_DRIVER_BUSY 0x00000001
+
+
+static int pvr2_encoder_write_words(struct pvr2_hdw *hdw,
+ const u32 *data, unsigned int dlen)
+{
+ unsigned int idx;
+ int ret;
+ unsigned int offs = 0;
+ unsigned int chunkCnt;
+
+ /*
+
+ Format: First byte must be 0x01. Remaining 32 bit words are
+ spread out into chunks of 7 bytes each: the word itself in
+ little-endian order, then 2 blank bytes, then a single byte
+ that is 0x44 plus the offset of the word. Repeat
+ request for additional words, with offset adjusted
+ accordingly.
+
+ */
+ while (dlen) {
+ chunkCnt = 8;
+ if (chunkCnt > dlen) chunkCnt = dlen;
+ memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
+ hdw->cmd_buffer[0] = 0x01;
+ for (idx = 0; idx < chunkCnt; idx++) {
+ hdw->cmd_buffer[1+(idx*7)+6] = 0x44 + idx + offs;
+ PVR2_DECOMPOSE_LE(hdw->cmd_buffer, 1+(idx*7),
+ data[idx]);
+ }
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,1+(chunkCnt*7),
+ 0,0);
+ if (ret) return ret;
+ data += chunkCnt;
+ dlen -= chunkCnt;
+ offs += chunkCnt;
+ }
+
+ return 0;
+}
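
The following is a standalone sketch (not part of the patch) of the chunk layout produced above, assuming PVR2_DECOMPOSE_LE() - defined in pvrusb2-util.h and not shown here - stores the word as four little-endian bytes at the given offset.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            /* Pack two 32 bit words the way pvr2_encoder_write_words()
               lays them out: a leading 0x01, then 7 bytes per word
               (4 data bytes little-endian, 2 zero bytes, 0x44 + offset). */
            uint32_t data[2] = { 0x12345678, 0x000000c8 };
            uint8_t buf[1 + 2 * 7];
            unsigned int idx, i;

            memset(buf, 0, sizeof(buf));
            buf[0] = 0x01;
            for (idx = 0; idx < 2; idx++) {
                    uint32_t w = data[idx];
                    buf[1 + idx * 7 + 0] = w & 0xff;         /* assumed */
                    buf[1 + idx * 7 + 1] = (w >> 8) & 0xff;  /* behavior of */
                    buf[1 + idx * 7 + 2] = (w >> 16) & 0xff; /* PVR2_DECOMPOSE_LE */
                    buf[1 + idx * 7 + 3] = (w >> 24) & 0xff;
                    buf[1 + idx * 7 + 6] = 0x44 + idx;       /* offset byte */
            }
            for (i = 0; i < sizeof(buf); i++) printf("%02x ", buf[i]);
            printf("\n");
            return 0;
    }
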
+
+
+static int pvr2_encoder_read_words(struct pvr2_hdw *hdw,int statusFl,
+ u32 *data, unsigned int dlen)
+{
+ unsigned int idx;
+ int ret;
+ unsigned int offs = 0;
+ unsigned int chunkCnt;
+
+ /*
+
+ Format: First byte must be 0x02 (status check) or 0x28 (read
+ back block of 32 bit words). Next 6 bytes must be zero,
+ followed by a single byte of 0x44+offset for portion to be
+ read. Returned data is a packed set of 32 bit words that were
+ read.
+
+ */
+
+ while (dlen) {
+ chunkCnt = 16;
+ if (chunkCnt > dlen) chunkCnt = dlen;
+ memset(hdw->cmd_buffer,0,sizeof(hdw->cmd_buffer));
+ hdw->cmd_buffer[0] = statusFl ? 0x02 : 0x28;
+ hdw->cmd_buffer[7] = 0x44 + offs;
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,8,
+ hdw->cmd_buffer,chunkCnt * 4);
+ if (ret) return ret;
+
+ for (idx = 0; idx < chunkCnt; idx++) {
+ data[idx] = PVR2_COMPOSE_LE(hdw->cmd_buffer,idx*4);
+ }
+ data += chunkCnt;
+ dlen -= chunkCnt;
+ offs += chunkCnt;
+ }
+
+ return 0;
+}
+
+
+/* This prototype is set up to be compatible with the
+ cx2341x_mbox_func prototype in cx2341x.h, which should be in
+ kernels 2.6.18 or later. We do this so that we can enable
+ cx2341x.ko to write to our encoder (by handing it a pointer to this
+ function). For earlier kernels this doesn't really matter. */
+static int pvr2_encoder_cmd(void *ctxt,
+ int cmd,
+ int arg_cnt_send,
+ int arg_cnt_recv,
+ u32 *argp)
+{
+ unsigned int poll_count;
+ int ret = 0;
+ unsigned int idx;
+ /* These sizes look to be limited by the FX2 firmware implementation */
+ u32 wrData[16];
+ u32 rdData[16];
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)ctxt;
+
+
+ /*
+
+ The encoder seems to speak entirely using blocks of 32 bit words.
+ In ivtv driver terms, this is a mailbox which we populate with
+ data and watch what the hardware does with it. The first word
+ is a set of flags used to control the transaction, the second
+ word is the command to execute, the third byte is zero (ivtv
+ driver suggests that this is some kind of return value), and
+ the fourth byte is a specified timeout (windows driver always
+ uses 0x00060000 except for one case when it is zero). All
+ successive words are the argument words for the command.
+
+ First, write out the entire set of words, with the first word
+ being zero.
+
+ Next, write out just the first word again, but set it to
+ IVTV_MBOX_DRIVER_DONE | IVTV_MBOX_DRIVER_BUSY this time (which
+ probably means "go").
+
+ Next, read back 16 words as status. Check the first word,
+ which should have IVTV_MBOX_FIRMWARE_DONE set. If however
+ that bit is not set, then the command isn't done so repeat the
+ read.
+
+ Next, read back 16 words and compare with the original
+ arguments. Hopefully they will match.
+
+ Finally, write out just the first word again, but set it to
+ 0x0 this time (which probably means "idle").
+
+ */
+
+ if (arg_cnt_send > (sizeof(wrData)/sizeof(wrData[0]))-4) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Failed to write cx23416 command"
+ " - too many input arguments"
+ " (was given %u limit %u)",
+ arg_cnt_send,
+ (unsigned int)(sizeof(wrData)/sizeof(wrData[0])) - 4);
+ return -EINVAL;
+ }
+
+ if (arg_cnt_recv > (sizeof(rdData)/sizeof(rdData[0]))-4) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Failed to write cx23416 command"
+ " - too many return arguments"
+ " (was given %u limit %u)",
+ arg_cnt_recv,
+ (unsigned int)(sizeof(rdData)/sizeof(rdData[0])) - 4);
+ return -EINVAL;
+ }
+
+
+ LOCK_TAKE(hdw->ctl_lock); do {
+
+ wrData[0] = 0;
+ wrData[1] = cmd;
+ wrData[2] = 0;
+ wrData[3] = 0x00060000;
+ for (idx = 0; idx < arg_cnt_send; idx++) {
+ wrData[idx+4] = argp[idx];
+ }
+ for (; idx < (sizeof(wrData)/sizeof(wrData[0]))-4; idx++) {
+ wrData[idx+4] = 0;
+ }
+
+ ret = pvr2_encoder_write_words(hdw,wrData,idx);
+ if (ret) break;
+ wrData[0] = IVTV_MBOX_DRIVER_DONE|IVTV_MBOX_DRIVER_BUSY;
+ ret = pvr2_encoder_write_words(hdw,wrData,1);
+ if (ret) break;
+ poll_count = 0;
+ while (1) {
+ if (poll_count < 10000000) poll_count++;
+ ret = pvr2_encoder_read_words(hdw,!0,rdData,1);
+ if (ret) break;
+ if (rdData[0] & IVTV_MBOX_FIRMWARE_DONE) {
+ break;
+ }
+ if (poll_count == 100) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "***WARNING*** device's encoder"
+ " appears to be stuck"
+ " (status=0%08x)",rdData[0]);
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Encoder command: 0x%02x",cmd);
+ for (idx = 4; idx < arg_cnt_send; idx++) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Encoder arg%d: 0x%08x",
+ idx-3,wrData[idx]);
+ }
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Giving up waiting."
+ " It is likely that"
+ " this is a bad idea...");
+ ret = -EBUSY;
+ break;
+ }
+ }
+ if (ret) break;
+ wrData[0] = 0x7;
+ ret = pvr2_encoder_read_words(
+ hdw,0,rdData,
+ sizeof(rdData)/sizeof(rdData[0]));
+ if (ret) break;
+ for (idx = 0; idx < arg_cnt_recv; idx++) {
+ argp[idx] = rdData[idx+4];
+ }
+
+ wrData[0] = 0x0;
+ ret = pvr2_encoder_write_words(hdw,wrData,1);
+ if (ret) break;
+
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+static int pvr2_encoder_vcmd(struct pvr2_hdw *hdw, int cmd,
+ int args, ...)
+{
+ va_list vl;
+ unsigned int idx;
+ u32 data[12];
+
+ if (args > sizeof(data)/sizeof(data[0])) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Failed to write cx23416 command"
+ " - too many arguments"
+ " (was given %u limit %u)",
+ args,(unsigned int)(sizeof(data)/sizeof(data[0])));
+ return -EINVAL;
+ }
+
+ va_start(vl, args);
+ for (idx = 0; idx < args; idx++) {
+ data[idx] = va_arg(vl, u32);
+ }
+ va_end(vl);
+
+ return pvr2_encoder_cmd(hdw,cmd,args,0,data);
+}
+
+int pvr2_encoder_configure(struct pvr2_hdw *hdw)
+{
+ int ret;
+ pvr2_trace(PVR2_TRACE_ENCODER,"pvr2_encoder_configure"
+ " (cx2341x module)");
+ hdw->enc_ctl_state.port = CX2341X_PORT_STREAMING;
+ hdw->enc_ctl_state.width = hdw->res_hor_val;
+ hdw->enc_ctl_state.height = hdw->res_ver_val;
+ hdw->enc_ctl_state.is_50hz = ((hdw->std_mask_cur &
+ (V4L2_STD_NTSC|V4L2_STD_PAL_M)) ?
+ 0 : 1);
+
+ ret = 0;
+
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw,CX2341X_ENC_SET_NUM_VSYNC_LINES, 2,
+ 0xf0, 0xf0);
+
+ /* setup firmware to notify us about some events (don't know why...) */
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw,CX2341X_ENC_SET_EVENT_NOTIFICATION, 4,
+ 0, 0, 0x10000000, 0xffffffff);
+
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw,CX2341X_ENC_SET_VBI_LINE, 5,
+ 0xffffffff,0,0,0,0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to configure cx32416");
+ return ret;
+ }
+
+ ret = cx2341x_update(hdw,pvr2_encoder_cmd,
+ (hdw->enc_cur_valid ? &hdw->enc_cur_state : 0),
+ &hdw->enc_ctl_state);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error from cx2341x module code=%d",ret);
+ return ret;
+ }
+
+ ret = 0;
+
+ if (!ret) ret = pvr2_encoder_vcmd(
+ hdw, CX2341X_ENC_INITIALIZE_INPUT, 0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to initialize cx32416 video input");
+ return ret;
+ }
+
+ hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
+ memcpy(&hdw->enc_cur_state,&hdw->enc_ctl_state,
+ sizeof(struct cx2341x_mpeg_params));
+ hdw->enc_cur_valid = !0;
+ return 0;
+}
+
+
+int pvr2_encoder_start(struct pvr2_hdw *hdw)
+{
+ int status;
+
+ /* unmask some interrupts */
+ pvr2_write_register(hdw, 0x0048, 0xbfffffff);
+
+ /* change some GPIO data */
+ pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000481);
+ pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000);
+
+ if (hdw->config == pvr2_config_vbi) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
+ 0x01,0x14);
+ } else if (hdw->config == pvr2_config_mpeg) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
+ 0,0x13);
+ } else {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_START_CAPTURE,2,
+ 0,0x13);
+ }
+ if (!status) {
+ hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_RUN);
+ }
+ return status;
+}
+
+int pvr2_encoder_stop(struct pvr2_hdw *hdw)
+{
+ int status;
+
+ /* mask all interrupts */
+ pvr2_write_register(hdw, 0x0048, 0xffffffff);
+
+ if (hdw->config == pvr2_config_vbi) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
+ 0x01,0x01,0x14);
+ } else if (hdw->config == pvr2_config_mpeg) {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
+ 0x01,0,0x13);
+ } else {
+ status = pvr2_encoder_vcmd(hdw,CX2341X_ENC_STOP_CAPTURE,3,
+ 0x01,0,0x13);
+ }
+
+ /* change some GPIO data */
+ /* Note: Bit d7 of dir appears to control the LED. So we shut it
+ off here. */
+ pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000401);
+ pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000000);
+
+ if (!status) {
+ hdw->subsys_enabled_mask &= ~(1<<PVR2_SUBSYS_B_ENC_RUN);
+ }
+ return status;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-encoder.h b/drivers/media/video/pvrusb2/pvrusb2-encoder.h
new file mode 100644
index 00000000000000..01b5a0b89c0389
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-encoder.h
@@ -0,0 +1,42 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_ENCODER_H
+#define __PVRUSB2_ENCODER_H
+
+struct pvr2_hdw;
+
+int pvr2_encoder_configure(struct pvr2_hdw *);
+int pvr2_encoder_start(struct pvr2_hdw *);
+int pvr2_encoder_stop(struct pvr2_hdw *);
+
+#endif /* __PVRUSB2_ENCODER_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
new file mode 100644
index 00000000000000..ba2afbfe32c5b9
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
@@ -0,0 +1,384 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_HDW_INTERNAL_H
+#define __PVRUSB2_HDW_INTERNAL_H
+
+/*
+
+ This header sets up all the internal structures and definitions needed to
+ track and coordinate the driver's interaction with the hardware. ONLY
+ source files which actually implement part of that whole circus should be
+ including this header. Higher levels, like the external layers to the
+ various public APIs (V4L, sysfs, etc) should NOT ever include this
+ private, internal header. This means that pvrusb2-hdw, pvrusb2-encoder,
+ etc will include this, but pvrusb2-v4l should not.
+
+*/
+
+#include <linux/config.h>
+#include <linux/videodev2.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-io.h"
+#include <media/cx2341x.h>
+
+/* Legal values for the SRATE state variable */
+#define PVR2_CVAL_SRATE_48 0
+#define PVR2_CVAL_SRATE_44_1 1
+
+/* Legal values for the AUDIOBITRATE state variable */
+#define PVR2_CVAL_AUDIOBITRATE_384 0
+#define PVR2_CVAL_AUDIOBITRATE_320 1
+#define PVR2_CVAL_AUDIOBITRATE_256 2
+#define PVR2_CVAL_AUDIOBITRATE_224 3
+#define PVR2_CVAL_AUDIOBITRATE_192 4
+#define PVR2_CVAL_AUDIOBITRATE_160 5
+#define PVR2_CVAL_AUDIOBITRATE_128 6
+#define PVR2_CVAL_AUDIOBITRATE_112 7
+#define PVR2_CVAL_AUDIOBITRATE_96 8
+#define PVR2_CVAL_AUDIOBITRATE_80 9
+#define PVR2_CVAL_AUDIOBITRATE_64 10
+#define PVR2_CVAL_AUDIOBITRATE_56 11
+#define PVR2_CVAL_AUDIOBITRATE_48 12
+#define PVR2_CVAL_AUDIOBITRATE_32 13
+#define PVR2_CVAL_AUDIOBITRATE_VBR 14
+
+/* Legal values for the AUDIOEMPHASIS state variable */
+#define PVR2_CVAL_AUDIOEMPHASIS_NONE 0
+#define PVR2_CVAL_AUDIOEMPHASIS_50_15 1
+#define PVR2_CVAL_AUDIOEMPHASIS_CCITT 2
+
+/* Legal values for PVR2_CID_HSM */
+#define PVR2_CVAL_HSM_FAIL 0
+#define PVR2_CVAL_HSM_FULL 1
+#define PVR2_CVAL_HSM_HIGH 2
+
+#define PVR2_VID_ENDPOINT 0x84
+#define PVR2_UNK_ENDPOINT 0x86 /* maybe raw yuv ? */
+#define PVR2_VBI_ENDPOINT 0x88
+
+#define PVR2_CTL_BUFFSIZE 64
+
+#define FREQTABLE_SIZE 500
+
+#define LOCK_TAKE(x) do { mutex_lock(&x##_mutex); x##_held = !0; } while (0)
+#define LOCK_GIVE(x) do { x##_held = 0; mutex_unlock(&x##_mutex); } while (0)
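
The LOCK_TAKE()/LOCK_GIVE() pair couples a mutex with a *_held flag that exists so the debug interface can report lock state (see the big_lock_held/ctl_lock_held fields below and their use in pvrusb2-debugifc.c). Here is a userspace imitation of the idiom (illustrative only), including the do { ... } while (0) bracketing used by pvr2_encoder_cmd().

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t demo_lock_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int demo_lock_held;

    #define DEMO_LOCK_TAKE(x) \
            do { pthread_mutex_lock(&x##_mutex); x##_held = !0; } while (0)
    #define DEMO_LOCK_GIVE(x) \
            do { x##_held = 0; pthread_mutex_unlock(&x##_mutex); } while (0)

    int main(void)
    {
            DEMO_LOCK_TAKE(demo_lock); do {
                    /* critical section; a "break" exits early but still
                       reaches DEMO_LOCK_GIVE(), which is how the driver
                       uses the idiom in pvr2_encoder_cmd(). */
                    printf("held=%d\n", demo_lock_held);
            } while (0); DEMO_LOCK_GIVE(demo_lock);
            printf("held=%d\n", demo_lock_held);
            return 0;
    }
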
+
+struct pvr2_decoder;
+
+typedef int (*pvr2_ctlf_is_dirty)(struct pvr2_ctrl *);
+typedef void (*pvr2_ctlf_clear_dirty)(struct pvr2_ctrl *);
+typedef int (*pvr2_ctlf_get_value)(struct pvr2_ctrl *,int *);
+typedef int (*pvr2_ctlf_set_value)(struct pvr2_ctrl *,int msk,int val);
+typedef int (*pvr2_ctlf_val_to_sym)(struct pvr2_ctrl *,int msk,int val,
+ char *,unsigned int,unsigned int *);
+typedef int (*pvr2_ctlf_sym_to_val)(struct pvr2_ctrl *,
+ const char *,unsigned int,
+ int *mskp,int *valp);
+typedef unsigned int (*pvr2_ctlf_get_v4lflags)(struct pvr2_ctrl *);
+
+/* This structure describes a specific control. A table of these is set up
+ in pvrusb2-hdw.c. */
+struct pvr2_ctl_info {
+ /* Control's name suitable for use as an identifier */
+ const char *name;
+
+ /* Short description of control */
+ const char *desc;
+
+ /* Control's implementation */
+ pvr2_ctlf_get_value get_value; /* Get its value */
+ pvr2_ctlf_set_value set_value; /* Set its value */
+ pvr2_ctlf_val_to_sym val_to_sym; /* Custom convert value->symbol */
+ pvr2_ctlf_sym_to_val sym_to_val; /* Custom convert symbol->value */
+ pvr2_ctlf_is_dirty is_dirty; /* Return true if dirty */
+ pvr2_ctlf_clear_dirty clear_dirty; /* Clear dirty state */
+ pvr2_ctlf_get_v4lflags get_v4lflags;/* Retrieve v4l flags */
+
+ /* Control's type (int, enum, bitmask) */
+ enum pvr2_ctl_type type;
+
+ /* Associated V4L control ID, if any */
+ int v4l_id;
+
+ /* Associated driver internal ID, if any */
+ int internal_id;
+
+ /* Don't implicitly initialize this control's value */
+ int skip_init;
+
+ /* Starting value for this control */
+ int default_value;
+
+ /* Type-specific control information */
+ union {
+ struct { /* Integer control */
+ long min_value; /* lower limit */
+ long max_value; /* upper limit */
+ } type_int;
+ struct { /* enumerated control */
+ unsigned int count; /* enum value count */
+ const char **value_names; /* symbol names */
+ } type_enum;
+ struct { /* bitmask control */
+ unsigned int valid_bits; /* bits in use */
+ const char **bit_names; /* symbol name/bit */
+ } type_bitmask;
+ } def;
+};
+
+
+/* Same as pvr2_ctl_info, but includes storage for the control description */
+#define PVR2_CTLD_INFO_DESC_SIZE 32
+struct pvr2_ctld_info {
+ struct pvr2_ctl_info info;
+ char desc[PVR2_CTLD_INFO_DESC_SIZE];
+};
+
+struct pvr2_ctrl {
+ const struct pvr2_ctl_info *info;
+ struct pvr2_hdw *hdw;
+};
+
+
+struct pvr2_audio_stat {
+ void *ctxt;
+ void (*detach)(void *);
+ int (*status)(void *);
+};
+
+struct pvr2_decoder_ctrl {
+ void *ctxt;
+ void (*detach)(void *);
+ void (*enable)(void *,int);
+ int (*tuned)(void *);
+ void (*force_reset)(void *);
+};
+
+#define PVR2_I2C_PEND_DETECT 0x01 /* Need to detect a client type */
+#define PVR2_I2C_PEND_CLIENT 0x02 /* Client needs a specific update */
+#define PVR2_I2C_PEND_REFRESH 0x04 /* Client has specific pending bits */
+#define PVR2_I2C_PEND_STALE 0x08 /* Broadcast pending bits */
+
+#define PVR2_I2C_PEND_ALL (PVR2_I2C_PEND_DETECT |\
+ PVR2_I2C_PEND_CLIENT |\
+ PVR2_I2C_PEND_REFRESH |\
+ PVR2_I2C_PEND_STALE)
+
+/* Disposition of firmware1 loading situation */
+#define FW1_STATE_UNKNOWN 0
+#define FW1_STATE_MISSING 1
+#define FW1_STATE_FAILED 2
+#define FW1_STATE_RELOAD 3
+#define FW1_STATE_OK 4
+
+/* Known major hardware variants, keyed from device ID */
+#define PVR2_HDW_TYPE_29XXX 0
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+#define PVR2_HDW_TYPE_24XXX 1
+#endif
+
+typedef int (*pvr2_i2c_func)(struct pvr2_hdw *,u8,u8 *,u16,u8 *, u16);
+#define PVR2_I2C_FUNC_CNT 128
+
+/* This structure contains all state data directly needed to
+ manipulate the hardware (as opposed to complying with a kernel
+ interface) */
+struct pvr2_hdw {
+ /* Underlying USB device handle */
+ struct usb_device *usb_dev;
+ struct usb_interface *usb_intf;
+
+ /* Device type, one of PVR2_HDW_TYPE_xxxxx */
+ unsigned int hdw_type;
+
+ /* Video spigot */
+ struct pvr2_stream *vid_stream;
+
+ /* Mutex for all hardware state control */
+ struct mutex big_lock_mutex;
+ int big_lock_held; /* For debugging */
+
+ void (*poll_trigger_func)(void *);
+ void *poll_trigger_data;
+
+ char name[32];
+
+ /* I2C stuff */
+ struct i2c_adapter i2c_adap;
+ struct i2c_algorithm i2c_algo;
+ pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
+ int i2c_cx25840_hack_state;
+ int i2c_linked;
+ unsigned int i2c_pend_types; /* Which types of update are needed */
+ unsigned long i2c_pend_mask; /* Change bits we need to scan */
+ unsigned long i2c_stale_mask; /* Pending broadcast change bits */
+ unsigned long i2c_active_mask; /* All change bits currently in use */
+ struct list_head i2c_clients;
+ struct mutex i2c_list_lock;
+
+ /* Frequency table */
+ unsigned int freqTable[FREQTABLE_SIZE];
+ unsigned int freqProgSlot;
+ unsigned int freqSlot;
+
+ /* Stuff for handling low level control interaction with device */
+ struct mutex ctl_lock_mutex;
+ int ctl_lock_held; /* For debugging */
+ struct urb *ctl_write_urb;
+ struct urb *ctl_read_urb;
+ unsigned char *ctl_write_buffer;
+ unsigned char *ctl_read_buffer;
+ volatile int ctl_write_pend_flag;
+ volatile int ctl_read_pend_flag;
+ volatile int ctl_timeout_flag;
+ struct completion ctl_done;
+ unsigned char cmd_buffer[PVR2_CTL_BUFFSIZE];
+ int cmd_debug_state; // Low level command debugging info
+ unsigned char cmd_debug_code; //
+ unsigned int cmd_debug_write_len; //
+ unsigned int cmd_debug_read_len; //
+
+ int flag_ok; // device in known good state
+ int flag_disconnected; // flag_ok == 0 due to disconnect
+ int flag_init_ok; // true if structure is fully initialized
+ int flag_streaming_enabled; // true if streaming should be on
+ int fw1_state; // current situation with fw1
+
+ int flag_decoder_is_tuned;
+
+ struct pvr2_decoder_ctrl *decoder_ctrl;
+
+ // CPU firmware info (used to help find / save firmware data)
+ char *fw_buffer;
+ unsigned int fw_size;
+
+ // Which subsystem pieces have been enabled / configured
+ unsigned long subsys_enabled_mask;
+
+ // Which subsystems are manipulated to enable streaming
+ unsigned long subsys_stream_mask;
+
+ // True if there is a request to trigger logging of state in each
+ // module.
+ int log_requested;
+
+ /* Tuner / frequency control stuff */
+ unsigned int tuner_type;
+ int tuner_updated;
+ unsigned int freqVal;
+ int freqDirty;
+
+ /* Video standard handling */
+ v4l2_std_id std_mask_eeprom; // Hardware supported selections
+ v4l2_std_id std_mask_avail; // Which standards we may select from
+ v4l2_std_id std_mask_cur; // Currently selected standard(s)
+ unsigned int std_enum_cnt; // # of enumerated standards
+ int std_enum_cur; // selected standard enumeration value
+ int std_dirty; // True if std_mask_cur has changed
+ struct pvr2_ctl_info std_info_enum;
+ struct pvr2_ctl_info std_info_avail;
+ struct pvr2_ctl_info std_info_cur;
+ struct v4l2_standard *std_defs;
+ const char **std_enum_names;
+
+ // Generated string names, one per actual V4L2 standard
+ const char *std_mask_ptrs[32];
+ char std_mask_names[32][10];
+
+ int unit_number; /* ID for driver instance */
+ unsigned long serial_number; /* ID for hardware itself */
+
+ /* Minor number used by v4l logic (yes, this is a hack, as there should
+ be no v4l junk here). There is probably a better way to do this. */
+ int v4l_minor_number;
+
+ /* Location of eeprom or a negative number if none */
+ int eeprom_addr;
+
+ enum pvr2_config config;
+
+ /* Information about what audio signal we're hearing */
+ int flag_stereo;
+ int flag_bilingual;
+ struct pvr2_audio_stat *audio_stat;
+
+ /* Control state needed for cx2341x module */
+ struct cx2341x_mpeg_params enc_cur_state;
+ struct cx2341x_mpeg_params enc_ctl_state;
+ /* True if an encoder attribute has changed */
+ int enc_stale;
+ /* True if enc_cur_state is valid */
+ int enc_cur_valid;
+
+ /* Control state */
+#define VCREATE_DATA(lab) int lab##_val; int lab##_dirty
+ VCREATE_DATA(brightness);
+ VCREATE_DATA(contrast);
+ VCREATE_DATA(saturation);
+ VCREATE_DATA(hue);
+ VCREATE_DATA(volume);
+ VCREATE_DATA(balance);
+ VCREATE_DATA(bass);
+ VCREATE_DATA(treble);
+ VCREATE_DATA(mute);
+ VCREATE_DATA(input);
+ VCREATE_DATA(audiomode);
+ VCREATE_DATA(res_hor);
+ VCREATE_DATA(res_ver);
+ VCREATE_DATA(srate);
+#undef VCREATE_DATA
+
+ struct pvr2_ctld_info *mpeg_ctrl_info;
+
+ struct pvr2_ctrl *controls;
+ unsigned int control_cnt;
+};
+
+int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw);
+
+unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *);
+
+void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val);
+void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,
+ unsigned long val);
+
+void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw);
+void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw);
+
+int pvr2_i2c_basic_op(struct pvr2_hdw *,u8 i2c_addr,
+ u8 *wdata,u16 wlen,
+ u8 *rdata,u16 rlen);
+
+#endif /* __PVRUSB2_HDW_INTERNAL_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
new file mode 100644
index 00000000000000..643c471375da12
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -0,0 +1,3120 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/firmware.h>
+#include <linux/videodev2.h>
+#include <asm/semaphore.h>
+#include "pvrusb2.h"
+#include "pvrusb2-std.h"
+#include "pvrusb2-util.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-i2c-core.h"
+#include "pvrusb2-tuner.h"
+#include "pvrusb2-eeprom.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-encoder.h"
+#include "pvrusb2-debug.h"
+
+struct usb_device_id pvr2_device_table[] = {
+ [PVR2_HDW_TYPE_29XXX] = { USB_DEVICE(0x2040, 0x2900) },
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = { USB_DEVICE(0x2040, 0x2400) },
+#endif
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, pvr2_device_table);
+
+static const char *pvr2_device_names[] = {
+ [PVR2_HDW_TYPE_29XXX] = "WinTV PVR USB2 Model Category 29xxxx",
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = "WinTV PVR USB2 Model Category 24xxxx",
+#endif
+};
+
+struct pvr2_string_table {
+ const char **lst;
+ unsigned int cnt;
+};
+
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+// Names of other client modules to request for 24xxx model hardware
+static const char *pvr2_client_24xxx[] = {
+ "cx25840",
+ "tuner",
+ "tda9887",
+ "wm8775",
+};
+#endif
+
+// Names of other client modules to request for 29xxx model hardware
+static const char *pvr2_client_29xxx[] = {
+ "msp3400",
+ "saa7115",
+ "tuner",
+ "tda9887",
+};
+
+static struct pvr2_string_table pvr2_client_lists[] = {
+ [PVR2_HDW_TYPE_29XXX] = {
+ pvr2_client_29xxx,
+ sizeof(pvr2_client_29xxx)/sizeof(pvr2_client_29xxx[0]),
+ },
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = {
+ pvr2_client_24xxx,
+ sizeof(pvr2_client_24xxx)/sizeof(pvr2_client_24xxx[0]),
+ },
+#endif
+};
+
+static struct pvr2_hdw *unit_pointers[PVR_NUM] = {[ 0 ... PVR_NUM-1 ] = 0};
+DECLARE_MUTEX(pvr2_unit_sem);
+
+static int ctlchg = 0;
+static int initusbreset = 1;
+static int procreload = 0;
+static int tuner[PVR_NUM] = { [0 ... PVR_NUM-1] = -1 };
+static int tolerance[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
+static int video_std[PVR_NUM] = { [0 ... PVR_NUM-1] = 0 };
+static int init_pause_msec = 0;
+
+module_param(ctlchg, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ctlchg, "0=optimize ctl change 1=always accept new ctl value");
+module_param(init_pause_msec, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(init_pause_msec, "hardware initialization settling delay");
+module_param(initusbreset, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(initusbreset, "Do USB reset device on probe");
+module_param(procreload, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(procreload,
+ "Attempt init failure recovery with firmware reload");
+module_param_array(tuner, int, NULL, 0444);
+MODULE_PARM_DESC(tuner,"specify installed tuner type");
+module_param_array(video_std, int, NULL, 0444);
+MODULE_PARM_DESC(video_std,"specify initial video standard");
+module_param_array(tolerance, int, NULL, 0444);
+MODULE_PARM_DESC(tolerance,"specify stream error tolerance");
+
+#define PVR2_CTL_WRITE_ENDPOINT 0x01
+#define PVR2_CTL_READ_ENDPOINT 0x81
+
+#define PVR2_GPIO_IN 0x9008
+#define PVR2_GPIO_OUT 0x900c
+#define PVR2_GPIO_DIR 0x9020
+
+#define trace_firmware(...) pvr2_trace(PVR2_TRACE_FIRMWARE,__VA_ARGS__)
+
+#define PVR2_FIRMWARE_ENDPOINT 0x02
+
+/* size of a firmware chunk */
+#define FIRMWARE_CHUNK_SIZE 0x2000
+
+/* Define the list of additional controls we'll dynamically construct based
+ on query of the cx2341x module. */
+struct pvr2_mpeg_ids {
+ const char *strid;
+ int id;
+};
+static const struct pvr2_mpeg_ids mpeg_ids[] = {
+ {
+ .strid = "audio_layer",
+ .id = V4L2_CID_MPEG_AUDIO_ENCODING,
+ },{
+ .strid = "audio_bitrate",
+ .id = V4L2_CID_MPEG_AUDIO_L2_BITRATE,
+ },{
+ /* Already using audio_mode elsewhere :-( */
+ .strid = "mpeg_audio_mode",
+ .id = V4L2_CID_MPEG_AUDIO_MODE,
+ },{
+ .strid = "mpeg_audio_mode_extension",
+ .id = V4L2_CID_MPEG_AUDIO_MODE_EXTENSION,
+ },{
+ .strid = "audio_emphasis",
+ .id = V4L2_CID_MPEG_AUDIO_EMPHASIS,
+ },{
+ .strid = "audio_crc",
+ .id = V4L2_CID_MPEG_AUDIO_CRC,
+ },{
+ .strid = "video_aspect",
+ .id = V4L2_CID_MPEG_VIDEO_ASPECT,
+ },{
+ .strid = "video_b_frames",
+ .id = V4L2_CID_MPEG_VIDEO_B_FRAMES,
+ },{
+ .strid = "video_gop_size",
+ .id = V4L2_CID_MPEG_VIDEO_GOP_SIZE,
+ },{
+ .strid = "video_gop_closure",
+ .id = V4L2_CID_MPEG_VIDEO_GOP_CLOSURE,
+ },{
+ .strid = "video_pulldown",
+ .id = V4L2_CID_MPEG_VIDEO_PULLDOWN,
+ },{
+ .strid = "video_bitrate_mode",
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
+ },{
+ .strid = "video_bitrate",
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE,
+ },{
+ .strid = "video_bitrate_peak",
+ .id = V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
+ },{
+ .strid = "video_temporal_decimation",
+ .id = V4L2_CID_MPEG_VIDEO_TEMPORAL_DECIMATION,
+ },{
+ .strid = "stream_type",
+ .id = V4L2_CID_MPEG_STREAM_TYPE,
+ },{
+ .strid = "video_spatial_filter_mode",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE,
+ },{
+ .strid = "video_spatial_filter",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER,
+ },{
+ .strid = "video_luma_spatial_filter_type",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_SPATIAL_FILTER_TYPE,
+ },{
+ .strid = "video_chroma_spatial_filter_type",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_SPATIAL_FILTER_TYPE,
+ },{
+ .strid = "video_temporal_filter_mode",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER_MODE,
+ },{
+ .strid = "video_temporal_filter",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_TEMPORAL_FILTER,
+ },{
+ .strid = "video_median_filter_type",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_MEDIAN_FILTER_TYPE,
+ },{
+ .strid = "video_luma_median_filter_top",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_TOP,
+ },{
+ .strid = "video_luma_median_filter_bottom",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_LUMA_MEDIAN_FILTER_BOTTOM,
+ },{
+ .strid = "video_chroma_median_filter_top",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_TOP,
+ },{
+ .strid = "video_chroma_median_filter_bottom",
+ .id = V4L2_CID_MPEG_CX2341X_VIDEO_CHROMA_MEDIAN_FILTER_BOTTOM,
+ }
+};
+#define MPEGDEF_COUNT (sizeof(mpeg_ids)/sizeof(mpeg_ids[0]))
+
+static const char *control_values_srate[] = {
+ [PVR2_CVAL_SRATE_48] = "48KHz",
+ [PVR2_CVAL_SRATE_44_1] = "44.1KHz",
+};
+
+
+
+
+static const char *control_values_input[] = {
+ [PVR2_CVAL_INPUT_TV] = "television", /*xawtv needs this name*/
+ [PVR2_CVAL_INPUT_RADIO] = "radio",
+ [PVR2_CVAL_INPUT_SVIDEO] = "s-video",
+ [PVR2_CVAL_INPUT_COMPOSITE] = "composite",
+};
+
+
+static const char *control_values_audiomode[] = {
+ [V4L2_TUNER_MODE_MONO] = "Mono",
+ [V4L2_TUNER_MODE_STEREO] = "Stereo",
+ [V4L2_TUNER_MODE_LANG1] = "Lang1",
+ [V4L2_TUNER_MODE_LANG2] = "Lang2",
+ [V4L2_TUNER_MODE_LANG1_LANG2] = "Lang1+Lang2",
+};
+
+
+static const char *control_values_hsm[] = {
+ [PVR2_CVAL_HSM_FAIL] = "Fail",
+ [PVR2_CVAL_HSM_HIGH] = "High",
+ [PVR2_CVAL_HSM_FULL] = "Full",
+};
+
+
+static const char *control_values_subsystem[] = {
+ [PVR2_SUBSYS_B_ENC_FIRMWARE] = "enc_firmware",
+ [PVR2_SUBSYS_B_ENC_CFG] = "enc_config",
+ [PVR2_SUBSYS_B_DIGITIZER_RUN] = "digitizer_run",
+ [PVR2_SUBSYS_B_USBSTREAM_RUN] = "usbstream_run",
+ [PVR2_SUBSYS_B_ENC_RUN] = "enc_run",
+};
+
+
+static int ctrl_channelfreq_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) {
+ *vp = hdw->freqTable[hdw->freqProgSlot-1];
+ } else {
+ *vp = 0;
+ }
+ return 0;
+}
+
+static int ctrl_channelfreq_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if ((hdw->freqProgSlot > 0) && (hdw->freqProgSlot <= FREQTABLE_SIZE)) {
+ hdw->freqTable[hdw->freqProgSlot-1] = v;
+ }
+ return 0;
+}
+
+static int ctrl_channelprog_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->freqProgSlot;
+ return 0;
+}
+
+static int ctrl_channelprog_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if ((v >= 0) && (v <= FREQTABLE_SIZE)) {
+ hdw->freqProgSlot = v;
+ }
+ return 0;
+}
+
+static int ctrl_channel_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->freqSlot;
+ return 0;
+}
+
+static int ctrl_channel_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ unsigned freq = 0;
+ struct pvr2_hdw *hdw = cptr->hdw;
+ hdw->freqSlot = v;
+ if ((hdw->freqSlot > 0) && (hdw->freqSlot <= FREQTABLE_SIZE)) {
+ freq = hdw->freqTable[hdw->freqSlot-1];
+ }
+ if (freq && (freq != hdw->freqVal)) {
+ hdw->freqVal = freq;
+ hdw->freqDirty = !0;
+ }
+ return 0;
+}
+
+static int ctrl_freq_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->freqVal;
+ return 0;
+}
+
+static int ctrl_freq_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->freqDirty != 0;
+}
+
+static void ctrl_freq_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->freqDirty = 0;
+}
+
+static int ctrl_freq_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ hdw->freqVal = v;
+ hdw->freqDirty = !0;
+ hdw->freqSlot = 0;
+ return 0;
+}
+
+static int ctrl_cx2341x_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->enc_stale != 0;
+}
+
+static void ctrl_cx2341x_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->enc_stale = 0;
+}
+
+static int ctrl_cx2341x_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ int ret;
+ struct v4l2_ext_controls cs;
+ struct v4l2_ext_control c1;
+ memset(&cs,0,sizeof(cs));
+ memset(&c1,0,sizeof(c1));
+ cs.controls = &c1;
+ cs.count = 1;
+ c1.id = cptr->info->v4l_id;
+ ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs,
+ VIDIOC_G_EXT_CTRLS);
+ if (ret) return ret;
+ *vp = c1.value;
+ return 0;
+}
+
+static int ctrl_cx2341x_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ int ret;
+ struct v4l2_ext_controls cs;
+ struct v4l2_ext_control c1;
+ memset(&cs,0,sizeof(cs));
+ memset(&c1,0,sizeof(c1));
+ cs.controls = &c1;
+ cs.count = 1;
+ c1.id = cptr->info->v4l_id;
+ c1.value = v;
+ ret = cx2341x_ext_ctrls(&cptr->hdw->enc_ctl_state,&cs,
+ VIDIOC_S_EXT_CTRLS);
+ if (ret) return ret;
+ cptr->hdw->enc_stale = !0;
+ return 0;
+}
+
+static unsigned int ctrl_cx2341x_getv4lflags(struct pvr2_ctrl *cptr)
+{
+ struct v4l2_queryctrl qctrl;
+ struct pvr2_ctl_info *info;
+ qctrl.id = cptr->info->v4l_id;
+ cx2341x_ctrl_query(&cptr->hdw->enc_ctl_state,&qctrl);
+ /* Strip out the const so we can adjust a function pointer. It's
+ OK to do this here because we know this is a dynamically created
+ control, so the underlying storage for the info pointer is (a)
+ private to us, and (b) not in read-only storage. Either we do
+ this or we significantly complicate the underlying control
+ implementation. */
+ info = (struct pvr2_ctl_info *)(cptr->info);
+ if (qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY) {
+ if (info->set_value) {
+ info->set_value = 0;
+ }
+ } else {
+ if (!(info->set_value)) {
+ info->set_value = ctrl_cx2341x_set;
+ }
+ }
+ return qctrl.flags;
+}
+
+static int ctrl_streamingenabled_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->flag_streaming_enabled;
+ return 0;
+}
+
+static int ctrl_hsm_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ int result = pvr2_hdw_is_hsm(cptr->hdw);
+ *vp = PVR2_CVAL_HSM_FULL;
+ if (result < 0) *vp = PVR2_CVAL_HSM_FAIL;
+ if (result) *vp = PVR2_CVAL_HSM_HIGH;
+ return 0;
+}
+
+static int ctrl_stdavail_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->std_mask_avail;
+ return 0;
+}
+
+static int ctrl_stdavail_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ v4l2_std_id ns;
+ ns = hdw->std_mask_avail;
+ ns = (ns & ~m) | (v & m);
+ if (ns == hdw->std_mask_avail) return 0;
+ hdw->std_mask_avail = ns;
+ pvr2_hdw_internal_set_std_avail(hdw);
+ pvr2_hdw_internal_find_stdenum(hdw);
+ return 0;
+}
+
+static int ctrl_std_val_to_sym(struct pvr2_ctrl *cptr,int msk,int val,
+ char *bufPtr,unsigned int bufSize,
+ unsigned int *len)
+{
+ *len = pvr2_std_id_to_str(bufPtr,bufSize,msk & val);
+ return 0;
+}
+
+static int ctrl_std_sym_to_val(struct pvr2_ctrl *cptr,
+ const char *bufPtr,unsigned int bufSize,
+ int *mskp,int *valp)
+{
+ int ret;
+ v4l2_std_id id;
+ ret = pvr2_std_str_to_id(&id,bufPtr,bufSize);
+ if (ret < 0) return ret;
+ if (mskp) *mskp = id;
+ if (valp) *valp = id;
+ return 0;
+}
+
+static int ctrl_stdcur_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->std_mask_cur;
+ return 0;
+}
+
+static int ctrl_stdcur_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ v4l2_std_id ns;
+ ns = hdw->std_mask_cur;
+ ns = (ns & ~m) | (v & m);
+ if (ns == hdw->std_mask_cur) return 0;
+ hdw->std_mask_cur = ns;
+ hdw->std_dirty = !0;
+ pvr2_hdw_internal_find_stdenum(hdw);
+ return 0;
+}
+
+static int ctrl_stdcur_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->std_dirty != 0;
+}
+
+static void ctrl_stdcur_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->std_dirty = 0;
+}
+
+static int ctrl_signal_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = ((pvr2_hdw_get_signal_status_internal(cptr->hdw) &
+ PVR2_SIGNAL_OK) ? 1 : 0);
+ return 0;
+}
+
+static int ctrl_subsys_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->subsys_enabled_mask;
+ return 0;
+}
+
+static int ctrl_subsys_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ pvr2_hdw_subsys_bit_chg_no_lock(cptr->hdw,m,v);
+ return 0;
+}
+
+static int ctrl_subsys_stream_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->subsys_stream_mask;
+ return 0;
+}
+
+static int ctrl_subsys_stream_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ pvr2_hdw_subsys_stream_bit_chg_no_lock(cptr->hdw,m,v);
+ return 0;
+}
+
+static int ctrl_stdenumcur_set(struct pvr2_ctrl *cptr,int m,int v)
+{
+ struct pvr2_hdw *hdw = cptr->hdw;
+ if (v < 0) return -EINVAL;
+	if (v >= hdw->std_enum_cnt) return -EINVAL;
+ hdw->std_enum_cur = v;
+ if (!v) return 0;
+ v--;
+ if (hdw->std_mask_cur == hdw->std_defs[v].id) return 0;
+ hdw->std_mask_cur = hdw->std_defs[v].id;
+ hdw->std_dirty = !0;
+ return 0;
+}
+
+
+static int ctrl_stdenumcur_get(struct pvr2_ctrl *cptr,int *vp)
+{
+ *vp = cptr->hdw->std_enum_cur;
+ return 0;
+}
+
+
+static int ctrl_stdenumcur_is_dirty(struct pvr2_ctrl *cptr)
+{
+ return cptr->hdw->std_dirty != 0;
+}
+
+
+static void ctrl_stdenumcur_clear_dirty(struct pvr2_ctrl *cptr)
+{
+ cptr->hdw->std_dirty = 0;
+}
+
+
+#define DEFINT(vmin,vmax) \
+ .type = pvr2_ctl_int, \
+ .def.type_int.min_value = vmin, \
+ .def.type_int.max_value = vmax
+
+#define DEFENUM(tab) \
+ .type = pvr2_ctl_enum, \
+ .def.type_enum.count = (sizeof(tab)/sizeof((tab)[0])), \
+ .def.type_enum.value_names = tab
+
+#define DEFBOOL \
+ .type = pvr2_ctl_bool
+
+#define DEFMASK(msk,tab) \
+ .type = pvr2_ctl_bitmask, \
+ .def.type_bitmask.valid_bits = msk, \
+ .def.type_bitmask.bit_names = tab
+
+#define DEFREF(vname) \
+ .set_value = ctrl_set_##vname, \
+ .get_value = ctrl_get_##vname, \
+ .is_dirty = ctrl_isdirty_##vname, \
+ .clear_dirty = ctrl_cleardirty_##vname
+
+
+#define VCREATE_FUNCS(vname) \
+static int ctrl_get_##vname(struct pvr2_ctrl *cptr,int *vp) \
+{*vp = cptr->hdw->vname##_val; return 0;} \
+static int ctrl_set_##vname(struct pvr2_ctrl *cptr,int m,int v) \
+{cptr->hdw->vname##_val = v; cptr->hdw->vname##_dirty = !0; return 0;} \
+static int ctrl_isdirty_##vname(struct pvr2_ctrl *cptr) \
+{return cptr->hdw->vname##_dirty != 0;} \
+static void ctrl_cleardirty_##vname(struct pvr2_ctrl *cptr) \
+{cptr->hdw->vname##_dirty = 0;}
+
+VCREATE_FUNCS(brightness)
+VCREATE_FUNCS(contrast)
+VCREATE_FUNCS(saturation)
+VCREATE_FUNCS(hue)
+VCREATE_FUNCS(volume)
+VCREATE_FUNCS(balance)
+VCREATE_FUNCS(bass)
+VCREATE_FUNCS(treble)
+VCREATE_FUNCS(mute)
+VCREATE_FUNCS(input)
+VCREATE_FUNCS(audiomode)
+VCREATE_FUNCS(res_hor)
+VCREATE_FUNCS(res_ver)
+VCREATE_FUNCS(srate)
+
+#define MIN_FREQ 55250000L
+#define MAX_FREQ 850000000L
+
+/* Table definition of all controls which can be manipulated */
+static const struct pvr2_ctl_info control_defs[] = {
+ {
+ .v4l_id = V4L2_CID_BRIGHTNESS,
+ .desc = "Brightness",
+ .name = "brightness",
+ .default_value = 128,
+ DEFREF(brightness),
+ DEFINT(0,255),
+ },{
+ .v4l_id = V4L2_CID_CONTRAST,
+ .desc = "Contrast",
+ .name = "contrast",
+ .default_value = 68,
+ DEFREF(contrast),
+ DEFINT(0,127),
+ },{
+ .v4l_id = V4L2_CID_SATURATION,
+ .desc = "Saturation",
+ .name = "saturation",
+ .default_value = 64,
+ DEFREF(saturation),
+ DEFINT(0,127),
+ },{
+ .v4l_id = V4L2_CID_HUE,
+ .desc = "Hue",
+ .name = "hue",
+ .default_value = 0,
+ DEFREF(hue),
+ DEFINT(-128,127),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_VOLUME,
+ .desc = "Volume",
+ .name = "volume",
+ .default_value = 65535,
+ DEFREF(volume),
+ DEFINT(0,65535),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_BALANCE,
+ .desc = "Balance",
+ .name = "balance",
+ .default_value = 0,
+ DEFREF(balance),
+ DEFINT(-32768,32767),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_BASS,
+ .desc = "Bass",
+ .name = "bass",
+ .default_value = 0,
+ DEFREF(bass),
+ DEFINT(-32768,32767),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_TREBLE,
+ .desc = "Treble",
+ .name = "treble",
+ .default_value = 0,
+ DEFREF(treble),
+ DEFINT(-32768,32767),
+ },{
+ .v4l_id = V4L2_CID_AUDIO_MUTE,
+ .desc = "Mute",
+ .name = "mute",
+ .default_value = 0,
+ DEFREF(mute),
+ DEFBOOL,
+ },{
+ .desc = "Video Source",
+ .name = "input",
+ .internal_id = PVR2_CID_INPUT,
+ .default_value = PVR2_CVAL_INPUT_TV,
+ DEFREF(input),
+ DEFENUM(control_values_input),
+ },{
+ .desc = "Audio Mode",
+ .name = "audio_mode",
+ .internal_id = PVR2_CID_AUDIOMODE,
+ .default_value = V4L2_TUNER_MODE_STEREO,
+ DEFREF(audiomode),
+ DEFENUM(control_values_audiomode),
+ },{
+ .desc = "Horizontal capture resolution",
+ .name = "resolution_hor",
+ .internal_id = PVR2_CID_HRES,
+ .default_value = 720,
+ DEFREF(res_hor),
+ DEFINT(320,720),
+ },{
+ .desc = "Vertical capture resolution",
+ .name = "resolution_ver",
+ .internal_id = PVR2_CID_VRES,
+ .default_value = 480,
+ DEFREF(res_ver),
+ DEFINT(200,625),
+ },{
+ .v4l_id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
+ .desc = "Sample rate",
+ .name = "srate",
+ .default_value = PVR2_CVAL_SRATE_48,
+ DEFREF(srate),
+ DEFENUM(control_values_srate),
+ },{
+ .desc = "Tuner Frequency (Hz)",
+ .name = "frequency",
+ .internal_id = PVR2_CID_FREQUENCY,
+ .default_value = 175250000L,
+ .set_value = ctrl_freq_set,
+ .get_value = ctrl_freq_get,
+ .is_dirty = ctrl_freq_is_dirty,
+ .clear_dirty = ctrl_freq_clear_dirty,
+ DEFINT(MIN_FREQ,MAX_FREQ),
+ },{
+ .desc = "Channel",
+ .name = "channel",
+ .set_value = ctrl_channel_set,
+ .get_value = ctrl_channel_get,
+ DEFINT(0,FREQTABLE_SIZE),
+ },{
+ .desc = "Channel Program Frequency",
+ .name = "freq_table_value",
+ .set_value = ctrl_channelfreq_set,
+ .get_value = ctrl_channelfreq_get,
+ DEFINT(MIN_FREQ,MAX_FREQ),
+ },{
+ .desc = "Channel Program ID",
+ .name = "freq_table_channel",
+ .set_value = ctrl_channelprog_set,
+ .get_value = ctrl_channelprog_get,
+ DEFINT(0,FREQTABLE_SIZE),
+ },{
+ .desc = "Streaming Enabled",
+ .name = "streaming_enabled",
+ .get_value = ctrl_streamingenabled_get,
+ DEFBOOL,
+ },{
+ .desc = "USB Speed",
+ .name = "usb_speed",
+ .get_value = ctrl_hsm_get,
+ DEFENUM(control_values_hsm),
+ },{
+ .desc = "Signal Present",
+ .name = "signal_present",
+ .get_value = ctrl_signal_get,
+ DEFBOOL,
+ },{
+ .desc = "Video Standards Available Mask",
+ .name = "video_standard_mask_available",
+ .internal_id = PVR2_CID_STDAVAIL,
+ .skip_init = !0,
+ .get_value = ctrl_stdavail_get,
+ .set_value = ctrl_stdavail_set,
+ .val_to_sym = ctrl_std_val_to_sym,
+ .sym_to_val = ctrl_std_sym_to_val,
+ .type = pvr2_ctl_bitmask,
+ },{
+ .desc = "Video Standards In Use Mask",
+ .name = "video_standard_mask_active",
+ .internal_id = PVR2_CID_STDCUR,
+ .skip_init = !0,
+ .get_value = ctrl_stdcur_get,
+ .set_value = ctrl_stdcur_set,
+ .is_dirty = ctrl_stdcur_is_dirty,
+ .clear_dirty = ctrl_stdcur_clear_dirty,
+ .val_to_sym = ctrl_std_val_to_sym,
+ .sym_to_val = ctrl_std_sym_to_val,
+ .type = pvr2_ctl_bitmask,
+ },{
+ .desc = "Subsystem enabled mask",
+ .name = "debug_subsys_mask",
+ .skip_init = !0,
+ .get_value = ctrl_subsys_get,
+ .set_value = ctrl_subsys_set,
+ DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem),
+ },{
+ .desc = "Subsystem stream mask",
+ .name = "debug_subsys_stream_mask",
+ .skip_init = !0,
+ .get_value = ctrl_subsys_stream_get,
+ .set_value = ctrl_subsys_stream_set,
+ DEFMASK(PVR2_SUBSYS_ALL,control_values_subsystem),
+ },{
+ .desc = "Video Standard Name",
+ .name = "video_standard",
+ .internal_id = PVR2_CID_STDENUM,
+ .skip_init = !0,
+ .get_value = ctrl_stdenumcur_get,
+ .set_value = ctrl_stdenumcur_set,
+ .is_dirty = ctrl_stdenumcur_is_dirty,
+ .clear_dirty = ctrl_stdenumcur_clear_dirty,
+ .type = pvr2_ctl_enum,
+ }
+};
+
+#define CTRLDEF_COUNT (sizeof(control_defs)/sizeof(control_defs[0]))
+
+
+const char *pvr2_config_get_name(enum pvr2_config cfg)
+{
+ switch (cfg) {
+ case pvr2_config_empty: return "empty";
+ case pvr2_config_mpeg: return "mpeg";
+ case pvr2_config_vbi: return "vbi";
+ case pvr2_config_radio: return "radio";
+ }
+ return "<unknown>";
+}
+
+
+struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *hdw)
+{
+ return hdw->usb_dev;
+}
+
+
+unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *hdw)
+{
+ return hdw->serial_number;
+}
+
+
+struct pvr2_hdw *pvr2_hdw_find(int unit_number)
+{
+ if (unit_number < 0) return 0;
+ if (unit_number >= PVR_NUM) return 0;
+ return unit_pointers[unit_number];
+}
+
+
+int pvr2_hdw_get_unit_number(struct pvr2_hdw *hdw)
+{
+ return hdw->unit_number;
+}
+
+
+/* Attempt to locate one of the given set of files. Messages are logged
+   as appropriate to describe what has been found. The return value will
+   be 0 or greater on success (it will be the index of the file name that
+   was found) and fw_entry will be filled in. Otherwise a negative error
+   code is returned; if that value is -ENOENT then no viable firmware
+   file could be located. */
+static int pvr2_locate_firmware(struct pvr2_hdw *hdw,
+ const struct firmware **fw_entry,
+ const char *fwtypename,
+ unsigned int fwcount,
+ const char *fwnames[])
+{
+ unsigned int idx;
+ int ret = -EINVAL;
+ for (idx = 0; idx < fwcount; idx++) {
+ ret = request_firmware(fw_entry,
+ fwnames[idx],
+ &hdw->usb_dev->dev);
+ if (!ret) {
+ trace_firmware("Located %s firmware: %s;"
+ " uploading...",
+ fwtypename,
+ fwnames[idx]);
+ return idx;
+ }
+ if (ret == -ENOENT) continue;
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware fatal error with code=%d",ret);
+ return ret;
+ }
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "***WARNING***"
+ " Device %s firmware"
+ " seems to be missing.",
+ fwtypename);
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Did you install the pvrusb2 firmware files"
+ " in their proper location?");
+ if (fwcount == 1) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware unable to locate %s file %s",
+ fwtypename,fwnames[0]);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware unable to locate"
+ " one of the following %s files:",
+ fwtypename);
+ for (idx = 0; idx < fwcount; idx++) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "request_firmware: Failed to find %s",
+ fwnames[idx]);
+ }
+ }
+ return ret;
+}
+
+
+/*
+ * pvr2_upload_firmware1().
+ *
+ * Send the 8051 firmware to the device. After the upload, arrange for
+ * the device to re-enumerate.
+ *
+ * NOTE: the pointer to the firmware data returned by request_firmware()
+ * is not suitable for a USB transaction.
+ *
+ */
+int pvr2_upload_firmware1(struct pvr2_hdw *hdw)
+{
+ const struct firmware *fw_entry = 0;
+ void *fw_ptr;
+ unsigned int pipe;
+ int ret;
+ u16 address;
+ static const char *fw_files_29xxx[] = {
+ "v4l-pvrusb2-29xxx-01.fw",
+ };
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ static const char *fw_files_24xxx[] = {
+ "v4l-pvrusb2-24xxx-01.fw",
+ };
+#endif
+ static const struct pvr2_string_table fw_file_defs[] = {
+ [PVR2_HDW_TYPE_29XXX] = {
+ fw_files_29xxx,
+ sizeof(fw_files_29xxx)/sizeof(fw_files_29xxx[0]),
+ },
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ [PVR2_HDW_TYPE_24XXX] = {
+ fw_files_24xxx,
+ sizeof(fw_files_24xxx)/sizeof(fw_files_24xxx[0]),
+ },
+#endif
+ };
+ hdw->fw1_state = FW1_STATE_FAILED; // default result
+
+ trace_firmware("pvr2_upload_firmware1");
+
+ ret = pvr2_locate_firmware(hdw,&fw_entry,"fx2 controller",
+ fw_file_defs[hdw->hdw_type].cnt,
+ fw_file_defs[hdw->hdw_type].lst);
+ if (ret < 0) {
+ if (ret == -ENOENT) hdw->fw1_state = FW1_STATE_MISSING;
+ return ret;
+ }
+
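+	/* Reset the data toggle and clear any halt condition on endpoint 0
+	   before issuing the control transfers below. */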
+ usb_settoggle(hdw->usb_dev, 0 & 0xf, !(0 & USB_DIR_IN), 0);
+ usb_clear_halt(hdw->usb_dev, usb_sndbulkpipe(hdw->usb_dev, 0 & 0x7f));
+
+ pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
+
+ if (fw_entry->size != 0x2000){
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,"wrong fx2 firmware size");
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ fw_ptr = kmalloc(0x800, GFP_KERNEL);
+ if (fw_ptr == NULL){
+ release_firmware(fw_entry);
+ return -ENOMEM;
+ }
+
+ /* We have to hold the CPU during firmware upload. */
+ pvr2_hdw_cpureset_assert(hdw,1);
+
+	/* Upload the firmware to address 0000-1fff in 2048 (=0x800) byte
+	   chunks. */
+
+ ret = 0;
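+	/* usb_control_msg() returns the number of bytes transferred on
+	   success, so ret accumulates the total upload size; it is compared
+	   against the expected 8192 bytes below. */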
+ for(address = 0; address < fw_entry->size; address += 0x800) {
+ memcpy(fw_ptr, fw_entry->data + address, 0x800);
+ ret += usb_control_msg(hdw->usb_dev, pipe, 0xa0, 0x40, address,
+ 0, fw_ptr, 0x800, HZ);
+ }
+
+ trace_firmware("Upload done, releasing device's CPU");
+
+ /* Now release the CPU. It will disconnect and reconnect later. */
+ pvr2_hdw_cpureset_assert(hdw,0);
+
+ kfree(fw_ptr);
+ release_firmware(fw_entry);
+
+ trace_firmware("Upload done (%d bytes sent)",ret);
+
+ /* We should have written 8192 bytes */
+ if (ret == 8192) {
+ hdw->fw1_state = FW1_STATE_RELOAD;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+
+/*
+ * pvr2_upload_firmware2()
+ *
+ * This uploads encoder firmware on endpoint 2.
+ *
+ */
+
+int pvr2_upload_firmware2(struct pvr2_hdw *hdw)
+{
+ const struct firmware *fw_entry = 0;
+ void *fw_ptr;
+ unsigned int pipe, fw_len, fw_done;
+ int actual_length;
+ int ret = 0;
+ int fwidx;
+ static const char *fw_files[] = {
+ CX2341X_FIRM_ENC_FILENAME,
+ };
+
+ trace_firmware("pvr2_upload_firmware2");
+
+ ret = pvr2_locate_firmware(hdw,&fw_entry,"encoder",
+ sizeof(fw_files)/sizeof(fw_files[0]),
+ fw_files);
+ if (ret < 0) return ret;
+ fwidx = ret;
+ ret = 0;
+	/* Since we're about to completely reinitialize the encoder,
+	   invalidate our cached copy of its configuration state. The next
+	   time we configure the encoder we'll fully reconfigure it. */
+ hdw->enc_cur_valid = 0;
+
+ /* First prepare firmware loading */
+ ret |= pvr2_write_register(hdw, 0x0048, 0xffffffff); /*interrupt mask*/
+ ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000088); /*gpio dir*/
+ ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/
+ ret |= pvr2_hdw_cmd_deep_reset(hdw);
+ ret |= pvr2_write_register(hdw, 0xa064, 0x00000000); /*APU command*/
+ ret |= pvr2_hdw_gpio_chg_dir(hdw,0xffffffff,0x00000408); /*gpio dir*/
+ ret |= pvr2_hdw_gpio_chg_out(hdw,0xffffffff,0x00000008); /*gpio output state*/
+ ret |= pvr2_write_register(hdw, 0x9058, 0xffffffed); /*VPU ctrl*/
+ ret |= pvr2_write_register(hdw, 0x9054, 0xfffffffd); /*reset hw blocks*/
+ ret |= pvr2_write_register(hdw, 0x07f8, 0x80000800); /*encoder SDRAM refresh*/
+ ret |= pvr2_write_register(hdw, 0x07fc, 0x0000001a); /*encoder SDRAM pre-charge*/
+ ret |= pvr2_write_register(hdw, 0x0700, 0x00000000); /*I2C clock*/
+ ret |= pvr2_write_register(hdw, 0xaa00, 0x00000000); /*unknown*/
+ ret |= pvr2_write_register(hdw, 0xaa04, 0x00057810); /*unknown*/
+ ret |= pvr2_write_register(hdw, 0xaa10, 0x00148500); /*unknown*/
+ ret |= pvr2_write_register(hdw, 0xaa18, 0x00840000); /*unknown*/
+ ret |= pvr2_write_u8(hdw, 0x52, 0);
+ ret |= pvr2_write_u16(hdw, 0x0600, 0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "firmware2 upload prep failed, ret=%d",ret);
+ release_firmware(fw_entry);
+ return ret;
+ }
+
+ /* Now send firmware */
+
+ fw_len = fw_entry->size;
+
+ if (fw_len % FIRMWARE_CHUNK_SIZE) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "size of %s firmware"
+ " must be a multiple of 8192B",
+ fw_files[fwidx]);
+ release_firmware(fw_entry);
+ return -1;
+ }
+
+ fw_ptr = kmalloc(FIRMWARE_CHUNK_SIZE, GFP_KERNEL);
+ if (fw_ptr == NULL){
+ release_firmware(fw_entry);
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "failed to allocate memory for firmware2 upload");
+ return -ENOMEM;
+ }
+
+ pipe = usb_sndbulkpipe(hdw->usb_dev, PVR2_FIRMWARE_ENDPOINT);
+
+ for (fw_done = 0 ; (fw_done < fw_len) && !ret ;
+ fw_done += FIRMWARE_CHUNK_SIZE ) {
+ int i;
+ memcpy(fw_ptr, fw_entry->data + fw_done, FIRMWARE_CHUNK_SIZE);
+ /* Usbsnoop log shows that we must swap bytes... */
+ for (i = 0; i < FIRMWARE_CHUNK_SIZE/4 ; i++)
+ ((u32 *)fw_ptr)[i] = ___swab32(((u32 *)fw_ptr)[i]);
+
+ ret |= usb_bulk_msg(hdw->usb_dev, pipe, fw_ptr,
+ FIRMWARE_CHUNK_SIZE,
+ &actual_length, HZ);
+ ret |= (actual_length != FIRMWARE_CHUNK_SIZE);
+ }
+
+ trace_firmware("upload of %s : %i / %i ",
+ fw_files[fwidx],fw_done,fw_len);
+
+ kfree(fw_ptr);
+ release_firmware(fw_entry);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "firmware2 upload transfer failure");
+ return ret;
+ }
+
+ /* Finish upload */
+
+ ret |= pvr2_write_register(hdw, 0x9054, 0xffffffff); /*reset hw blocks*/
+ ret |= pvr2_write_register(hdw, 0x9058, 0xffffffe8); /*VPU ctrl*/
+ ret |= pvr2_write_u16(hdw, 0x0600, 0);
+
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "firmware2 upload post-proc failure");
+ } else {
+ hdw->subsys_enabled_mask |= (1<<PVR2_SUBSYS_B_ENC_FIRMWARE);
+ }
+ return ret;
+}
+
+
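+/* Subsystem bits that get cleared (and therefore reconfigured on the
+   next pass) whenever encoder error recovery is triggered below. */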
+#define FIRMWARE_RECOVERY_BITS \
+ ((1<<PVR2_SUBSYS_B_ENC_CFG) | \
+ (1<<PVR2_SUBSYS_B_ENC_RUN) | \
+ (1<<PVR2_SUBSYS_B_ENC_FIRMWARE) | \
+ (1<<PVR2_SUBSYS_B_USBSTREAM_RUN))
+
+/*
+
+ This single function is key to pretty much everything. The pvrusb2
+ device can logically be viewed as a series of subsystems which can be
+ stopped / started or unconfigured / configured. To get things streaming,
+ one must configure everything and start everything, but there may be
+ various reasons over time to deconfigure something or stop something.
+ This function handles all of this activity. Everything EVERYWHERE that
+ must affect a subsystem eventually comes here to do the work.
+
+ The current state of all subsystems is represented by a single bit mask,
+ known as subsys_enabled_mask. The bit positions are defined by the
+ PVR2_SUBSYS_xxxx macros, with one subsystem per bit position. At any
+ time the set of configured or active subsystems can be queried just by
+ looking at that mask. To change bits in that mask, this function here
+ must be called. The "msk" argument indicates which bit positions to
+ change, and the "val" argument defines the new values for the positions
+ defined by "msk".
+
+ There is a priority ordering of starting / stopping things, and for
+ multiple requested changes, this function implements that ordering.
+ (Thus we will act on a request to load encoder firmware before we
+ configure the encoder.) In addition to priority ordering, there is a
+ recovery strategy implemented here. If a particular step fails and we
+ detect that failure, this function will clear the affected subsystem bits
+ and restart. Thus we have a means for recovering from a dead encoder:
+ Clear all bits that correspond to subsystems that we need to restart /
+ reconfigure and start over.
+
+*/
+void pvr2_hdw_subsys_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val)
+{
+ unsigned long nmsk;
+ unsigned long vmsk;
+ int ret;
+ unsigned int tryCount = 0;
+
+ if (!hdw->flag_ok) return;
+
+ msk &= PVR2_SUBSYS_ALL;
+ nmsk = (hdw->subsys_enabled_mask & ~msk) | (val & msk);
+ nmsk &= PVR2_SUBSYS_ALL;
+
+ for (;;) {
+ tryCount++;
+ if (!((nmsk ^ hdw->subsys_enabled_mask) &
+ PVR2_SUBSYS_ALL)) break;
+ if (tryCount > 4) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Too many retries when configuring device;"
+ " giving up");
+ pvr2_hdw_render_useless(hdw);
+ break;
+ }
+ if (tryCount > 1) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Retrying device reconfiguration");
+ }
+ pvr2_trace(PVR2_TRACE_INIT,
+ "subsys mask changing 0x%lx:0x%lx"
+ " from 0x%lx to 0x%lx",
+ msk,val,hdw->subsys_enabled_mask,nmsk);
+
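+		/* First pass: find subsystems that are currently enabled but
+		   should be shut down, and stop them in priority order. */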
+ vmsk = (nmsk ^ hdw->subsys_enabled_mask) &
+ hdw->subsys_enabled_mask;
+ if (vmsk) {
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_encoder_stop");
+ ret = pvr2_encoder_stop(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error recovery initiated");
+ hdw->subsys_enabled_mask &=
+ ~FIRMWARE_RECOVERY_BITS;
+ continue;
+ }
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_hdw_cmd_usbstream(0)");
+ pvr2_hdw_cmd_usbstream(hdw,0);
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " decoder disable");
+ if (hdw->decoder_ctrl) {
+ hdw->decoder_ctrl->enable(
+ hdw->decoder_ctrl->ctxt,0);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING:"
+ " No decoder present");
+ }
+ hdw->subsys_enabled_mask &=
+ ~(1<<PVR2_SUBSYS_B_DIGITIZER_RUN);
+ }
+ if (vmsk & PVR2_SUBSYS_CFG_ALL) {
+ hdw->subsys_enabled_mask &=
+ ~(vmsk & PVR2_SUBSYS_CFG_ALL);
+ }
+ }
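+		/* Second pass: find subsystems that should be running but are
+		   not yet enabled, and bring them up in priority order. */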
+ vmsk = (nmsk ^ hdw->subsys_enabled_mask) & nmsk;
+ if (vmsk) {
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_FIRMWARE)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_upload_firmware2");
+ ret = pvr2_upload_firmware2(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failure uploading encoder"
+ " firmware");
+ pvr2_hdw_render_useless(hdw);
+ break;
+ }
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_CFG)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_encoder_configure");
+ ret = pvr2_encoder_configure(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error recovery initiated");
+ hdw->subsys_enabled_mask &=
+ ~FIRMWARE_RECOVERY_BITS;
+ continue;
+ }
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_DIGITIZER_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " decoder enable");
+ if (hdw->decoder_ctrl) {
+ hdw->decoder_ctrl->enable(
+ hdw->decoder_ctrl->ctxt,!0);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING:"
+ " No decoder present");
+ }
+ hdw->subsys_enabled_mask |=
+ (1<<PVR2_SUBSYS_B_DIGITIZER_RUN);
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_hdw_cmd_usbstream(1)");
+ pvr2_hdw_cmd_usbstream(hdw,!0);
+ }
+ if (vmsk & (1<<PVR2_SUBSYS_B_ENC_RUN)) {
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*---TRACE_CTL----*/"
+ " pvr2_encoder_start");
+ ret = pvr2_encoder_start(hdw);
+ if (ret) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Error recovery initiated");
+ hdw->subsys_enabled_mask &=
+ ~FIRMWARE_RECOVERY_BITS;
+ continue;
+ }
+ }
+ }
+ }
+}
+
+
+void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,msk,val);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk)
+{
+ pvr2_hdw_subsys_bit_chg(hdw,msk,msk);
+}
+
+
+void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk)
+{
+ pvr2_hdw_subsys_bit_chg(hdw,msk,0);
+}
+
+
+unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *hdw)
+{
+ return hdw->subsys_enabled_mask;
+}
+
+
+unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *hdw)
+{
+ return hdw->subsys_stream_mask;
+}
+
+
+void pvr2_hdw_subsys_stream_bit_chg_no_lock(struct pvr2_hdw *hdw,
+ unsigned long msk,
+ unsigned long val)
+{
+ unsigned long val2;
+ msk &= PVR2_SUBSYS_ALL;
+ val2 = ((hdw->subsys_stream_mask & ~msk) | (val & msk));
+ pvr2_trace(PVR2_TRACE_INIT,
+ "stream mask changing 0x%lx:0x%lx from 0x%lx to 0x%lx",
+ msk,val,hdw->subsys_stream_mask,val2);
+ hdw->subsys_stream_mask = val2;
+}
+
+
+void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,
+ unsigned long val)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_subsys_stream_bit_chg_no_lock(hdw,msk,val);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+int pvr2_hdw_set_streaming_no_lock(struct pvr2_hdw *hdw,int enableFl)
+{
+ if ((!enableFl) == !(hdw->flag_streaming_enabled)) return 0;
+ if (enableFl) {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*--TRACE_STREAM--*/ enable");
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,~0);
+ } else {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*--TRACE_STREAM--*/ disable");
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
+ }
+ if (!hdw->flag_ok) return -EIO;
+ hdw->flag_streaming_enabled = enableFl != 0;
+ return 0;
+}
+
+
+int pvr2_hdw_get_streaming(struct pvr2_hdw *hdw)
+{
+ return hdw->flag_streaming_enabled != 0;
+}
+
+
+int pvr2_hdw_set_streaming(struct pvr2_hdw *hdw,int enable_flag)
+{
+ int ret;
+ LOCK_TAKE(hdw->big_lock); do {
+ ret = pvr2_hdw_set_streaming_no_lock(hdw,enable_flag);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return ret;
+}
+
+
+int pvr2_hdw_set_stream_type_no_lock(struct pvr2_hdw *hdw,
+ enum pvr2_config config)
+{
+ unsigned long sm = hdw->subsys_enabled_mask;
+ if (!hdw->flag_ok) return -EIO;
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,hdw->subsys_stream_mask,0);
+ hdw->config = config;
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,sm);
+ return 0;
+}
+
+
+int pvr2_hdw_set_stream_type(struct pvr2_hdw *hdw,enum pvr2_config config)
+{
+ int ret;
+ if (!hdw->flag_ok) return -EIO;
+ LOCK_TAKE(hdw->big_lock);
+ ret = pvr2_hdw_set_stream_type_no_lock(hdw,config);
+ LOCK_GIVE(hdw->big_lock);
+ return ret;
+}
+
+
+static int get_default_tuner_type(struct pvr2_hdw *hdw)
+{
+ int unit_number = hdw->unit_number;
+ int tp = -1;
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ tp = tuner[unit_number];
+ }
+ if (tp < 0) return -EINVAL;
+ hdw->tuner_type = tp;
+ return 0;
+}
+
+
+static v4l2_std_id get_default_standard(struct pvr2_hdw *hdw)
+{
+ int unit_number = hdw->unit_number;
+ int tp = 0;
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ tp = video_std[unit_number];
+ }
+ return tp;
+}
+
+
+static unsigned int get_default_error_tolerance(struct pvr2_hdw *hdw)
+{
+ int unit_number = hdw->unit_number;
+ int tp = 0;
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ tp = tolerance[unit_number];
+ }
+ return tp;
+}
+
+
+static int pvr2_hdw_check_firmware(struct pvr2_hdw *hdw)
+{
+	/* Try a harmless request to fetch the eeprom's address over
+	   endpoint 1 and see what happens. Only the full FX2 image can
+	   respond to this. If this probe fails then the FX2 firmware
+	   likely needs to be loaded. */
+ int result;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = 0xeb;
+ result = pvr2_send_request_ex(hdw,HZ*1,!0,
+ hdw->cmd_buffer,1,
+ hdw->cmd_buffer,1);
+ if (result < 0) break;
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+ if (result) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Probe of device endpoint 1 result status %d",
+ result);
+ } else {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Probe of device endpoint 1 succeeded");
+ }
+ return result == 0;
+}
+
+static void pvr2_hdw_setup_std(struct pvr2_hdw *hdw)
+{
+ char buf[40];
+ unsigned int bcnt;
+ v4l2_std_id std1,std2;
+
+ std1 = get_default_standard(hdw);
+
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),hdw->std_mask_eeprom);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Supported video standard(s) reported by eeprom: %.*s",
+ bcnt,buf);
+
+ hdw->std_mask_avail = hdw->std_mask_eeprom;
+
+ std2 = std1 & ~hdw->std_mask_avail;
+ if (std2) {
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std2);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Expanding supported video standards"
+ " to include: %.*s",
+ bcnt,buf);
+ hdw->std_mask_avail |= std2;
+ }
+
+ pvr2_hdw_internal_set_std_avail(hdw);
+
+ if (std1) {
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),std1);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Initial video standard forced to %.*s",
+ bcnt,buf);
+ hdw->std_mask_cur = std1;
+ hdw->std_dirty = !0;
+ pvr2_hdw_internal_find_stdenum(hdw);
+ return;
+ }
+
+ if (hdw->std_enum_cnt > 1) {
+ // Autoselect the first listed standard
+ hdw->std_enum_cur = 1;
+ hdw->std_mask_cur = hdw->std_defs[hdw->std_enum_cur-1].id;
+ hdw->std_dirty = !0;
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Initial video standard auto-selected to %s",
+ hdw->std_defs[hdw->std_enum_cur-1].name);
+ return;
+ }
+
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Unable to select a viable initial video standard");
+}
+
+
+static void pvr2_hdw_setup_low(struct pvr2_hdw *hdw)
+{
+ int ret;
+ unsigned int idx;
+ struct pvr2_ctrl *cptr;
+ int reloadFl = 0;
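+	/* Decide whether the FX2 firmware needs to be (re)loaded: either
+	   the current altsetting exposes no endpoints, or the firmware
+	   probe below fails. */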
+ if (!reloadFl) {
+ reloadFl = (hdw->usb_intf->cur_altsetting->desc.bNumEndpoints
+ == 0);
+ if (reloadFl) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "USB endpoint config looks strange"
+ "; possibly firmware needs to be loaded");
+ }
+ }
+ if (!reloadFl) {
+ reloadFl = !pvr2_hdw_check_firmware(hdw);
+ if (reloadFl) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Check for FX2 firmware failed"
+ "; possibly firmware needs to be loaded");
+ }
+ }
+ if (reloadFl) {
+ if (pvr2_upload_firmware1(hdw) != 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failure uploading firmware1");
+ }
+ return;
+ }
+ hdw->fw1_state = FW1_STATE_OK;
+
+ if (initusbreset) {
+ pvr2_hdw_device_reset(hdw);
+ }
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ for (idx = 0; idx < pvr2_client_lists[hdw->hdw_type].cnt; idx++) {
+ request_module(pvr2_client_lists[hdw->hdw_type].lst[idx]);
+ }
+
+ pvr2_hdw_cmd_powerup(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ if (pvr2_upload_firmware2(hdw)){
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,"device unstable!!");
+ pvr2_hdw_render_useless(hdw);
+ return;
+ }
+
+ // This step MUST happen after the earlier powerup step.
+ pvr2_i2c_core_init(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ for (idx = 0; idx < CTRLDEF_COUNT; idx++) {
+ cptr = hdw->controls + idx;
+ if (cptr->info->skip_init) continue;
+ if (!cptr->info->set_value) continue;
+ cptr->info->set_value(cptr,~0,cptr->info->default_value);
+ }
+
+ // Do not use pvr2_reset_ctl_endpoints() here. It is not
+ // thread-safe against the normal pvr2_send_request() mechanism.
+	// (We should make it thread-safe.)
+
+ ret = pvr2_hdw_get_eeprom_addr(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+ if (ret < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Unable to determine location of eeprom, skipping");
+ } else {
+ hdw->eeprom_addr = ret;
+ pvr2_eeprom_analyze(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+ }
+
+ pvr2_hdw_setup_std(hdw);
+
+ if (!get_default_tuner_type(hdw)) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup: Tuner type overridden to %d",
+ hdw->tuner_type);
+ }
+
+ hdw->tuner_updated = !0;
+ pvr2_i2c_core_check_stale(hdw);
+ hdw->tuner_updated = 0;
+
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ pvr2_hdw_commit_ctl_internal(hdw);
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ hdw->vid_stream = pvr2_stream_create();
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup: video stream is %p",hdw->vid_stream);
+ if (hdw->vid_stream) {
+ idx = get_default_error_tolerance(hdw);
+ if (idx) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup: video stream %p"
+ " setting tolerance %u",
+ hdw->vid_stream,idx);
+ }
+ pvr2_stream_setup(hdw->vid_stream,hdw->usb_dev,
+ PVR2_VID_ENDPOINT,idx);
+ }
+
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ /* Make sure everything is up to date */
+ pvr2_i2c_core_sync(hdw);
+
+ if (!pvr2_hdw_dev_ok(hdw)) return;
+
+ hdw->flag_init_ok = !0;
+}
+
+
+int pvr2_hdw_setup(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) begin",hdw);
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_setup_low(hdw);
+ pvr2_trace(PVR2_TRACE_INIT,
+ "pvr2_hdw_setup(hdw=%p) done, ok=%d init_ok=%d",
+ hdw,hdw->flag_ok,hdw->flag_init_ok);
+ if (pvr2_hdw_dev_ok(hdw)) {
+ if (pvr2_hdw_init_ok(hdw)) {
+ pvr2_trace(
+ PVR2_TRACE_INFO,
+ "Device initialization"
+ " completed successfully.");
+ break;
+ }
+ if (hdw->fw1_state == FW1_STATE_RELOAD) {
+ pvr2_trace(
+ PVR2_TRACE_INFO,
+ "Device microcontroller firmware"
+ " (re)loaded; it should now reset"
+ " and reconnect.");
+ break;
+ }
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Device initialization was not successful.");
+ if (hdw->fw1_state == FW1_STATE_MISSING) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Giving up since device"
+ " microcontroller firmware"
+ " appears to be missing.");
+ break;
+ }
+ }
+ if (procreload) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempting pvrusb2 recovery by reloading"
+ " primary firmware.");
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "If this works, device should disconnect"
+ " and reconnect in a sane state.");
+ hdw->fw1_state = FW1_STATE_UNKNOWN;
+ pvr2_upload_firmware1(hdw);
+ } else {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "***WARNING*** pvrusb2 device hardware"
+ " appears to be jammed"
+ " and I can't clear it.");
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "You might need to power cycle"
+ " the pvrusb2 device"
+ " in order to recover.");
+ }
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_setup(hdw=%p) end",hdw);
+ return hdw->flag_init_ok;
+}
+
+
+/* Create and return a structure for interacting with the underlying
+ hardware */
+struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+ const struct usb_device_id *devid)
+{
+ unsigned int idx,cnt1,cnt2;
+ struct pvr2_hdw *hdw;
+ unsigned int hdw_type;
+ int valid_std_mask;
+ struct pvr2_ctrl *cptr;
+ __u8 ifnum;
+ struct v4l2_queryctrl qctrl;
+ struct pvr2_ctl_info *ciptr;
+
+ hdw_type = devid - pvr2_device_table;
+ if (hdw_type >=
+ sizeof(pvr2_device_names)/sizeof(pvr2_device_names[0])) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Bogus device type of %u reported",hdw_type);
+ return 0;
+ }
+
+ hdw = kmalloc(sizeof(*hdw),GFP_KERNEL);
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_create: hdw=%p, type \"%s\"",
+ hdw,pvr2_device_names[hdw_type]);
+ if (!hdw) goto fail;
+ memset(hdw,0,sizeof(*hdw));
+ cx2341x_fill_defaults(&hdw->enc_ctl_state);
+
+ hdw->control_cnt = CTRLDEF_COUNT;
+ hdw->control_cnt += MPEGDEF_COUNT;
+ hdw->controls = kmalloc(sizeof(struct pvr2_ctrl) * hdw->control_cnt,
+ GFP_KERNEL);
+ if (!hdw->controls) goto fail;
+ memset(hdw->controls,0,sizeof(struct pvr2_ctrl) * hdw->control_cnt);
+ hdw->hdw_type = hdw_type;
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ cptr->hdw = hdw;
+ }
+ for (idx = 0; idx < 32; idx++) {
+ hdw->std_mask_ptrs[idx] = hdw->std_mask_names[idx];
+ }
+ for (idx = 0; idx < CTRLDEF_COUNT; idx++) {
+ cptr = hdw->controls + idx;
+ cptr->info = control_defs+idx;
+ }
+ /* Define and configure additional controls from cx2341x module. */
+ hdw->mpeg_ctrl_info = kmalloc(
+ sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT, GFP_KERNEL);
+ if (!hdw->mpeg_ctrl_info) goto fail;
+ memset(hdw->mpeg_ctrl_info,0,
+ sizeof(*(hdw->mpeg_ctrl_info)) * MPEGDEF_COUNT);
+ for (idx = 0; idx < MPEGDEF_COUNT; idx++) {
+ cptr = hdw->controls + idx + CTRLDEF_COUNT;
+ ciptr = &(hdw->mpeg_ctrl_info[idx].info);
+ ciptr->desc = hdw->mpeg_ctrl_info[idx].desc;
+ ciptr->name = mpeg_ids[idx].strid;
+ ciptr->v4l_id = mpeg_ids[idx].id;
+ ciptr->skip_init = !0;
+ ciptr->get_value = ctrl_cx2341x_get;
+ ciptr->get_v4lflags = ctrl_cx2341x_getv4lflags;
+ ciptr->is_dirty = ctrl_cx2341x_is_dirty;
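+		/* All cx2341x controls share the hdw->enc_stale dirty flag, so
+		   only the first control is given a clear_dirty handler. */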
+ if (!idx) ciptr->clear_dirty = ctrl_cx2341x_clear_dirty;
+ qctrl.id = ciptr->v4l_id;
+ cx2341x_ctrl_query(&hdw->enc_ctl_state,&qctrl);
+ if (!(qctrl.flags & V4L2_CTRL_FLAG_READ_ONLY)) {
+ ciptr->set_value = ctrl_cx2341x_set;
+ }
+ strncpy(hdw->mpeg_ctrl_info[idx].desc,qctrl.name,
+ PVR2_CTLD_INFO_DESC_SIZE);
+ hdw->mpeg_ctrl_info[idx].desc[PVR2_CTLD_INFO_DESC_SIZE-1] = 0;
+ ciptr->default_value = qctrl.default_value;
+ switch (qctrl.type) {
+ default:
+ case V4L2_CTRL_TYPE_INTEGER:
+ ciptr->type = pvr2_ctl_int;
+ ciptr->def.type_int.min_value = qctrl.minimum;
+ ciptr->def.type_int.max_value = qctrl.maximum;
+ break;
+ case V4L2_CTRL_TYPE_BOOLEAN:
+ ciptr->type = pvr2_ctl_bool;
+ break;
+ case V4L2_CTRL_TYPE_MENU:
+ ciptr->type = pvr2_ctl_enum;
+ ciptr->def.type_enum.value_names =
+ cx2341x_ctrl_get_menu(ciptr->v4l_id);
+ for (cnt1 = 0;
+ ciptr->def.type_enum.value_names[cnt1] != NULL;
+ cnt1++) { }
+ ciptr->def.type_enum.count = cnt1;
+ break;
+ }
+ cptr->info = ciptr;
+ }
+
+ // Initialize video standard enum dynamic control
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDENUM);
+ if (cptr) {
+ memcpy(&hdw->std_info_enum,cptr->info,
+ sizeof(hdw->std_info_enum));
+ cptr->info = &hdw->std_info_enum;
+
+ }
+ // Initialize control data regarding video standard masks
+ valid_std_mask = pvr2_std_get_usable();
+ for (idx = 0; idx < 32; idx++) {
+ if (!(valid_std_mask & (1 << idx))) continue;
+ cnt1 = pvr2_std_id_to_str(
+ hdw->std_mask_names[idx],
+ sizeof(hdw->std_mask_names[idx])-1,
+ 1 << idx);
+ hdw->std_mask_names[idx][cnt1] = 0;
+ }
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDAVAIL);
+ if (cptr) {
+ memcpy(&hdw->std_info_avail,cptr->info,
+ sizeof(hdw->std_info_avail));
+ cptr->info = &hdw->std_info_avail;
+ hdw->std_info_avail.def.type_bitmask.bit_names =
+ hdw->std_mask_ptrs;
+ hdw->std_info_avail.def.type_bitmask.valid_bits =
+ valid_std_mask;
+ }
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR);
+ if (cptr) {
+ memcpy(&hdw->std_info_cur,cptr->info,
+ sizeof(hdw->std_info_cur));
+ cptr->info = &hdw->std_info_cur;
+ hdw->std_info_cur.def.type_bitmask.bit_names =
+ hdw->std_mask_ptrs;
+		hdw->std_info_cur.def.type_bitmask.valid_bits =
+			valid_std_mask;
+ }
+
+ hdw->eeprom_addr = -1;
+ hdw->unit_number = -1;
+ hdw->v4l_minor_number = -1;
+ hdw->ctl_write_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL);
+ if (!hdw->ctl_write_buffer) goto fail;
+ hdw->ctl_read_buffer = kmalloc(PVR2_CTL_BUFFSIZE,GFP_KERNEL);
+ if (!hdw->ctl_read_buffer) goto fail;
+ hdw->ctl_write_urb = usb_alloc_urb(0,GFP_KERNEL);
+ if (!hdw->ctl_write_urb) goto fail;
+ hdw->ctl_read_urb = usb_alloc_urb(0,GFP_KERNEL);
+ if (!hdw->ctl_read_urb) goto fail;
+
+ down(&pvr2_unit_sem); do {
+ for (idx = 0; idx < PVR_NUM; idx++) {
+ if (unit_pointers[idx]) continue;
+ hdw->unit_number = idx;
+ unit_pointers[idx] = hdw;
+ break;
+ }
+ } while (0); up(&pvr2_unit_sem);
+
+ cnt1 = 0;
+ cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"pvrusb2");
+ cnt1 += cnt2;
+ if (hdw->unit_number >= 0) {
+ cnt2 = scnprintf(hdw->name+cnt1,sizeof(hdw->name)-cnt1,"_%c",
+ ('a' + hdw->unit_number));
+ cnt1 += cnt2;
+ }
+ if (cnt1 >= sizeof(hdw->name)) cnt1 = sizeof(hdw->name)-1;
+ hdw->name[cnt1] = 0;
+
+ pvr2_trace(PVR2_TRACE_INIT,"Driver unit number is %d, name is %s",
+ hdw->unit_number,hdw->name);
+
+ hdw->tuner_type = -1;
+ hdw->flag_ok = !0;
+ /* Initialize the mask of subsystems that we will shut down when we
+ stop streaming. */
+ hdw->subsys_stream_mask = PVR2_SUBSYS_RUN_ALL;
+ hdw->subsys_stream_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
+
+ pvr2_trace(PVR2_TRACE_INIT,"subsys_stream_mask: 0x%lx",
+ hdw->subsys_stream_mask);
+
+ hdw->usb_intf = intf;
+ hdw->usb_dev = interface_to_usbdev(intf);
+
+ ifnum = hdw->usb_intf->cur_altsetting->desc.bInterfaceNumber;
+ usb_set_interface(hdw->usb_dev,ifnum,0);
+
+ mutex_init(&hdw->ctl_lock_mutex);
+ mutex_init(&hdw->big_lock_mutex);
+
+ return hdw;
+ fail:
+ if (hdw) {
+ if (hdw->ctl_read_urb) usb_free_urb(hdw->ctl_read_urb);
+ if (hdw->ctl_write_urb) usb_free_urb(hdw->ctl_write_urb);
+ if (hdw->ctl_read_buffer) kfree(hdw->ctl_read_buffer);
+ if (hdw->ctl_write_buffer) kfree(hdw->ctl_write_buffer);
+ if (hdw->controls) kfree(hdw->controls);
+ if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info);
+ kfree(hdw);
+ }
+ return 0;
+}
+
+
+/* Remove _all_ associations between this driver and the underlying USB
+ layer. */
+void pvr2_hdw_remove_usb_stuff(struct pvr2_hdw *hdw)
+{
+ if (hdw->flag_disconnected) return;
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_remove_usb_stuff: hdw=%p",hdw);
+ if (hdw->ctl_read_urb) {
+ usb_kill_urb(hdw->ctl_read_urb);
+ usb_free_urb(hdw->ctl_read_urb);
+ hdw->ctl_read_urb = 0;
+ }
+ if (hdw->ctl_write_urb) {
+ usb_kill_urb(hdw->ctl_write_urb);
+ usb_free_urb(hdw->ctl_write_urb);
+ hdw->ctl_write_urb = 0;
+ }
+ if (hdw->ctl_read_buffer) {
+ kfree(hdw->ctl_read_buffer);
+ hdw->ctl_read_buffer = 0;
+ }
+ if (hdw->ctl_write_buffer) {
+ kfree(hdw->ctl_write_buffer);
+ hdw->ctl_write_buffer = 0;
+ }
+ pvr2_hdw_render_useless_unlocked(hdw);
+ hdw->flag_disconnected = !0;
+ hdw->usb_dev = 0;
+ hdw->usb_intf = 0;
+}
+
+
+/* Destroy hardware interaction structure */
+void pvr2_hdw_destroy(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_destroy: hdw=%p",hdw);
+ if (hdw->fw_buffer) {
+ kfree(hdw->fw_buffer);
+ hdw->fw_buffer = 0;
+ }
+ if (hdw->vid_stream) {
+ pvr2_stream_destroy(hdw->vid_stream);
+ hdw->vid_stream = 0;
+ }
+ if (hdw->audio_stat) {
+ hdw->audio_stat->detach(hdw->audio_stat->ctxt);
+ }
+ if (hdw->decoder_ctrl) {
+ hdw->decoder_ctrl->detach(hdw->decoder_ctrl->ctxt);
+ }
+ pvr2_i2c_core_done(hdw);
+ pvr2_hdw_remove_usb_stuff(hdw);
+ down(&pvr2_unit_sem); do {
+ if ((hdw->unit_number >= 0) &&
+ (hdw->unit_number < PVR_NUM) &&
+ (unit_pointers[hdw->unit_number] == hdw)) {
+ unit_pointers[hdw->unit_number] = 0;
+ }
+ } while (0); up(&pvr2_unit_sem);
+ if (hdw->controls) kfree(hdw->controls);
+ if (hdw->mpeg_ctrl_info) kfree(hdw->mpeg_ctrl_info);
+ if (hdw->std_defs) kfree(hdw->std_defs);
+ if (hdw->std_enum_names) kfree(hdw->std_enum_names);
+ kfree(hdw);
+}
+
+
+int pvr2_hdw_init_ok(struct pvr2_hdw *hdw)
+{
+ return hdw->flag_init_ok;
+}
+
+
+int pvr2_hdw_dev_ok(struct pvr2_hdw *hdw)
+{
+ return (hdw && hdw->flag_ok);
+}
+
+
+/* Called when hardware has been unplugged */
+void pvr2_hdw_disconnect(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_hdw_disconnect(hdw=%p)",hdw);
+ LOCK_TAKE(hdw->big_lock);
+ LOCK_TAKE(hdw->ctl_lock);
+ pvr2_hdw_remove_usb_stuff(hdw);
+ LOCK_GIVE(hdw->ctl_lock);
+ LOCK_GIVE(hdw->big_lock);
+}
+
+
+// Attempt to autoselect an appropriate value for std_enum_cur given
+// whatever is currently in std_mask_cur
+void pvr2_hdw_internal_find_stdenum(struct pvr2_hdw *hdw)
+{
+ unsigned int idx;
+ for (idx = 1; idx < hdw->std_enum_cnt; idx++) {
+ if (hdw->std_defs[idx-1].id == hdw->std_mask_cur) {
+ hdw->std_enum_cur = idx;
+ return;
+ }
+ }
+ hdw->std_enum_cur = 0;
+}
+
+
+// Calculate correct set of enumerated standards based on currently known
+// set of available standards bits.
+void pvr2_hdw_internal_set_std_avail(struct pvr2_hdw *hdw)
+{
+ struct v4l2_standard *newstd;
+ unsigned int std_cnt;
+ unsigned int idx;
+
+ newstd = pvr2_std_create_enum(&std_cnt,hdw->std_mask_avail);
+
+ if (hdw->std_defs) {
+ kfree(hdw->std_defs);
+ hdw->std_defs = 0;
+ }
+ hdw->std_enum_cnt = 0;
+ if (hdw->std_enum_names) {
+ kfree(hdw->std_enum_names);
+ hdw->std_enum_names = 0;
+ }
+
+ if (!std_cnt) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Failed to identify any viable standards");
+ }
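+	/* Build the enumerated-standard name table: slot 0 is a "none"
+	   placeholder, followed by one entry per detected standard. */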
+ hdw->std_enum_names = kmalloc(sizeof(char *)*(std_cnt+1),GFP_KERNEL);
+ hdw->std_enum_names[0] = "none";
+ for (idx = 0; idx < std_cnt; idx++) {
+ hdw->std_enum_names[idx+1] =
+ newstd[idx].name;
+ }
+ // Set up the dynamic control for this standard
+ hdw->std_info_enum.def.type_enum.value_names = hdw->std_enum_names;
+ hdw->std_info_enum.def.type_enum.count = std_cnt+1;
+ hdw->std_defs = newstd;
+ hdw->std_enum_cnt = std_cnt+1;
+ hdw->std_enum_cur = 0;
+ hdw->std_info_cur.def.type_bitmask.valid_bits = hdw->std_mask_avail;
+}
+
+
+int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,
+ struct v4l2_standard *std,
+ unsigned int idx)
+{
+ int ret = -EINVAL;
+ if (!idx) return ret;
+ LOCK_TAKE(hdw->big_lock); do {
+ if (idx >= hdw->std_enum_cnt) break;
+ idx--;
+ memcpy(std,hdw->std_defs+idx,sizeof(*std));
+ ret = 0;
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return ret;
+}
+
+
+/* Get the number of defined controls */
+unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *hdw)
+{
+ return hdw->control_cnt;
+}
+
+
+/* Retrieve a control handle given its index (0..count-1) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *hdw,
+ unsigned int idx)
+{
+ if (idx >= hdw->control_cnt) return 0;
+ return hdw->controls + idx;
+}
+
+
+/* Retrieve a control handle given its internal ID (not its index) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *hdw,
+ unsigned int ctl_id)
+{
+ struct pvr2_ctrl *cptr;
+ unsigned int idx;
+ int i;
+
+ /* This could be made a lot more efficient, but for now... */
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ i = cptr->info->internal_id;
+ if (i && (i == ctl_id)) return cptr;
+ }
+ return 0;
+}
+
+
+/* Given a V4L ID, retrieve the control structure associated with it. */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *hdw,unsigned int ctl_id)
+{
+ struct pvr2_ctrl *cptr;
+ unsigned int idx;
+ int i;
+
+ /* This could be made a lot more efficient, but for now... */
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ i = cptr->info->v4l_id;
+ if (i && (i == ctl_id)) return cptr;
+ }
+ return 0;
+}
+
+
+/* Given a V4L ID for its immediate predecessor, retrieve the control
+ structure associated with it. */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *hdw,
+ unsigned int ctl_id)
+{
+ struct pvr2_ctrl *cptr,*cp2;
+ unsigned int idx;
+ int i;
+
+ /* This could be made a lot more efficient, but for now... */
+ cp2 = 0;
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ i = cptr->info->v4l_id;
+ if (!i) continue;
+ if (i <= ctl_id) continue;
+ if (cp2 && (cp2->info->v4l_id < i)) continue;
+ cp2 = cptr;
+ }
+	return cp2;
+}
+
+
+static const char *get_ctrl_typename(enum pvr2_ctl_type tp)
+{
+ switch (tp) {
+ case pvr2_ctl_int: return "integer";
+ case pvr2_ctl_enum: return "enum";
+ case pvr2_ctl_bool: return "boolean";
+ case pvr2_ctl_bitmask: return "bitmask";
+ }
+ return "";
+}
+
+
+/* Commit all control changes made up to this point. Subsystems can be
+   indirectly affected by these changes. For a given set of things being
+   committed, we'll clear the affected subsystem bits and then, once
+   everything has been committed, request that the subsystem state(s) be
+   restored to the values they had before this function was called. Thus
+   we can automatically reconfigure affected pieces of the driver as
+   controls are changed. */
+int pvr2_hdw_commit_ctl_internal(struct pvr2_hdw *hdw)
+{
+ unsigned long saved_subsys_mask = hdw->subsys_enabled_mask;
+ unsigned long stale_subsys_mask = 0;
+ unsigned int idx;
+ struct pvr2_ctrl *cptr;
+ int value;
+ int commit_flag = 0;
+ char buf[100];
+ unsigned int bcnt,ccnt;
+
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ if (cptr->info->is_dirty == 0) continue;
+ if (!cptr->info->is_dirty(cptr)) continue;
+ if (!commit_flag) {
+ commit_flag = !0;
+ }
+
+ bcnt = scnprintf(buf,sizeof(buf),"\"%s\" <-- ",
+ cptr->info->name);
+ value = 0;
+ cptr->info->get_value(cptr,&value);
+ pvr2_ctrl_value_to_sym_internal(cptr,~0,value,
+ buf+bcnt,
+ sizeof(buf)-bcnt,&ccnt);
+ bcnt += ccnt;
+ bcnt += scnprintf(buf+bcnt,sizeof(buf)-bcnt," <%s>",
+ get_ctrl_typename(cptr->info->type));
+ pvr2_trace(PVR2_TRACE_CTL,
+ "/*--TRACE_COMMIT--*/ %.*s",
+ bcnt,buf);
+ }
+
+ if (!commit_flag) {
+ /* Nothing has changed */
+ return 0;
+ }
+
+ /* When video standard changes, reset the hres and vres values -
+ but if the user has pending changes there, then let the changes
+ take priority. */
+ if (hdw->std_dirty) {
+ /* Rewrite the vertical resolution to be appropriate to the
+ video standard that has been selected. */
+ int nvres;
+ if (hdw->std_mask_cur & V4L2_STD_525_60) {
+ nvres = 480;
+ } else {
+ nvres = 576;
+ }
+ if (nvres != hdw->res_ver_val) {
+ hdw->res_ver_val = nvres;
+ hdw->res_ver_dirty = !0;
+ }
+ }
+
+ if (hdw->std_dirty ||
+ 0) {
+ /* If any of this changes, then the encoder needs to be
+ reconfigured, and we need to reset the stream. */
+ stale_subsys_mask |= (1<<PVR2_SUBSYS_B_ENC_CFG);
+ stale_subsys_mask |= hdw->subsys_stream_mask;
+ }
+
+ if (hdw->srate_dirty) {
+ /* Write new sample rate into control structure since
+ * the master copy is stale. We must track srate
+ * separate from the mpeg control structure because
+ * other logic also uses this value. */
+ struct v4l2_ext_controls cs;
+ struct v4l2_ext_control c1;
+ memset(&cs,0,sizeof(cs));
+ memset(&c1,0,sizeof(c1));
+ cs.controls = &c1;
+ cs.count = 1;
+ c1.id = V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ;
+ c1.value = hdw->srate_val;
+ cx2341x_ext_ctrls(&hdw->enc_ctl_state,&cs,VIDIOC_S_EXT_CTRLS);
+ }
+
+ /* Scan i2c core at this point - before we clear all the dirty
+ bits. Various parts of the i2c core will notice dirty bits as
+ appropriate and arrange to broadcast or directly send updates to
+ the client drivers in order to keep everything in sync */
+ pvr2_i2c_core_check_stale(hdw);
+
+ for (idx = 0; idx < hdw->control_cnt; idx++) {
+ cptr = hdw->controls + idx;
+ if (!cptr->info->clear_dirty) continue;
+ cptr->info->clear_dirty(cptr);
+ }
+
+ /* Now execute i2c core update */
+ pvr2_i2c_core_sync(hdw);
+
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,stale_subsys_mask,0);
+ pvr2_hdw_subsys_bit_chg_no_lock(hdw,~0,saved_subsys_mask);
+
+ return 0;
+}
+
+
+int pvr2_hdw_commit_ctl(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_commit_ctl_internal(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return 0;
+}
+
+
+void pvr2_hdw_poll(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_i2c_core_sync(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *hdw,
+ void (*func)(void *),
+ void *data)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ hdw->poll_trigger_func = func;
+ hdw->poll_trigger_data = data;
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *hdw)
+{
+ if (hdw->poll_trigger_func) {
+ hdw->poll_trigger_func(hdw->poll_trigger_data);
+ }
+}
+
+
+void pvr2_hdw_poll_trigger(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->big_lock); do {
+ pvr2_hdw_poll_trigger_unlocked(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+/* Return name for this driver instance */
+const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *hdw)
+{
+ return hdw->name;
+}
+
+
+/* Return bit mask indicating signal status */
+unsigned int pvr2_hdw_get_signal_status_internal(struct pvr2_hdw *hdw)
+{
+ unsigned int msk = 0;
+ switch (hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ case PVR2_CVAL_INPUT_RADIO:
+ if (hdw->decoder_ctrl &&
+ hdw->decoder_ctrl->tuned(hdw->decoder_ctrl->ctxt)) {
+ msk |= PVR2_SIGNAL_OK;
+ if (hdw->audio_stat &&
+ hdw->audio_stat->status(hdw->audio_stat->ctxt)) {
+ if (hdw->flag_stereo) {
+ msk |= PVR2_SIGNAL_STEREO;
+ }
+ if (hdw->flag_bilingual) {
+ msk |= PVR2_SIGNAL_SAP;
+ }
+ }
+ }
+ break;
+ default:
+ msk |= PVR2_SIGNAL_OK | PVR2_SIGNAL_STEREO;
+ }
+ return msk;
+}
+
+
+int pvr2_hdw_is_hsm(struct pvr2_hdw *hdw)
+{
+ int result;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = 0x0b;
+ result = pvr2_send_request(hdw,
+ hdw->cmd_buffer,1,
+ hdw->cmd_buffer,1);
+ if (result < 0) break;
+ result = (hdw->cmd_buffer[0] != 0);
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+ return result;
+}
+
+
+/* Return bit mask indicating signal status */
+unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *hdw)
+{
+ unsigned int msk = 0;
+ LOCK_TAKE(hdw->big_lock); do {
+ msk = pvr2_hdw_get_signal_status_internal(hdw);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+ return msk;
+}
+
+
+/* Get handle to video output stream */
+struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *hp)
+{
+ return hp->vid_stream;
+}
+
+
+void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw)
+{
+ int nr = pvr2_hdw_get_unit_number(hdw);
+ LOCK_TAKE(hdw->big_lock); do {
+ hdw->log_requested = !0;
+ printk(KERN_INFO "pvrusb2: ================= START STATUS CARD #%d =================\n", nr);
+ pvr2_i2c_core_check_stale(hdw);
+ hdw->log_requested = 0;
+ pvr2_i2c_core_sync(hdw);
+ pvr2_trace(PVR2_TRACE_INFO,"cx2341x config:");
+ cx2341x_log_status(&hdw->enc_ctl_state, "pvrusb2");
+ printk(KERN_INFO "pvrusb2: ================== END STATUS CARD #%d ==================\n", nr);
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *hdw, int enable_flag)
+{
+ int ret;
+ u16 address;
+ unsigned int pipe;
+ LOCK_TAKE(hdw->big_lock); do {
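+		/* Nothing to do if the requested state already matches the
+		   current one (a firmware buffer exists exactly when fetching
+		   is enabled). */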
+ if ((hdw->fw_buffer == 0) == !enable_flag) break;
+
+ if (!enable_flag) {
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Cleaning up after CPU firmware fetch");
+ kfree(hdw->fw_buffer);
+ hdw->fw_buffer = 0;
+ hdw->fw_size = 0;
+ /* Now release the CPU. It will disconnect and
+ reconnect later. */
+ pvr2_hdw_cpureset_assert(hdw,0);
+ break;
+ }
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Preparing to suck out CPU firmware");
+ hdw->fw_size = 0x2000;
+ hdw->fw_buffer = kmalloc(hdw->fw_size,GFP_KERNEL);
+ if (!hdw->fw_buffer) {
+ hdw->fw_size = 0;
+ break;
+ }
+
+ memset(hdw->fw_buffer,0,hdw->fw_size);
+
+ /* We have to hold the CPU during firmware upload. */
+ pvr2_hdw_cpureset_assert(hdw,1);
+
+		/* Download the firmware from address 0000-1fff in 2048
+		   (=0x800) byte chunks. */
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,"Grabbing CPU firmware");
+ pipe = usb_rcvctrlpipe(hdw->usb_dev, 0);
+ for(address = 0; address < hdw->fw_size; address += 0x800) {
+ ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0xc0,
+ address,0,
+ hdw->fw_buffer+address,0x800,HZ);
+ if (ret < 0) break;
+ }
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,"Done grabbing CPU firmware");
+
+ } while (0); LOCK_GIVE(hdw->big_lock);
+}
+
+
+/* Return true if we're in CPU firmware retrieval mode */
+int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *hdw)
+{
+ return hdw->fw_buffer != 0;
+}
+
+
+int pvr2_hdw_cpufw_get(struct pvr2_hdw *hdw,unsigned int offs,
+ char *buf,unsigned int cnt)
+{
+ int ret = -EINVAL;
+ LOCK_TAKE(hdw->big_lock); do {
+ if (!buf) break;
+ if (!cnt) break;
+
+ if (!hdw->fw_buffer) {
+ ret = -EIO;
+ break;
+ }
+
+ if (offs >= hdw->fw_size) {
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Read firmware data offs=%d EOF",
+ offs);
+ ret = 0;
+ break;
+ }
+
+ if (offs + cnt > hdw->fw_size) cnt = hdw->fw_size - offs;
+
+ memcpy(buf,hdw->fw_buffer+offs,cnt);
+
+ pvr2_trace(PVR2_TRACE_FIRMWARE,
+ "Read firmware data offs=%d cnt=%d",
+ offs,cnt);
+ ret = cnt;
+ } while (0); LOCK_GIVE(hdw->big_lock);
+
+ return ret;
+}
+
+
+int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *hdw)
+{
+ return hdw->v4l_minor_number;
+}
+
+
+/* Store the v4l minor device number */
+void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *hdw,int v)
+{
+ hdw->v4l_minor_number = v;
+}
+
+
+void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw)
+{
+ if (!hdw->usb_dev) return;
+ usb_settoggle(hdw->usb_dev, PVR2_CTL_WRITE_ENDPOINT & 0xf,
+ !(PVR2_CTL_WRITE_ENDPOINT & USB_DIR_IN), 0);
+ usb_settoggle(hdw->usb_dev, PVR2_CTL_READ_ENDPOINT & 0xf,
+ !(PVR2_CTL_READ_ENDPOINT & USB_DIR_IN), 0);
+ usb_clear_halt(hdw->usb_dev,
+ usb_rcvbulkpipe(hdw->usb_dev,
+ PVR2_CTL_READ_ENDPOINT & 0x7f));
+ usb_clear_halt(hdw->usb_dev,
+ usb_sndbulkpipe(hdw->usb_dev,
+ PVR2_CTL_WRITE_ENDPOINT & 0x7f));
+}
+
+
+static void pvr2_ctl_write_complete(struct urb *urb, struct pt_regs *regs)
+{
+ struct pvr2_hdw *hdw = urb->context;
+ hdw->ctl_write_pend_flag = 0;
+ if (hdw->ctl_read_pend_flag) return;
+ complete(&hdw->ctl_done);
+}
+
+
+static void pvr2_ctl_read_complete(struct urb *urb, struct pt_regs *regs)
+{
+ struct pvr2_hdw *hdw = urb->context;
+ hdw->ctl_read_pend_flag = 0;
+ if (hdw->ctl_write_pend_flag) return;
+ complete(&hdw->ctl_done);
+}
+
+
+static void pvr2_ctl_timeout(unsigned long data)
+{
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)data;
+ if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
+ hdw->ctl_timeout_flag = !0;
+ if (hdw->ctl_write_pend_flag && hdw->ctl_write_urb) {
+ usb_unlink_urb(hdw->ctl_write_urb);
+ }
+ if (hdw->ctl_read_pend_flag && hdw->ctl_read_urb) {
+ usb_unlink_urb(hdw->ctl_read_urb);
+ }
+ }
+}
+
+
+int pvr2_send_request_ex(struct pvr2_hdw *hdw,
+ unsigned int timeout,int probe_fl,
+ void *write_data,unsigned int write_len,
+ void *read_data,unsigned int read_len)
+{
+ unsigned int idx;
+ int status = 0;
+ struct timer_list timer;
+ if (!hdw->ctl_lock_held) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute control transfer"
+ " without lock!!");
+ return -EDEADLK;
+ }
+ if ((!hdw->flag_ok) && !probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute control transfer"
+ " when device not ok");
+ return -EIO;
+ }
+ if (!(hdw->ctl_read_urb && hdw->ctl_write_urb)) {
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute control transfer"
+ " when USB is disconnected");
+ }
+ return -ENOTTY;
+ }
+
+ /* Ensure that we have sane parameters */
+ if (!write_data) write_len = 0;
+ if (!read_data) read_len = 0;
+ if (write_len > PVR2_CTL_BUFFSIZE) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute %d byte"
+ " control-write transfer (limit=%d)",
+ write_len,PVR2_CTL_BUFFSIZE);
+ return -EINVAL;
+ }
+ if (read_len > PVR2_CTL_BUFFSIZE) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute %d byte"
+ " control-read transfer (limit=%d)",
+ write_len,PVR2_CTL_BUFFSIZE);
+ return -EINVAL;
+ }
+ if ((!write_len) && (!read_len)) {
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "Attempted to execute null control transfer?");
+ return -EINVAL;
+ }
+
+
+ hdw->cmd_debug_state = 1;
+ if (write_len) {
+ hdw->cmd_debug_code = ((unsigned char *)write_data)[0];
+ } else {
+ hdw->cmd_debug_code = 0;
+ }
+ hdw->cmd_debug_write_len = write_len;
+ hdw->cmd_debug_read_len = read_len;
+
+ /* Initialize common stuff */
+ init_completion(&hdw->ctl_done);
+ hdw->ctl_timeout_flag = 0;
+ hdw->ctl_write_pend_flag = 0;
+ hdw->ctl_read_pend_flag = 0;
+ init_timer(&timer);
+ timer.expires = jiffies + timeout;
+ timer.data = (unsigned long)hdw;
+ timer.function = pvr2_ctl_timeout;
+
+ if (write_len) {
+ hdw->cmd_debug_state = 2;
+ /* Transfer write data to internal buffer */
+ for (idx = 0; idx < write_len; idx++) {
+ hdw->ctl_write_buffer[idx] =
+ ((unsigned char *)write_data)[idx];
+ }
+ /* Initiate a write request */
+ usb_fill_bulk_urb(hdw->ctl_write_urb,
+ hdw->usb_dev,
+ usb_sndbulkpipe(hdw->usb_dev,
+ PVR2_CTL_WRITE_ENDPOINT),
+ hdw->ctl_write_buffer,
+ write_len,
+ pvr2_ctl_write_complete,
+ hdw);
+ hdw->ctl_write_urb->actual_length = 0;
+ hdw->ctl_write_pend_flag = !0;
+ status = usb_submit_urb(hdw->ctl_write_urb,GFP_KERNEL);
+ if (status < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to submit write-control"
+ " URB status=%d",status);
+ hdw->ctl_write_pend_flag = 0;
+ goto done;
+ }
+ }
+
+ if (read_len) {
+ hdw->cmd_debug_state = 3;
+ memset(hdw->ctl_read_buffer,0x43,read_len);
+ /* Initiate a read request */
+ usb_fill_bulk_urb(hdw->ctl_read_urb,
+ hdw->usb_dev,
+ usb_rcvbulkpipe(hdw->usb_dev,
+ PVR2_CTL_READ_ENDPOINT),
+ hdw->ctl_read_buffer,
+ read_len,
+ pvr2_ctl_read_complete,
+ hdw);
+ hdw->ctl_read_urb->actual_length = 0;
+ hdw->ctl_read_pend_flag = !0;
+ status = usb_submit_urb(hdw->ctl_read_urb,GFP_KERNEL);
+ if (status < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to submit read-control"
+ " URB status=%d",status);
+ hdw->ctl_read_pend_flag = 0;
+ goto done;
+ }
+ }
+
+ /* Start timer */
+ add_timer(&timer);
+
+ /* Now wait for all I/O to complete */
+ hdw->cmd_debug_state = 4;
+ while (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) {
+ wait_for_completion(&hdw->ctl_done);
+ }
+ hdw->cmd_debug_state = 5;
+
+ /* Stop timer */
+ del_timer_sync(&timer);
+
+ hdw->cmd_debug_state = 6;
+ status = 0;
+
+ if (hdw->ctl_timeout_flag) {
+ status = -ETIMEDOUT;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Timed out control-write");
+ }
+ goto done;
+ }
+
+ if (write_len) {
+ /* Validate results of write request */
+ if ((hdw->ctl_write_urb->status != 0) &&
+ (hdw->ctl_write_urb->status != -ENOENT) &&
+ (hdw->ctl_write_urb->status != -ESHUTDOWN) &&
+ (hdw->ctl_write_urb->status != -ECONNRESET)) {
+ /* USB subsystem is reporting some kind of failure
+ on the write */
+ status = hdw->ctl_write_urb->status;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-write URB failure,"
+ " status=%d",
+ status);
+ }
+ goto done;
+ }
+ if (hdw->ctl_write_urb->actual_length < write_len) {
+ /* Failed to write enough data */
+ status = -EIO;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-write URB short,"
+ " expected=%d got=%d",
+ write_len,
+ hdw->ctl_write_urb->actual_length);
+ }
+ goto done;
+ }
+ }
+ if (read_len) {
+ /* Validate results of read request */
+ if ((hdw->ctl_read_urb->status != 0) &&
+ (hdw->ctl_read_urb->status != -ENOENT) &&
+ (hdw->ctl_read_urb->status != -ESHUTDOWN) &&
+ (hdw->ctl_read_urb->status != -ECONNRESET)) {
+ /* USB subsystem is reporting some kind of failure
+ on the read */
+ status = hdw->ctl_read_urb->status;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-read URB failure,"
+ " status=%d",
+ status);
+ }
+ goto done;
+ }
+ if (hdw->ctl_read_urb->actual_length < read_len) {
+ /* Failed to read enough data */
+ status = -EIO;
+ if (!probe_fl) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "control-read URB short,"
+ " expected=%d got=%d",
+ read_len,
+ hdw->ctl_read_urb->actual_length);
+ }
+ goto done;
+ }
+ /* Transfer retrieved data out from internal buffer */
+ for (idx = 0; idx < read_len; idx++) {
+ ((unsigned char *)read_data)[idx] =
+ hdw->ctl_read_buffer[idx];
+ }
+ }
+
+ done:
+
+ hdw->cmd_debug_state = 0;
+ if ((status < 0) && (!probe_fl)) {
+ pvr2_hdw_render_useless_unlocked(hdw);
+ }
+ return status;
+}
+
+
+int pvr2_send_request(struct pvr2_hdw *hdw,
+ void *write_data,unsigned int write_len,
+ void *read_data,unsigned int read_len)
+{
+ return pvr2_send_request_ex(hdw,HZ*4,0,
+ write_data,write_len,
+ read_data,read_len);
+}
+
+int pvr2_write_register(struct pvr2_hdw *hdw, u16 reg, u32 data)
+{
+ int ret;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ hdw->cmd_buffer[0] = 0x04; /* write register prefix */
+ PVR2_DECOMPOSE_LE(hdw->cmd_buffer,1,data);
+ hdw->cmd_buffer[5] = 0;
+ hdw->cmd_buffer[6] = (reg >> 8) & 0xff;
+ hdw->cmd_buffer[7] = reg & 0xff;
+
+
+ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 0);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+int pvr2_read_register(struct pvr2_hdw *hdw, u16 reg, u32 *data)
+{
+ int ret = 0;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ hdw->cmd_buffer[0] = 0x05; /* read register prefix */
+ hdw->cmd_buffer[1] = 0;
+ hdw->cmd_buffer[2] = 0;
+ hdw->cmd_buffer[3] = 0;
+ hdw->cmd_buffer[4] = 0;
+ hdw->cmd_buffer[5] = 0;
+ hdw->cmd_buffer[6] = (reg >> 8) & 0xff;
+ hdw->cmd_buffer[7] = reg & 0xff;
+
+ ret |= pvr2_send_request(hdw, hdw->cmd_buffer, 8, hdw->cmd_buffer, 4);
+ *data = PVR2_COMPOSE_LE(hdw->cmd_buffer,0);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+int pvr2_write_u16(struct pvr2_hdw *hdw, u16 data, int res)
+{
+ int ret;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ hdw->cmd_buffer[0] = (data >> 8) & 0xff;
+ hdw->cmd_buffer[1] = data & 0xff;
+
+ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 2, hdw->cmd_buffer, res);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+int pvr2_write_u8(struct pvr2_hdw *hdw, u8 data, int res)
+{
+ int ret;
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ hdw->cmd_buffer[0] = data;
+
+ ret = pvr2_send_request(hdw, hdw->cmd_buffer, 1, hdw->cmd_buffer, res);
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+
+void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *hdw)
+{
+ if (!hdw->flag_ok) return;
+ pvr2_trace(PVR2_TRACE_INIT,"render_useless");
+ hdw->flag_ok = 0;
+ if (hdw->vid_stream) {
+ pvr2_stream_setup(hdw->vid_stream,0,0,0);
+ }
+ hdw->flag_streaming_enabled = 0;
+ hdw->subsys_enabled_mask = 0;
+}
+
+
+void pvr2_hdw_render_useless(struct pvr2_hdw *hdw)
+{
+ LOCK_TAKE(hdw->ctl_lock);
+ pvr2_hdw_render_useless_unlocked(hdw);
+ LOCK_GIVE(hdw->ctl_lock);
+}
+
+
+void pvr2_hdw_device_reset(struct pvr2_hdw *hdw)
+{
+ int ret;
+ pvr2_trace(PVR2_TRACE_INIT,"Performing a device reset...");
+ ret = usb_lock_device_for_reset(hdw->usb_dev,0);
+ if (ret == 1) {
+ ret = usb_reset_device(hdw->usb_dev);
+ usb_unlock_device(hdw->usb_dev);
+ } else {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to lock USB device ret=%d",ret);
+ }
+ if (init_pause_msec) {
+ pvr2_trace(PVR2_TRACE_INFO,
+ "Waiting %u msec for hardware to settle",
+ init_pause_msec);
+ msleep(init_pause_msec);
+ }
+
+}
+
+
+void pvr2_hdw_cpureset_assert(struct pvr2_hdw *hdw,int val)
+{
+ char da[1];
+ unsigned int pipe;
+ int ret;
+
+ if (!hdw->usb_dev) return;
+
+ pvr2_trace(PVR2_TRACE_INIT,"cpureset_assert(%d)",val);
+
+ da[0] = val ? 0x01 : 0x00;
+
+ /* Write the CPUCS register on the 8051. The lsb of the register
+ is the reset bit; a 1 asserts reset while a 0 clears it. */
+ pipe = usb_sndctrlpipe(hdw->usb_dev, 0);
+ ret = usb_control_msg(hdw->usb_dev,pipe,0xa0,0x40,0xe600,0,da,1,HZ);
+ if (ret < 0) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "cpureset_assert(%d) error=%d",val,ret);
+ pvr2_hdw_render_useless(hdw);
+ }
+}
+
+
+int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *hdw)
+{
+ int status;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ pvr2_trace(PVR2_TRACE_INIT,"Requesting uproc hard reset");
+ hdw->flag_ok = !0;
+ hdw->cmd_buffer[0] = 0xdd;
+ status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
+ } while (0); LOCK_GIVE(hdw->ctl_lock);
+ return status;
+}
+
+
+int pvr2_hdw_cmd_powerup(struct pvr2_hdw *hdw)
+{
+ int status;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ pvr2_trace(PVR2_TRACE_INIT,"Requesting powerup");
+ hdw->cmd_buffer[0] = 0xde;
+ status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
+ } while (0); LOCK_GIVE(hdw->ctl_lock);
+ return status;
+}
+
+
+int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *hdw)
+{
+ if (!hdw->decoder_ctrl) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Unable to reset decoder: nothing attached");
+ return -ENOTTY;
+ }
+
+ if (!hdw->decoder_ctrl->force_reset) {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Unable to reset decoder: not implemented");
+ return -ENOTTY;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,
+ "Requesting decoder reset");
+ hdw->decoder_ctrl->force_reset(hdw->decoder_ctrl->ctxt);
+ return 0;
+}
+
+
+int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl)
+{
+ int status;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = (runFl ? 0x36 : 0x37);
+ status = pvr2_send_request(hdw,hdw->cmd_buffer,1,0,0);
+ } while (0); LOCK_GIVE(hdw->ctl_lock);
+ if (!status) {
+ hdw->subsys_enabled_mask =
+ ((hdw->subsys_enabled_mask &
+ ~(1<<PVR2_SUBSYS_B_USBSTREAM_RUN)) |
+ (runFl ? (1<<PVR2_SUBSYS_B_USBSTREAM_RUN) : 0));
+ }
+ return status;
+}
+
+
+void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw,
+ struct pvr2_hdw_debug_info *ptr)
+{
+ ptr->big_lock_held = hdw->big_lock_held;
+ ptr->ctl_lock_held = hdw->ctl_lock_held;
+ ptr->flag_ok = hdw->flag_ok;
+ ptr->flag_disconnected = hdw->flag_disconnected;
+ ptr->flag_init_ok = hdw->flag_init_ok;
+ ptr->flag_streaming_enabled = hdw->flag_streaming_enabled;
+ ptr->subsys_flags = hdw->subsys_enabled_mask;
+ ptr->cmd_debug_state = hdw->cmd_debug_state;
+ ptr->cmd_code = hdw->cmd_debug_code;
+ ptr->cmd_debug_write_len = hdw->cmd_debug_write_len;
+ ptr->cmd_debug_read_len = hdw->cmd_debug_read_len;
+ ptr->cmd_debug_timeout = hdw->ctl_timeout_flag;
+ ptr->cmd_debug_write_pend = hdw->ctl_write_pend_flag;
+ ptr->cmd_debug_read_pend = hdw->ctl_read_pend_flag;
+ ptr->cmd_debug_rstatus = hdw->ctl_read_urb->status;
+ ptr->cmd_debug_wstatus = hdw->ctl_write_urb->status;
+}
+
+
+int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *dp)
+{
+ return pvr2_read_register(hdw,PVR2_GPIO_DIR,dp);
+}
+
+
+int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *dp)
+{
+ return pvr2_read_register(hdw,PVR2_GPIO_OUT,dp);
+}
+
+
+int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *dp)
+{
+ return pvr2_read_register(hdw,PVR2_GPIO_IN,dp);
+}
+
+
+int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val)
+{
+ u32 cval,nval;
+ int ret;
+ if (~msk) {
+ ret = pvr2_read_register(hdw,PVR2_GPIO_DIR,&cval);
+ if (ret) return ret;
+ nval = (cval & ~msk) | (val & msk);
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO direction changing 0x%x:0x%x"
+ " from 0x%x to 0x%x",
+ msk,val,cval,nval);
+ } else {
+ nval = val;
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO direction changing to 0x%x",nval);
+ }
+ return pvr2_write_register(hdw,PVR2_GPIO_DIR,nval);
+}
+
+
+int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val)
+{
+ u32 cval,nval;
+ int ret;
+ if (~msk) {
+ ret = pvr2_read_register(hdw,PVR2_GPIO_OUT,&cval);
+ if (ret) return ret;
+ nval = (cval & ~msk) | (val & msk);
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO output changing 0x%x:0x%x from 0x%x to 0x%x",
+ msk,val,cval,nval);
+ } else {
+ nval = val;
+ pvr2_trace(PVR2_TRACE_GPIO,
+ "GPIO output changing to 0x%x",nval);
+ }
+ return pvr2_write_register(hdw,PVR2_GPIO_OUT,nval);
+}
+
+
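+/*
+ * Illustrative sketch only (not used by the driver): driving a single
+ * GPIO bit with the msk/val convention used above - msk selects which
+ * bit(s) change, val supplies the new value for those bits.  The helper
+ * name is hypothetical, and it assumes that a 1 in the direction
+ * register configures the corresponding pin as an output.
+ */
+static int pvr2_hdw_gpio_set_bit_example(struct pvr2_hdw *hdw,
+ unsigned int bit,int state)
+{
+ u32 msk = ((u32)1) << bit;
+ int ret;
+ ret = pvr2_hdw_gpio_chg_dir(hdw,msk,msk); /* assumed: make it an output */
+ if (ret) return ret;
+ return pvr2_hdw_gpio_chg_out(hdw,msk,(state ? msk : 0));
+}
+
+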
+int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw)
+{
+ int result;
+ LOCK_TAKE(hdw->ctl_lock); do {
+ hdw->cmd_buffer[0] = 0xeb;
+ result = pvr2_send_request(hdw,
+ hdw->cmd_buffer,1,
+ hdw->cmd_buffer,1);
+ if (result < 0) break;
+ result = hdw->cmd_buffer[0];
+ } while(0); LOCK_GIVE(hdw->ctl_lock);
+ return result;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.h b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
new file mode 100644
index 00000000000000..63f529154431d5
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.h
@@ -0,0 +1,335 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_HDW_H
+#define __PVRUSB2_HDW_H
+
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+#include "pvrusb2-io.h"
+#include "pvrusb2-ctrl.h"
+
+
+/* Private internal control ids, look these up with
+ pvr2_hdw_get_ctrl_by_id() - these are NOT visible in V4L */
+#define PVR2_CID_STDENUM 1
+#define PVR2_CID_STDCUR 2
+#define PVR2_CID_STDAVAIL 3
+#define PVR2_CID_INPUT 4
+#define PVR2_CID_AUDIOMODE 5
+#define PVR2_CID_FREQUENCY 6
+#define PVR2_CID_HRES 7
+#define PVR2_CID_VRES 8
+
+/* Legal values for the INPUT state variable */
+#define PVR2_CVAL_INPUT_TV 0
+#define PVR2_CVAL_INPUT_SVIDEO 1
+#define PVR2_CVAL_INPUT_COMPOSITE 2
+#define PVR2_CVAL_INPUT_RADIO 3
+
+/* Values that pvr2_hdw_get_signal_status() returns */
+#define PVR2_SIGNAL_OK 0x0001
+#define PVR2_SIGNAL_STEREO 0x0002
+#define PVR2_SIGNAL_SAP 0x0004
+
+
+/* Subsystem definitions - these are various pieces that can be
+ independently stopped / started. Usually you don't want to mess with
+ this directly (let the driver handle things itself), but it is useful
+ for debugging. */
+#define PVR2_SUBSYS_B_ENC_FIRMWARE 0
+#define PVR2_SUBSYS_B_ENC_CFG 1
+#define PVR2_SUBSYS_B_DIGITIZER_RUN 2
+#define PVR2_SUBSYS_B_USBSTREAM_RUN 3
+#define PVR2_SUBSYS_B_ENC_RUN 4
+
+#define PVR2_SUBSYS_CFG_ALL ( \
+ (1 << PVR2_SUBSYS_B_ENC_FIRMWARE) | \
+ (1 << PVR2_SUBSYS_B_ENC_CFG) )
+#define PVR2_SUBSYS_RUN_ALL ( \
+ (1 << PVR2_SUBSYS_B_DIGITIZER_RUN) | \
+ (1 << PVR2_SUBSYS_B_USBSTREAM_RUN) | \
+ (1 << PVR2_SUBSYS_B_ENC_RUN) )
+#define PVR2_SUBSYS_ALL ( \
+ PVR2_SUBSYS_CFG_ALL | \
+ PVR2_SUBSYS_RUN_ALL )
+
+enum pvr2_config {
+ pvr2_config_empty,
+ pvr2_config_mpeg,
+ pvr2_config_vbi,
+ pvr2_config_radio,
+};
+
+const char *pvr2_config_get_name(enum pvr2_config);
+
+struct pvr2_hdw;
+
+/* Create and return a structure for interacting with the underlying
+ hardware */
+struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
+ const struct usb_device_id *devid);
+
+/* Poll for background activity (if any) */
+void pvr2_hdw_poll(struct pvr2_hdw *);
+
+/* Trigger a poll to take place later at a convenient time */
+void pvr2_hdw_poll_trigger(struct pvr2_hdw *);
+void pvr2_hdw_poll_trigger_unlocked(struct pvr2_hdw *);
+
+/* Register a callback used to trigger a future poll */
+void pvr2_hdw_setup_poll_trigger(struct pvr2_hdw *,
+ void (*func)(void *),
+ void *data);
+
+/* Get pointer to structure given unit number */
+struct pvr2_hdw *pvr2_hdw_find(int unit_number);
+
+/* Destroy hardware interaction structure */
+void pvr2_hdw_destroy(struct pvr2_hdw *);
+
+/* Set up the structure and attempt to put the device into a usable state.
+ This can be a time-consuming operation, which is why it is not done
+ internally as part of the create() step. Return value is exactly the
+ same as pvr2_hdw_init_ok(). */
+int pvr2_hdw_setup(struct pvr2_hdw *);
+
+/* Return true if initialization succeeded */
+int pvr2_hdw_init_ok(struct pvr2_hdw *);
+
+/* Return true if in the ready (normal) state */
+int pvr2_hdw_dev_ok(struct pvr2_hdw *);
+
+/* Return a small integer [1..N] giving the logical instance number of this
+ device. This is useful for indexing array-valued module parameters. */
+int pvr2_hdw_get_unit_number(struct pvr2_hdw *);
+
+/* Get pointer to underlying USB device */
+struct usb_device *pvr2_hdw_get_dev(struct pvr2_hdw *);
+
+/* Retrieve serial number of device */
+unsigned long pvr2_hdw_get_sn(struct pvr2_hdw *);
+
+/* Called when hardware has been unplugged */
+void pvr2_hdw_disconnect(struct pvr2_hdw *);
+
+/* Get the number of defined controls */
+unsigned int pvr2_hdw_get_ctrl_count(struct pvr2_hdw *);
+
+/* Retrieve a control handle given its index (0..count-1) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_index(struct pvr2_hdw *,unsigned int);
+
+/* Retrieve a control handle given its internal ID (if any) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_by_id(struct pvr2_hdw *,unsigned int);
+
+/* Retrieve a control handle given its V4L ID (if any) */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_v4l(struct pvr2_hdw *,unsigned int ctl_id);
+
+/* Retrieve the control (if any) whose V4L ID immediately follows the given
+ V4L ID */
+struct pvr2_ctrl *pvr2_hdw_get_ctrl_nextv4l(struct pvr2_hdw *,
+ unsigned int ctl_id);
+
+/* Commit all control changes made up to this point */
+int pvr2_hdw_commit_ctl(struct pvr2_hdw *);
+
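+/* Illustrative sketch only (not part of the interface): controls are
+ looked up by ID, altered, and then made effective as a group with
+ pvr2_hdw_commit_ctl().  This assumes pvr2_ctrl_set_value() from
+ pvrusb2-ctrl.h; the helper name is hypothetical. */
+static inline int pvr2_hdw_select_svideo_example(struct pvr2_hdw *hdw)
+{
+ struct pvr2_ctrl *cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+ int ret;
+ if (!cptr) return -EINVAL;
+ ret = pvr2_ctrl_set_value(cptr,PVR2_CVAL_INPUT_SVIDEO);
+ if (ret) return ret;
+ return pvr2_hdw_commit_ctl(hdw);
+}
+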
+/* Return name for this driver instance */
+const char *pvr2_hdw_get_driver_name(struct pvr2_hdw *);
+
+/* Return PVR2_SIGNAL_XXXX bit mask indicating signal status */
+unsigned int pvr2_hdw_get_signal_status(struct pvr2_hdw *);
+
+/* Query device and see if it thinks it is on a high-speed USB link */
+int pvr2_hdw_is_hsm(struct pvr2_hdw *);
+
+/* Turn streaming on/off */
+int pvr2_hdw_set_streaming(struct pvr2_hdw *,int);
+
+/* Find out if streaming is on */
+int pvr2_hdw_get_streaming(struct pvr2_hdw *);
+
+/* Configure the type of stream to generate */
+int pvr2_hdw_set_stream_type(struct pvr2_hdw *, enum pvr2_config);
+
+/* Get handle to video output stream */
+struct pvr2_stream *pvr2_hdw_get_video_stream(struct pvr2_hdw *);
+
+/* Emit a video standard struct */
+int pvr2_hdw_get_stdenum_value(struct pvr2_hdw *hdw,struct v4l2_standard *std,
+ unsigned int idx);
+
+/* Enable / disable various pieces of hardware. Items to change are
+ identified by bit positions within msk, and new state for each item is
+ identified by corresponding bit positions within val. */
+void pvr2_hdw_subsys_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val);
+
+/* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,msk) */
+void pvr2_hdw_subsys_bit_set(struct pvr2_hdw *hdw,unsigned long msk);
+
+/* Shortcut for pvr2_hdw_subsys_bit_chg(hdw,msk,0) */
+void pvr2_hdw_subsys_bit_clr(struct pvr2_hdw *hdw,unsigned long msk);
+
+/* Retrieve mask indicating which pieces of hardware are currently enabled
+ / configured. */
+unsigned long pvr2_hdw_subsys_get(struct pvr2_hdw *);
+
+/* Adjust the mask of what gets shut down when streaming is stopped. This
+ is a debugging aid. */
+void pvr2_hdw_subsys_stream_bit_chg(struct pvr2_hdw *hdw,
+ unsigned long msk,unsigned long val);
+
+/* Retrieve mask indicating which pieces of hardware are disabled when
+ streaming is turned off. */
+unsigned long pvr2_hdw_subsys_stream_get(struct pvr2_hdw *);
+
+
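+/* Illustrative sketch only (not part of the interface): the msk/val
+ convention means "change only the bits set in msk, giving them the
+ values in val".  For example, to stop just the USB stream transport
+ while leaving the other subsystems alone (debugging use only): */
+static inline void pvr2_hdw_subsys_example_stop_usbstream(struct pvr2_hdw *hdw)
+{
+ pvr2_hdw_subsys_bit_chg(hdw,
+ 1 << PVR2_SUBSYS_B_USBSTREAM_RUN, /* bit to change */
+ 0); /* new value for that bit: off */
+}
+
+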
+/* Enable / disable retrieval of CPU firmware. This must be enabled before
+ pvr2_hdw_cpufw_get() will function. Note that doing this may prevent
+ the device from running (and leaving this mode may imply a device
+ reset). */
+void pvr2_hdw_cpufw_set_enabled(struct pvr2_hdw *, int enable_flag);
+
+/* Return true if we're in a mode where the CPU firmware can be retrieved */
+int pvr2_hdw_cpufw_get_enabled(struct pvr2_hdw *);
+
+/* Retrieve a piece of the CPU's firmware at the given offset. Return
+ value is the number of bytes retrieved, zero if we're past the end, or
+ a negative error code otherwise (e.g. if firmware retrieval is not
+ enabled). */
+int pvr2_hdw_cpufw_get(struct pvr2_hdw *,unsigned int offs,
+ char *buf,unsigned int cnt);
+
+/* Retrieve previously stored v4l minor device number */
+int pvr2_hdw_v4l_get_minor_number(struct pvr2_hdw *);
+
+/* Store the v4l minor device number */
+void pvr2_hdw_v4l_store_minor_number(struct pvr2_hdw *,int);
+
+
+/* The following entry points are all lower level things you normally don't
+ want to worry about. */
+
+/* Attempt to recover from a USB foul-up (in practice I find that if you
+ have to do this, then it's already too late). */
+void pvr2_reset_ctl_endpoints(struct pvr2_hdw *hdw);
+
+/* Issue a command and get a response from the device. LOTS of higher
+ level stuff is built on this. The caller must already hold the
+ hardware's control lock. */
+int pvr2_send_request(struct pvr2_hdw *,
+ void *write_ptr,unsigned int write_len,
+ void *read_ptr,unsigned int read_len);
+
+/* Issue a command and get a response from the device. This extended
+ version includes a probe flag (which if set means that device errors
+ should not be logged or treated as fatal) and a timeout in jiffies.
+ This can be used to non-lethally probe the health of endpoint 1. */
+int pvr2_send_request_ex(struct pvr2_hdw *,unsigned int timeout,int probe_fl,
+ void *write_ptr,unsigned int write_len,
+ void *read_ptr,unsigned int read_len);
+
+/* Slightly higher level device communication functions. */
+int pvr2_write_register(struct pvr2_hdw *, u16, u32);
+int pvr2_read_register(struct pvr2_hdw *, u16, u32 *);
+int pvr2_write_u16(struct pvr2_hdw *, u16, int);
+int pvr2_write_u8(struct pvr2_hdw *, u8, int);
+
+/* Call if for any reason we can't talk to the hardware anymore - this will
+ cause the driver to stop flailing on the device. */
+void pvr2_hdw_render_useless(struct pvr2_hdw *);
+void pvr2_hdw_render_useless_unlocked(struct pvr2_hdw *);
+
+/* Set / clear 8051's reset bit */
+void pvr2_hdw_cpureset_assert(struct pvr2_hdw *,int);
+
+/* Execute a USB-commanded device reset */
+void pvr2_hdw_device_reset(struct pvr2_hdw *);
+
+/* Execute hard reset command (after this point it's likely that the
+ encoder will have to be reconfigured). This also clears the "useless"
+ state. */
+int pvr2_hdw_cmd_deep_reset(struct pvr2_hdw *);
+
+/* Execute simple reset command */
+int pvr2_hdw_cmd_powerup(struct pvr2_hdw *);
+
+/* Order decoder to reset */
+int pvr2_hdw_cmd_decoder_reset(struct pvr2_hdw *);
+
+/* Stop / start video stream transport */
+int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl);
+
+/* Find I2C address of eeprom */
+int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *);
+
+/* Direct manipulation of GPIO bits */
+int pvr2_hdw_gpio_get_dir(struct pvr2_hdw *hdw,u32 *);
+int pvr2_hdw_gpio_get_out(struct pvr2_hdw *hdw,u32 *);
+int pvr2_hdw_gpio_get_in(struct pvr2_hdw *hdw,u32 *);
+int pvr2_hdw_gpio_chg_dir(struct pvr2_hdw *hdw,u32 msk,u32 val);
+int pvr2_hdw_gpio_chg_out(struct pvr2_hdw *hdw,u32 msk,u32 val);
+
+/* This data structure is specifically for the next function... */
+struct pvr2_hdw_debug_info {
+ int big_lock_held;
+ int ctl_lock_held;
+ int flag_ok;
+ int flag_disconnected;
+ int flag_init_ok;
+ int flag_streaming_enabled;
+ unsigned long subsys_flags;
+ int cmd_debug_state;
+ int cmd_debug_write_len;
+ int cmd_debug_read_len;
+ int cmd_debug_write_pend;
+ int cmd_debug_read_pend;
+ int cmd_debug_timeout;
+ int cmd_debug_rstatus;
+ int cmd_debug_wstatus;
+ unsigned char cmd_code;
+};
+
+/* Non-intrusively retrieve internal state info - this is useful for
+ diagnosing lockups. Note that this operation is completed without any
+ kind of locking and so it is not atomic and may yield inconsistent
+ results. This is *purely* a debugging aid. */
+void pvr2_hdw_get_debug_info(const struct pvr2_hdw *hdw,
+ struct pvr2_hdw_debug_info *);
+
+/* Cause modules to log their state once */
+void pvr2_hdw_trigger_module_log(struct pvr2_hdw *hdw);
+
+/* Cause encoder firmware to be uploaded into the device. This is normally
+ done autonomously, but the interface is exported here because it is also
+ a debugging aid. */
+int pvr2_upload_firmware2(struct pvr2_hdw *hdw);
+
+/* List of device types that we can match */
+extern struct usb_device_id pvr2_device_table[];
+
+#endif /* __PVRUSB2_HDW_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c
new file mode 100644
index 00000000000000..1dd4f6249b9950
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-chips-v4l2.c
@@ -0,0 +1,115 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-i2c-core.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+#include "pvrusb2-audio.h"
+#include "pvrusb2-tuner.h"
+#include "pvrusb2-demod.h"
+#include "pvrusb2-video-v4l.h"
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+#include "pvrusb2-cx2584x-v4l.h"
+#include "pvrusb2-wm8775.h"
+#endif
+
+#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
+
+#define OP_STANDARD 0
+#define OP_BCSH 1
+#define OP_VOLUME 2
+#define OP_FREQ 3
+#define OP_AUDIORATE 4
+#define OP_SIZE 5
+#define OP_LOG 6
+
+static const struct pvr2_i2c_op * const ops[] = {
+ [OP_STANDARD] = &pvr2_i2c_op_v4l2_standard,
+ [OP_BCSH] = &pvr2_i2c_op_v4l2_bcsh,
+ [OP_VOLUME] = &pvr2_i2c_op_v4l2_volume,
+ [OP_FREQ] = &pvr2_i2c_op_v4l2_frequency,
+ [OP_SIZE] = &pvr2_i2c_op_v4l2_size,
+ [OP_LOG] = &pvr2_i2c_op_v4l2_log,
+};
+
+void pvr2_i2c_probe(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ int id;
+ id = cp->client->driver->id;
+ cp->ctl_mask = ((1 << OP_STANDARD) |
+ (1 << OP_BCSH) |
+ (1 << OP_VOLUME) |
+ (1 << OP_FREQ) |
+ (1 << OP_SIZE) |
+ (1 << OP_LOG));
+
+ if (id == I2C_DRIVERID_MSP3400) {
+ if (pvr2_i2c_msp3400_setup(hdw,cp)) {
+ return;
+ }
+ }
+ if (id == I2C_DRIVERID_TUNER) {
+ if (pvr2_i2c_tuner_setup(hdw,cp)) {
+ return;
+ }
+ }
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ if (id == I2C_DRIVERID_CX25840) {
+ if (pvr2_i2c_cx2584x_v4l_setup(hdw,cp)) {
+ return;
+ }
+ }
+ if (id == I2C_DRIVERID_WM8775) {
+ if (pvr2_i2c_wm8775_setup(hdw,cp)) {
+ return;
+ }
+ }
+#endif
+ if (id == I2C_DRIVERID_SAA711X) {
+ if (pvr2_i2c_decoder_v4l_setup(hdw,cp)) {
+ return;
+ }
+ }
+ if (id == I2C_DRIVERID_TDA9887) {
+ if (pvr2_i2c_demod_setup(hdw,cp)) {
+ return;
+ }
+ }
+}
+
+
+const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx)
+{
+ if (idx >= sizeof(ops)/sizeof(ops[0])) return 0;
+ return ops[idx];
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c
new file mode 100644
index 00000000000000..9f81aff2b38ac9
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.c
@@ -0,0 +1,232 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-i2c-cmd-v4l2.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+
+
+static void set_standard(struct pvr2_hdw *hdw)
+{
+ v4l2_std_id vs;
+ vs = hdw->std_mask_cur;
+ pvr2_trace(PVR2_TRACE_CHIPS,
+ "i2c v4l2 set_standard(0x%llx)",(__u64)vs);
+
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_STD,&vs);
+}
+
+
+static int check_standard(struct pvr2_hdw *hdw)
+{
+ return hdw->std_dirty != 0;
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard = {
+ .check = check_standard,
+ .update = set_standard,
+ .name = "v4l2_standard",
+};
+
+
+static void set_bcsh(struct pvr2_hdw *hdw)
+{
+ struct v4l2_control ctrl;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_bcsh"
+ " b=%d c=%d s=%d h=%d",
+ hdw->brightness_val,hdw->contrast_val,
+ hdw->saturation_val,hdw->hue_val);
+ memset(&ctrl,0,sizeof(ctrl));
+ ctrl.id = V4L2_CID_BRIGHTNESS;
+ ctrl.value = hdw->brightness_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_CONTRAST;
+ ctrl.value = hdw->contrast_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_SATURATION;
+ ctrl.value = hdw->saturation_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_HUE;
+ ctrl.value = hdw->hue_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+}
+
+
+static int check_bcsh(struct pvr2_hdw *hdw)
+{
+ return (hdw->brightness_dirty ||
+ hdw->contrast_dirty ||
+ hdw->saturation_dirty ||
+ hdw->hue_dirty);
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh = {
+ .check = check_bcsh,
+ .update = set_bcsh,
+ .name = "v4l2_bcsh",
+};
+
+
+static void set_volume(struct pvr2_hdw *hdw)
+{
+ struct v4l2_control ctrl;
+ pvr2_trace(PVR2_TRACE_CHIPS,
+ "i2c v4l2 set_volume"
+ "(vol=%d bal=%d bas=%d treb=%d mute=%d)",
+ hdw->volume_val,
+ hdw->balance_val,
+ hdw->bass_val,
+ hdw->treble_val,
+ hdw->mute_val);
+ memset(&ctrl,0,sizeof(ctrl));
+ ctrl.id = V4L2_CID_AUDIO_MUTE;
+ ctrl.value = hdw->mute_val ? 1 : 0;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_VOLUME;
+ ctrl.value = hdw->volume_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_BALANCE;
+ ctrl.value = hdw->balance_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_BASS;
+ ctrl.value = hdw->bass_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+ ctrl.id = V4L2_CID_AUDIO_TREBLE;
+ ctrl.value = hdw->treble_val;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_CTRL,&ctrl);
+}
+
+
+static int check_volume(struct pvr2_hdw *hdw)
+{
+ return (hdw->volume_dirty ||
+ hdw->balance_dirty ||
+ hdw->bass_dirty ||
+ hdw->treble_dirty ||
+ hdw->mute_dirty);
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume = {
+ .check = check_volume,
+ .update = set_volume,
+ .name = "v4l2_volume",
+};
+
+
+static void set_frequency(struct pvr2_hdw *hdw)
+{
+ unsigned long fv;
+ struct v4l2_frequency freq;
+ fv = hdw->freqVal;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_freq(%lu)",fv);
+ memset(&freq,0,sizeof(freq));
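+ /* The V4L2 VIDIOC_S_FREQUENCY ioctl expects analog TV tuner
+ frequencies in units of 62.5 kHz, hence the conversion from Hz. */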
+ freq.frequency = fv / 62500;
+ freq.tuner = 0;
+ freq.type = V4L2_TUNER_ANALOG_TV;
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_FREQUENCY,&freq);
+}
+
+
+static int check_frequency(struct pvr2_hdw *hdw)
+{
+ return hdw->freqDirty != 0;
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency = {
+ .check = check_frequency,
+ .update = set_frequency,
+ .name = "v4l2_freq",
+};
+
+
+static void set_size(struct pvr2_hdw *hdw)
+{
+ struct v4l2_format fmt;
+
+ memset(&fmt,0,sizeof(fmt));
+
+ fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ fmt.fmt.pix.width = hdw->res_hor_val;
+ fmt.fmt.pix.height = hdw->res_ver_val;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_size(%dx%d)",
+ fmt.fmt.pix.width,fmt.fmt.pix.height);
+
+ pvr2_i2c_core_cmd(hdw,VIDIOC_S_FMT,&fmt);
+}
+
+
+static int check_size(struct pvr2_hdw *hdw)
+{
+ return (hdw->res_hor_dirty || hdw->res_ver_dirty);
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size = {
+ .check = check_size,
+ .update = set_size,
+ .name = "v4l2_size",
+};
+
+
+static void do_log(struct pvr2_hdw *hdw)
+{
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 do_log()");
+ pvr2_i2c_core_cmd(hdw,VIDIOC_LOG_STATUS,0);
+
+}
+
+
+static int check_log(struct pvr2_hdw *hdw)
+{
+ return hdw->log_requested != 0;
+}
+
+
+const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log = {
+ .check = check_log,
+ .update = do_log,
+ .name = "v4l2_log",
+};
+
+
+void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *cp,int fl)
+{
+ pvr2_i2c_client_cmd(cp,
+ (fl ? VIDIOC_STREAMON : VIDIOC_STREAMOFF),0);
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h
new file mode 100644
index 00000000000000..ecabddba1ec50a
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-cmd-v4l2.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_CMD_V4L2_H
+#define __PVRUSB2_CMD_V4L2_H
+
+#include "pvrusb2-i2c-core.h"
+
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_standard;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_bcsh;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_volume;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_frequency;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_size;
+extern const struct pvr2_i2c_op pvr2_i2c_op_v4l2_log;
+
+void pvr2_v4l2_cmd_stream(struct pvr2_i2c_client *,int);
+
+#endif /* __PVRUSB2_CMD_V4L2_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
new file mode 100644
index 00000000000000..c8d0bdee3ff195
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.c
@@ -0,0 +1,937 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-i2c-core.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+
+#define trace_i2c(...) pvr2_trace(PVR2_TRACE_I2C,__VA_ARGS__)
+
+/*
+
+ This module attempts to implement a compliant I2C adapter for the pvrusb2
+ device. By doing this we can then make use of existing functionality in
+ V4L (e.g. tuner.c) rather than rolling our own.
+
+*/
+
+static unsigned int i2c_scan = 0;
+module_param(i2c_scan, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(i2c_scan,"scan i2c bus at insmod time");
+
+static int pvr2_i2c_write(struct pvr2_hdw *hdw, /* Context */
+ u8 i2c_addr, /* I2C address we're talking to */
+ u8 *data, /* Data to write */
+ u16 length) /* Size of data to write */
+{
+ /* Return value - default 0 means success */
+ int ret;
+
+
+ if (!data) length = 0;
+ if (length > (sizeof(hdw->cmd_buffer) - 3)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Killing an I2C write to %u that is too large"
+ " (desired=%u limit=%u)",
+ i2c_addr,
+ length,(unsigned int)(sizeof(hdw->cmd_buffer) - 3));
+ return -ENOTSUPP;
+ }
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ /* Clear the command buffer (likely to be paranoia) */
+ memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));
+
+ /* Set up command buffer for an I2C write */
+ hdw->cmd_buffer[0] = 0x08; /* write prefix */
+ hdw->cmd_buffer[1] = i2c_addr; /* i2c addr of chip */
+ hdw->cmd_buffer[2] = length; /* length of what follows */
+ if (length) memcpy(hdw->cmd_buffer + 3, data, length);
+
+ /* Do the operation */
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,
+ length + 3,
+ hdw->cmd_buffer,
+ 1);
+ if (!ret) {
+ if (hdw->cmd_buffer[0] != 8) {
+ ret = -EIO;
+ if (hdw->cmd_buffer[0] != 7) {
+ trace_i2c("unexpected status"
+ " from i2_write[%d]: %d",
+ i2c_addr,hdw->cmd_buffer[0]);
+ }
+ }
+ }
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+static int pvr2_i2c_read(struct pvr2_hdw *hdw, /* Context */
+ u8 i2c_addr, /* I2C address we're talking to */
+ u8 *data, /* Data to write */
+ u16 dlen, /* Size of data to write */
+ u8 *res, /* Where to put data we read */
+ u16 rlen) /* Amount of data to read */
+{
+ /* Return value - default 0 means success */
+ int ret;
+
+
+ if (!data) dlen = 0;
+ if (dlen > (sizeof(hdw->cmd_buffer) - 4)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Killing an I2C read to %u that has wlen too large"
+ " (desired=%u limit=%u)",
+ i2c_addr,
+ dlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 4));
+ return -ENOTSUPP;
+ }
+ if (res && (rlen > (sizeof(hdw->cmd_buffer) - 1))) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Killing an I2C read to %u that has rlen too large"
+ " (desired=%u limit=%u)",
+ i2c_addr,
+ rlen,(unsigned int)(sizeof(hdw->cmd_buffer) - 1));
+ return -ENOTSUPP;
+ }
+
+ LOCK_TAKE(hdw->ctl_lock);
+
+ /* Clear the command buffer (likely to be paranoia) */
+ memset(hdw->cmd_buffer, 0, sizeof(hdw->cmd_buffer));
+
+ /* Set up command buffer for an I2C write followed by a read */
+ hdw->cmd_buffer[0] = 0x09; /* read prefix */
+ hdw->cmd_buffer[1] = dlen; /* arg length */
+ hdw->cmd_buffer[2] = rlen; /* answer length. Device will send one
+ more byte (status). */
+ hdw->cmd_buffer[3] = i2c_addr; /* i2c addr of chip */
+ if (dlen) memcpy(hdw->cmd_buffer + 4, data, dlen);
+
+ /* Do the operation */
+ ret = pvr2_send_request(hdw,
+ hdw->cmd_buffer,
+ 4 + dlen,
+ hdw->cmd_buffer,
+ rlen + 1);
+ if (!ret) {
+ if (hdw->cmd_buffer[0] != 8) {
+ ret = -EIO;
+ if (hdw->cmd_buffer[0] != 7) {
+ trace_i2c("unexpected status"
+ " from i2_read[%d]: %d",
+ i2c_addr,hdw->cmd_buffer[0]);
+ }
+ }
+ }
+
+ /* Copy back the result */
+ if (res && rlen) {
+ if (ret) {
+ /* Error, just blank out the return buffer */
+ memset(res, 0, rlen);
+ } else {
+ memcpy(res, hdw->cmd_buffer + 1, rlen);
+ }
+ }
+
+ LOCK_GIVE(hdw->ctl_lock);
+
+ return ret;
+}
+
+/* This is the common low level entry point for doing I2C operations to the
+ hardware. */
+int pvr2_i2c_basic_op(struct pvr2_hdw *hdw,
+ u8 i2c_addr,
+ u8 *wdata,
+ u16 wlen,
+ u8 *rdata,
+ u16 rlen)
+{
+ if (!rdata) rlen = 0;
+ if (!wdata) wlen = 0;
+ if (rlen || !wlen) {
+ return pvr2_i2c_read(hdw,i2c_addr,wdata,wlen,rdata,rlen);
+ } else {
+ return pvr2_i2c_write(hdw,i2c_addr,wdata,wlen);
+ }
+}
+
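+/* Illustrative sketch only (not used by the driver): a typical 8-bit
+ register read of an attached chip is a one byte write of the
+ sub-address followed by a one byte read, which maps onto a single call
+ to pvr2_i2c_basic_op().  The helper name is hypothetical. */
+static int pvr2_i2c_read_u8_example(struct pvr2_hdw *hdw,
+ u8 i2c_addr,u8 subaddr,u8 *valp)
+{
+ return pvr2_i2c_basic_op(hdw,i2c_addr,&subaddr,1,valp,1);
+}
+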
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+
+/* This is a special entry point that is entered if an I2C operation is
+ attempted to a wm8775 chip on model 24xxx hardware. Autodetect of this
+ part doesn't work, but we know it is really there. So let's look for
+ the autodetect attempt and just return success if we see that. */
+static int i2c_hack_wm8775(struct pvr2_hdw *hdw,
+ u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
+{
+ if (!(rlen || wlen)) {
+ // This is a probe attempt. Just let it succeed.
+ return 0;
+ }
+ return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
+}
+
+/* This is a special entry point that is entered if an I2C operation is
+ attempted to a cx25840 chip on model 24xxx hardware. This chip can
+ sometimes wedge itself. Worse still, when this happens msp3400 can
+ falsely detect this part and then the system gets hosed up after msp3400
+ gets confused and dies. What we want to do here is try to keep msp3400
+ away and also try to notice if the chip is wedged and send a warning to
+ the system log. */
+static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
+ u8 i2c_addr,u8 *wdata,u16 wlen,u8 *rdata,u16 rlen)
+{
+ int ret;
+ unsigned int subaddr;
+ u8 wbuf[2];
+ int state = hdw->i2c_cx25840_hack_state;
+
+ if (!(rlen || wlen)) {
+ // Probe attempt - always just succeed and don't bother the
+ // hardware (this helps to make the state machine further
+ // down somewhat easier).
+ return 0;
+ }
+
+ if (state == 3) {
+ return pvr2_i2c_basic_op(hdw,i2c_addr,wdata,wlen,rdata,rlen);
+ }
+
+ /* We're looking for the exact pattern where the revision register
+ is being read. The cx25840 module will always look at the
+ revision register first. Any other pattern of access therefore
+ has to be a probe attempt from somebody else so we'll reject it.
+ Normally we could just let each client just probe the part
+ anyway, but when the cx25840 is wedged, msp3400 will get a false
+ positive and that just screws things up... */
+
+ if (wlen == 0) {
+ switch (state) {
+ case 1: subaddr = 0x0100; break;
+ case 2: subaddr = 0x0101; break;
+ default: goto fail;
+ }
+ } else if (wlen == 2) {
+ subaddr = (wdata[0] << 8) | wdata[1];
+ switch (subaddr) {
+ case 0x0100: state = 1; break;
+ case 0x0101: state = 2; break;
+ default: goto fail;
+ }
+ } else {
+ goto fail;
+ }
+ if (!rlen) goto success;
+ state = 0;
+ if (rlen != 1) goto fail;
+
+ /* If we get to here then we have a legitimate read for one of the
+ two revision bytes, so pass it through. */
+ wbuf[0] = subaddr >> 8;
+ wbuf[1] = subaddr;
+ ret = pvr2_i2c_basic_op(hdw,i2c_addr,wbuf,2,rdata,rlen);
+
+ if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Detected a wedged cx25840 chip;"
+ " the device will not work.");
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Try power cycling the pvrusb2 device.");
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "WARNING: Disabling further access to the device"
+ " to prevent other foul-ups.");
+ // This blocks all further communication with the part.
+ hdw->i2c_func[0x44] = 0;
+ pvr2_hdw_render_useless(hdw);
+ goto fail;
+ }
+
+ /* Success! */
+ pvr2_trace(PVR2_TRACE_CHIPS,"cx25840 appears to be OK.");
+ state = 3;
+
+ success:
+ hdw->i2c_cx25840_hack_state = state;
+ return 0;
+
+ fail:
+ hdw->i2c_cx25840_hack_state = state;
+ return -EIO;
+}
+
+#endif /* CONFIG_VIDEO_PVRUSB2_24XXX */
+
+/* This is a very, very limited I2C adapter implementation. We can only
+ support what we actually know will work on the device... */
+static int pvr2_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg msgs[],
+ int num)
+{
+ int ret = -ENOTSUPP;
+ pvr2_i2c_func funcp = 0;
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)(i2c_adap->algo_data);
+
+ if (!num) {
+ ret = -EINVAL;
+ goto done;
+ }
+ if ((msgs[0].flags & I2C_M_NOSTART)) {
+ trace_i2c("i2c refusing I2C_M_NOSTART");
+ goto done;
+ }
+ if (msgs[0].addr < PVR2_I2C_FUNC_CNT) {
+ funcp = hdw->i2c_func[msgs[0].addr];
+ }
+ if (!funcp) {
+ ret = -EIO;
+ goto done;
+ }
+
+ if (num == 1) {
+ if (msgs[0].flags & I2C_M_RD) {
+ /* Simple read */
+ u16 tcnt,bcnt,offs;
+ if (!msgs[0].len) {
+ /* Length == 0 read. This is a probe. */
+ if (funcp(hdw,msgs[0].addr,0,0,0,0)) {
+ ret = -EIO;
+ goto done;
+ }
+ ret = 1;
+ goto done;
+ }
+ /* If the read is short enough we'll do the whole
+ thing atomically. Otherwise we have no choice
+ but to break apart the reads. */
+ tcnt = msgs[0].len;
+ offs = 0;
+ while (tcnt) {
+ bcnt = tcnt;
+ if (bcnt > sizeof(hdw->cmd_buffer)-1) {
+ bcnt = sizeof(hdw->cmd_buffer)-1;
+ }
+ if (funcp(hdw,msgs[0].addr,0,0,
+ msgs[0].buf+offs,bcnt)) {
+ ret = -EIO;
+ goto done;
+ }
+ offs += bcnt;
+ tcnt -= bcnt;
+ }
+ ret = 1;
+ goto done;
+ } else {
+ /* Simple write */
+ ret = 1;
+ if (funcp(hdw,msgs[0].addr,
+ msgs[0].buf,msgs[0].len,0,0)) {
+ ret = -EIO;
+ }
+ goto done;
+ }
+ } else if (num == 2) {
+ if (msgs[0].addr != msgs[1].addr) {
+ trace_i2c("i2c refusing 2 phase transfer with"
+ " conflicting target addresses");
+ ret = -ENOTSUPP;
+ goto done;
+ }
+ if ((!((msgs[0].flags & I2C_M_RD))) &&
+ (msgs[1].flags & I2C_M_RD)) {
+ u16 tcnt,bcnt,wcnt,offs;
+ /* Write followed by atomic read. If the read
+ portion is short enough we'll do the whole thing
+ atomically. Otherwise we have no choice but to
+ break apart the reads. */
+ tcnt = msgs[1].len;
+ wcnt = msgs[0].len;
+ offs = 0;
+ while (tcnt || wcnt) {
+ bcnt = tcnt;
+ if (bcnt > sizeof(hdw->cmd_buffer)-1) {
+ bcnt = sizeof(hdw->cmd_buffer)-1;
+ }
+ if (funcp(hdw,msgs[0].addr,
+ msgs[0].buf,wcnt,
+ msgs[1].buf+offs,bcnt)) {
+ ret = -EIO;
+ goto done;
+ }
+ offs += bcnt;
+ tcnt -= bcnt;
+ wcnt = 0;
+ }
+ ret = 2;
+ goto done;
+ } else {
+ trace_i2c("i2c refusing complex transfer"
+ " read0=%d read1=%d",
+ (msgs[0].flags & I2C_M_RD),
+ (msgs[1].flags & I2C_M_RD));
+ }
+ } else {
+ trace_i2c("i2c refusing %d phase transfer",num);
+ }
+
+ done:
+ if (pvrusb2_debug & PVR2_TRACE_I2C_TRAF) {
+ unsigned int idx,offs,cnt;
+ for (idx = 0; idx < num; idx++) {
+ cnt = msgs[idx].len;
+ printk(KERN_INFO
+ "pvrusb2 i2c xfer %u/%u:"
+ " addr=0x%x len=%d %s%s",
+ idx+1,num,
+ msgs[idx].addr,
+ cnt,
+ (msgs[idx].flags & I2C_M_RD ?
+ "read" : "write"),
+ (msgs[idx].flags & I2C_M_NOSTART ?
+ " nostart" : ""));
+ if ((ret > 0) || !(msgs[idx].flags & I2C_M_RD)) {
+ printk(" [");
+ /* dump at most the first 8 bytes; cnt keeps the full
+ length so truncation can be flagged below */
+ for (offs = 0; offs < (cnt > 8 ? 8 : cnt); offs++) {
+ if (offs) printk(" ");
+ printk("%02x",msgs[idx].buf[offs]);
+ }
+ if (offs < cnt) printk(" ...");
+ printk("]");
+ }
+ if (idx+1 == num) {
+ printk(" result=%d",ret);
+ }
+ printk("\n");
+ }
+ if (!num) {
+ printk(KERN_INFO
+ "pvrusb2 i2c xfer null transfer result=%d\n",
+ ret);
+ }
+ }
+ return ret;
+}
+
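+/* Illustrative sketch only (not used by the driver): the only
+ multi-message shape this adapter accepts is a write immediately
+ followed by a read to the same address, e.g. a client reading one
+ register via the i2c core.  The helper name is hypothetical. */
+static int pvr2_i2c_xfer_example(struct i2c_adapter *adap,
+ u8 addr,u8 subaddr,u8 *valp)
+{
+ struct i2c_msg msgs[2];
+ msgs[0].addr = addr;
+ msgs[0].flags = 0; /* write phase: sub-address */
+ msgs[0].len = 1;
+ msgs[0].buf = &subaddr;
+ msgs[1].addr = addr;
+ msgs[1].flags = I2C_M_RD; /* read phase: one byte back */
+ msgs[1].len = 1;
+ msgs[1].buf = valp;
+ return i2c_transfer(adap,msgs,2);
+}
+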
+static int pvr2_i2c_control(struct i2c_adapter *adapter,
+ unsigned int cmd, unsigned long arg)
+{
+ return 0;
+}
+
+static u32 pvr2_i2c_functionality(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_SMBUS_EMUL | I2C_FUNC_I2C | I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int pvr2_i2c_core_singleton(struct i2c_client *cp,
+ unsigned int cmd,void *arg)
+{
+ int stat;
+ if (!cp) return -EINVAL;
+ if (!(cp->driver)) return -EINVAL;
+ if (!(cp->driver->command)) return -EINVAL;
+ if (!try_module_get(cp->driver->driver.owner)) return -EAGAIN;
+ stat = cp->driver->command(cp,cmd,arg);
+ module_put(cp->driver->driver.owner);
+ return stat;
+}
+
+int pvr2_i2c_client_cmd(struct pvr2_i2c_client *cp,unsigned int cmd,void *arg)
+{
+ int stat;
+ if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) {
+ char buf[100];
+ unsigned int cnt;
+ cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG,
+ buf,sizeof(buf));
+ pvr2_trace(PVR2_TRACE_I2C_CMD,
+ "i2c COMMAND (code=%u 0x%x) to %.*s",
+ cmd,cmd,cnt,buf);
+ }
+ stat = pvr2_i2c_core_singleton(cp->client,cmd,arg);
+ if (pvrusb2_debug & PVR2_TRACE_I2C_CMD) {
+ char buf[100];
+ unsigned int cnt;
+ cnt = pvr2_i2c_client_describe(cp,PVR2_I2C_DETAIL_DEBUG,
+ buf,sizeof(buf));
+ pvr2_trace(PVR2_TRACE_I2C_CMD,
+ "i2c COMMAND to %.*s (ret=%d)",cnt,buf,stat);
+ }
+ return stat;
+}
+
+int pvr2_i2c_core_cmd(struct pvr2_hdw *hdw,unsigned int cmd,void *arg)
+{
+ struct list_head *item,*nc;
+ struct pvr2_i2c_client *cp;
+ int stat = -EINVAL;
+
+ if (!hdw) return stat;
+
+ mutex_lock(&hdw->i2c_list_lock);
+ list_for_each_safe(item,nc,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ if (!cp->recv_enable) continue;
+ mutex_unlock(&hdw->i2c_list_lock);
+ stat = pvr2_i2c_client_cmd(cp,cmd,arg);
+ mutex_lock(&hdw->i2c_list_lock);
+ }
+ mutex_unlock(&hdw->i2c_list_lock);
+ return stat;
+}
+
+
+static int handler_check(struct pvr2_i2c_client *cp)
+{
+ struct pvr2_i2c_handler *hp = cp->handler;
+ if (!hp) return 0;
+ if (!hp->func_table->check) return 0;
+ return hp->func_table->check(hp->func_data) != 0;
+}
+
+#define BUFSIZE 500
+
+void pvr2_i2c_core_sync(struct pvr2_hdw *hdw)
+{
+ unsigned long msk;
+ unsigned int idx;
+ struct list_head *item,*nc;
+ struct pvr2_i2c_client *cp;
+
+ if (!hdw->i2c_linked) return;
+ if (!(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL)) {
+ return;
+ }
+ mutex_lock(&hdw->i2c_list_lock); do {
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync BEGIN");
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_DETECT) {
+ /* One or more I2C clients have attached since we
+ last synced. So scan the list and identify the
+ new clients. */
+ char *buf;
+ unsigned int cnt;
+ unsigned long amask = 0;
+ buf = kmalloc(BUFSIZE,GFP_KERNEL);
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_DETECT");
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_DETECT;
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,
+ list);
+ if (!cp->detected_flag) {
+ cp->ctl_mask = 0;
+ pvr2_i2c_probe(hdw,cp);
+ cp->detected_flag = !0;
+ msk = cp->ctl_mask;
+ cnt = 0;
+ if (buf) {
+ cnt = pvr2_i2c_client_describe(
+ cp,
+ PVR2_I2C_DETAIL_ALL,
+ buf,BUFSIZE);
+ }
+ trace_i2c("Probed: %.*s",cnt,buf);
+ if (handler_check(cp)) {
+ hdw->i2c_pend_types |=
+ PVR2_I2C_PEND_CLIENT;
+ }
+ cp->pend_mask = msk;
+ hdw->i2c_pend_mask |= msk;
+ hdw->i2c_pend_types |=
+ PVR2_I2C_PEND_REFRESH;
+ }
+ amask |= cp->ctl_mask;
+ }
+ hdw->i2c_active_mask = amask;
+ if (buf) kfree(buf);
+ }
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_STALE) {
+ /* Need to do one or more global updates. Arrange
+ for this to happen. */
+ unsigned long m2;
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: PEND_STALE (0x%lx)",
+ hdw->i2c_stale_mask);
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_STALE;
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,
+ list);
+ m2 = hdw->i2c_stale_mask;
+ m2 &= cp->ctl_mask;
+ m2 &= ~cp->pend_mask;
+ if (m2) {
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: cp=%p setting 0x%lx",
+ cp,m2);
+ cp->pend_mask |= m2;
+ }
+ }
+ hdw->i2c_pend_mask |= hdw->i2c_stale_mask;
+ hdw->i2c_stale_mask = 0;
+ hdw->i2c_pend_types |= PVR2_I2C_PEND_REFRESH;
+ }
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_CLIENT) {
+ /* One or more client handlers are asking for an
+ update. Run through the list of known clients
+ and update each one. */
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_CLIENT");
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_CLIENT;
+ list_for_each_safe(item,nc,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,
+ list);
+ if (!cp->handler) continue;
+ if (!cp->handler->func_table->update) continue;
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: cp=%p update",cp);
+ mutex_unlock(&hdw->i2c_list_lock);
+ cp->handler->func_table->update(
+ cp->handler->func_data);
+ mutex_lock(&hdw->i2c_list_lock);
+ /* If client's update function set some
+ additional pending bits, account for that
+ here. */
+ if (cp->pend_mask & ~hdw->i2c_pend_mask) {
+ hdw->i2c_pend_mask |= cp->pend_mask;
+ hdw->i2c_pend_types |=
+ PVR2_I2C_PEND_REFRESH;
+ }
+ }
+ }
+ if (hdw->i2c_pend_types & PVR2_I2C_PEND_REFRESH) {
+ const struct pvr2_i2c_op *opf;
+ unsigned long pm;
+ /* Some actual updates are pending. Walk through
+ each update type and perform it. */
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: PEND_REFRESH"
+ " (0x%lx)",hdw->i2c_pend_mask);
+ hdw->i2c_pend_types &= ~PVR2_I2C_PEND_REFRESH;
+ pm = hdw->i2c_pend_mask;
+ hdw->i2c_pend_mask = 0;
+ for (idx = 0, msk = 1; pm; idx++, msk <<= 1) {
+ if (!(pm & msk)) continue;
+ pm &= ~msk;
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,
+ struct pvr2_i2c_client,
+ list);
+ if (cp->pend_mask & msk) {
+ cp->pend_mask &= ~msk;
+ cp->recv_enable = !0;
+ } else {
+ cp->recv_enable = 0;
+ }
+ }
+ opf = pvr2_i2c_get_op(idx);
+ if (!opf) continue;
+ mutex_unlock(&hdw->i2c_list_lock);
+ opf->update(hdw);
+ mutex_lock(&hdw->i2c_list_lock);
+ }
+ }
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"i2c: core_sync END");
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+}
+
+int pvr2_i2c_core_check_stale(struct pvr2_hdw *hdw)
+{
+ unsigned long msk,sm,pm;
+ unsigned int idx;
+ const struct pvr2_i2c_op *opf;
+ struct list_head *item;
+ struct pvr2_i2c_client *cp;
+ unsigned int pt = 0;
+
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale BEGIN");
+
+ pm = hdw->i2c_active_mask;
+ sm = 0;
+ for (idx = 0, msk = 1; pm; idx++, msk <<= 1) {
+ if (!(msk & pm)) continue;
+ pm &= ~msk;
+ opf = pvr2_i2c_get_op(idx);
+ if (!opf) continue;
+ if (opf->check(hdw)) {
+ sm |= msk;
+ }
+ }
+ if (sm) pt |= PVR2_I2C_PEND_STALE;
+
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ if (!handler_check(cp)) continue;
+ pt |= PVR2_I2C_PEND_CLIENT;
+ }
+
+ if (pt) {
+ mutex_lock(&hdw->i2c_list_lock); do {
+ hdw->i2c_pend_types |= pt;
+ hdw->i2c_stale_mask |= sm;
+ hdw->i2c_pend_mask |= hdw->i2c_stale_mask;
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ }
+
+ pvr2_trace(PVR2_TRACE_I2C_CORE,
+ "i2c: types=0x%x stale=0x%lx pend=0x%lx",
+ hdw->i2c_pend_types,
+ hdw->i2c_stale_mask,
+ hdw->i2c_pend_mask);
+ pvr2_trace(PVR2_TRACE_I2C_CORE,"pvr2_i2c_core_check_stale END");
+
+ return (hdw->i2c_pend_types & PVR2_I2C_PEND_ALL) != 0;
+}
+
+unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *cp,
+ unsigned int detail,
+ char *buf,unsigned int maxlen)
+{
+ unsigned int ccnt,bcnt;
+ int spcfl = 0;
+ const struct pvr2_i2c_op *opf;
+
+ ccnt = 0;
+ if (detail & PVR2_I2C_DETAIL_DEBUG) {
+ bcnt = scnprintf(buf,maxlen,
+ "ctxt=%p ctl_mask=0x%lx",
+ cp,cp->ctl_mask);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ spcfl = !0;
+ }
+ bcnt = scnprintf(buf,maxlen,
+ "%s%s @ 0x%x",
+ (spcfl ? " " : ""),
+ cp->client->name,
+ cp->client->addr);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ if ((detail & PVR2_I2C_DETAIL_HANDLER) &&
+ cp->handler && cp->handler->func_table->describe) {
+ bcnt = scnprintf(buf,maxlen," (");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ bcnt = cp->handler->func_table->describe(
+ cp->handler->func_data,buf,maxlen);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ bcnt = scnprintf(buf,maxlen,")");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ if ((detail & PVR2_I2C_DETAIL_CTLMASK) && cp->ctl_mask) {
+ unsigned int idx;
+ unsigned long msk,sm;
+ int spcfl;
+ bcnt = scnprintf(buf,maxlen," [");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ sm = 0;
+ spcfl = 0;
+ for (idx = 0, msk = 1; msk; idx++, msk <<= 1) {
+ if (!(cp->ctl_mask & msk)) continue;
+ opf = pvr2_i2c_get_op(idx);
+ if (opf) {
+ bcnt = scnprintf(buf,maxlen,"%s%s",
+ spcfl ? " " : "",
+ opf->name);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ spcfl = !0;
+ } else {
+ sm |= msk;
+ }
+ }
+ if (sm) {
+ bcnt = scnprintf(buf,maxlen,"%s%lx",
+ idx != 0 ? " " : "",sm);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ bcnt = scnprintf(buf,maxlen,"]");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ return ccnt;
+}
+
+unsigned int pvr2_i2c_report(struct pvr2_hdw *hdw,
+ char *buf,unsigned int maxlen)
+{
+ unsigned int ccnt,bcnt;
+ struct list_head *item;
+ struct pvr2_i2c_client *cp;
+ ccnt = 0;
+ mutex_lock(&hdw->i2c_list_lock); do {
+ list_for_each(item,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ bcnt = pvr2_i2c_client_describe(
+ cp,
+ (PVR2_I2C_DETAIL_HANDLER|
+ PVR2_I2C_DETAIL_CTLMASK),
+ buf,maxlen);
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ bcnt = scnprintf(buf,maxlen,"\n");
+ ccnt += bcnt; buf += bcnt; maxlen -= bcnt;
+ }
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ return ccnt;
+}
+
+static int pvr2_i2c_attach_inform(struct i2c_client *client)
+{
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data);
+ struct pvr2_i2c_client *cp;
+ int fl = !(hdw->i2c_pend_types & PVR2_I2C_PEND_ALL);
+ cp = kmalloc(sizeof(*cp),GFP_KERNEL);
+ trace_i2c("i2c_attach [client=%s @ 0x%x ctxt=%p]",
+ client->name,
+ client->addr,cp);
+ if (!cp) return -ENOMEM;
+ memset(cp,0,sizeof(*cp));
+ INIT_LIST_HEAD(&cp->list);
+ cp->client = client;
+ mutex_lock(&hdw->i2c_list_lock); do {
+ list_add_tail(&cp->list,&hdw->i2c_clients);
+ hdw->i2c_pend_types |= PVR2_I2C_PEND_DETECT;
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ if (fl) pvr2_hdw_poll_trigger_unlocked(hdw);
+ return 0;
+}
+
+static int pvr2_i2c_detach_inform(struct i2c_client *client)
+{
+ struct pvr2_hdw *hdw = (struct pvr2_hdw *)(client->adapter->algo_data);
+ struct pvr2_i2c_client *cp;
+ struct list_head *item,*nc;
+ unsigned long amask = 0;
+ int foundfl = 0;
+ mutex_lock(&hdw->i2c_list_lock); do {
+ list_for_each_safe(item,nc,&hdw->i2c_clients) {
+ cp = list_entry(item,struct pvr2_i2c_client,list);
+ if (cp->client == client) {
+ trace_i2c("pvr2_i2c_detach"
+ " [client=%s @ 0x%x ctxt=%p]",
+ client->name,
+ client->addr,cp);
+ if (cp->handler &&
+ cp->handler->func_table->detach) {
+ cp->handler->func_table->detach(
+ cp->handler->func_data);
+ }
+ list_del(&cp->list);
+ kfree(cp);
+ foundfl = !0;
+ continue;
+ }
+ amask |= cp->ctl_mask;
+ }
+ hdw->i2c_active_mask = amask;
+ } while (0); mutex_unlock(&hdw->i2c_list_lock);
+ if (!foundfl) {
+ trace_i2c("pvr2_i2c_detach [client=%s @ 0x%x ctxt=<unknown>]",
+ client->name,
+ client->addr);
+ }
+ return 0;
+}
+
+static struct i2c_algorithm pvr2_i2c_algo_template = {
+ .master_xfer = pvr2_i2c_xfer,
+ .algo_control = pvr2_i2c_control,
+ .functionality = pvr2_i2c_functionality,
+};
+
+static struct i2c_adapter pvr2_i2c_adap_template = {
+ .owner = THIS_MODULE,
+ .class = I2C_CLASS_TV_ANALOG,
+ .id = I2C_HW_B_BT848,
+ .client_register = pvr2_i2c_attach_inform,
+ .client_unregister = pvr2_i2c_detach_inform,
+};
+
+static void do_i2c_scan(struct pvr2_hdw *hdw)
+{
+ struct i2c_msg msg[1];
+ int i,rc;
+ msg[0].addr = 0;
+ msg[0].flags = I2C_M_RD;
+ msg[0].len = 0;
+ msg[0].buf = 0;
+ printk(KERN_INFO "%s: i2c scan beginning\n",hdw->name);
+ for (i = 0; i < 128; i++) {
+ msg[0].addr = i;
+ rc = i2c_transfer(&hdw->i2c_adap,msg,
+ sizeof(msg)/sizeof(msg[0]));
+ if (rc != 1) continue;
+ printk(KERN_INFO "%s: i2c scan: found device @ 0x%x\n",hdw->name,i);
+ }
+ printk(KERN_INFO "%s: i2c scan done.\n",hdw->name);
+}
+
+void pvr2_i2c_core_init(struct pvr2_hdw *hdw)
+{
+ unsigned int idx;
+
+ // The default action for all possible I2C addresses is just to do
+ // the transfer normally.
+ for (idx = 0; idx < PVR2_I2C_FUNC_CNT; idx++) {
+ hdw->i2c_func[idx] = pvr2_i2c_basic_op;
+ }
+
+#ifdef CONFIG_VIDEO_PVRUSB2_24XXX
+ // If however we're dealing with new hardware, insert some hacks in
+ // the I2C transfer stack to let things work better.
+ if (hdw->hdw_type == PVR2_HDW_TYPE_24XXX) {
+ hdw->i2c_func[0x1b] = i2c_hack_wm8775;
+ hdw->i2c_func[0x44] = i2c_hack_cx25840;
+ }
+#endif
+
+ // Configure the adapter and set up everything else related to it.
+ memcpy(&hdw->i2c_adap,&pvr2_i2c_adap_template,sizeof(hdw->i2c_adap));
+ memcpy(&hdw->i2c_algo,&pvr2_i2c_algo_template,sizeof(hdw->i2c_algo));
+ strlcpy(hdw->i2c_adap.name,hdw->name,sizeof(hdw->i2c_adap.name));
+ hdw->i2c_adap.algo = &hdw->i2c_algo;
+ hdw->i2c_adap.algo_data = hdw;
+ hdw->i2c_pend_mask = 0;
+ hdw->i2c_stale_mask = 0;
+ hdw->i2c_active_mask = 0;
+ INIT_LIST_HEAD(&hdw->i2c_clients);
+ mutex_init(&hdw->i2c_list_lock);
+ hdw->i2c_linked = !0;
+ i2c_add_adapter(&hdw->i2c_adap);
+ if (i2c_scan) do_i2c_scan(hdw);
+}
+
+void pvr2_i2c_core_done(struct pvr2_hdw *hdw)
+{
+ if (hdw->i2c_linked) {
+ i2c_del_adapter(&hdw->i2c_adap);
+ hdw->i2c_linked = 0;
+ }
+}
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h
new file mode 100644
index 00000000000000..e8af5b0ed3ce3c
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-i2c-core.h
@@ -0,0 +1,96 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_I2C_CORE_H
+#define __PVRUSB2_I2C_CORE_H
+
+#include <linux/list.h>
+#include <linux/i2c.h>
+
+struct pvr2_hdw;
+struct pvr2_i2c_client;
+struct pvr2_i2c_handler;
+struct pvr2_i2c_handler_functions;
+struct pvr2_i2c_op;
+struct pvr2_i2c_op_functions;
+
+struct pvr2_i2c_client {
+ struct i2c_client *client;
+ struct pvr2_i2c_handler *handler;
+ struct list_head list;
+ int detected_flag;
+ int recv_enable;
+ unsigned long pend_mask;
+ unsigned long ctl_mask;
+};
+
+struct pvr2_i2c_handler {
+ void *func_data;
+ const struct pvr2_i2c_handler_functions *func_table;
+};
+
+struct pvr2_i2c_handler_functions {
+ void (*detach)(void *);
+ int (*check)(void *);
+ void (*update)(void *);
+ unsigned int (*describe)(void *,char *,unsigned int);
+};
+
+struct pvr2_i2c_op {
+ int (*check)(struct pvr2_hdw *);
+ void (*update)(struct pvr2_hdw *);
+ const char *name;
+};
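+
+/* Example (hypothetical) of what an operation might look like; the names
+   below are placeholders rather than symbols that exist in the driver:
+
+       static int sample_check(struct pvr2_hdw *hdw)
+       {
+               return 0;  // nothing stale to push out
+       }
+       static void sample_update(struct pvr2_hdw *hdw)
+       {
+               // write the new state out to the relevant i2c client(s)
+       }
+       static const struct pvr2_i2c_op sample_op = {
+               .check  = sample_check,
+               .update = sample_update,
+               .name   = "sample",
+       };
+*/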
+
+void pvr2_i2c_core_init(struct pvr2_hdw *);
+void pvr2_i2c_core_done(struct pvr2_hdw *);
+
+int pvr2_i2c_client_cmd(struct pvr2_i2c_client *,unsigned int cmd,void *arg);
+int pvr2_i2c_core_cmd(struct pvr2_hdw *,unsigned int cmd,void *arg);
+
+int pvr2_i2c_core_check_stale(struct pvr2_hdw *);
+void pvr2_i2c_core_sync(struct pvr2_hdw *);
+unsigned int pvr2_i2c_report(struct pvr2_hdw *,char *buf,unsigned int maxlen);
+#define PVR2_I2C_DETAIL_DEBUG 0x0001
+#define PVR2_I2C_DETAIL_HANDLER 0x0002
+#define PVR2_I2C_DETAIL_CTLMASK 0x0004
+#define PVR2_I2C_DETAIL_ALL (\
+ PVR2_I2C_DETAIL_DEBUG |\
+ PVR2_I2C_DETAIL_HANDLER |\
+ PVR2_I2C_DETAIL_CTLMASK)
+unsigned int pvr2_i2c_client_describe(struct pvr2_i2c_client *,
+ unsigned int detail_mask,
+ char *buf,unsigned int maxlen);
+
+void pvr2_i2c_probe(struct pvr2_hdw *,struct pvr2_i2c_client *);
+const struct pvr2_i2c_op *pvr2_i2c_get_op(unsigned int idx);
+
+#endif /* __PVRUSB2_I2C_CORE_H */
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.c b/drivers/media/video/pvrusb2/pvrusb2-io.c
new file mode 100644
index 00000000000000..a984c91f571c2e
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.c
@@ -0,0 +1,695 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-io.h"
+#include "pvrusb2-debug.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+
+#define BUFFER_SIG 0x47653271
+
+// #define SANITY_CHECK_BUFFERS
+
+
+#ifdef SANITY_CHECK_BUFFERS
+#define BUFFER_CHECK(bp) do { \
+ if ((bp)->signature != BUFFER_SIG) { \
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
+ "Buffer %p is bad at %s:%d", \
+ (bp),__FILE__,__LINE__); \
+ pvr2_buffer_describe(bp,"BadSig"); \
+ BUG(); \
+ } \
+} while (0)
+#else
+#define BUFFER_CHECK(bp) do {} while(0)
+#endif
+
+struct pvr2_stream {
+ /* Buffers queued for reading */
+ struct list_head queued_list;
+ unsigned int q_count;
+ unsigned int q_bcount;
+ /* Buffers with retrieved data */
+ struct list_head ready_list;
+ unsigned int r_count;
+ unsigned int r_bcount;
+ /* Buffers available for use */
+ struct list_head idle_list;
+ unsigned int i_count;
+ unsigned int i_bcount;
+ /* Pointers to all buffers */
+ struct pvr2_buffer **buffers;
+ /* Array size of buffers */
+ unsigned int buffer_slot_count;
+ /* Total buffers actually in circulation */
+ unsigned int buffer_total_count;
+ /* Desired number of buffers to be in circulation */
+ unsigned int buffer_target_count;
+ /* Executed when the ready list becomes non-empty */
+ pvr2_stream_callback callback_func;
+ void *callback_data;
+ /* Context for transfer endpoint */
+ struct usb_device *dev;
+ int endpoint;
+ /* Overhead for mutex enforcement */
+ spinlock_t list_lock;
+ struct mutex mutex;
+ /* Tracking state for tolerating errors */
+ unsigned int fail_count;
+ unsigned int fail_tolerance;
+};
+
+struct pvr2_buffer {
+ int id;
+ int signature;
+ enum pvr2_buffer_state state;
+ void *ptr; /* Pointer to storage area */
+ unsigned int max_count; /* Size of storage area */
+ unsigned int used_count; /* Amount of valid data in storage area */
+ int status; /* Transfer result status */
+ struct pvr2_stream *stream;
+ struct list_head list_overhead;
+ struct urb *purb;
+};
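+
+/* Life cycle of a buffer as implemented below: a buffer starts out
+   "idle", becomes "queued" when its URB is submitted, is marked "ready"
+   by the completion callback once data (or a final error) is available,
+   and returns to "idle" (or is re-queued) after the consumer has drained
+   it.  The three list heads in pvr2_stream mirror these states. */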
+
+const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
+{
+ switch (st) {
+ case pvr2_buffer_state_none: return "none";
+ case pvr2_buffer_state_idle: return "idle";
+ case pvr2_buffer_state_queued: return "queued";
+ case pvr2_buffer_state_ready: return "ready";
+ }
+ return "unknown";
+}
+
+void pvr2_buffer_describe(struct pvr2_buffer *bp,const char *msg)
+{
+ pvr2_trace(PVR2_TRACE_INFO,
+ "buffer%s%s %p state=%s id=%d status=%d"
+ " stream=%p purb=%p sig=0x%x",
+ (msg ? " " : ""),
+ (msg ? msg : ""),
+ bp,
+ (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
+ (bp ? bp->id : 0),
+ (bp ? bp->status : 0),
+ (bp ? bp->stream : 0),
+ (bp ? bp->purb : 0),
+ (bp ? bp->signature : 0));
+}
+
+static void pvr2_buffer_remove(struct pvr2_buffer *bp)
+{
+ unsigned int *cnt;
+ unsigned int *bcnt;
+ unsigned int ccnt;
+ struct pvr2_stream *sp = bp->stream;
+ switch (bp->state) {
+ case pvr2_buffer_state_idle:
+ cnt = &sp->i_count;
+ bcnt = &sp->i_bcount;
+ ccnt = bp->max_count;
+ break;
+ case pvr2_buffer_state_queued:
+ cnt = &sp->q_count;
+ bcnt = &sp->q_bcount;
+ ccnt = bp->max_count;
+ break;
+ case pvr2_buffer_state_ready:
+ cnt = &sp->r_count;
+ bcnt = &sp->r_bcount;
+ ccnt = bp->used_count;
+ break;
+ default:
+ return;
+ }
+ list_del_init(&bp->list_overhead);
+ (*cnt)--;
+ (*bcnt) -= ccnt;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s dec cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),*bcnt,*cnt);
+ bp->state = pvr2_buffer_state_none;
+}
+
+static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
+{
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_none));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ pvr2_buffer_remove(bp);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+}
+
+static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
+{
+ int fl;
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_ready));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ fl = (sp->r_count == 0);
+ pvr2_buffer_remove(bp);
+ list_add_tail(&bp->list_overhead,&sp->ready_list);
+ bp->state = pvr2_buffer_state_ready;
+ (sp->r_count)++;
+ sp->r_bcount += bp->used_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s inc cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),
+ sp->r_bcount,sp->r_count);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ return fl;
+}
+
+static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
+{
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_idle));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ pvr2_buffer_remove(bp);
+ list_add_tail(&bp->list_overhead,&sp->idle_list);
+ bp->state = pvr2_buffer_state_idle;
+ (sp->i_count)++;
+ sp->i_bcount += bp->max_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s inc cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),
+ sp->i_bcount,sp->i_count);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+}
+
+static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
+{
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
+ bp,
+ pvr2_buffer_state_decode(bp->state),
+ pvr2_buffer_state_decode(pvr2_buffer_state_queued));
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ pvr2_buffer_remove(bp);
+ list_add_tail(&bp->list_overhead,&sp->queued_list);
+ bp->state = pvr2_buffer_state_queued;
+ (sp->q_count)++;
+ sp->q_bcount += bp->max_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/"
+ " bufferPool %8s inc cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(bp->state),
+ sp->q_bcount,sp->q_count);
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+}
+
+static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
+{
+ if (bp->state == pvr2_buffer_state_queued) {
+ usb_kill_urb(bp->purb);
+ }
+}
+
+static int pvr2_buffer_init(struct pvr2_buffer *bp,
+ struct pvr2_stream *sp,
+ unsigned int id)
+{
+ memset(bp,0,sizeof(*bp));
+ bp->signature = BUFFER_SIG;
+ bp->id = id;
+ pvr2_trace(PVR2_TRACE_BUF_POOL,
+ "/*---TRACE_FLOW---*/ bufferInit %p stream=%p",bp,sp);
+ bp->stream = sp;
+ bp->state = pvr2_buffer_state_none;
+ INIT_LIST_HEAD(&bp->list_overhead);
+ bp->purb = usb_alloc_urb(0,GFP_KERNEL);
+ if (! bp->purb) return -ENOMEM;
+#ifdef SANITY_CHECK_BUFFERS
+ pvr2_buffer_describe(bp,"create");
+#endif
+ return 0;
+}
+
+static void pvr2_buffer_done(struct pvr2_buffer *bp)
+{
+#ifdef SANITY_CHECK_BUFFERS
+ pvr2_buffer_describe(bp,"delete");
+#endif
+ pvr2_buffer_wipe(bp);
+ pvr2_buffer_set_none(bp);
+ bp->signature = 0;
+ bp->stream = 0;
+ if (bp->purb) usb_free_urb(bp->purb);
+ pvr2_trace(PVR2_TRACE_BUF_POOL,"/*---TRACE_FLOW---*/"
+ " bufferDone %p",bp);
+}
+
+static int pvr2_stream_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
+{
+ int ret;
+ unsigned int scnt;
+
+ /* Allocate buffers pointer array in multiples of 32 entries */
+ if (cnt == sp->buffer_total_count) return 0;
+
+ pvr2_trace(PVR2_TRACE_BUF_POOL,
+ "/*---TRACE_FLOW---*/ poolResize "
+ " stream=%p cur=%d adj=%+d",
+ sp,
+ sp->buffer_total_count,
+ cnt-sp->buffer_total_count);
+
+ scnt = cnt & ~0x1f;
+ if (cnt > scnt) scnt += 0x20;
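+ /* e.g. cnt=1..32 yields scnt=32 and cnt=33..64 yields scnt=64; the
+    slot array is sized in multiples of 32 entries. */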
+
+ if (cnt > sp->buffer_total_count) {
+ if (scnt > sp->buffer_slot_count) {
+ struct pvr2_buffer **nb;
+ nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
+ if (!nb) return -ENOMEM;
+ if (sp->buffer_slot_count) {
+ memcpy(nb,sp->buffers,
+ sp->buffer_slot_count * sizeof(*nb));
+ kfree(sp->buffers);
+ }
+ sp->buffers = nb;
+ sp->buffer_slot_count = scnt;
+ }
+ while (sp->buffer_total_count < cnt) {
+ struct pvr2_buffer *bp;
+ bp = kmalloc(sizeof(*bp),GFP_KERNEL);
+ if (!bp) return -ENOMEM;
+ ret = pvr2_buffer_init(bp,sp,sp->buffer_total_count);
+ if (ret) {
+ kfree(bp);
+ return -ENOMEM;
+ }
+ sp->buffers[sp->buffer_total_count] = bp;
+ (sp->buffer_total_count)++;
+ pvr2_buffer_set_idle(bp);
+ }
+ } else {
+ while (sp->buffer_total_count > cnt) {
+ struct pvr2_buffer *bp;
+ bp = sp->buffers[sp->buffer_total_count - 1];
+ /* Paranoia */
+ sp->buffers[sp->buffer_total_count - 1] = 0;
+ (sp->buffer_total_count)--;
+ pvr2_buffer_done(bp);
+ kfree(bp);
+ }
+ if (scnt < sp->buffer_slot_count) {
+ struct pvr2_buffer **nb = 0;
+ if (scnt) {
+ nb = kmalloc(scnt * sizeof(*nb),GFP_KERNEL);
+ if (!nb) return -ENOMEM;
+ memcpy(nb,sp->buffers,scnt * sizeof(*nb));
+ }
+ kfree(sp->buffers);
+ sp->buffers = nb;
+ sp->buffer_slot_count = scnt;
+ }
+ }
+ return 0;
+}
+
+static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
+{
+ struct pvr2_buffer *bp;
+ unsigned int cnt;
+
+ if (sp->buffer_total_count == sp->buffer_target_count) return 0;
+
+ pvr2_trace(PVR2_TRACE_BUF_POOL,
+ "/*---TRACE_FLOW---*/"
+ " poolCheck stream=%p cur=%d tgt=%d",
+ sp,sp->buffer_total_count,sp->buffer_target_count);
+
+ if (sp->buffer_total_count < sp->buffer_target_count) {
+ return pvr2_stream_buffer_count(sp,sp->buffer_target_count);
+ }
+
+ cnt = 0;
+ while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
+ bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
+ if (bp->state != pvr2_buffer_state_idle) break;
+ cnt++;
+ }
+ if (cnt) {
+ pvr2_stream_buffer_count(sp,sp->buffer_total_count - cnt);
+ }
+
+ return 0;
+}
+
+static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
+{
+ struct list_head *lp;
+ struct pvr2_buffer *bp1;
+ while ((lp = sp->queued_list.next) != &sp->queued_list) {
+ bp1 = list_entry(lp,struct pvr2_buffer,list_overhead);
+ pvr2_buffer_wipe(bp1);
+ /* At this point, we should be guaranteed that no
+ completion callback may happen on this buffer. But it's
+ possible that it might have completed after we noticed
+ it but before we wiped it. So double check its status
+ here first. */
+ if (bp1->state != pvr2_buffer_state_queued) continue;
+ pvr2_buffer_set_idle(bp1);
+ }
+ if (sp->buffer_total_count != sp->buffer_target_count) {
+ pvr2_stream_achieve_buffer_count(sp);
+ }
+}
+
+static void pvr2_stream_init(struct pvr2_stream *sp)
+{
+ spin_lock_init(&sp->list_lock);
+ mutex_init(&sp->mutex);
+ INIT_LIST_HEAD(&sp->queued_list);
+ INIT_LIST_HEAD(&sp->ready_list);
+ INIT_LIST_HEAD(&sp->idle_list);
+}
+
+static void pvr2_stream_done(struct pvr2_stream *sp)
+{
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ pvr2_stream_buffer_count(sp,0);
+ } while (0); mutex_unlock(&sp->mutex);
+}
+
+static void buffer_complete(struct urb *urb, struct pt_regs *regs)
+{
+ struct pvr2_buffer *bp = urb->context;
+ struct pvr2_stream *sp;
+ unsigned long irq_flags;
+ BUFFER_CHECK(bp);
+ sp = bp->stream;
+ bp->used_count = 0;
+ bp->status = 0;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
+ bp,urb->status,urb->actual_length);
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
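+ /* A zero status is a normal completion; -ENOENT, -ECONNRESET and
+    -ESHUTDOWN mean the URB was killed or the device went away, in which
+    case whatever data did arrive is still kept.  Any other status is a
+    transfer error, subject to the tolerance counting below. */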
+ if ((!(urb->status)) ||
+ (urb->status == -ENOENT) ||
+ (urb->status == -ECONNRESET) ||
+ (urb->status == -ESHUTDOWN)) {
+ bp->used_count = urb->actual_length;
+ if (sp->fail_count) {
+ pvr2_trace(PVR2_TRACE_TOLERANCE,
+ "stream %p transfer ok"
+ " - fail count reset",sp);
+ sp->fail_count = 0;
+ }
+ } else if (sp->fail_count < sp->fail_tolerance) {
+ // We can tolerate this error, because we're below the
+ // threshold...
+ (sp->fail_count)++;
+ pvr2_trace(PVR2_TRACE_TOLERANCE,
+ "stream %p ignoring error %d"
+ " - fail count increased to %u",
+ sp,urb->status,sp->fail_count);
+ } else {
+ bp->status = urb->status;
+ }
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ pvr2_buffer_set_ready(bp);
+ if (sp && sp->callback_func) {
+ sp->callback_func(sp->callback_data);
+ }
+}
+
+struct pvr2_stream *pvr2_stream_create(void)
+{
+ struct pvr2_stream *sp;
+ sp = kmalloc(sizeof(*sp),GFP_KERNEL);
+ if (!sp) return sp;
+ memset(sp,0,sizeof(*sp));
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_create: sp=%p",sp);
+ pvr2_stream_init(sp);
+ return sp;
+}
+
+void pvr2_stream_destroy(struct pvr2_stream *sp)
+{
+ if (!sp) return;
+ pvr2_trace(PVR2_TRACE_INIT,"pvr2_stream_destroy: sp=%p",sp);
+ pvr2_stream_done(sp);
+ kfree(sp);
+}
+
+void pvr2_stream_setup(struct pvr2_stream *sp,
+ struct usb_device *dev,
+ int endpoint,
+ unsigned int tolerance)
+{
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ sp->dev = dev;
+ sp->endpoint = endpoint;
+ sp->fail_tolerance = tolerance;
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+void pvr2_stream_set_callback(struct pvr2_stream *sp,
+ pvr2_stream_callback func,
+ void *data)
+{
+ unsigned long irq_flags;
+ mutex_lock(&sp->mutex); do {
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ sp->callback_data = data;
+ sp->callback_func = func;
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+/* Query / set the nominal buffer count */
+int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
+{
+ return sp->buffer_target_count;
+}
+
+int pvr2_stream_set_buffer_count(struct pvr2_stream *sp,unsigned int cnt)
+{
+ int ret;
+ if (sp->buffer_target_count == cnt) return 0;
+ mutex_lock(&sp->mutex); do {
+ sp->buffer_target_count = cnt;
+ ret = pvr2_stream_achieve_buffer_count(sp);
+ } while(0); mutex_unlock(&sp->mutex);
+ return ret;
+}
+
+struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
+{
+ struct list_head *lp = sp->idle_list.next;
+ if (lp == &sp->idle_list) return 0;
+ return list_entry(lp,struct pvr2_buffer,list_overhead);
+}
+
+struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
+{
+ struct list_head *lp = sp->ready_list.next;
+ if (lp == &sp->ready_list) return 0;
+ return list_entry(lp,struct pvr2_buffer,list_overhead);
+}
+
+struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id)
+{
+ if (id < 0) return 0;
+ if (id >= sp->buffer_total_count) return 0;
+ return sp->buffers[id];
+}
+
+int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
+{
+ return sp->r_count;
+}
+
+int pvr2_stream_get_idle_count(struct pvr2_stream *sp)
+{
+ return sp->i_count;
+}
+
+void pvr2_stream_flush(struct pvr2_stream *sp)
+{
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+void pvr2_stream_kill(struct pvr2_stream *sp)
+{
+ struct pvr2_buffer *bp;
+ mutex_lock(&sp->mutex); do {
+ pvr2_stream_internal_flush(sp);
+ while ((bp = pvr2_stream_get_ready_buffer(sp)) != 0) {
+ pvr2_buffer_set_idle(bp);
+ }
+ if (sp->buffer_total_count != sp->buffer_target_count) {
+ pvr2_stream_achieve_buffer_count(sp);
+ }
+ } while(0); mutex_unlock(&sp->mutex);
+}
+
+int pvr2_buffer_queue(struct pvr2_buffer *bp)
+{
+#undef SEED_BUFFER
+#ifdef SEED_BUFFER
+ unsigned int idx;
+ unsigned int val;
+#endif
+ int ret = 0;
+ struct pvr2_stream *sp;
+ if (!bp) return -EINVAL;
+ sp = bp->stream;
+ mutex_lock(&sp->mutex); do {
+ pvr2_buffer_wipe(bp);
+ if (!sp->dev) {
+ ret = -EIO;
+ break;
+ }
+ pvr2_buffer_set_queued(bp);
+#ifdef SEED_BUFFER
+ for (idx = 0; idx < (bp->max_count) / 4; idx++) {
+ val = bp->id << 24;
+ val |= idx;
+ ((unsigned int *)(bp->ptr))[idx] = val;
+ }
+#endif
+ bp->status = -EINPROGRESS;
+ usb_fill_bulk_urb(bp->purb, // struct urb *urb
+ sp->dev, // struct usb_device *dev
+ // endpoint (below)
+ usb_rcvbulkpipe(sp->dev,sp->endpoint),
+ bp->ptr, // void *transfer_buffer
+ bp->max_count, // int buffer_length
+ buffer_complete,
+ bp);
+ usb_submit_urb(bp->purb,GFP_KERNEL);
+ } while(0); mutex_unlock(&sp->mutex);
+ return ret;
+}
+
+int pvr2_buffer_idle(struct pvr2_buffer *bp)
+{
+ struct pvr2_stream *sp;
+ if (!bp) return -EINVAL;
+ sp = bp->stream;
+ mutex_lock(&sp->mutex); do {
+ pvr2_buffer_wipe(bp);
+ pvr2_buffer_set_idle(bp);
+ if (sp->buffer_total_count != sp->buffer_target_count) {
+ pvr2_stream_achieve_buffer_count(sp);
+ }
+ } while(0); mutex_unlock(&sp->mutex);
+ return 0;
+}
+
+int pvr2_buffer_set_buffer(struct pvr2_buffer *bp,void *ptr,unsigned int cnt)
+{
+ int ret = 0;
+ unsigned long irq_flags;
+ struct pvr2_stream *sp;
+ if (!bp) return -EINVAL;
+ sp = bp->stream;
+ mutex_lock(&sp->mutex); do {
+ spin_lock_irqsave(&sp->list_lock,irq_flags);
+ if (bp->state != pvr2_buffer_state_idle) {
+ ret = -EPERM;
+ } else {
+ bp->ptr = ptr;
+ bp->stream->i_bcount -= bp->max_count;
+ bp->max_count = cnt;
+ bp->stream->i_bcount += bp->max_count;
+ pvr2_trace(PVR2_TRACE_BUF_FLOW,
+ "/*---TRACE_FLOW---*/ bufferPool "
+ " %8s cap cap=%07d cnt=%02d",
+ pvr2_buffer_state_decode(
+ pvr2_buffer_state_idle),
+ bp->stream->i_bcount,bp->stream->i_count);
+ }
+ spin_unlock_irqrestore(&sp->list_lock,irq_flags);
+ } while(0); mutex_unlock(&sp->mutex);
+ return ret;
+}
+
+unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
+{
+ return bp->used_count;
+}
+
+int pvr2_buffer_get_status(struct pvr2_buffer *bp)
+{
+ return bp->status;
+}
+
+enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *bp)
+{
+ return bp->state;
+}
+
+int pvr2_buffer_get_id(struct pvr2_buffer *bp)
+{
+ return bp->id;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-io.h b/drivers/media/video/pvrusb2/pvrusb2-io.h
new file mode 100644
index 00000000000000..65e11385b2b333
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-io.h
@@ -0,0 +1,102 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_IO_H
+#define __PVRUSB2_IO_H
+
+#include <linux/usb.h>
+#include <linux/list.h>
+
+typedef void (*pvr2_stream_callback)(void *);
+
+enum pvr2_buffer_state {
+ pvr2_buffer_state_none = 0, // Not on any list
+ pvr2_buffer_state_idle = 1, // Buffer is ready to be used again
+ pvr2_buffer_state_queued = 2, // Buffer has been queued for filling
+ pvr2_buffer_state_ready = 3, // Buffer has data available
+};
+
+struct pvr2_stream;
+struct pvr2_buffer;
+
+const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);
+
+/* Initialize / tear down stream structure */
+struct pvr2_stream *pvr2_stream_create(void);
+void pvr2_stream_destroy(struct pvr2_stream *);
+void pvr2_stream_setup(struct pvr2_stream *,
+ struct usb_device *dev,int endpoint,
+ unsigned int tolerance);
+void pvr2_stream_set_callback(struct pvr2_stream *,
+ pvr2_stream_callback func,
+ void *data);
+
+/* Query / set the nominal buffer count */
+int pvr2_stream_get_buffer_count(struct pvr2_stream *);
+int pvr2_stream_set_buffer_count(struct pvr2_stream *,unsigned int);
+
+/* Get a pointer to a buffer that is either idle, ready, or identified
+   by the given ID. */
+struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *);
+struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *);
+struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp,int id);
+
+/* Find out how many buffers are idle or ready */
+int pvr2_stream_get_idle_count(struct pvr2_stream *);
+int pvr2_stream_get_ready_count(struct pvr2_stream *);
+
+/* Kill all pending operations */
+void pvr2_stream_flush(struct pvr2_stream *);
+
+/* Kill all pending buffers and throw away any ready buffers as well */
+void pvr2_stream_kill(struct pvr2_stream *);
+
+/* Set up the actual storage for a buffer */
+int pvr2_buffer_set_buffer(struct pvr2_buffer *,void *ptr,unsigned int cnt);
+
+/* Find out size of data in the given ready buffer */
+unsigned int pvr2_buffer_get_count(struct pvr2_buffer *);
+
+/* Retrieve completion code for given ready buffer */
+int pvr2_buffer_get_status(struct pvr2_buffer *);
+
+/* Retrieve state of given buffer */
+enum pvr2_buffer_state pvr2_buffer_get_state(struct pvr2_buffer *);
+
+/* Retrieve ID of given buffer */
+int pvr2_buffer_get_id(struct pvr2_buffer *);
+
+/* Start reading into given buffer (kill it if needed) */
+int pvr2_buffer_queue(struct pvr2_buffer *);
+
+/* Move buffer back to idle pool (kill it if needed) */
+int pvr2_buffer_idle(struct pvr2_buffer *);
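+
+/* Rough usage sketch (error handling omitted; the endpoint number, the
+   sizes and the consume()/my_callback()/storage[] names below are
+   placeholders, not part of this interface):
+
+       unsigned int i;
+       struct pvr2_buffer *bp;
+       struct pvr2_stream *sp = pvr2_stream_create();
+
+       pvr2_stream_setup(sp, usb_dev, 0x84, 3);
+       pvr2_stream_set_callback(sp, my_callback, my_data);
+       pvr2_stream_set_buffer_count(sp, 32);
+       for (i = 0; i < 32; i++) {
+               bp = pvr2_stream_get_buffer(sp, i);
+               pvr2_buffer_set_buffer(bp, storage[i], 0x4000);
+               pvr2_buffer_queue(bp);
+       }
+       // ... later, e.g. from my_callback():
+       while ((bp = pvr2_stream_get_ready_buffer(sp)) != 0) {
+               consume(pvr2_buffer_get_count(bp));
+               pvr2_buffer_queue(bp);          // recycle the buffer
+       }
+       pvr2_stream_destroy(sp);
+*/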
+
+#endif /* __PVRUSB2_IO_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.c b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
new file mode 100644
index 00000000000000..49da062e327100
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.c
@@ -0,0 +1,513 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-ioread.h"
+#include "pvrusb2-debug.h"
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <asm/uaccess.h>
+
+#define BUFFER_COUNT 32
+#define BUFFER_SIZE PAGE_ALIGN(0x4000)
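+/* i.e. 32 transfer buffers of 16 KiB each (rounded up to a whole page) */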
+
+struct pvr2_ioread {
+ struct pvr2_stream *stream;
+ char *buffer_storage[BUFFER_COUNT];
+ char *sync_key_ptr;
+ unsigned int sync_key_len;
+ unsigned int sync_buf_offs;
+ unsigned int sync_state;
+ unsigned int sync_trashed_count;
+ int enabled; // Streaming is on
+ int spigot_open; // OK to pass data to client
+ int stream_running; // Passing data to client now
+
+ /* State relevant to current buffer being read */
+ struct pvr2_buffer *c_buf;
+ char *c_data_ptr;
+ unsigned int c_data_len;
+ unsigned int c_data_offs;
+ struct mutex mutex;
+};
+
+static int pvr2_ioread_init(struct pvr2_ioread *cp)
+{
+ unsigned int idx;
+
+ cp->stream = 0;
+ mutex_init(&cp->mutex);
+
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ cp->buffer_storage[idx] = kmalloc(BUFFER_SIZE,GFP_KERNEL);
+ if (!(cp->buffer_storage[idx])) break;
+ }
+
+ if (idx < BUFFER_COUNT) {
+ // An allocation appears to have failed
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ if (!(cp->buffer_storage[idx])) continue;
+ kfree(cp->buffer_storage[idx]);
+ }
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void pvr2_ioread_done(struct pvr2_ioread *cp)
+{
+ unsigned int idx;
+
+ pvr2_ioread_setup(cp,0);
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ if (!(cp->buffer_storage[idx])) continue;
+ kfree(cp->buffer_storage[idx]);
+ }
+}
+
+struct pvr2_ioread *pvr2_ioread_create(void)
+{
+ struct pvr2_ioread *cp;
+ cp = kmalloc(sizeof(*cp),GFP_KERNEL);
+ if (!cp) return 0;
+ pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_create id=%p",cp);
+ memset(cp,0,sizeof(*cp));
+ if (pvr2_ioread_init(cp) < 0) {
+ kfree(cp);
+ return 0;
+ }
+ return cp;
+}
+
+void pvr2_ioread_destroy(struct pvr2_ioread *cp)
+{
+ if (!cp) return;
+ pvr2_ioread_done(cp);
+ pvr2_trace(PVR2_TRACE_STRUCT,"pvr2_ioread_destroy id=%p",cp);
+ if (cp->sync_key_ptr) {
+ kfree(cp->sync_key_ptr);
+ cp->sync_key_ptr = 0;
+ }
+ kfree(cp);
+}
+
+void pvr2_ioread_set_sync_key(struct pvr2_ioread *cp,
+ const char *sync_key_ptr,
+ unsigned int sync_key_len)
+{
+ if (!cp) return;
+
+ if (!sync_key_ptr) sync_key_len = 0;
+ if ((sync_key_len == cp->sync_key_len) &&
+ ((!sync_key_len) ||
+ (!memcmp(sync_key_ptr,cp->sync_key_ptr,sync_key_len)))) return;
+
+ if (sync_key_len != cp->sync_key_len) {
+ if (cp->sync_key_ptr) {
+ kfree(cp->sync_key_ptr);
+ cp->sync_key_ptr = 0;
+ }
+ cp->sync_key_len = 0;
+ if (sync_key_len) {
+ cp->sync_key_ptr = kmalloc(sync_key_len,GFP_KERNEL);
+ if (cp->sync_key_ptr) {
+ cp->sync_key_len = sync_key_len;
+ }
+ }
+ }
+ if (!cp->sync_key_len) return;
+ memcpy(cp->sync_key_ptr,sync_key_ptr,cp->sync_key_len);
+}
+
+static void pvr2_ioread_stop(struct pvr2_ioread *cp)
+{
+ if (!(cp->enabled)) return;
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/ pvr2_ioread_stop id=%p",cp);
+ pvr2_stream_kill(cp->stream);
+ cp->c_buf = 0;
+ cp->c_data_ptr = 0;
+ cp->c_data_len = 0;
+ cp->c_data_offs = 0;
+ cp->enabled = 0;
+ cp->stream_running = 0;
+ cp->spigot_open = 0;
+ if (cp->sync_state) {
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ sync_state <== 0");
+ cp->sync_state = 0;
+ }
+}
+
+static int pvr2_ioread_start(struct pvr2_ioread *cp)
+{
+ int stat;
+ struct pvr2_buffer *bp;
+ if (cp->enabled) return 0;
+ if (!(cp->stream)) return 0;
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/ pvr2_ioread_start id=%p",cp);
+ while ((bp = pvr2_stream_get_idle_buffer(cp->stream)) != 0) {
+ stat = pvr2_buffer_queue(bp);
+ if (stat < 0) {
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_start id=%p"
+ " error=%d",
+ cp,stat);
+ pvr2_ioread_stop(cp);
+ return stat;
+ }
+ }
+ cp->enabled = !0;
+ cp->c_buf = 0;
+ cp->c_data_ptr = 0;
+ cp->c_data_len = 0;
+ cp->c_data_offs = 0;
+ cp->stream_running = 0;
+ if (cp->sync_key_len) {
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ sync_state <== 1");
+ cp->sync_state = 1;
+ cp->sync_trashed_count = 0;
+ cp->sync_buf_offs = 0;
+ }
+ cp->spigot_open = 0;
+ return 0;
+}
+
+struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *cp)
+{
+ return cp->stream;
+}
+
+int pvr2_ioread_setup(struct pvr2_ioread *cp,struct pvr2_stream *sp)
+{
+ int ret;
+ unsigned int idx;
+ struct pvr2_buffer *bp;
+
+ mutex_lock(&cp->mutex); do {
+ if (cp->stream) {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_setup (tear-down) id=%p",cp);
+ pvr2_ioread_stop(cp);
+ pvr2_stream_kill(cp->stream);
+ pvr2_stream_set_buffer_count(cp->stream,0);
+ cp->stream = 0;
+ }
+ if (sp) {
+ pvr2_trace(PVR2_TRACE_START_STOP,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_setup (setup) id=%p",cp);
+ pvr2_stream_kill(sp);
+ ret = pvr2_stream_set_buffer_count(sp,BUFFER_COUNT);
+ if (ret < 0) {
+ mutex_unlock(&cp->mutex);
+ return ret;
+ }
+ for (idx = 0; idx < BUFFER_COUNT; idx++) {
+ bp = pvr2_stream_get_buffer(sp,idx);
+ pvr2_buffer_set_buffer(bp,
+ cp->buffer_storage[idx],
+ BUFFER_SIZE);
+ }
+ cp->stream = sp;
+ }
+ } while (0); mutex_unlock(&cp->mutex);
+
+ return 0;
+}
+
+int pvr2_ioread_set_enabled(struct pvr2_ioread *cp,int fl)
+{
+ int ret = 0;
+ if ((!fl) == (!(cp->enabled))) return ret;
+
+ mutex_lock(&cp->mutex); do {
+ if (fl) {
+ ret = pvr2_ioread_start(cp);
+ } else {
+ pvr2_ioread_stop(cp);
+ }
+ } while (0); mutex_unlock(&cp->mutex);
+ return ret;
+}
+
+int pvr2_ioread_get_enabled(struct pvr2_ioread *cp)
+{
+ return cp->enabled != 0;
+}
+
+int pvr2_ioread_get_buffer(struct pvr2_ioread *cp)
+{
+ int stat;
+
+ while (cp->c_data_len <= cp->c_data_offs) {
+ if (cp->c_buf) {
+ // Flush out current buffer first.
+ stat = pvr2_buffer_queue(cp->c_buf);
+ if (stat < 0) {
+ // Streaming error...
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_read id=%p"
+ " queue_error=%d",
+ cp,stat);
+ pvr2_ioread_stop(cp);
+ return 0;
+ }
+ cp->c_buf = 0;
+ cp->c_data_ptr = 0;
+ cp->c_data_len = 0;
+ cp->c_data_offs = 0;
+ }
+ // Now get a freshly filled buffer.
+ cp->c_buf = pvr2_stream_get_ready_buffer(cp->stream);
+ if (!cp->c_buf) break; // Nothing ready; done.
+ cp->c_data_len = pvr2_buffer_get_count(cp->c_buf);
+ if (!cp->c_data_len) {
+ // Nothing transferred. Was there an error?
+ stat = pvr2_buffer_get_status(cp->c_buf);
+ if (stat < 0) {
+ // Streaming error...
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " pvr2_ioread_read id=%p"
+ " buffer_error=%d",
+ cp,stat);
+ pvr2_ioread_stop(cp);
+ // Give up.
+ return 0;
+ }
+ // Start over...
+ continue;
+ }
+ cp->c_data_offs = 0;
+ cp->c_data_ptr = cp->buffer_storage[
+ pvr2_buffer_get_id(cp->c_buf)];
+ }
+ return !0;
+}
+
+void pvr2_ioread_filter(struct pvr2_ioread *cp)
+{
+ unsigned int idx;
+ if (!cp->enabled) return;
+ if (cp->sync_state != 1) return;
+
+ // Search the stream for our synchronization key.  This is made
+ // complicated by the fact that the key may straddle a buffer
+ // boundary, so the partial-match state has to be carried from one
+ // buffer to the next...
+ mutex_lock(&cp->mutex); while (1) {
+ // Ensure we have a buffer
+ if (!pvr2_ioread_get_buffer(cp)) break;
+ if (!cp->c_data_len) break;
+
+ // Now walk the buffer contents until we match the key or
+ // run out of buffer data.
+ for (idx = cp->c_data_offs; idx < cp->c_data_len; idx++) {
+ if (cp->sync_buf_offs >= cp->sync_key_len) break;
+ if (cp->c_data_ptr[idx] ==
+ cp->sync_key_ptr[cp->sync_buf_offs]) {
+ // Found the next key byte
+ (cp->sync_buf_offs)++;
+ } else {
+ // Whoops, mismatched. Start key over...
+ cp->sync_buf_offs = 0;
+ }
+ }
+
+ // Consume what we've walked through
+ cp->c_data_offs += idx;
+ cp->sync_trashed_count += idx;
+
+ // If we've found the key, then update state and get out.
+ if (cp->sync_buf_offs >= cp->sync_key_len) {
+ cp->sync_trashed_count -= cp->sync_key_len;
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " sync_state <== 2 (skipped %u bytes)",
+ cp->sync_trashed_count);
+ cp->sync_state = 2;
+ cp->sync_buf_offs = 0;
+ break;
+ }
+
+ if (cp->c_data_offs < cp->c_data_len) {
+ // Sanity check - should NEVER get here
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "ERROR: pvr2_ioread filter sync problem"
+ " len=%u offs=%u",
+ cp->c_data_len,cp->c_data_offs);
+ // Get out so we don't get stuck in an infinite
+ // loop.
+ break;
+ }
+
+ continue; // (for clarity)
+ } mutex_unlock(&cp->mutex);
+}
+
+int pvr2_ioread_avail(struct pvr2_ioread *cp)
+{
+ int ret;
+ if (!(cp->enabled)) {
+ // Stream is not enabled; so this is an I/O error
+ return -EIO;
+ }
+
+ if (cp->sync_state == 1) {
+ pvr2_ioread_filter(cp);
+ if (cp->sync_state == 1) return -EAGAIN;
+ }
+
+ ret = 0;
+ if (cp->stream_running) {
+ if (!pvr2_stream_get_ready_count(cp->stream)) {
+ // No data available at all right now.
+ ret = -EAGAIN;
+ }
+ } else {
+ if (pvr2_stream_get_ready_count(cp->stream) < BUFFER_COUNT/2) {
+ // Haven't buffered up enough yet; try again later
+ ret = -EAGAIN;
+ }
+ }
+
+ if ((!(cp->spigot_open)) != (!(ret == 0))) {
+ cp->spigot_open = (ret == 0);
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ data is %s",
+ cp->spigot_open ? "available" : "pending");
+ }
+
+ return ret;
+}
+
+int pvr2_ioread_read(struct pvr2_ioread *cp,void __user *buf,unsigned int cnt)
+{
+ unsigned int copied_cnt;
+ unsigned int bcnt;
+ const char *src;
+ int stat;
+ int ret = 0;
+ unsigned int req_cnt = cnt;
+
+ if (!cnt) {
+ pvr2_trace(PVR2_TRACE_TRAP,
+ "/*---TRACE_READ---*/ pvr2_ioread_read id=%p"
+ " ZERO Request? Returning zero.",cp);
+ return 0;
+ }
+
+ stat = pvr2_ioread_avail(cp);
+ if (stat < 0) return stat;
+
+ cp->stream_running = !0;
+
+ mutex_lock(&cp->mutex); do {
+
+ // Suck data out of the buffers and copy to the user
+ copied_cnt = 0;
+ if (!buf) cnt = 0;
+ while (1) {
+ if (!pvr2_ioread_get_buffer(cp)) {
+ ret = -EIO;
+ break;
+ }
+
+ if (!cnt) break;
+
+ if (cp->sync_state == 2) {
+ // We're repeating the sync key data into
+ // the stream.
+ src = cp->sync_key_ptr + cp->sync_buf_offs;
+ bcnt = cp->sync_key_len - cp->sync_buf_offs;
+ } else {
+ // Normal buffer copy
+ src = cp->c_data_ptr + cp->c_data_offs;
+ bcnt = cp->c_data_len - cp->c_data_offs;
+ }
+
+ if (!bcnt) break;
+
+ // Don't run past user's buffer
+ if (bcnt > cnt) bcnt = cnt;
+
+ if (copy_to_user(buf,src,bcnt)) {
+ // User supplied a bad pointer?
+ // Give up - this *will* cause data
+ // to be lost.
+ ret = -EFAULT;
+ break;
+ }
+ cnt -= bcnt;
+ buf += bcnt;
+ copied_cnt += bcnt;
+
+ if (cp->sync_state == 2) {
+ // Update offset inside sync key that we're
+ // repeating back out.
+ cp->sync_buf_offs += bcnt;
+ if (cp->sync_buf_offs >= cp->sync_key_len) {
+ // Consumed entire key; switch mode
+ // to normal.
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/"
+ " sync_state <== 0");
+ cp->sync_state = 0;
+ }
+ } else {
+ // Update buffer offset.
+ cp->c_data_offs += bcnt;
+ }
+ }
+
+ } while (0); mutex_unlock(&cp->mutex);
+
+ if (!ret) {
+ if (copied_cnt) {
+ // If anything was copied, return that count
+ ret = copied_cnt;
+ } else {
+ // Nothing was copied; suggest to the caller that
+ // another attempt should be made later.
+ ret = -EAGAIN;
+ }
+ }
+
+ pvr2_trace(PVR2_TRACE_DATA_FLOW,
+ "/*---TRACE_READ---*/ pvr2_ioread_read"
+ " id=%p request=%d result=%d",
+ cp,req_cnt,ret);
+ return ret;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-ioread.h b/drivers/media/video/pvrusb2/pvrusb2-ioread.h
new file mode 100644
index 00000000000000..6b002597f5de56
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-ioread.h
@@ -0,0 +1,50 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_IOREAD_H
+#define __PVRUSB2_IOREAD_H
+
+#include "pvrusb2-io.h"
+
+struct pvr2_ioread;
+
+struct pvr2_ioread *pvr2_ioread_create(void);
+void pvr2_ioread_destroy(struct pvr2_ioread *);
+int pvr2_ioread_setup(struct pvr2_ioread *,struct pvr2_stream *);
+struct pvr2_stream *pvr2_ioread_get_stream(struct pvr2_ioread *);
+void pvr2_ioread_set_sync_key(struct pvr2_ioread *,
+ const char *sync_key_ptr,
+ unsigned int sync_key_len);
+int pvr2_ioread_set_enabled(struct pvr2_ioread *,int fl);
+int pvr2_ioread_get_enabled(struct pvr2_ioread *);
+int pvr2_ioread_read(struct pvr2_ioread *,void __user *buf,unsigned int cnt);
+int pvr2_ioread_avail(struct pvr2_ioread *);
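+
+/* Rough usage sketch, e.g. from a character-device read path (the
+   stream, key and count values below are placeholders):
+
+       struct pvr2_ioread *cp = pvr2_ioread_create();
+       pvr2_ioread_setup(cp, stream);
+       pvr2_ioread_set_sync_key(cp, sync_key, sync_key_len);
+       pvr2_ioread_set_enabled(cp, !0);
+       ...
+       ret = pvr2_ioread_read(cp, user_buf, count); // may return -EAGAIN
+       ...
+       pvr2_ioread_set_enabled(cp, 0);
+       pvr2_ioread_destroy(cp);
+*/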
+
+#endif /* __PVRUSB2_IOREAD_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-main.c b/drivers/media/video/pvrusb2/pvrusb2-main.c
new file mode 100644
index 00000000000000..b95248274ed096
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-main.c
@@ -0,0 +1,172 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/smp_lock.h>
+#include <linux/usb.h>
+#include <linux/videodev2.h>
+
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-context.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-v4l2.h"
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+#include "pvrusb2-sysfs.h"
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+#define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>"
+#define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner"
+#define DRIVER_VERSION "V4L in-tree version"
+
+#define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \
+ PVR2_TRACE_INFO| \
+ PVR2_TRACE_TOLERANCE| \
+ PVR2_TRACE_TRAP| \
+ 0)
+
+int pvrusb2_debug = DEFAULT_DEBUG_MASK;
+
+module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug trace mask");
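+/* The mask can be given at load time (e.g. "modprobe pvrusb2 debug=0x1f")
+   or changed later through /sys/module/pvrusb2/parameters/debug, since
+   the parameter is writable (S_IWUSR); each bit selects one of the
+   PVR2_TRACE_* classes used with pvr2_trace(). */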
+
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+static struct pvr2_sysfs_class *class_ptr = 0;
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+static void pvr_setup_attach(struct pvr2_context *pvr)
+{
+ /* Create association with v4l layer */
+ pvr2_v4l2_create(pvr);
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+ pvr2_sysfs_create(pvr,class_ptr);
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+}
+
+static int pvr_probe(struct usb_interface *intf,
+ const struct usb_device_id *devid)
+{
+ struct pvr2_context *pvr;
+
+ /* Create underlying hardware interface */
+ pvr = pvr2_context_create(intf,devid,pvr_setup_attach);
+ if (!pvr) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "Failed to create hdw handler");
+ return -ENOMEM;
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr);
+
+ usb_set_intfdata(intf, pvr);
+
+ return 0;
+}
+
+/*
+ * pvr_disconnect()
+ *
+ */
+static void pvr_disconnect(struct usb_interface *intf)
+{
+ struct pvr2_context *pvr = usb_get_intfdata(intf);
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr);
+
+ usb_set_intfdata (intf, NULL);
+ pvr2_context_disconnect(pvr);
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr);
+
+}
+
+static struct usb_driver pvr_driver = {
+ .name = "pvrusb2",
+ .id_table = pvr2_device_table,
+ .probe = pvr_probe,
+ .disconnect = pvr_disconnect
+};
+
+/*
+ * pvr_init() / pvr_exit()
+ *
+ * This code is run to initialize/exit the driver.
+ *
+ */
+static int __init pvr_init(void)
+{
+ int ret;
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_init");
+
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+ class_ptr = pvr2_sysfs_class_create();
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+ ret = usb_register(&pvr_driver);
+
+ if (ret == 0)
+ info(DRIVER_DESC " : " DRIVER_VERSION);
+ if (pvrusb2_debug) info("Debug mask is %d (0x%x)",
+ pvrusb2_debug,pvrusb2_debug);
+
+ return ret;
+}
+
+static void __exit pvr_exit(void)
+{
+
+ pvr2_trace(PVR2_TRACE_INIT,"pvr_exit");
+
+#ifdef CONFIG_VIDEO_PVRUSB2_SYSFS
+ pvr2_sysfs_class_destroy(class_ptr);
+#endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */
+
+ usb_deregister(&pvr_driver);
+}
+
+module_init(pvr_init);
+module_exit(pvr_exit);
+
+/* Mike Isely <mcisely@pobox.com> 11-Mar-2006: See pvrusb2-hdw.c for
+ MODULE_DEVICE_TABLE(). We have to declare that attribute there
+ because that's where the device table actually is now and it seems
+ that certain gcc configurations get angry if MODULE_DEVICE_TABLE()
+ is used on what ends up being an external symbol. */
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.c b/drivers/media/video/pvrusb2/pvrusb2-std.c
new file mode 100644
index 00000000000000..13406369364385
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-std.c
@@ -0,0 +1,408 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2-std.h"
+#include "pvrusb2-debug.h"
+#include <asm/string.h>
+#include <linux/slab.h>
+
+struct std_name {
+ const char *name;
+ v4l2_std_id id;
+};
+
+
+#define CSTD_PAL \
+ (V4L2_STD_PAL_B| \
+ V4L2_STD_PAL_B1| \
+ V4L2_STD_PAL_G| \
+ V4L2_STD_PAL_H| \
+ V4L2_STD_PAL_I| \
+ V4L2_STD_PAL_D| \
+ V4L2_STD_PAL_D1| \
+ V4L2_STD_PAL_K| \
+ V4L2_STD_PAL_M| \
+ V4L2_STD_PAL_N| \
+ V4L2_STD_PAL_Nc| \
+ V4L2_STD_PAL_60)
+
+#define CSTD_NTSC \
+ (V4L2_STD_NTSC_M| \
+ V4L2_STD_NTSC_M_JP| \
+ V4L2_STD_NTSC_M_KR| \
+ V4L2_STD_NTSC_443)
+
+#define CSTD_SECAM \
+ (V4L2_STD_SECAM_B| \
+ V4L2_STD_SECAM_D| \
+ V4L2_STD_SECAM_G| \
+ V4L2_STD_SECAM_H| \
+ V4L2_STD_SECAM_K| \
+ V4L2_STD_SECAM_K1| \
+ V4L2_STD_SECAM_L| \
+ V4L2_STD_SECAM_LC)
+
+#define TSTD_B (V4L2_STD_PAL_B|V4L2_STD_SECAM_B)
+#define TSTD_B1 (V4L2_STD_PAL_B1)
+#define TSTD_D (V4L2_STD_PAL_D|V4L2_STD_SECAM_D)
+#define TSTD_D1 (V4L2_STD_PAL_D1)
+#define TSTD_G (V4L2_STD_PAL_G|V4L2_STD_SECAM_G)
+#define TSTD_H (V4L2_STD_PAL_H|V4L2_STD_SECAM_H)
+#define TSTD_I (V4L2_STD_PAL_I)
+#define TSTD_K (V4L2_STD_PAL_K|V4L2_STD_SECAM_K)
+#define TSTD_K1 (V4L2_STD_SECAM_K1)
+#define TSTD_L (V4L2_STD_SECAM_L)
+#define TSTD_M (V4L2_STD_PAL_M|V4L2_STD_NTSC_M)
+#define TSTD_N (V4L2_STD_PAL_N)
+#define TSTD_Nc (V4L2_STD_PAL_Nc)
+#define TSTD_60 (V4L2_STD_PAL_60)
+
+#define CSTD_ALL (CSTD_PAL|CSTD_NTSC|CSTD_SECAM)
+
+/* Mapping of standard bits to color system */
+static const struct std_name std_groups[] = {
+ {"PAL",CSTD_PAL},
+ {"NTSC",CSTD_NTSC},
+ {"SECAM",CSTD_SECAM},
+};
+
+/* Mapping of standard bits to modulation system */
+static const struct std_name std_items[] = {
+ {"B",TSTD_B},
+ {"B1",TSTD_B1},
+ {"D",TSTD_D},
+ {"D1",TSTD_D1},
+ {"G",TSTD_G},
+ {"H",TSTD_H},
+ {"I",TSTD_I},
+ {"K",TSTD_K},
+ {"K1",TSTD_K1},
+ {"L",TSTD_L},
+ {"LC",V4L2_STD_SECAM_LC},
+ {"M",TSTD_M},
+ {"Mj",V4L2_STD_NTSC_M_JP},
+ {"443",V4L2_STD_NTSC_443},
+ {"Mk",V4L2_STD_NTSC_M_KR},
+ {"N",TSTD_N},
+ {"Nc",TSTD_Nc},
+ {"60",TSTD_60},
+};
+
+
+// Search an array of std_name structures and return a pointer to the
+// element with the matching name.
+static const struct std_name *find_std_name(const struct std_name *arrPtr,
+ unsigned int arrSize,
+ const char *bufPtr,
+ unsigned int bufSize)
+{
+ unsigned int idx;
+ const struct std_name *p;
+ for (idx = 0; idx < arrSize; idx++) {
+ p = arrPtr + idx;
+ if (strlen(p->name) != bufSize) continue;
+ if (!memcmp(bufPtr,p->name,bufSize)) return p;
+ }
+ return 0;
+}
+
+
+int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
+ unsigned int bufSize)
+{
+ v4l2_std_id id = 0;
+ v4l2_std_id cmsk = 0;
+ v4l2_std_id t;
+ int mMode = 0;
+ unsigned int cnt;
+ char ch;
+ const struct std_name *sp;
+
+ while (bufSize) {
+ if (!mMode) {
+ cnt = 0;
+ while ((cnt < bufSize) && (bufPtr[cnt] != '-')) cnt++;
+ if (cnt >= bufSize) return 0; // No more characters
+ sp = find_std_name(
+ std_groups,
+ sizeof(std_groups)/sizeof(std_groups[0]),
+ bufPtr,cnt);
+ if (!sp) return 0; // Illegal color system name
+ cnt++;
+ bufPtr += cnt;
+ bufSize -= cnt;
+ mMode = !0;
+ cmsk = sp->id;
+ continue;
+ }
+ cnt = 0;
+ while (cnt < bufSize) {
+ ch = bufPtr[cnt];
+ if (ch == ';') {
+ mMode = 0;
+ break;
+ }
+ if (ch == '/') break;
+ cnt++;
+ }
+ sp = find_std_name(std_items,
+ sizeof(std_items)/sizeof(std_items[0]),
+ bufPtr,cnt);
+ if (!sp) return 0; // Illegal modulation system ID
+ t = sp->id & cmsk;
+ if (!t) return 0; // Specific color + modulation system illegal
+ id |= t;
+ if (cnt < bufSize) cnt++;
+ bufPtr += cnt;
+ bufSize -= cnt;
+ }
+
+ if (idPtr) *idPtr = id;
+ return !0;
+}
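+
+/* For example, "PAL-B/G;NTSC-M" parses to the mask
+   (V4L2_STD_PAL_B|V4L2_STD_PAL_G|V4L2_STD_NTSC_M); an unrecognized color
+   system or modulation name causes the whole conversion to be rejected
+   (return value 0). */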
+
+
+unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
+ v4l2_std_id id)
+{
+ unsigned int idx1,idx2;
+ const struct std_name *ip,*gp;
+ int gfl,cfl;
+ unsigned int c1,c2;
+ cfl = 0;
+ c1 = 0;
+ for (idx1 = 0;
+ idx1 < sizeof(std_groups)/sizeof(std_groups[0]);
+ idx1++) {
+ gp = std_groups + idx1;
+ gfl = 0;
+ for (idx2 = 0;
+ idx2 < sizeof(std_items)/sizeof(std_items[0]);
+ idx2++) {
+ ip = std_items + idx2;
+ if (!(gp->id & ip->id & id)) continue;
+ if (!gfl) {
+ if (cfl) {
+ c2 = scnprintf(bufPtr,bufSize,";");
+ c1 += c2;
+ bufSize -= c2;
+ bufPtr += c2;
+ }
+ cfl = !0;
+ c2 = scnprintf(bufPtr,bufSize,
+ "%s-",gp->name);
+ gfl = !0;
+ } else {
+ c2 = scnprintf(bufPtr,bufSize,"/");
+ }
+ c1 += c2;
+ bufSize -= c2;
+ bufPtr += c2;
+ c2 = scnprintf(bufPtr,bufSize,"%s",ip->name);
+ c1 += c2;
+ bufSize -= c2;
+ bufPtr += c2;
+ }
+ }
+ return c1;
+}
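+
+/* This is the counterpart of pvr2_std_str_to_id() above; for example,
+   the mask (V4L2_STD_PAL_B|V4L2_STD_PAL_G|V4L2_STD_NTSC_M) is rendered
+   as "PAL-B/G;NTSC-M". */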
+
+
+// Template data for possible enumerated video standards. Here we group
+// standards which share common frame rates and resolution.
+static struct v4l2_standard generic_standards[] = {
+ {
+ .id = (TSTD_B|TSTD_B1|
+ TSTD_D|TSTD_D1|
+ TSTD_G|
+ TSTD_H|
+ TSTD_I|
+ TSTD_K|TSTD_K1|
+ TSTD_L|
+ V4L2_STD_SECAM_LC |
+ TSTD_N|TSTD_Nc),
+ .frameperiod =
+ {
+ .numerator = 1,
+ .denominator= 25
+ },
+ .framelines = 625,
+ .reserved = {0,0,0,0}
+ }, {
+ .id = (TSTD_M|
+ V4L2_STD_NTSC_M_JP|
+ V4L2_STD_NTSC_M_KR),
+ .frameperiod =
+ {
+ .numerator = 1001,
+ .denominator= 30000
+ },
+ .framelines = 525,
+ .reserved = {0,0,0,0}
+ }, { // This is a total wild guess
+ .id = (TSTD_60),
+ .frameperiod =
+ {
+ .numerator = 1001,
+ .denominator= 30000
+ },
+ .framelines = 525,
+ .reserved = {0,0,0,0}
+ }, { // This is a total wild guess
+ .id = V4L2_STD_NTSC_443,
+ .frameperiod =
+ {
+ .numerator = 1001,
+ .denominator= 30000
+ },
+ .framelines = 525,
+ .reserved = {0,0,0,0}
+ }
+};
+
+#define generic_standards_cnt (sizeof(generic_standards)/sizeof(generic_standards[0]))
+
+static struct v4l2_standard *match_std(v4l2_std_id id)
+{
+ unsigned int idx;
+ for (idx = 0; idx < generic_standards_cnt; idx++) {
+ if (generic_standards[idx].id & id) {
+ return generic_standards + idx;
+ }
+ }
+ return 0;
+}
+
+static int pvr2_std_fill(struct v4l2_standard *std,v4l2_std_id id)
+{
+ struct v4l2_standard *template;
+ int idx;
+ unsigned int bcnt;
+ template = match_std(id);
+ if (!template) return 0;
+ idx = std->index;
+ memcpy(std,template,sizeof(*template));
+ std->index = idx;
+ std->id = id;
+ bcnt = pvr2_std_id_to_str(std->name,sizeof(std->name)-1,id);
+ std->name[bcnt] = 0;
+ pvr2_trace(PVR2_TRACE_INIT,"Set up standard idx=%u name=%s",
+ std->index,std->name);
+ return !0;
+}
+
+/* These are special cases of combined standards that we should enumerate
+ separately if the component pieces are present. */
+static v4l2_std_id std_mixes[] = {
+ V4L2_STD_PAL_B | V4L2_STD_PAL_G,
+ V4L2_STD_PAL_D | V4L2_STD_PAL_K,
+ V4L2_STD_SECAM_B | V4L2_STD_SECAM_G,
+ V4L2_STD_SECAM_D | V4L2_STD_SECAM_K,
+};
+
+struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
+ v4l2_std_id id)
+{
+ unsigned int std_cnt = 0;
+ unsigned int idx,bcnt,idx2;
+ v4l2_std_id idmsk,cmsk,fmsk;
+ struct v4l2_standard *stddefs;
+
+ if (pvrusb2_debug & PVR2_TRACE_INIT) {
+ char buf[50];
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),id);
+ pvr2_trace(
+ PVR2_TRACE_INIT,"Mapping standards mask=0x%x (%.*s)",
+ (int)id,bcnt,buf);
+ }
+
+ *countptr = 0;
+ std_cnt = 0;
+ fmsk = 0;
+ for (idmsk = 1, cmsk = id; cmsk; idmsk <<= 1) {
+ if (!(idmsk & cmsk)) continue;
+ cmsk &= ~idmsk;
+ if (match_std(idmsk)) {
+ std_cnt++;
+ continue;
+ }
+ fmsk |= idmsk;
+ }
+
+ for (idx2 = 0; idx2 < sizeof(std_mixes)/sizeof(std_mixes[0]); idx2++) {
+ if ((id & std_mixes[idx2]) == std_mixes[idx2]) std_cnt++;
+ }
+
+ if (fmsk) {
+ char buf[50];
+ bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
+ pvr2_trace(
+ PVR2_TRACE_ERROR_LEGS,
+ "WARNING:"
+ " Failed to classify the following standard(s): %.*s",
+ bcnt,buf);
+ }
+
+ pvr2_trace(PVR2_TRACE_INIT,"Setting up %u unique standard(s)",
+ std_cnt);
+ if (!std_cnt) return 0; // paranoia
+
+ stddefs = kmalloc(sizeof(struct v4l2_standard) * std_cnt,
+ GFP_KERNEL);
+ if (!stddefs) return 0;
+ memset(stddefs,0,sizeof(struct v4l2_standard) * std_cnt);
+ for (idx = 0; idx < std_cnt; idx++) stddefs[idx].index = idx;
+
+ idx = 0;
+
+ /* Enumerate potential special cases */
+ for (idx2 = 0; ((idx2 < sizeof(std_mixes)/sizeof(std_mixes[0])) &&
+ (idx < std_cnt)); idx2++) {
+ if (!(id & std_mixes[idx2])) continue;
+ if (pvr2_std_fill(stddefs+idx,std_mixes[idx2])) idx++;
+ }
+ /* Now enumerate individual pieces */
+ for (idmsk = 1, cmsk = id; cmsk && (idx < std_cnt); idmsk <<= 1) {
+ if (!(idmsk & cmsk)) continue;
+ cmsk &= ~idmsk;
+ if (!pvr2_std_fill(stddefs+idx,idmsk)) continue;
+ idx++;
+ }
+
+ *countptr = std_cnt;
+ return stddefs;
+}
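A minimal usage sketch for the enumeration built above; the kfree() follows from the kmalloc() allocation here, and the mask value is an arbitrary example rather than something taken from the driver:

    unsigned int cnt = 0;
    struct v4l2_standard *ary;

    ary = pvr2_std_create_enum(&cnt, V4L2_STD_PAL_BG | V4L2_STD_NTSC_M);
    if (ary) {
            unsigned int idx;
            for (idx = 0; idx < cnt; idx++) {
                    /* e.g. copy ary[idx] out in response to VIDIOC_ENUMSTD */
            }
            kfree(ary);  /* caller owns the heap allocation */
    }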
+
+v4l2_std_id pvr2_std_get_usable(void)
+{
+ return CSTD_ALL;
+}
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-std.h b/drivers/media/video/pvrusb2/pvrusb2-std.h
new file mode 100644
index 00000000000000..07c39937534115
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-std.h
@@ -0,0 +1,60 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_STD_H
+#define __PVRUSB2_STD_H
+
+#include <linux/videodev2.h>
+
+// Convert string describing one or more video standards into a mask of V4L
+// standard bits. Return true if conversion succeeds otherwise return
+// false. String is expected to be of the form: C1-x/y;C2-a/b where C1 and
+// C2 are color system names (e.g. "PAL", "NTSC") and x, y, a, and b are
+// modulation schemes (e.g. "M", "B", "G", etc).
+int pvr2_std_str_to_id(v4l2_std_id *idPtr,const char *bufPtr,
+ unsigned int bufSize);
+
+// Convert any arbitrary set of video standard bits into an unambiguous
+// readable string. Return value is the number of bytes consumed in the
+// buffer. The formatted string is of a form that can be parsed by our
+// sibling pvr2_std_str_to_id() function.
+unsigned int pvr2_std_id_to_str(char *bufPtr, unsigned int bufSize,
+ v4l2_std_id id);
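For illustration only (the mask and buffer size are arbitrary), these two declarations allow a standards mask to be round-tripped through its textual form, which would look something like "PAL-B/G;SECAM-L":

    char buf[64];
    v4l2_std_id id = V4L2_STD_PAL_B | V4L2_STD_PAL_G | V4L2_STD_SECAM_L;
    v4l2_std_id id2 = 0;
    unsigned int len;

    len = pvr2_std_id_to_str(buf, sizeof(buf) - 1, id);
    buf[len] = 0;
    if (pvr2_std_str_to_id(&id2, buf, len)) {
            /* id2 should now hold the same bits as id */
    }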
+
+// Create an array of suitable v4l2_standard structures given a bit mask of
+// video standards to support. The array is allocated from the heap, and
+// the number of elements is returned in the first argument.
+struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
+ v4l2_std_id id);
+
+// Return mask of which video standard bits are valid
+v4l2_std_id pvr2_std_get_usable(void);
+
+#endif /* __PVRUSB2_STD_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.c b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
new file mode 100644
index 00000000000000..c6e6523d74b43d
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.c
@@ -0,0 +1,865 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/semaphore.h>
+#include "pvrusb2-sysfs.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2-debug.h"
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+#include "pvrusb2-debugifc.h"
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+#define pvr2_sysfs_trace(...) pvr2_trace(PVR2_TRACE_SYSFS,__VA_ARGS__)
+
+struct pvr2_sysfs {
+ struct pvr2_channel channel;
+ struct class_device *class_dev;
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+ struct pvr2_sysfs_debugifc *debugifc;
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+ struct pvr2_sysfs_ctl_item *item_first;
+ struct pvr2_sysfs_ctl_item *item_last;
+ struct sysfs_ops kops;
+ struct kobj_type ktype;
+ struct class_device_attribute attr_v4l_minor_number;
+ struct class_device_attribute attr_unit_number;
+};
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+struct pvr2_sysfs_debugifc {
+ struct class_device_attribute attr_debugcmd;
+ struct class_device_attribute attr_debuginfo;
+};
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+struct pvr2_sysfs_ctl_item {
+ struct class_device_attribute attr_name;
+ struct class_device_attribute attr_type;
+ struct class_device_attribute attr_min;
+ struct class_device_attribute attr_max;
+ struct class_device_attribute attr_enum;
+ struct class_device_attribute attr_bits;
+ struct class_device_attribute attr_val;
+ struct class_device_attribute attr_custom;
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *chptr;
+ struct pvr2_sysfs_ctl_item *item_next;
+ struct attribute *attr_gen[7];
+ struct attribute_group grp;
+ char name[80];
+};
+
+struct pvr2_sysfs_class {
+ struct class class;
+};
+
+static ssize_t show_name(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ const char *name;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ name = pvr2_ctrl_get_desc(cptr);
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_name(cid=%d) is %s",sfp,id,name);
+
+ if (!name) return -EINVAL;
+
+ return scnprintf(buf,PAGE_SIZE,"%s\n",name);
+}
+
+static ssize_t show_type(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ const char *name;
+ enum pvr2_ctl_type tp;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ tp = pvr2_ctrl_get_type(cptr);
+ switch (tp) {
+ case pvr2_ctl_int: name = "integer"; break;
+ case pvr2_ctl_enum: name = "enum"; break;
+ case pvr2_ctl_bitmask: name = "bitmask"; break;
+ case pvr2_ctl_bool: name = "boolean"; break;
+ default: name = "?"; break;
+ }
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_type(cid=%d) is %s",sfp,id,name);
+
+ if (!name) return -EINVAL;
+
+ return scnprintf(buf,PAGE_SIZE,"%s\n",name);
+}
+
+static ssize_t show_min(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ long val;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ val = pvr2_ctrl_get_min(cptr);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_min(cid=%d) is %ld",sfp,id,val);
+
+ return scnprintf(buf,PAGE_SIZE,"%ld\n",val);
+}
+
+static ssize_t show_max(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ long val;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ val = pvr2_ctrl_get_max(cptr);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_max(cid=%d) is %ld",sfp,id,val);
+
+ return scnprintf(buf,PAGE_SIZE,"%ld\n",val);
+}
+
+static ssize_t show_val_norm(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ int val,ret;
+ unsigned int cnt = 0;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ ret = pvr2_ctrl_get_value(cptr,&val);
+ if (ret < 0) return ret;
+
+ ret = pvr2_ctrl_value_to_sym(cptr,~0,val,
+ buf,PAGE_SIZE-1,&cnt);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_norm(cid=%d) is %.*s (%d)",
+ sfp,id,cnt,buf,val);
+ buf[cnt] = '\n';
+ return cnt+1;
+}
+
+static ssize_t show_val_custom(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ int val,ret;
+ unsigned int cnt = 0;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+
+ ret = pvr2_ctrl_get_value(cptr,&val);
+ if (ret < 0) return ret;
+
+ ret = pvr2_ctrl_custom_value_to_sym(cptr,~0,val,
+ buf,PAGE_SIZE-1,&cnt);
+
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_val_custom(cid=%d) is %.*s (%d)",
+ sfp,id,cnt,buf,val);
+ buf[cnt] = '\n';
+ return cnt+1;
+}
+
+static ssize_t show_enum(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ long val;
+ unsigned int bcnt,ccnt,ecnt;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ ecnt = pvr2_ctrl_get_cnt(cptr);
+ bcnt = 0;
+ for (val = 0; val < ecnt; val++) {
+ pvr2_ctrl_get_valname(cptr,val,buf+bcnt,PAGE_SIZE-bcnt,&ccnt);
+ if (!ccnt) continue;
+ bcnt += ccnt;
+ if (bcnt >= PAGE_SIZE) break;
+ buf[bcnt] = '\n';
+ bcnt++;
+ }
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_enum(cid=%d)",sfp,id);
+ return bcnt;
+}
+
+static ssize_t show_bits(int id,struct class_device *class_dev,char *buf)
+{
+ struct pvr2_ctrl *cptr;
+ struct pvr2_sysfs *sfp;
+ int valid_bits,msk;
+ unsigned int bcnt,ccnt;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (!cptr) return -EINVAL;
+ valid_bits = pvr2_ctrl_get_mask(cptr);
+ bcnt = 0;
+ for (msk = 1; valid_bits; msk <<= 1) {
+ if (!(msk & valid_bits)) continue;
+ valid_bits &= ~msk;
+ pvr2_ctrl_get_valname(cptr,msk,buf+bcnt,PAGE_SIZE-bcnt,&ccnt);
+ bcnt += ccnt;
+ if (bcnt >= PAGE_SIZE) break;
+ buf[bcnt] = '\n';
+ bcnt++;
+ }
+ pvr2_sysfs_trace("pvr2_sysfs(%p) show_bits(cid=%d)",sfp,id);
+ return bcnt;
+}
+
+static int store_val_any(int id,int customfl,struct pvr2_sysfs *sfp,
+ const char *buf,unsigned int count)
+{
+ struct pvr2_ctrl *cptr;
+ int ret;
+ int mask,val;
+
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,id);
+ if (customfl) {
+ ret = pvr2_ctrl_custom_sym_to_value(cptr,buf,count,&mask,&val);
+ } else {
+ ret = pvr2_ctrl_sym_to_value(cptr,buf,count,&mask,&val);
+ }
+ if (ret < 0) return ret;
+ ret = pvr2_ctrl_set_mask_value(cptr,mask,val);
+ pvr2_hdw_commit_ctl(sfp->channel.hdw);
+ return ret;
+}
+
+static ssize_t store_val_norm(int id,struct class_device *class_dev,
+ const char *buf,size_t count)
+{
+ struct pvr2_sysfs *sfp;
+ int ret;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ ret = store_val_any(id,0,sfp,buf,count);
+ if (!ret) ret = count;
+ return ret;
+}
+
+static ssize_t store_val_custom(int id,struct class_device *class_dev,
+ const char *buf,size_t count)
+{
+ struct pvr2_sysfs *sfp;
+ int ret;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ ret = store_val_any(id,1,sfp,buf,count);
+ if (!ret) ret = count;
+ return ret;
+}
+
+/*
+ Mike Isely <isely@pobox.com> 30-April-2005
+
+ This next batch of horrible preprocessor hackery is needed because the
+ kernel's class_device_attribute mechanism fails to pass the actual
+ attribute through to the show / store functions, which means we have no
+ way to package up any attribute-specific parameters, like for example the
+ control id. So we work around this brain-damage by encoding the control
+ id into the show / store functions themselves and pick the function based
+ on the control id we're setting up. These macros try to ease the pain.
+ Yuck.
+*/
+
+#define CREATE_SHOW_INSTANCE(sf_name,ctl_id) \
+static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,char *buf) \
+{ return sf_name(ctl_id,class_dev,buf); }
+
+#define CREATE_STORE_INSTANCE(sf_name,ctl_id) \
+static ssize_t sf_name##_##ctl_id(struct class_device *class_dev,const char *buf,size_t count) \
+{ return sf_name(ctl_id,class_dev,buf,count); }
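To make the pattern concrete, CREATE_SHOW_INSTANCE(show_name,5) expands to roughly the following wrapper (the id 5 is an arbitrary example; the real instances are generated in bulk by CREATE_BATCH below):

    static ssize_t show_name_5(struct class_device *class_dev, char *buf)
    {
            return show_name(5, class_dev, buf);
    }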
+
+#define CREATE_BATCH(ctl_id) \
+CREATE_SHOW_INSTANCE(show_name,ctl_id) \
+CREATE_SHOW_INSTANCE(show_type,ctl_id) \
+CREATE_SHOW_INSTANCE(show_min,ctl_id) \
+CREATE_SHOW_INSTANCE(show_max,ctl_id) \
+CREATE_SHOW_INSTANCE(show_val_norm,ctl_id) \
+CREATE_SHOW_INSTANCE(show_val_custom,ctl_id) \
+CREATE_SHOW_INSTANCE(show_enum,ctl_id) \
+CREATE_SHOW_INSTANCE(show_bits,ctl_id) \
+CREATE_STORE_INSTANCE(store_val_norm,ctl_id) \
+CREATE_STORE_INSTANCE(store_val_custom,ctl_id) \
+
+CREATE_BATCH(0)
+CREATE_BATCH(1)
+CREATE_BATCH(2)
+CREATE_BATCH(3)
+CREATE_BATCH(4)
+CREATE_BATCH(5)
+CREATE_BATCH(6)
+CREATE_BATCH(7)
+CREATE_BATCH(8)
+CREATE_BATCH(9)
+CREATE_BATCH(10)
+CREATE_BATCH(11)
+CREATE_BATCH(12)
+CREATE_BATCH(13)
+CREATE_BATCH(14)
+CREATE_BATCH(15)
+CREATE_BATCH(16)
+CREATE_BATCH(17)
+CREATE_BATCH(18)
+CREATE_BATCH(19)
+CREATE_BATCH(20)
+CREATE_BATCH(21)
+CREATE_BATCH(22)
+CREATE_BATCH(23)
+CREATE_BATCH(24)
+CREATE_BATCH(25)
+CREATE_BATCH(26)
+CREATE_BATCH(27)
+CREATE_BATCH(28)
+CREATE_BATCH(29)
+CREATE_BATCH(30)
+CREATE_BATCH(31)
+CREATE_BATCH(32)
+CREATE_BATCH(33)
+CREATE_BATCH(34)
+CREATE_BATCH(35)
+CREATE_BATCH(36)
+CREATE_BATCH(37)
+CREATE_BATCH(38)
+CREATE_BATCH(39)
+CREATE_BATCH(40)
+CREATE_BATCH(41)
+CREATE_BATCH(42)
+CREATE_BATCH(43)
+CREATE_BATCH(44)
+CREATE_BATCH(45)
+CREATE_BATCH(46)
+CREATE_BATCH(47)
+CREATE_BATCH(48)
+CREATE_BATCH(49)
+CREATE_BATCH(50)
+CREATE_BATCH(51)
+CREATE_BATCH(52)
+CREATE_BATCH(53)
+CREATE_BATCH(54)
+CREATE_BATCH(55)
+CREATE_BATCH(56)
+CREATE_BATCH(57)
+CREATE_BATCH(58)
+CREATE_BATCH(59)
+
+struct pvr2_sysfs_func_set {
+ ssize_t (*show_name)(struct class_device *,char *);
+ ssize_t (*show_type)(struct class_device *,char *);
+ ssize_t (*show_min)(struct class_device *,char *);
+ ssize_t (*show_max)(struct class_device *,char *);
+ ssize_t (*show_enum)(struct class_device *,char *);
+ ssize_t (*show_bits)(struct class_device *,char *);
+ ssize_t (*show_val_norm)(struct class_device *,char *);
+ ssize_t (*store_val_norm)(struct class_device *,
+ const char *,size_t);
+ ssize_t (*show_val_custom)(struct class_device *,char *);
+ ssize_t (*store_val_custom)(struct class_device *,
+ const char *,size_t);
+};
+
+#define INIT_BATCH(ctl_id) \
+[ctl_id] = { \
+ .show_name = show_name_##ctl_id, \
+ .show_type = show_type_##ctl_id, \
+ .show_min = show_min_##ctl_id, \
+ .show_max = show_max_##ctl_id, \
+ .show_enum = show_enum_##ctl_id, \
+ .show_bits = show_bits_##ctl_id, \
+ .show_val_norm = show_val_norm_##ctl_id, \
+ .store_val_norm = store_val_norm_##ctl_id, \
+ .show_val_custom = show_val_custom_##ctl_id, \
+ .store_val_custom = store_val_custom_##ctl_id, \
+} \
+
+static struct pvr2_sysfs_func_set funcs[] = {
+ INIT_BATCH(0),
+ INIT_BATCH(1),
+ INIT_BATCH(2),
+ INIT_BATCH(3),
+ INIT_BATCH(4),
+ INIT_BATCH(5),
+ INIT_BATCH(6),
+ INIT_BATCH(7),
+ INIT_BATCH(8),
+ INIT_BATCH(9),
+ INIT_BATCH(10),
+ INIT_BATCH(11),
+ INIT_BATCH(12),
+ INIT_BATCH(13),
+ INIT_BATCH(14),
+ INIT_BATCH(15),
+ INIT_BATCH(16),
+ INIT_BATCH(17),
+ INIT_BATCH(18),
+ INIT_BATCH(19),
+ INIT_BATCH(20),
+ INIT_BATCH(21),
+ INIT_BATCH(22),
+ INIT_BATCH(23),
+ INIT_BATCH(24),
+ INIT_BATCH(25),
+ INIT_BATCH(26),
+ INIT_BATCH(27),
+ INIT_BATCH(28),
+ INIT_BATCH(29),
+ INIT_BATCH(30),
+ INIT_BATCH(31),
+ INIT_BATCH(32),
+ INIT_BATCH(33),
+ INIT_BATCH(34),
+ INIT_BATCH(35),
+ INIT_BATCH(36),
+ INIT_BATCH(37),
+ INIT_BATCH(38),
+ INIT_BATCH(39),
+ INIT_BATCH(40),
+ INIT_BATCH(41),
+ INIT_BATCH(42),
+ INIT_BATCH(43),
+ INIT_BATCH(44),
+ INIT_BATCH(45),
+ INIT_BATCH(46),
+ INIT_BATCH(47),
+ INIT_BATCH(48),
+ INIT_BATCH(49),
+ INIT_BATCH(50),
+ INIT_BATCH(51),
+ INIT_BATCH(52),
+ INIT_BATCH(53),
+ INIT_BATCH(54),
+ INIT_BATCH(55),
+ INIT_BATCH(56),
+ INIT_BATCH(57),
+ INIT_BATCH(58),
+ INIT_BATCH(59),
+};
+
+
+static void pvr2_sysfs_add_control(struct pvr2_sysfs *sfp,int ctl_id)
+{
+ struct pvr2_sysfs_ctl_item *cip;
+ struct pvr2_sysfs_func_set *fp;
+ struct pvr2_ctrl *cptr;
+ unsigned int cnt,acnt;
+
+ if ((ctl_id < 0) || (ctl_id >= (sizeof(funcs)/sizeof(funcs[0])))) {
+ return;
+ }
+
+ fp = funcs + ctl_id;
+ cptr = pvr2_hdw_get_ctrl_by_index(sfp->channel.hdw,ctl_id);
+ if (!cptr) return;
+
+ cip = kmalloc(sizeof(*cip),GFP_KERNEL);
+ if (!cip) return;
+ memset(cip,0,sizeof(*cip));
+ pvr2_sysfs_trace("Creating pvr2_sysfs_ctl_item id=%p",cip);
+
+ cip->cptr = cptr;
+
+ cip->chptr = sfp;
+ cip->item_next = 0;
+ if (sfp->item_last) {
+ sfp->item_last->item_next = cip;
+ } else {
+ sfp->item_first = cip;
+ }
+ sfp->item_last = cip;
+
+ cip->attr_name.attr.owner = THIS_MODULE;
+ cip->attr_name.attr.name = "name";
+ cip->attr_name.attr.mode = S_IRUGO;
+ cip->attr_name.show = fp->show_name;
+
+ cip->attr_type.attr.owner = THIS_MODULE;
+ cip->attr_type.attr.name = "type";
+ cip->attr_type.attr.mode = S_IRUGO;
+ cip->attr_type.show = fp->show_type;
+
+ cip->attr_min.attr.owner = THIS_MODULE;
+ cip->attr_min.attr.name = "min_val";
+ cip->attr_min.attr.mode = S_IRUGO;
+ cip->attr_min.show = fp->show_min;
+
+ cip->attr_max.attr.owner = THIS_MODULE;
+ cip->attr_max.attr.name = "max_val";
+ cip->attr_max.attr.mode = S_IRUGO;
+ cip->attr_max.show = fp->show_max;
+
+ cip->attr_val.attr.owner = THIS_MODULE;
+ cip->attr_val.attr.name = "cur_val";
+ cip->attr_val.attr.mode = S_IRUGO;
+
+ cip->attr_custom.attr.owner = THIS_MODULE;
+ cip->attr_custom.attr.name = "custom_val";
+ cip->attr_custom.attr.mode = S_IRUGO;
+
+ cip->attr_enum.attr.owner = THIS_MODULE;
+ cip->attr_enum.attr.name = "enum_val";
+ cip->attr_enum.attr.mode = S_IRUGO;
+ cip->attr_enum.show = fp->show_enum;
+
+ cip->attr_bits.attr.owner = THIS_MODULE;
+ cip->attr_bits.attr.name = "bit_val";
+ cip->attr_bits.attr.mode = S_IRUGO;
+ cip->attr_bits.show = fp->show_bits;
+
+ if (pvr2_ctrl_is_writable(cptr)) {
+ cip->attr_val.attr.mode |= S_IWUSR|S_IWGRP;
+ cip->attr_custom.attr.mode |= S_IWUSR|S_IWGRP;
+ }
+
+ acnt = 0;
+ cip->attr_gen[acnt++] = &cip->attr_name.attr;
+ cip->attr_gen[acnt++] = &cip->attr_type.attr;
+ cip->attr_gen[acnt++] = &cip->attr_val.attr;
+ cip->attr_val.show = fp->show_val_norm;
+ cip->attr_val.store = fp->store_val_norm;
+ if (pvr2_ctrl_has_custom_symbols(cptr)) {
+ cip->attr_gen[acnt++] = &cip->attr_custom.attr;
+ cip->attr_custom.show = fp->show_val_custom;
+ cip->attr_custom.store = fp->store_val_custom;
+ }
+ switch (pvr2_ctrl_get_type(cptr)) {
+ case pvr2_ctl_enum:
+ // Control is an enumeration
+ cip->attr_gen[acnt++] = &cip->attr_enum.attr;
+ break;
+ case pvr2_ctl_int:
+ // Control is an integer
+ cip->attr_gen[acnt++] = &cip->attr_min.attr;
+ cip->attr_gen[acnt++] = &cip->attr_max.attr;
+ break;
+ case pvr2_ctl_bitmask:
+ // Control is a bitmask
+ cip->attr_gen[acnt++] = &cip->attr_bits.attr;
+ break;
+ default: break;
+ }
+
+ cnt = scnprintf(cip->name,sizeof(cip->name)-1,"ctl_%s",
+ pvr2_ctrl_get_name(cptr));
+ cip->name[cnt] = 0;
+ cip->grp.name = cip->name;
+ cip->grp.attrs = cip->attr_gen;
+
+ sysfs_create_group(&sfp->class_dev->kobj,&cip->grp);
+}
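As an illustration of what the attribute group assembled above ends up looking like, an integer control (take a hypothetical "brightness" control on a unit registered as sn-1234) would appear in sysfs roughly as:

    /sys/class/pvrusb2/sn-1234/ctl_brightness/name
    /sys/class/pvrusb2/sn-1234/ctl_brightness/type
    /sys/class/pvrusb2/sn-1234/ctl_brightness/min_val
    /sys/class/pvrusb2/sn-1234/ctl_brightness/max_val
    /sys/class/pvrusb2/sn-1234/ctl_brightness/cur_val

with cur_val also writable when pvr2_ctrl_is_writable() reports the control as writable.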
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+static ssize_t debuginfo_show(struct class_device *,char *);
+static ssize_t debugcmd_show(struct class_device *,char *);
+static ssize_t debugcmd_store(struct class_device *,const char *,size_t count);
+
+static void pvr2_sysfs_add_debugifc(struct pvr2_sysfs *sfp)
+{
+ struct pvr2_sysfs_debugifc *dip;
+ dip = kmalloc(sizeof(*dip),GFP_KERNEL);
+ if (!dip) return;
+ memset(dip,0,sizeof(*dip));
+ dip->attr_debugcmd.attr.owner = THIS_MODULE;
+ dip->attr_debugcmd.attr.name = "debugcmd";
+ dip->attr_debugcmd.attr.mode = S_IRUGO|S_IWUSR|S_IWGRP;
+ dip->attr_debugcmd.show = debugcmd_show;
+ dip->attr_debugcmd.store = debugcmd_store;
+ dip->attr_debuginfo.attr.owner = THIS_MODULE;
+ dip->attr_debuginfo.attr.name = "debuginfo";
+ dip->attr_debuginfo.attr.mode = S_IRUGO;
+ dip->attr_debuginfo.show = debuginfo_show;
+ sfp->debugifc = dip;
+ class_device_create_file(sfp->class_dev,&dip->attr_debugcmd);
+ class_device_create_file(sfp->class_dev,&dip->attr_debuginfo);
+}
+
+
+static void pvr2_sysfs_tear_down_debugifc(struct pvr2_sysfs *sfp)
+{
+ if (!sfp->debugifc) return;
+ class_device_remove_file(sfp->class_dev,
+ &sfp->debugifc->attr_debuginfo);
+ class_device_remove_file(sfp->class_dev,&sfp->debugifc->attr_debugcmd);
+ kfree(sfp->debugifc);
+ sfp->debugifc = 0;
+}
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+
+static void pvr2_sysfs_add_controls(struct pvr2_sysfs *sfp)
+{
+ unsigned int idx,cnt;
+ cnt = pvr2_hdw_get_ctrl_count(sfp->channel.hdw);
+ for (idx = 0; idx < cnt; idx++) {
+ pvr2_sysfs_add_control(sfp,idx);
+ }
+}
+
+
+static void pvr2_sysfs_tear_down_controls(struct pvr2_sysfs *sfp)
+{
+ struct pvr2_sysfs_ctl_item *cip1,*cip2;
+ for (cip1 = sfp->item_first; cip1; cip1 = cip2) {
+ cip2 = cip1->item_next;
+ sysfs_remove_group(&sfp->class_dev->kobj,&cip1->grp);
+ pvr2_sysfs_trace("Destroying pvr2_sysfs_ctl_item id=%p",cip1);
+ kfree(cip1);
+ }
+}
+
+
+static void pvr2_sysfs_class_release(struct class *class)
+{
+ struct pvr2_sysfs_class *clp;
+ clp = container_of(class,struct pvr2_sysfs_class,class);
+ pvr2_sysfs_trace("Destroying pvr2_sysfs_class id=%p",clp);
+ kfree(clp);
+}
+
+
+static void pvr2_sysfs_release(struct class_device *class_dev)
+{
+ pvr2_sysfs_trace("Releasing class_dev id=%p",class_dev);
+ kfree(class_dev);
+}
+
+
+static void class_dev_destroy(struct pvr2_sysfs *sfp)
+{
+ if (!sfp->class_dev) return;
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+ pvr2_sysfs_tear_down_debugifc(sfp);
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+ pvr2_sysfs_tear_down_controls(sfp);
+ class_device_remove_file(sfp->class_dev,&sfp->attr_v4l_minor_number);
+ class_device_remove_file(sfp->class_dev,&sfp->attr_unit_number);
+ pvr2_sysfs_trace("Destroying class_dev id=%p",sfp->class_dev);
+ sfp->class_dev->class_data = 0;
+ class_device_unregister(sfp->class_dev);
+ sfp->class_dev = 0;
+}
+
+
+static ssize_t v4l_minor_number_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ return scnprintf(buf,PAGE_SIZE,"%d\n",
+ pvr2_hdw_v4l_get_minor_number(sfp->channel.hdw));
+}
+
+
+static ssize_t unit_number_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ return scnprintf(buf,PAGE_SIZE,"%d\n",
+ pvr2_hdw_get_unit_number(sfp->channel.hdw));
+}
+
+
+static void class_dev_create(struct pvr2_sysfs *sfp,
+ struct pvr2_sysfs_class *class_ptr)
+{
+ struct usb_device *usb_dev;
+ struct class_device *class_dev;
+ usb_dev = pvr2_hdw_get_dev(sfp->channel.hdw);
+ if (!usb_dev) return;
+ class_dev = kmalloc(sizeof(*class_dev),GFP_KERNEL);
+ if (!class_dev) return;
+ memset(class_dev,0,sizeof(*class_dev));
+
+ pvr2_sysfs_trace("Creating class_dev id=%p",class_dev);
+
+ class_dev->class = &class_ptr->class;
+ if (pvr2_hdw_get_sn(sfp->channel.hdw)) {
+ snprintf(class_dev->class_id,BUS_ID_SIZE,"sn-%lu",
+ pvr2_hdw_get_sn(sfp->channel.hdw));
+ } else if (pvr2_hdw_get_unit_number(sfp->channel.hdw) >= 0) {
+ snprintf(class_dev->class_id,BUS_ID_SIZE,"unit-%c",
+ pvr2_hdw_get_unit_number(sfp->channel.hdw) + 'a');
+ } else {
+ kfree(class_dev);
+ return;
+ }
+
+ class_dev->dev = &usb_dev->dev;
+
+ sfp->class_dev = class_dev;
+ class_dev->class_data = sfp;
+ class_device_register(class_dev);
+
+ sfp->attr_v4l_minor_number.attr.owner = THIS_MODULE;
+ sfp->attr_v4l_minor_number.attr.name = "v4l_minor_number";
+ sfp->attr_v4l_minor_number.attr.mode = S_IRUGO;
+ sfp->attr_v4l_minor_number.show = v4l_minor_number_show;
+ sfp->attr_v4l_minor_number.store = 0;
+ class_device_create_file(sfp->class_dev,&sfp->attr_v4l_minor_number);
+ sfp->attr_unit_number.attr.owner = THIS_MODULE;
+ sfp->attr_unit_number.attr.name = "unit_number";
+ sfp->attr_unit_number.attr.mode = S_IRUGO;
+ sfp->attr_unit_number.show = unit_number_show;
+ sfp->attr_unit_number.store = 0;
+ class_device_create_file(sfp->class_dev,&sfp->attr_unit_number);
+
+ pvr2_sysfs_add_controls(sfp);
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+ pvr2_sysfs_add_debugifc(sfp);
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+}
+
+
+static void pvr2_sysfs_internal_check(struct pvr2_channel *chp)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = container_of(chp,struct pvr2_sysfs,channel);
+ if (!sfp->channel.mc_head->disconnect_flag) return;
+ pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_sysfs id=%p",sfp);
+ class_dev_destroy(sfp);
+ pvr2_channel_done(&sfp->channel);
+ kfree(sfp);
+}
+
+
+struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *mp,
+ struct pvr2_sysfs_class *class_ptr)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = kmalloc(sizeof(*sfp),GFP_KERNEL);
+ if (!sfp) return sfp;
+ memset(sfp,0,sizeof(*sfp));
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_sysfs id=%p",sfp);
+ pvr2_channel_init(&sfp->channel,mp);
+ sfp->channel.check_func = pvr2_sysfs_internal_check;
+
+ class_dev_create(sfp,class_ptr);
+ return sfp;
+}
+
+
+static int pvr2_sysfs_hotplug(struct class_device *cd,char **envp,
+ int numenvp,char *buf,int size)
+{
+ /* Even though we don't do anything here, we still need this function
+ because sysfs will still try to call it. */
+ return 0;
+}
+
+struct pvr2_sysfs_class *pvr2_sysfs_class_create(void)
+{
+ struct pvr2_sysfs_class *clp;
+ clp = kmalloc(sizeof(*clp),GFP_KERNEL);
+ if (!clp) return clp;
+ memset(clp,0,sizeof(*clp));
+ pvr2_sysfs_trace("Creating pvr2_sysfs_class id=%p",clp);
+ clp->class.name = "pvrusb2";
+ clp->class.class_release = pvr2_sysfs_class_release;
+ clp->class.release = pvr2_sysfs_release;
+ clp->class.uevent = pvr2_sysfs_hotplug;
+ if (class_register(&clp->class)) {
+ pvr2_sysfs_trace(
+ "Registration failed for pvr2_sysfs_class id=%p",clp);
+ kfree(clp);
+ clp = 0;
+ }
+ return clp;
+}
+
+
+void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *clp)
+{
+ class_unregister(&clp->class);
+}
+
+
+#ifdef CONFIG_VIDEO_PVRUSB2_DEBUGIFC
+static ssize_t debuginfo_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ pvr2_hdw_trigger_module_log(sfp->channel.hdw);
+ return pvr2_debugifc_print_info(sfp->channel.hdw,buf,PAGE_SIZE);
+}
+
+
+static ssize_t debugcmd_show(struct class_device *class_dev,char *buf)
+{
+ struct pvr2_sysfs *sfp;
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+ return pvr2_debugifc_print_status(sfp->channel.hdw,buf,PAGE_SIZE);
+}
+
+
+static ssize_t debugcmd_store(struct class_device *class_dev,
+ const char *buf,size_t count)
+{
+ struct pvr2_sysfs *sfp;
+ int ret;
+
+ sfp = (struct pvr2_sysfs *)class_dev->class_data;
+ if (!sfp) return -EINVAL;
+
+ ret = pvr2_debugifc_docmd(sfp->channel.hdw,buf,count);
+ if (ret < 0) return ret;
+ return count;
+}
+#endif /* CONFIG_VIDEO_PVRUSB2_DEBUGIFC */
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-sysfs.h b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h
new file mode 100644
index 00000000000000..ff9373b47f8fb4
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-sysfs.h
@@ -0,0 +1,47 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_SYSFS_H
+#define __PVRUSB2_SYSFS_H
+
+#include <linux/list.h>
+#include <linux/sysfs.h>
+#include "pvrusb2-context.h"
+
+struct pvr2_sysfs;
+struct pvr2_sysfs_class;
+
+struct pvr2_sysfs_class *pvr2_sysfs_class_create(void);
+void pvr2_sysfs_class_destroy(struct pvr2_sysfs_class *);
+
+struct pvr2_sysfs *pvr2_sysfs_create(struct pvr2_context *,
+ struct pvr2_sysfs_class *);
+
+#endif /* __PVRUSB2_SYSFS_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.c b/drivers/media/video/pvrusb2/pvrusb2-tuner.c
new file mode 100644
index 00000000000000..f4aba8144ce058
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.c
@@ -0,0 +1,122 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include "pvrusb2.h"
+#include "pvrusb2-util.h"
+#include "pvrusb2-tuner.h"
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/tuner.h>
+#include <media/v4l2-common.h>
+
+struct pvr2_tuner_handler {
+ struct pvr2_hdw *hdw;
+ struct pvr2_i2c_client *client;
+ struct pvr2_i2c_handler i2c_handler;
+ int type_update_fl;
+};
+
+
+static void set_type(struct pvr2_tuner_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct tuner_setup setup;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c tuner set_type(%d)",hdw->tuner_type);
+ if (((int)(hdw->tuner_type)) < 0) return;
+
+ setup.addr = ADDR_UNSET;
+ setup.type = hdw->tuner_type;
+ setup.mode_mask = T_RADIO | T_ANALOG_TV;
+ /* We may really want mode_mask to be just T_ANALOG_TV for now */
+ pvr2_i2c_client_cmd(ctxt->client,TUNER_SET_TYPE_ADDR,&setup);
+ ctxt->type_update_fl = 0;
+}
+
+
+static int tuner_check(struct pvr2_tuner_handler *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ if (hdw->tuner_updated) ctxt->type_update_fl = !0;
+ return ctxt->type_update_fl != 0;
+}
+
+
+static void tuner_update(struct pvr2_tuner_handler *ctxt)
+{
+ if (ctxt->type_update_fl) set_type(ctxt);
+}
+
+
+static void pvr2_tuner_detach(struct pvr2_tuner_handler *ctxt)
+{
+ ctxt->client->handler = 0;
+ kfree(ctxt);
+}
+
+
+static unsigned int pvr2_tuner_describe(struct pvr2_tuner_handler *ctxt,char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-tuner");
+}
+
+
+static const struct pvr2_i2c_handler_functions tuner_funcs = {
+ .detach = (void (*)(void *))pvr2_tuner_detach,
+ .check = (int (*)(void *))tuner_check,
+ .update = (void (*)(void *))tuner_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))pvr2_tuner_describe,
+};
+
+
+int pvr2_i2c_tuner_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_tuner_handler *ctxt;
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->i2c_handler.func_data = ctxt;
+ ctxt->i2c_handler.func_table = &tuner_funcs;
+ ctxt->type_update_fl = !0;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ cp->handler = &ctxt->i2c_handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x tuner handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-tuner.h b/drivers/media/video/pvrusb2/pvrusb2-tuner.h
new file mode 100644
index 00000000000000..556f12aa916068
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-tuner.h
@@ -0,0 +1,38 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_TUNER_H
+#define __PVRUSB2_TUNER_H
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_tuner_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+#endif /* __PVRUSB2_TUNER_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-util.h b/drivers/media/video/pvrusb2/pvrusb2-util.h
new file mode 100644
index 00000000000000..e53aee416f5659
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-util.h
@@ -0,0 +1,63 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_UTIL_H
+#define __PVRUSB2_UTIL_H
+
+#define PVR2_DECOMPOSE_LE(t,i,d) \
+ do { \
+ (t)[i] = (d) & 0xff;\
+ (t)[i+1] = ((d) >> 8) & 0xff;\
+ (t)[i+2] = ((d) >> 16) & 0xff;\
+ (t)[i+3] = ((d) >> 24) & 0xff;\
+ } while(0)
+
+#define PVR2_DECOMPOSE_BE(t,i,d) \
+ do { \
+ (t)[i+3] = (d) & 0xff;\
+ (t)[i+2] = ((d) >> 8) & 0xff;\
+ (t)[i+1] = ((d) >> 16) & 0xff;\
+ (t)[i] = ((d) >> 24) & 0xff;\
+ } while(0)
+
+#define PVR2_COMPOSE_LE(t,i) \
+ ((((u32)((t)[i+3])) << 24) | \
+ (((u32)((t)[i+2])) << 16) | \
+ (((u32)((t)[i+1])) << 8) | \
+ ((u32)((t)[i])))
+
+#define PVR2_COMPOSE_BE(t,i) \
+ ((((u32)((t)[i])) << 24) | \
+ (((u32)((t)[i+1])) << 16) | \
+ (((u32)((t)[i+2])) << 8) | \
+ ((u32)((t)[i+3])))
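A worked example of the helpers above; the commented byte values follow directly from the macro definitions and assume a u8 scratch buffer:

    u8 tmp[4];
    u32 val = 0x12345678;

    PVR2_DECOMPOSE_LE(tmp, 0, val);  /* tmp = {0x78, 0x56, 0x34, 0x12} */
    val = PVR2_COMPOSE_LE(tmp, 0);   /* val == 0x12345678 again */

    PVR2_DECOMPOSE_BE(tmp, 0, val);  /* tmp = {0x12, 0x34, 0x56, 0x78} */
    val = PVR2_COMPOSE_BE(tmp, 0);   /* val == 0x12345678 again */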
+
+
+#endif /* __PVRUSB2_UTIL_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.c b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
new file mode 100644
index 00000000000000..961951010c27f9
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.c
@@ -0,0 +1,1126 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include "pvrusb2-context.h"
+#include "pvrusb2-hdw.h"
+#include "pvrusb2.h"
+#include "pvrusb2-debug.h"
+#include "pvrusb2-v4l2.h"
+#include "pvrusb2-ioread.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+
+struct pvr2_v4l2_dev;
+struct pvr2_v4l2_fh;
+struct pvr2_v4l2;
+
+/* V4L no longer provides the ability to set / get a private context pointer
+ (i.e. video_get_drvdata / video_set_drvdata), which means we have to
+ concoct our own context locating mechanism. Supposedly this is intended
+ to simplify driver implementation. It's not clear to me how that can
+ possibly be true. Our solution here is to maintain a lookup table of
+ our context instances, indexed by the minor device number of the V4L
+ device. See pvr2_v4l2_open() for some implications of this approach. */
+static struct pvr2_v4l2_dev *devices[256];
+static DEFINE_MUTEX(device_lock);
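The table and mutex above imply a publish step on the registration path that mirrors the teardown in pvr2_v4l2_dev_destroy() further down; that code is not part of this hunk, so the following is only a hypothetical sketch of the shape it must take:

    /* Hypothetical: runs after video_register_device() has succeeded,
       while still holding device_lock as the comment above demands. */
    mutex_lock(&device_lock);
    dip->ctxt_idx = dip->vdev->minor;
    devices[dip->ctxt_idx] = dip;
    mutex_unlock(&device_lock);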
+
+struct pvr2_v4l2_dev {
+ struct pvr2_v4l2 *v4lp;
+ struct video_device *vdev;
+ struct pvr2_context_stream *stream;
+ int ctxt_idx;
+ enum pvr2_config config;
+};
+
+struct pvr2_v4l2_fh {
+ struct pvr2_channel channel;
+ struct pvr2_v4l2_dev *dev_info;
+ enum v4l2_priority prio;
+ struct pvr2_ioread *rhp;
+ struct file *file;
+ struct pvr2_v4l2 *vhead;
+ struct pvr2_v4l2_fh *vnext;
+ struct pvr2_v4l2_fh *vprev;
+ wait_queue_head_t wait_data;
+ int fw_mode_flag;
+};
+
+struct pvr2_v4l2 {
+ struct pvr2_channel channel;
+ struct pvr2_v4l2_fh *vfirst;
+ struct pvr2_v4l2_fh *vlast;
+
+ struct v4l2_prio_state prio;
+
+ /* streams */
+ struct pvr2_v4l2_dev video_dev;
+};
+
+static int video_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
+module_param_array(video_nr, int, NULL, 0444);
+MODULE_PARM_DESC(video_nr, "Offset for device's minor");
+
+struct v4l2_capability pvr_capability ={
+ .driver = "pvrusb2",
+ .card = "Hauppauge WinTV pvr-usb2",
+ .bus_info = "usb",
+ .version = KERNEL_VERSION(0,8,0),
+ .capabilities = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE |
+ V4L2_CAP_TUNER | V4L2_CAP_AUDIO |
+ V4L2_CAP_READWRITE),
+ .reserved = {0,0,0,0}
+};
+
+static struct v4l2_tuner pvr_v4l2_tuners[]= {
+ {
+ .index = 0,
+ .name = "TV Tuner",
+ .type = V4L2_TUNER_ANALOG_TV,
+ .capability = (V4L2_TUNER_CAP_NORM |
+ V4L2_TUNER_CAP_STEREO |
+ V4L2_TUNER_CAP_LANG1 |
+ V4L2_TUNER_CAP_LANG2),
+ .rangelow = 0,
+ .rangehigh = 0,
+ .rxsubchans = V4L2_TUNER_SUB_STEREO,
+ .audmode = V4L2_TUNER_MODE_STEREO,
+ .signal = 0,
+ .afc = 0,
+ .reserved = {0,0,0,0}
+ }
+};
+
+struct v4l2_fmtdesc pvr_fmtdesc [] = {
+ {
+ .index = 0,
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .flags = V4L2_FMT_FLAG_COMPRESSED,
+ .description = "MPEG1/2",
+ // This should really be V4L2_PIX_FMT_MPEG, but xawtv
+ // breaks when I do that.
+ .pixelformat = 0, // V4L2_PIX_FMT_MPEG,
+ .reserved = { 0, 0, 0, 0 }
+ }
+};
+
+#define PVR_FORMAT_PIX 0
+#define PVR_FORMAT_VBI 1
+
+struct v4l2_format pvr_format [] = {
+ [PVR_FORMAT_PIX] = {
+ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ .fmt = {
+ .pix = {
+ .width = 720,
+ .height = 576,
+ // This should really be V4L2_PIX_FMT_MPEG,
+ // but xawtv breaks when I do that.
+ .pixelformat = 0, // V4L2_PIX_FMT_MPEG,
+ .field = V4L2_FIELD_INTERLACED,
+ .bytesperline = 0, // doesn't make sense
+ // here
+ //FIXME : Don't know what to put here...
+ .sizeimage = (32*1024),
+ .colorspace = 0, // doesn't make sense here
+ .priv = 0
+ }
+ }
+ },
+ [PVR_FORMAT_VBI] = {
+ .type = V4L2_BUF_TYPE_VBI_CAPTURE,
+ .fmt = {
+ .vbi = {
+ .sampling_rate = 27000000,
+ .offset = 248,
+ .samples_per_line = 1443,
+ .sample_format = V4L2_PIX_FMT_GREY,
+ .start = { 0, 0 },
+ .count = { 0, 0 },
+ .flags = 0,
+ .reserved = { 0, 0 }
+ }
+ }
+ }
+};
+
+/*
+ * pvr2_v4l2_do_ioctl()
+ *
+ * This is part of the Video4Linux (V4L2) API. This function handles ioctl() calls.
+ *
+ */
+static int pvr2_v4l2_do_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, void *arg)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ struct pvr2_v4l2 *vp = fh->vhead;
+ struct pvr2_v4l2_dev *dev_info = fh->dev_info;
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ int ret = -EINVAL;
+
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),cmd);
+ }
+
+ if (!pvr2_hdw_dev_ok(hdw)) {
+ pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+ "ioctl failed - bad or no context");
+ return -EFAULT;
+ }
+
+ /* check priority */
+ switch (cmd) {
+ case VIDIOC_S_CTRL:
+ case VIDIOC_S_STD:
+ case VIDIOC_S_INPUT:
+ case VIDIOC_S_TUNER:
+ case VIDIOC_S_FREQUENCY:
+ ret = v4l2_prio_check(&vp->prio, &fh->prio);
+ if (ret)
+ return ret;
+ }
+
+ switch (cmd) {
+ case VIDIOC_QUERYCAP:
+ {
+ struct v4l2_capability *cap = arg;
+
+ memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_G_PRIORITY:
+ {
+ enum v4l2_priority *p = arg;
+
+ *p = v4l2_prio_max(&vp->prio);
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_S_PRIORITY:
+ {
+ enum v4l2_priority *prio = arg;
+
+ ret = v4l2_prio_change(&vp->prio, &fh->prio, *prio);
+ break;
+ }
+
+ case VIDIOC_ENUMSTD:
+ {
+ struct v4l2_standard *vs = (struct v4l2_standard *)arg;
+ int idx = vs->index;
+ ret = pvr2_hdw_get_stdenum_value(hdw,vs,idx+1);
+ break;
+ }
+
+ case VIDIOC_G_STD:
+ {
+ int val = 0;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),&val);
+ *(v4l2_std_id *)arg = val;
+ break;
+ }
+
+ case VIDIOC_S_STD:
+ {
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_STDCUR),
+ *(v4l2_std_id *)arg);
+ break;
+ }
+
+ case VIDIOC_ENUMINPUT:
+ {
+ struct pvr2_ctrl *cptr;
+ struct v4l2_input *vi = (struct v4l2_input *)arg;
+ struct v4l2_input tmp;
+ unsigned int cnt;
+
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+
+ memset(&tmp,0,sizeof(tmp));
+ tmp.index = vi->index;
+ ret = 0;
+ switch (vi->index) {
+ case PVR2_CVAL_INPUT_TV:
+ case PVR2_CVAL_INPUT_RADIO:
+ tmp.type = V4L2_INPUT_TYPE_TUNER;
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ tmp.type = V4L2_INPUT_TYPE_CAMERA;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret < 0) break;
+
+ cnt = 0;
+ pvr2_ctrl_get_valname(cptr,vi->index,
+ tmp.name,sizeof(tmp.name)-1,&cnt);
+ tmp.name[cnt] = 0;
+
+ /* Don't bother with audioset, since this driver currently
+ always switches the audio whenever the video is
+ switched. */
+
+ /* Handling std is a tougher problem. It doesn't make
+ sense in cases where a device might be multi-standard.
+ We could just copy out the current value for the
+ standard, but it can change over time. For now just
+ leave it zero. */
+
+ memcpy(vi, &tmp, sizeof(tmp));
+
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_G_INPUT:
+ {
+ struct pvr2_ctrl *cptr;
+ struct v4l2_input *vi = (struct v4l2_input *)arg;
+ int val;
+ cptr = pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT);
+ val = 0;
+ ret = pvr2_ctrl_get_value(cptr,&val);
+ vi->index = val;
+ break;
+ }
+
+ case VIDIOC_S_INPUT:
+ {
+ struct v4l2_input *vi = (struct v4l2_input *)arg;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_INPUT),
+ vi->index);
+ break;
+ }
+
+ case VIDIOC_ENUMAUDIO:
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ case VIDIOC_G_AUDIO:
+ {
+ ret = -EINVAL;
+ break;
+ }
+
+ case VIDIOC_S_AUDIO:
+ {
+ ret = -EINVAL;
+ break;
+ }
+ case VIDIOC_G_TUNER:
+ {
+ struct v4l2_tuner *vt = (struct v4l2_tuner *)arg;
+ unsigned int status_mask;
+ int val;
+ if (vt->index !=0) break;
+
+ status_mask = pvr2_hdw_get_signal_status(hdw);
+
+ memcpy(vt, &pvr_v4l2_tuners[vt->index],
+ sizeof(struct v4l2_tuner));
+
+ vt->signal = 0;
+ if (status_mask & PVR2_SIGNAL_OK) {
+ if (status_mask & PVR2_SIGNAL_STEREO) {
+ vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
+ } else {
+ vt->rxsubchans = V4L2_TUNER_SUB_MONO;
+ }
+ if (status_mask & PVR2_SIGNAL_SAP) {
+ vt->rxsubchans |= (V4L2_TUNER_SUB_LANG1 |
+ V4L2_TUNER_SUB_LANG2);
+ }
+ vt->signal = 65535;
+ }
+
+ val = 0;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
+ &val);
+ vt->audmode = val;
+ break;
+ }
+
+ case VIDIOC_S_TUNER:
+ {
+ struct v4l2_tuner *vt=(struct v4l2_tuner *)arg;
+
+ if (vt->index != 0)
+ break;
+
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_AUDIOMODE),
+ vt->audmode);
+ break;
+ }
+
+ case VIDIOC_S_FREQUENCY:
+ {
+ const struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
+ /* V4L2 expresses tuner frequencies in units of 62.5 kHz */
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
+ vf->frequency * 62500);
+ break;
+ }
+
+ case VIDIOC_G_FREQUENCY:
+ {
+ struct v4l2_frequency *vf = (struct v4l2_frequency *)arg;
+ int val = 0;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_FREQUENCY),
+ &val);
+ val /= 62500;
+ vf->frequency = val;
+ break;
+ }
+
+ case VIDIOC_ENUM_FMT:
+ {
+ struct v4l2_fmtdesc *fd = (struct v4l2_fmtdesc *)arg;
+
+ /* Only one format is supported : mpeg.*/
+ if (fd->index != 0)
+ break;
+
+ memcpy(fd, pvr_fmtdesc, sizeof(struct v4l2_fmtdesc));
+ ret = 0;
+ break;
+ }
+
+ case VIDIOC_G_FMT:
+ {
+ struct v4l2_format *vf = (struct v4l2_format *)arg;
+ int val;
+ switch(vf->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
+ sizeof(struct v4l2_format));
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_HRES),
+ &val);
+ vf->fmt.pix.width = val;
+ val = 0;
+ pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,PVR2_CID_VRES),
+ &val);
+ vf->fmt.pix.height = val;
+ ret = 0;
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ // ????? Still need to figure out how to do VBI correctly
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ case VIDIOC_TRY_FMT:
+ case VIDIOC_S_FMT:
+ {
+ struct v4l2_format *vf = (struct v4l2_format *)arg;
+
+ ret = 0;
+ switch(vf->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE: {
+ int h = vf->fmt.pix.height;
+ int w = vf->fmt.pix.width;
+
+ if (h < 200) {
+ h = 200;
+ } else if (h > 625) {
+ h = 625;
+ }
+ if (w < 320) {
+ w = 320;
+ } else if (w > 720) {
+ w = 720;
+ }
+
+ memcpy(vf, &pvr_format[PVR_FORMAT_PIX],
+ sizeof(struct v4l2_format));
+ vf->fmt.pix.width = w;
+ vf->fmt.pix.height = h;
+
+ if (cmd == VIDIOC_S_FMT) {
+ pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,
+ PVR2_CID_HRES),
+ vf->fmt.pix.width);
+ pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_by_id(hdw,
+ PVR2_CID_VRES),
+ vf->fmt.pix.height);
+ }
+ } break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ // ????? Still need to figure out how to do VBI correctly
+ ret = -EINVAL;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ case VIDIOC_STREAMON:
+ {
+ ret = pvr2_hdw_set_stream_type(hdw,dev_info->config);
+ if (ret < 0) return ret;
+ ret = pvr2_hdw_set_streaming(hdw,!0);
+ break;
+ }
+
+ case VIDIOC_STREAMOFF:
+ {
+ ret = pvr2_hdw_set_streaming(hdw,0);
+ break;
+ }
+
+ case VIDIOC_QUERYCTRL:
+ {
+ struct pvr2_ctrl *cptr;
+ struct v4l2_queryctrl *vc = (struct v4l2_queryctrl *)arg;
+ ret = 0;
+ if (vc->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ cptr = pvr2_hdw_get_ctrl_nextv4l(
+ hdw,(vc->id & ~V4L2_CTRL_FLAG_NEXT_CTRL));
+ if (cptr) vc->id = pvr2_ctrl_get_v4lid(cptr);
+ } else {
+ cptr = pvr2_hdw_get_ctrl_v4l(hdw,vc->id);
+ }
+ if (!cptr) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x not implemented here",
+ vc->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x mapping name=%s (%s)",
+ vc->id,pvr2_ctrl_get_name(cptr),
+ pvr2_ctrl_get_desc(cptr));
+ strlcpy(vc->name,pvr2_ctrl_get_desc(cptr),sizeof(vc->name));
+ vc->flags = pvr2_ctrl_get_v4lflags(cptr);
+ vc->default_value = pvr2_ctrl_get_def(cptr);
+ switch (pvr2_ctrl_get_type(cptr)) {
+ case pvr2_ctl_enum:
+ vc->type = V4L2_CTRL_TYPE_MENU;
+ vc->minimum = 0;
+ vc->maximum = pvr2_ctrl_get_cnt(cptr) - 1;
+ vc->step = 1;
+ break;
+ case pvr2_ctl_bool:
+ vc->type = V4L2_CTRL_TYPE_BOOLEAN;
+ vc->minimum = 0;
+ vc->maximum = 1;
+ vc->step = 1;
+ break;
+ case pvr2_ctl_int:
+ vc->type = V4L2_CTRL_TYPE_INTEGER;
+ vc->minimum = pvr2_ctrl_get_min(cptr);
+ vc->maximum = pvr2_ctrl_get_max(cptr);
+ vc->step = 1;
+ break;
+ default:
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "QUERYCTRL id=0x%x name=%s not mappable",
+ vc->id,pvr2_ctrl_get_name(cptr));
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ }
+
+ case VIDIOC_QUERYMENU:
+ {
+ struct v4l2_querymenu *vm = (struct v4l2_querymenu *)arg;
+ unsigned int cnt = 0;
+ ret = pvr2_ctrl_get_valname(pvr2_hdw_get_ctrl_v4l(hdw,vm->id),
+ vm->index,
+ vm->name,sizeof(vm->name)-1,
+ &cnt);
+ vm->name[cnt] = 0;
+ break;
+ }
+
+ case VIDIOC_G_CTRL:
+ {
+ struct v4l2_control *vc = (struct v4l2_control *)arg;
+ int val = 0;
+ ret = pvr2_ctrl_get_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
+ &val);
+ vc->value = val;
+ break;
+ }
+
+ case VIDIOC_S_CTRL:
+ {
+ struct v4l2_control *vc = (struct v4l2_control *)arg;
+ ret = pvr2_ctrl_set_value(pvr2_hdw_get_ctrl_v4l(hdw,vc->id),
+ vc->value);
+ break;
+ }
+
+ case VIDIOC_G_EXT_CTRLS:
+ {
+ struct v4l2_ext_controls *ctls =
+ (struct v4l2_ext_controls *)arg;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ int val;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_get_value(
+ pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),&val);
+ if (ret) {
+ ctls->error_idx = idx;
+ break;
+ }
+ /* Ensure that if read as a 64 bit value, the user
+ will still get a hopefully sane value */
+ ctrl->value64 = 0;
+ ctrl->value = val;
+ }
+ break;
+ }
+
+ case VIDIOC_S_EXT_CTRLS:
+ {
+ struct v4l2_ext_controls *ctls =
+ (struct v4l2_ext_controls *)arg;
+ struct v4l2_ext_control *ctrl;
+ unsigned int idx;
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ ret = pvr2_ctrl_set_value(
+ pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id),
+ ctrl->value);
+ if (ret) {
+ ctls->error_idx = idx;
+ break;
+ }
+ }
+ break;
+ }
+
+ case VIDIOC_TRY_EXT_CTRLS:
+ {
+ struct v4l2_ext_controls *ctls =
+ (struct v4l2_ext_controls *)arg;
+ struct v4l2_ext_control *ctrl;
+ struct pvr2_ctrl *pctl;
+ unsigned int idx;
+ /* For the moment just validate that the requested control
+ actually exists. */
+ for (idx = 0; idx < ctls->count; idx++) {
+ ctrl = ctls->controls + idx;
+ pctl = pvr2_hdw_get_ctrl_v4l(hdw,ctrl->id);
+ if (!pctl) {
+ ret = -EINVAL;
+ ctls->error_idx = idx;
+ break;
+ }
+ }
+ break;
+ }
+
+ case VIDIOC_LOG_STATUS:
+ {
+ pvr2_hdw_trigger_module_log(hdw);
+ ret = 0;
+ break;
+ }
+
+ default :
+ ret = v4l_compat_translate_ioctl(inode,file,cmd,
+ arg,pvr2_v4l2_do_ioctl);
+ }
+
+ pvr2_hdw_commit_ctl(hdw);
+
+ if (ret < 0) {
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl failure, ret=%d",ret);
+ } else {
+ if (pvrusb2_debug & PVR2_TRACE_V4LIOCTL) {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl failure, ret=%d"
+ " command was:",ret);
+ v4l_print_ioctl(pvr2_hdw_get_driver_name(hdw),
+ cmd);
+ }
+ }
+ } else {
+ pvr2_trace(PVR2_TRACE_V4LIOCTL,
+ "pvr2_v4l2_do_ioctl complete, ret=%d (0x%x)",
+ ret,ret);
+ }
+ return ret;
+}
+
+
+static void pvr2_v4l2_dev_destroy(struct pvr2_v4l2_dev *dip)
+{
+ pvr2_trace(PVR2_TRACE_INIT,
+ "unregistering device video%d [%s]",
+ dip->vdev->minor,pvr2_config_get_name(dip->config));
+ if (dip->ctxt_idx >= 0) {
+ mutex_lock(&device_lock);
+ devices[dip->ctxt_idx] = NULL;
+ dip->ctxt_idx = -1;
+ mutex_unlock(&device_lock);
+ }
+ video_unregister_device(dip->vdev);
+}
+
+
+static void pvr2_v4l2_destroy_no_lock(struct pvr2_v4l2 *vp)
+{
+ pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,-1);
+ pvr2_v4l2_dev_destroy(&vp->video_dev);
+
+ pvr2_trace(PVR2_TRACE_STRUCT,"Destroying pvr2_v4l2 id=%p",vp);
+ pvr2_channel_done(&vp->channel);
+ kfree(vp);
+}
+
+
+void pvr2_v4l2_internal_check(struct pvr2_channel *chp)
+{
+ struct pvr2_v4l2 *vp;
+ vp = container_of(chp,struct pvr2_v4l2,channel);
+ if (!vp->channel.mc_head->disconnect_flag) return;
+ if (vp->vfirst) return;
+ pvr2_v4l2_destroy_no_lock(vp);
+}
+
+
+int pvr2_v4l2_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+
+/* Temporary hack: use the ivtv API until a V4L2 one is available. */
+#define IVTV_IOC_G_CODEC 0xFFEE7703
+#define IVTV_IOC_S_CODEC 0xFFEE7704
+ if (cmd == IVTV_IOC_G_CODEC || cmd == IVTV_IOC_S_CODEC) return 0;
+ return video_usercopy(inode, file, cmd, arg, pvr2_v4l2_do_ioctl);
+}
+
+
+int pvr2_v4l2_release(struct inode *inode, struct file *file)
+{
+ struct pvr2_v4l2_fh *fhp = file->private_data;
+ struct pvr2_v4l2 *vp = fhp->vhead;
+ struct pvr2_context *mp = fhp->vhead->channel.mc_head;
+
+ pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_release");
+
+ if (fhp->rhp) {
+ struct pvr2_stream *sp;
+ struct pvr2_hdw *hdw;
+ hdw = fhp->channel.mc_head->hdw;
+ pvr2_hdw_set_streaming(hdw,0);
+ sp = pvr2_ioread_get_stream(fhp->rhp);
+ if (sp) pvr2_stream_set_callback(sp,0,0);
+ pvr2_ioread_destroy(fhp->rhp);
+ fhp->rhp = 0;
+ }
+ v4l2_prio_close(&vp->prio, &fhp->prio);
+ file->private_data = NULL;
+
+ pvr2_context_enter(mp); do {
+ if (fhp->vnext) {
+ fhp->vnext->vprev = fhp->vprev;
+ } else {
+ vp->vlast = fhp->vprev;
+ }
+ if (fhp->vprev) {
+ fhp->vprev->vnext = fhp->vnext;
+ } else {
+ vp->vfirst = fhp->vnext;
+ }
+ fhp->vnext = 0;
+ fhp->vprev = 0;
+ fhp->vhead = 0;
+ pvr2_channel_done(&fhp->channel);
+ pvr2_trace(PVR2_TRACE_STRUCT,
+ "Destroying pvr_v4l2_fh id=%p",fhp);
+ kfree(fhp);
+ if (vp->channel.mc_head->disconnect_flag && !vp->vfirst) {
+ pvr2_v4l2_destroy_no_lock(vp);
+ }
+ } while (0); pvr2_context_exit(mp);
+ return 0;
+}
+
+
+int pvr2_v4l2_open(struct inode *inode, struct file *file)
+{
+ struct pvr2_v4l2_dev *dip = 0; /* Our own context pointer */
+ struct pvr2_v4l2_fh *fhp;
+ struct pvr2_v4l2 *vp;
+ struct pvr2_hdw *hdw;
+
+ mutex_lock(&device_lock);
+ /* MCI 7-Jun-2006 Even though we're just doing what amounts to an
+ atomic read of the device mapping array here, we still need the
+ mutex. The problem is that there is a tiny race possible when
+ we register the device. We can't update the device mapping
+ array until after the device has been registered, owing to the
+ fact that we can't know the minor device number until after the
+ registration succeeds. And if another thread tries to open the
+ device in the window of time after registration but before the
+ map is updated, then it will get back an erroneous null pointer
+ and the open will result in a spurious failure. The only way to
+ prevent that is to (a) be inside the mutex here before we access
+ the array, and (b) cover the entire registration process later
+ on with this same mutex. Thus if we get inside the mutex here,
+ then we can be assured that the registration process actually
+ completed correctly. This is an unhappy complication from the
+ use of global data in a driver that lives in a preemptible
+ environment. It sure would be nice if the video device itself
+ had a means for storing and retrieving a local context pointer.
+ Oh wait. It did. But now it's gone. Silly me. */
+ {
+ unsigned int midx = iminor(file->f_dentry->d_inode);
+ if (midx < sizeof(devices)/sizeof(devices[0])) {
+ dip = devices[midx];
+ }
+ }
+ mutex_unlock(&device_lock);
+
+ if (!dip) return -ENODEV; /* Should be impossible but I'm paranoid */
+
+ vp = dip->v4lp;
+ hdw = vp->channel.hdw;
+
+ pvr2_trace(PVR2_TRACE_OPEN_CLOSE,"pvr2_v4l2_open");
+
+ if (!pvr2_hdw_dev_ok(hdw)) {
+ pvr2_trace(PVR2_TRACE_OPEN_CLOSE,
+ "pvr2_v4l2_open: hardware not ready");
+ return -EIO;
+ }
+
+ fhp = kmalloc(sizeof(*fhp),GFP_KERNEL);
+ if (!fhp) {
+ return -ENOMEM;
+ }
+ memset(fhp,0,sizeof(*fhp));
+
+ init_waitqueue_head(&fhp->wait_data);
+ fhp->dev_info = dip;
+
+ pvr2_context_enter(vp->channel.mc_head); do {
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr_v4l2_fh id=%p",fhp);
+ pvr2_channel_init(&fhp->channel,vp->channel.mc_head);
+ fhp->vnext = 0;
+ fhp->vprev = vp->vlast;
+ if (vp->vlast) {
+ vp->vlast->vnext = fhp;
+ } else {
+ vp->vfirst = fhp;
+ }
+ vp->vlast = fhp;
+ fhp->vhead = vp;
+ } while (0); pvr2_context_exit(vp->channel.mc_head);
+
+ fhp->file = file;
+ file->private_data = fhp;
+ v4l2_prio_open(&vp->prio,&fhp->prio);
+
+ fhp->fw_mode_flag = pvr2_hdw_cpufw_get_enabled(hdw);
+
+ return 0;
+}
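The comment inside pvr2_v4l2_open() above is describing a real ordering constraint: the devices[] map can only be filled in after video_register_device() has assigned the minor number, so the lookup here and the registration in pvr2_v4l2_dev_init() must both run under the same device_lock mutex, or a concurrent open could read a stale NULL entry. The following is a minimal user-space sketch of that pattern, offered purely as an editorial illustration and not as part of the patch; publish_device(), lookup_device() and MAX_MINORS are hypothetical stand-ins for the driver's registration path, the lookup above, and the devices[] array bound.

/*
 * Editorial sketch (not part of the patch): both the registration path,
 * which learns the minor number only after registering, and the lookup
 * path take the same mutex, so a lookup can never observe a
 * half-registered device.
 */
#include <pthread.h>
#include <stddef.h>

#define MAX_MINORS 256

static void *minor_map[MAX_MINORS];
static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;

/* Registration side: the minor number is only known after register_dev()
   returns, so the whole register-then-publish sequence stays locked. */
static int publish_device(void *dev, int (*register_dev)(void *))
{
	int minor;

	pthread_mutex_lock(&map_lock);
	minor = register_dev(dev);	/* assigns the minor number */
	if (minor >= 0 && minor < MAX_MINORS)
		minor_map[minor] = dev;
	pthread_mutex_unlock(&map_lock);
	return minor;
}

/* Open side: under the same lock, a visible device always has its map
   entry already filled in. */
static void *lookup_device(int minor)
{
	void *dev = NULL;

	pthread_mutex_lock(&map_lock);
	if (minor >= 0 && minor < MAX_MINORS)
		dev = minor_map[minor];
	pthread_mutex_unlock(&map_lock);
	return dev;
}

The same discipline shows up in pvr2_v4l2_dev_init() below: device_lock is taken before video_register_device() and the devices[] slot is only published once the minor number is known, so an open that wins the race still sees a complete mapping.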
+
+
+static void pvr2_v4l2_notify(struct pvr2_v4l2_fh *fhp)
+{
+ wake_up(&fhp->wait_data);
+}
+
+static int pvr2_v4l2_iosetup(struct pvr2_v4l2_fh *fh)
+{
+ int ret;
+ struct pvr2_stream *sp;
+ struct pvr2_hdw *hdw;
+ if (fh->rhp) return 0;
+
+ /* First read() attempt. Try to claim the stream and start
+ it... */
+ if ((ret = pvr2_channel_claim_stream(&fh->channel,
+ fh->dev_info->stream)) != 0) {
+ /* Someone else must already have it */
+ return ret;
+ }
+
+ fh->rhp = pvr2_channel_create_mpeg_stream(fh->dev_info->stream);
+ if (!fh->rhp) {
+ pvr2_channel_claim_stream(&fh->channel,0);
+ return -ENOMEM;
+ }
+
+ hdw = fh->channel.mc_head->hdw;
+ sp = fh->dev_info->stream->stream;
+ pvr2_stream_set_callback(sp,(pvr2_stream_callback)pvr2_v4l2_notify,fh);
+ pvr2_hdw_set_stream_type(hdw,fh->dev_info->config);
+ pvr2_hdw_set_streaming(hdw,!0);
+ ret = pvr2_ioread_set_enabled(fh->rhp,!0);
+
+ return ret;
+}
+
+
+static ssize_t pvr2_v4l2_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ int ret;
+
+ if (fh->fw_mode_flag) {
+ struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
+ char *tbuf;
+ int c1,c2;
+ int tcnt = 0;
+ unsigned int offs = *ppos;
+
+ tbuf = kmalloc(PAGE_SIZE,GFP_KERNEL);
+ if (!tbuf) return -ENOMEM;
+
+ while (count) {
+ c1 = count;
+ if (c1 > PAGE_SIZE) c1 = PAGE_SIZE;
+ c2 = pvr2_hdw_cpufw_get(hdw,offs,tbuf,c1);
+ if (c2 < 0) {
+ tcnt = c2;
+ break;
+ }
+ if (!c2) break;
+ if (copy_to_user(buff,tbuf,c2)) {
+ tcnt = -EFAULT;
+ break;
+ }
+ offs += c2;
+ tcnt += c2;
+ buff += c2;
+ count -= c2;
+ *ppos += c2;
+ }
+ kfree(tbuf);
+ return tcnt;
+ }
+
+ if (!fh->rhp) {
+ ret = pvr2_v4l2_iosetup(fh);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ for (;;) {
+ ret = pvr2_ioread_read(fh->rhp,buff,count);
+ if (ret >= 0) break;
+ if (ret != -EAGAIN) break;
+ if (file->f_flags & O_NONBLOCK) break;
+ /* Doing blocking I/O. Wait here. */
+ ret = wait_event_interruptible(
+ fh->wait_data,
+ pvr2_ioread_avail(fh->rhp) >= 0);
+ if (ret < 0) break;
+ }
+
+ return ret;
+}
+
+
+static unsigned int pvr2_v4l2_poll(struct file *file, poll_table *wait)
+{
+ unsigned int mask = 0;
+ struct pvr2_v4l2_fh *fh = file->private_data;
+ int ret;
+
+ if (fh->fw_mode_flag) {
+ mask |= POLLIN | POLLRDNORM;
+ return mask;
+ }
+
+ if (!fh->rhp) {
+ ret = pvr2_v4l2_iosetup(fh);
+ if (ret) return POLLERR;
+ }
+
+ poll_wait(file,&fh->wait_data,wait);
+
+ if (pvr2_ioread_avail(fh->rhp) >= 0) {
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ return mask;
+}
+
+
+static struct file_operations vdev_fops = {
+ .owner = THIS_MODULE,
+ .open = pvr2_v4l2_open,
+ .release = pvr2_v4l2_release,
+ .read = pvr2_v4l2_read,
+ .ioctl = pvr2_v4l2_ioctl,
+ .llseek = no_llseek,
+ .poll = pvr2_v4l2_poll,
+};
+
+
+#define VID_HARDWARE_PVRUSB2 38 /* FIXME : need a good value */
+
+static struct video_device vdev_template = {
+ .owner = THIS_MODULE,
+ .type = VID_TYPE_CAPTURE | VID_TYPE_TUNER,
+ .type2 = (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE
+ | V4L2_CAP_TUNER | V4L2_CAP_AUDIO
+ | V4L2_CAP_READWRITE),
+ .hardware = VID_HARDWARE_PVRUSB2,
+ .fops = &vdev_fops,
+};
+
+
+static void pvr2_v4l2_dev_init(struct pvr2_v4l2_dev *dip,
+ struct pvr2_v4l2 *vp,
+ enum pvr2_config cfg)
+{
+ int mindevnum;
+ int unit_number;
+ int v4l_type;
+ dip->v4lp = vp;
+ dip->config = cfg;
+
+
+ switch (cfg) {
+ case pvr2_config_mpeg:
+ v4l_type = VFL_TYPE_GRABBER;
+ dip->stream = &vp->channel.mc_head->video_stream;
+ break;
+ case pvr2_config_vbi:
+ v4l_type = VFL_TYPE_VBI;
+ break;
+ case pvr2_config_radio:
+ v4l_type = VFL_TYPE_RADIO;
+ break;
+ default:
+ /* Bail out (this should be impossible) */
+ err("Failed to set up pvrusb2 v4l dev"
+ " due to unrecognized config");
+ return;
+ }
+
+ if (!dip->stream) {
+ err("Failed to set up pvrusb2 v4l dev"
+ " due to missing stream instance");
+ return;
+ }
+
+ dip->vdev = video_device_alloc();
+ if (!dip->vdev) {
+ err("Alloc of pvrusb2 v4l video device failed");
+ return;
+ }
+
+ memcpy(dip->vdev,&vdev_template,sizeof(vdev_template));
+ dip->vdev->release = video_device_release;
+ mutex_lock(&device_lock);
+
+ mindevnum = -1;
+ unit_number = pvr2_hdw_get_unit_number(vp->channel.mc_head->hdw);
+ if ((unit_number >= 0) && (unit_number < PVR_NUM)) {
+ mindevnum = video_nr[unit_number];
+ }
+ if ((video_register_device(dip->vdev, v4l_type, mindevnum) < 0) &&
+ (video_register_device(dip->vdev, v4l_type, -1) < 0)) {
+ err("Failed to register pvrusb2 v4l video device");
+ } else {
+ pvr2_trace(PVR2_TRACE_INIT,
+ "registered device video%d [%s]",
+ dip->vdev->minor,pvr2_config_get_name(dip->config));
+ }
+
+ if ((dip->vdev->minor < sizeof(devices)/sizeof(devices[0])) &&
+ (devices[dip->vdev->minor] == NULL)) {
+ dip->ctxt_idx = dip->vdev->minor;
+ devices[dip->ctxt_idx] = dip;
+ }
+ mutex_unlock(&device_lock);
+
+ pvr2_hdw_v4l_store_minor_number(vp->channel.mc_head->hdw,
+ dip->vdev->minor);
+}
+
+
+struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
+{
+ struct pvr2_v4l2 *vp;
+
+ vp = kmalloc(sizeof(*vp),GFP_KERNEL);
+ if (!vp) return vp;
+ memset(vp,0,sizeof(*vp));
+ vp->video_dev.ctxt_idx = -1;
+ pvr2_channel_init(&vp->channel,mnp);
+ pvr2_trace(PVR2_TRACE_STRUCT,"Creating pvr2_v4l2 id=%p",vp);
+
+ vp->channel.check_func = pvr2_v4l2_internal_check;
+
+ /* register streams */
+ pvr2_v4l2_dev_init(&vp->video_dev,vp,pvr2_config_mpeg);
+
+
+ return vp;
+}
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-v4l2.h b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h
new file mode 100644
index 00000000000000..9a995e2d2256d2
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-v4l2.h
@@ -0,0 +1,40 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#ifndef __PVRUSB2_V4L2_H
+#define __PVRUSB2_V4L2_H
+
+#include "pvrusb2-context.h"
+
+struct pvr2_v4l2;
+
+struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *);
+
+#endif /* __PVRUSB2_V4L2_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 75 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
new file mode 100644
index 00000000000000..e4ec7f25194c56
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
@@ -0,0 +1,253 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+
+ This source file is specifically designed to interface with the
+  saa711x support that is available in v4l starting with
+  Linux 2.6.15.
+
+*/
+
+#include "pvrusb2-video-v4l.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+
+
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <media/saa7115.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+struct pvr2_v4l_decoder {
+ struct pvr2_i2c_handler handler;
+ struct pvr2_decoder_ctrl ctrl;
+ struct pvr2_i2c_client *client;
+ struct pvr2_hdw *hdw;
+ unsigned long stale_mask;
+};
+
+
+static void set_input(struct pvr2_v4l_decoder *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ struct v4l2_routing route;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val);
+ switch(hdw->input_val) {
+ case PVR2_CVAL_INPUT_TV:
+ route.input = SAA7115_COMPOSITE4;
+ break;
+ case PVR2_CVAL_INPUT_COMPOSITE:
+ route.input = SAA7115_COMPOSITE5;
+ break;
+ case PVR2_CVAL_INPUT_SVIDEO:
+ route.input = SAA7115_SVIDEO2;
+ break;
+ case PVR2_CVAL_INPUT_RADIO:
+ // ????? No idea yet what to do here
+ default:
+ return;
+ }
+ route.output = 0;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_VIDEO_ROUTING,&route);
+}
+
+
+static int check_input(struct pvr2_v4l_decoder *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->input_dirty != 0;
+}
+
+
+static void set_audio(struct pvr2_v4l_decoder *ctxt)
+{
+ u32 val;
+ struct pvr2_hdw *hdw = ctxt->hdw;
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_audio %d",
+ hdw->srate_val);
+ switch (hdw->srate_val) {
+ default:
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_48000:
+ val = 48000;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_44100:
+ val = 44100;
+ break;
+ case V4L2_MPEG_AUDIO_SAMPLING_FREQ_32000:
+ val = 32000;
+ break;
+ }
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_AUDIO_CLOCK_FREQ,&val);
+}
+
+
+static int check_audio(struct pvr2_v4l_decoder *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->srate_dirty != 0;
+}
+
+
+struct pvr2_v4l_decoder_ops {
+ void (*update)(struct pvr2_v4l_decoder *);
+ int (*check)(struct pvr2_v4l_decoder *);
+};
+
+
+static const struct pvr2_v4l_decoder_ops decoder_ops[] = {
+ { .update = set_input, .check = check_input},
+ { .update = set_audio, .check = check_audio},
+};
+
+
+static void decoder_detach(struct pvr2_v4l_decoder *ctxt)
+{
+ ctxt->client->handler = 0;
+ ctxt->hdw->decoder_ctrl = 0;
+ kfree(ctxt);
+}
+
+
+static int decoder_check(struct pvr2_v4l_decoder *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (decoder_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void decoder_update(struct pvr2_v4l_decoder *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(decoder_ops)/sizeof(decoder_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ decoder_ops[idx].update(ctxt);
+ }
+}
+
+
+static int decoder_detect(struct pvr2_i2c_client *cp)
+{
+ /* Attempt to query the decoder - let's see if it will answer */
+ struct v4l2_tuner vt;
+ int ret;
+
+ memset(&vt,0,sizeof(vt));
+ ret = pvr2_i2c_client_cmd(cp,VIDIOC_G_TUNER,&vt);
+ return ret == 0; /* Return true if it answered */
+}
+
+
+static void decoder_enable(struct pvr2_v4l_decoder *ctxt,int fl)
+{
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 decoder_enable(%d)",fl);
+ pvr2_v4l2_cmd_stream(ctxt->client,fl);
+}
+
+
+static int decoder_is_tuned(struct pvr2_v4l_decoder *ctxt)
+{
+ struct v4l2_tuner vt;
+ int ret;
+
+ memset(&vt,0,sizeof(vt));
+ ret = pvr2_i2c_client_cmd(ctxt->client,VIDIOC_G_TUNER,&vt);
+ if (ret < 0) return -EINVAL;
+ return vt.signal ? 1 : 0;
+}
+
+
+static unsigned int decoder_describe(struct pvr2_v4l_decoder *ctxt,char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-video-v4l");
+}
+
+
+static const struct pvr2_i2c_handler_functions hfuncs = {
+ .detach = (void (*)(void *))decoder_detach,
+ .check = (int (*)(void *))decoder_check,
+ .update = (void (*)(void *))decoder_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))decoder_describe,
+};
+
+
+int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *hdw,
+ struct pvr2_i2c_client *cp)
+{
+ struct pvr2_v4l_decoder *ctxt;
+
+ if (hdw->decoder_ctrl) return 0;
+ if (cp->handler) return 0;
+ if (!decoder_detect(cp)) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->handler.func_data = ctxt;
+ ctxt->handler.func_table = &hfuncs;
+ ctxt->ctrl.ctxt = ctxt;
+ ctxt->ctrl.detach = (void (*)(void *))decoder_detach;
+ ctxt->ctrl.enable = (void (*)(void *,int))decoder_enable;
+ ctxt->ctrl.tuned = (int (*)(void *))decoder_is_tuned;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->stale_mask = (1 << (sizeof(decoder_ops)/
+ sizeof(decoder_ops[0]))) - 1;
+ hdw->decoder_ctrl = &ctxt->ctrl;
+ cp->handler = &ctxt->handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x saa711x V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h
new file mode 100644
index 00000000000000..2b917fda02e402
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-video-v4l.h
@@ -0,0 +1,52 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_VIDEO_V4L_H
+#define __PVRUSB2_VIDEO_V4L_H
+
+/*
+
+ This module connects the pvrusb2 driver to the I2C chip level
+ driver which handles device video processing. This interface is
+ used internally by the driver; higher level code should only
+ interact through the interface provided by pvrusb2-hdw.h.
+
+*/
+
+
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_decoder_v4l_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+
+#endif /* __PVRUSB2_VIDEO_V4L_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.c b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c
new file mode 100644
index 00000000000000..fcad346e395503
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.c
@@ -0,0 +1,170 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+
+ This source file is specifically designed to interface with the
+ wm8775.
+
+*/
+
+#include "pvrusb2-wm8775.h"
+#include "pvrusb2-i2c-cmd-v4l2.h"
+
+
+#include "pvrusb2-hdw-internal.h"
+#include "pvrusb2-debug.h"
+#include <linux/videodev2.h>
+#include <media/v4l2-common.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+struct pvr2_v4l_wm8775 {
+ struct pvr2_i2c_handler handler;
+ struct pvr2_i2c_client *client;
+ struct pvr2_hdw *hdw;
+ unsigned long stale_mask;
+};
+
+
+static void set_input(struct pvr2_v4l_wm8775 *ctxt)
+{
+ struct v4l2_routing route;
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ int msk = 0;
+
+ memset(&route,0,sizeof(route));
+
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c wm8775 set_input(val=%d msk=0x%x)",
+ hdw->input_val,msk);
+
+ // Always point to input #1 no matter what
+ route.input = 2;
+ pvr2_i2c_client_cmd(ctxt->client,VIDIOC_INT_S_AUDIO_ROUTING,&route);
+}
+
+static int check_input(struct pvr2_v4l_wm8775 *ctxt)
+{
+ struct pvr2_hdw *hdw = ctxt->hdw;
+ return hdw->input_dirty != 0;
+}
+
+
+struct pvr2_v4l_wm8775_ops {
+ void (*update)(struct pvr2_v4l_wm8775 *);
+ int (*check)(struct pvr2_v4l_wm8775 *);
+};
+
+
+static const struct pvr2_v4l_wm8775_ops wm8775_ops[] = {
+ { .update = set_input, .check = check_input},
+};
+
+
+static unsigned int wm8775_describe(struct pvr2_v4l_wm8775 *ctxt,
+ char *buf,unsigned int cnt)
+{
+ return scnprintf(buf,cnt,"handler: pvrusb2-wm8775");
+}
+
+
+static void wm8775_detach(struct pvr2_v4l_wm8775 *ctxt)
+{
+ ctxt->client->handler = 0;
+ kfree(ctxt);
+}
+
+
+static int wm8775_check(struct pvr2_v4l_wm8775 *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (ctxt->stale_mask & msk) continue;
+ if (wm8775_ops[idx].check(ctxt)) {
+ ctxt->stale_mask |= msk;
+ }
+ }
+ return ctxt->stale_mask != 0;
+}
+
+
+static void wm8775_update(struct pvr2_v4l_wm8775 *ctxt)
+{
+ unsigned long msk;
+ unsigned int idx;
+
+ for (idx = 0; idx < sizeof(wm8775_ops)/sizeof(wm8775_ops[0]);
+ idx++) {
+ msk = 1 << idx;
+ if (!(ctxt->stale_mask & msk)) continue;
+ ctxt->stale_mask &= ~msk;
+ wm8775_ops[idx].update(ctxt);
+ }
+}
+
+
+static const struct pvr2_i2c_handler_functions hfuncs = {
+ .detach = (void (*)(void *))wm8775_detach,
+ .check = (int (*)(void *))wm8775_check,
+ .update = (void (*)(void *))wm8775_update,
+ .describe = (unsigned int (*)(void *,char *,unsigned int))wm8775_describe,
+};
+
+
+int pvr2_i2c_wm8775_setup(struct pvr2_hdw *hdw,struct pvr2_i2c_client *cp)
+{
+ struct pvr2_v4l_wm8775 *ctxt;
+
+ if (cp->handler) return 0;
+
+ ctxt = kmalloc(sizeof(*ctxt),GFP_KERNEL);
+ if (!ctxt) return 0;
+ memset(ctxt,0,sizeof(*ctxt));
+
+ ctxt->handler.func_data = ctxt;
+ ctxt->handler.func_table = &hfuncs;
+ ctxt->client = cp;
+ ctxt->hdw = hdw;
+ ctxt->stale_mask = (1 << (sizeof(wm8775_ops)/
+ sizeof(wm8775_ops[0]))) - 1;
+ cp->handler = &ctxt->handler;
+ pvr2_trace(PVR2_TRACE_CHIPS,"i2c 0x%x wm8775 V4L2 handler set up",
+ cp->client->addr);
+ return !0;
+}
+
+
+
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2-wm8775.h b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h
new file mode 100644
index 00000000000000..8aaeff4e1e2068
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2-wm8775.h
@@ -0,0 +1,53 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_WM8775_H
+#define __PVRUSB2_WM8775_H
+
+/*
+
+ This module connects the pvrusb2 driver to the I2C chip level
+ driver which performs analog -> digital audio conversion for
+ external audio inputs. This interface is used internally by the
+ driver; higher level code should only interact through the
+ interface provided by pvrusb2-hdw.h.
+
+*/
+
+
+
+#include "pvrusb2-i2c-core.h"
+
+int pvr2_i2c_wm8775_setup(struct pvr2_hdw *,struct pvr2_i2c_client *);
+
+
+#endif /* __PVRUSB2_WM8775_H */
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/pvrusb2/pvrusb2.h b/drivers/media/video/pvrusb2/pvrusb2.h
new file mode 100644
index 00000000000000..074533e9c21ee6
--- /dev/null
+++ b/drivers/media/video/pvrusb2/pvrusb2.h
@@ -0,0 +1,43 @@
+/*
+ *
+ * $Id$
+ *
+ * Copyright (C) 2005 Mike Isely <isely@pobox.com>
+ * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#ifndef __PVRUSB2_H
+#define __PVRUSB2_H
+
+/* Maximum number of pvrusb2 instances we can track at once. You
+   might want to increase this; however, driver operation will not
+   be impaired if it is too small. Instead, additional units just
+   won't have an ID assigned, and it might not be possible to specify
+   module parameters for those extra units. */
+#define PVR_NUM 20
+
+#endif /* __PVRUSB2_H */
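PVR_NUM caps the per-unit bookkeeping that the rest of this patch indexes by unit number; pvr2_v4l2_dev_init() only consults video_nr[unit_number] when unit_number is below it. The declaration of that array is not part of this hunk, so the following is only a hedged sketch of the usual kernel idiom such a bound is sized for, with the value -1 assumed to mean that no particular minor number is requested.

/*
 * Editorial sketch, not taken from this patch: a per-unit module
 * parameter array of the kind PVR_NUM is meant to bound.  It is what
 * makes the unit_number < PVR_NUM check in pvr2_v4l2_dev_init() the
 * natural guard before reading video_nr[unit_number].
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

#define PVR_NUM 20

/* One requested minor number per unit; -1 means "pick one automatically". */
static int video_nr[PVR_NUM] = { [0 ... PVR_NUM - 1] = -1 };
module_param_array(video_nr, int, NULL, 0444);
MODULE_PARM_DESC(video_nr, "Preferred video device minor for each unit");

As the header comment says, exceeding PVR_NUM is harmless: extra units still operate, they simply cannot be addressed through a per-unit parameter slot.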
+
+/*
+ Stuff for Emacs to see, in order to encourage consistent editing style:
+ *** Local Variables: ***
+ *** mode: c ***
+ *** fill-column: 70 ***
+ *** tab-width: 8 ***
+ *** c-basic-offset: 8 ***
+ *** End: ***
+ */
diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
index de7b9e6e932a9b..afc8f352b8e713 100644
--- a/drivers/media/video/saa7134/saa6752hs.c
+++ b/drivers/media/video/saa7134/saa6752hs.c
@@ -432,10 +432,10 @@ static void saa6752hs_old_set_params(struct i2c_client* client,
}
static int handle_ctrl(struct saa6752hs_mpeg_params *params,
- struct v4l2_ext_control *ctrl, int cmd)
+ struct v4l2_ext_control *ctrl, unsigned int cmd)
{
int old = 0, new;
- int set = cmd == VIDIOC_S_EXT_CTRLS;
+ int set = (cmd == VIDIOC_S_EXT_CTRLS);
new = ctrl->value;
switch (ctrl->id) {
diff --git a/drivers/media/video/stradis.c b/drivers/media/video/stradis.c
index 6be9c1131e1fc1..c18b31d9928c8d 100644
--- a/drivers/media/video/stradis.c
+++ b/drivers/media/video/stradis.c
@@ -2190,7 +2190,7 @@ static struct pci_driver stradis_driver = {
.remove = __devexit_p(stradis_remove)
};
-int __init stradis_init(void)
+static int __init stradis_init(void)
{
int retval;
@@ -2203,7 +2203,7 @@ int __init stradis_init(void)
return retval;
}
-void __exit stradis_exit(void)
+static void __exit stradis_exit(void)
{
pci_unregister_driver(&stradis_driver);
printk(KERN_INFO "stradis: module cleanup complete\n");
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index b6ae969563b2ba..2fadabf99688c7 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -22,11 +22,11 @@
*/
#define tda9887_info(fmt, arg...) do {\
- printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \
+ printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
#define tda9887_dbg(fmt, arg...) do {\
if (tuner_debug) \
- printk(KERN_INFO "%s %d-%04x (tda9887): " fmt, t->i2c.name, \
+ printk(KERN_INFO "%s %d-%04x: " fmt, t->i2c.name, \
i2c_adapter_id(t->i2c.adapter), t->i2c.addr , ##arg); } while (0)
@@ -84,8 +84,7 @@ struct tvnorm {
#define cAudioGain6 0x80 // bit c7
#define cTopMask 0x1f // bit c0:4
-#define cTopPalSecamDefault 0x14 // bit c0:4
-#define cTopNtscRadioDefault 0x10 // bit c0:4
+#define cTopDefault 0x10 // bit c0:4
//// third reg (e)
#define cAudioIF_4_5 0x00 // bit e0:1
@@ -123,7 +122,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_5_5 |
cVideoIF_38_90 ),
@@ -134,7 +133,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_0 |
cVideoIF_38_90 ),
@@ -145,7 +144,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
@@ -156,7 +155,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
@@ -165,7 +164,7 @@ static struct tvnorm tvnorms[] = {
.name = "SECAM-BGH",
.b = ( cPositiveAmTV |
cQSS ),
- .c = ( cTopPalSecamDefault),
+ .c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_5_5 |
cVideoIF_38_90 ),
@@ -174,7 +173,7 @@ static struct tvnorm tvnorms[] = {
.name = "SECAM-L",
.b = ( cPositiveAmTV |
cQSS ),
- .c = ( cTopPalSecamDefault),
+ .c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
@@ -184,7 +183,7 @@ static struct tvnorm tvnorms[] = {
.b = ( cOutputPort2Inactive |
cPositiveAmTV |
cQSS ),
- .c = ( cTopPalSecamDefault),
+ .c = ( cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_33_90 ),
@@ -195,7 +194,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopPalSecamDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_6_5 |
cVideoIF_38_90 ),
@@ -206,7 +205,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_45_75 ),
@@ -217,7 +216,7 @@ static struct tvnorm tvnorms[] = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis50 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cGating_36 |
cAudioIF_4_5 |
cVideoIF_58_75 ),
@@ -230,7 +229,7 @@ static struct tvnorm radio_stereo = {
cQSS ),
.c = ( cDeemphasisOFF |
cAudioGain6 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cTunerGainLow |
cAudioIF_5_5 |
cRadioIF_38_90 ),
@@ -242,7 +241,7 @@ static struct tvnorm radio_mono = {
cQSS ),
.c = ( cDeemphasisON |
cDeemphasis75 |
- cTopNtscRadioDefault),
+ cTopDefault),
.e = ( cTunerGainLow |
cAudioIF_5_5 |
cRadioIF_38_90 ),
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index a26ded7d6faef8..011413cf34a8ad 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -40,7 +40,6 @@ static unsigned int no_autodetect = 0;
static unsigned int show_i2c = 0;
/* insmod options used at runtime => read/write */
-static unsigned int tuner_debug_old = 0;
int tuner_debug = 0;
static unsigned int tv_range[2] = { 44, 958 };
@@ -54,8 +53,6 @@ static char ntsc[] = "-";
module_param(addr, int, 0444);
module_param(no_autodetect, int, 0444);
module_param(show_i2c, int, 0444);
-/* Note: tuner_debug is deprecated and will be removed in 2.6.17 */
-module_param_named(tuner_debug,tuner_debug_old, int, 0444);
module_param_named(debug,tuner_debug, int, 0644);
module_param_string(pal, pal, sizeof(pal), 0644);
module_param_string(secam, secam, sizeof(secam), 0644);
@@ -442,11 +439,6 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
t->audmode = V4L2_TUNER_MODE_STEREO;
t->mode_mask = T_UNINITIALIZED;
t->tuner_status = tuner_status;
- if (tuner_debug_old) {
- tuner_debug = tuner_debug_old;
- printk(KERN_ERR "tuner: tuner_debug is deprecated and will be removed in 2.6.17.\n");
- printk(KERN_ERR "tuner: use the debug option instead.\n");
- }
if (show_i2c) {
unsigned char buffer[16];
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index f4b3d64ebf73ff..97f946db85978a 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -1103,7 +1103,7 @@ const char **v4l2_ctrl_get_menu(u32 id)
};
static const char *mpeg_stream_vbi_fmt[] = {
"No VBI",
- "VBI in private packets, IVTV format",
+ "Private packet, IVTV format",
NULL
};
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 74714e5bcf0329..3ff8378ea660ad 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -305,10 +305,8 @@ mptfc_GetFcDevPage0(MPT_ADAPTER *ioc, int ioc_port,
}
out:
- if (pp0_array)
- kfree(pp0_array);
- if (p0_array)
- kfree(p0_array);
+ kfree(pp0_array);
+ kfree(p0_array);
return rc;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index af6ec553ff7ca5..85689ab46cbc26 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1378,8 +1378,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
return 0;
out_free_port_info:
- if (hba)
- kfree(hba);
+ kfree(hba);
out:
return error;
}
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index febbdd4e0605dd..c74e5460f83406 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -1239,7 +1239,6 @@ EXPORT_SYMBOL(i2o_cntxt_list_remove);
EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
#endif
EXPORT_SYMBOL(i2o_msg_get_wait);
-EXPORT_SYMBOL(i2o_msg_nop);
EXPORT_SYMBOL(i2o_find_iop);
EXPORT_SYMBOL(i2o_iop_find_device);
EXPORT_SYMBOL(i2o_event_register);
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index 1fdf03fd2da751..9706cc19134a1b 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -85,7 +85,7 @@ static int __devinit ibmasm_init_one(struct pci_dev *pdev, const struct pci_devi
}
memset(sp, 0, sizeof(struct service_processor));
- sp->lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&sp->lock);
INIT_LIST_HEAD(&sp->command_queue);
pci_set_drvdata(pdev, (void *)sp);
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 0d435814aaa137..39edb8250fbc13 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -357,6 +357,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
mtd->resume = cfi_intelext_resume;
mtd->flags = MTD_CAP_NORFLASH;
mtd->name = map->name;
+ mtd->writesize = 1;
mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
diff --git a/drivers/mtd/chips/jedec.c b/drivers/mtd/chips/jedec.c
index c40b48dabed362..2c3f019197c116 100644
--- a/drivers/mtd/chips/jedec.c
+++ b/drivers/mtd/chips/jedec.c
@@ -256,6 +256,7 @@ static struct mtd_info *jedec_probe(struct map_info *map)
MTD->name = map->name;
MTD->type = MTD_NORFLASH;
MTD->flags = MTD_CAP_NORFLASH;
+ MTD->writesize = 1;
MTD->erasesize = SectorSize*(map->buswidth);
// printk("MTD->erasesize is %x\n",(unsigned int)MTD->erasesize);
MTD->size = priv->size;
diff --git a/drivers/mtd/chips/map_absent.c b/drivers/mtd/chips/map_absent.c
index a611de9b151593..ac01a949b687a4 100644
--- a/drivers/mtd/chips/map_absent.c
+++ b/drivers/mtd/chips/map_absent.c
@@ -64,7 +64,8 @@ static struct mtd_info *map_absent_probe(struct map_info *map)
mtd->write = map_absent_write;
mtd->sync = map_absent_sync;
mtd->flags = 0;
- mtd->erasesize = PAGE_SIZE;
+ mtd->erasesize = PAGE_SIZE;
+ mtd->writesize = 1;
__module_get(THIS_MODULE);
return mtd;
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index 763925747db6f6..3a66680abfd06c 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -71,6 +71,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map)
mtd->write = mapram_write;
mtd->sync = mapram_nop;
mtd->flags = MTD_CAP_RAM;
+ mtd->writesize = 1;
mtd->erasesize = PAGE_SIZE;
while(mtd->size & (mtd->erasesize - 1))
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index bc6ee9ef8a31ac..1b328b1378fda2 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -47,6 +47,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map)
mtd->sync = maprom_nop;
mtd->flags = MTD_CAP_ROM;
mtd->erasesize = map->size;
+ mtd->writesize = 1;
__module_get(THIS_MODULE);
return mtd;
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index 0d98c223c5fc2e..be3f1c136d0211 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -324,6 +324,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
dev->mtd.erasesize = erase_size;
+ dev->mtd.writesize = 1;
dev->mtd.type = MTD_RAM;
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
diff --git a/drivers/mtd/devices/ms02-nv.c b/drivers/mtd/devices/ms02-nv.c
index 4ab7670770e43f..08dfb899b27204 100644
--- a/drivers/mtd/devices/ms02-nv.c
+++ b/drivers/mtd/devices/ms02-nv.c
@@ -225,6 +225,7 @@ static int __init ms02nv_init_one(ulong addr)
mtd->owner = THIS_MODULE;
mtd->read = ms02nv_read;
mtd->write = ms02nv_write;
+ mtd->writesize = 1;
ret = -EIO;
if (add_mtd_device(mtd)) {
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index a19480d07888df..04271d02b6b6d9 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -478,6 +478,7 @@ add_dataflash(struct spi_device *spi, char *name,
device->name = (pdata && pdata->name) ? pdata->name : priv->name;
device->size = nr_pages * pagesize;
device->erasesize = pagesize;
+ device->writesize = pagesize;
device->owner = THIS_MODULE;
device->type = MTD_DATAFLASH;
device->flags = MTD_CAP_NORFLASH;
diff --git a/drivers/mtd/devices/phram.c b/drivers/mtd/devices/phram.c
index e09e416667d38d..6c7337f9ebbbfc 100644
--- a/drivers/mtd/devices/phram.c
+++ b/drivers/mtd/devices/phram.c
@@ -151,6 +151,7 @@ static int register_device(char *name, unsigned long start, unsigned long len)
new->mtd.owner = THIS_MODULE;
new->mtd.type = MTD_RAM;
new->mtd.erasesize = PAGE_SIZE;
+ new->mtd.writesize = 1;
ret = -EAGAIN;
if (add_mtd_device(&new->mtd)) {
diff --git a/drivers/mtd/devices/pmc551.c b/drivers/mtd/devices/pmc551.c
index 666cce1bf60c0d..f620d74f100424 100644
--- a/drivers/mtd/devices/pmc551.c
+++ b/drivers/mtd/devices/pmc551.c
@@ -778,7 +778,8 @@ static int __init init_pmc551(void)
mtd->type = MTD_RAM;
mtd->name = "PMC551 RAM board";
mtd->erasesize = 0x10000;
- mtd->owner = THIS_MODULE;
+ mtd->writesize = 1;
+ mtd->owner = THIS_MODULE;
if (add_mtd_device(mtd)) {
printk(KERN_NOTICE "pmc551: Failed to register new device\n");
diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c
index b3f665e3c38bfe..542a0c009006d4 100644
--- a/drivers/mtd/devices/slram.c
+++ b/drivers/mtd/devices/slram.c
@@ -209,6 +209,7 @@ static int register_device(char *name, unsigned long start, unsigned long length
(*curmtd)->mtdinfo->owner = THIS_MODULE;
(*curmtd)->mtdinfo->type = MTD_RAM;
(*curmtd)->mtdinfo->erasesize = SLRAM_BLK_SZ;
+ (*curmtd)->mtdinfo->writesize = 1;
if (add_mtd_device((*curmtd)->mtdinfo)) {
E("slram: Failed to register new device\n");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 2c9cc7f37e9216..c26488a1793abd 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -42,7 +42,6 @@ struct ixp2000_flash_info {
struct map_info map;
struct mtd_partition *partitions;
struct resource *res;
- int nr_banks;
};
static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs)
@@ -183,7 +182,6 @@ static int ixp2000_flash_probe(struct platform_device *dev)
*/
info->map.phys = NO_XIP;
- info->nr_banks = ixp_data->nr_banks;
info->map.size = ixp_data->nr_banks * window_size;
info->map.bankwidth = 1;
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 433c3cac3ca959..d6301f08906dff 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -182,7 +182,7 @@ static struct physmap_flash_data physmap_flash_data = {
static struct resource physmap_flash_resource = {
.start = CONFIG_MTD_PHYSMAP_START,
- .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN,
+ .end = CONFIG_MTD_PHYSMAP_START + CONFIG_MTD_PHYSMAP_LEN - 1,
.flags = IORESOURCE_MEM,
};
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index aa18d45b264bb2..9a4b59d925252a 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -78,7 +78,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
return -EINVAL;
}
- if (offset >= 0 && offset < mtd->size)
+ if (offset >= 0 && offset <= mtd->size)
return file->f_pos = offset;
return -EINVAL;
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 27083ed0a017a6..80a76654d96374 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1176,7 +1176,7 @@ static int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
status = chip->waitfunc(mtd, chip);
- return status;
+ return status & NAND_STATUS_FAIL ? -EIO : 0;
}
/**
@@ -1271,10 +1271,6 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd);
buf = nand_transfer_oob(chip, buf, ops);
- readlen -= ops->ooblen;
- if (!readlen)
- break;
-
if (!(chip->options & NAND_NO_READRDY)) {
/*
* Apply delay or wait for ready/busy pin. Do this
@@ -1288,6 +1284,10 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
nand_wait_ready(mtd);
}
+ readlen -= ops->ooblen;
+ if (!readlen)
+ break;
+
/* Increment page address */
realpage++;
@@ -1610,13 +1610,13 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
if (!writelen)
return 0;
+ chipnr = (int)(to >> chip->chip_shift);
+ chip->select_chip(mtd, chipnr);
+
/* Check, if it is write protected */
if (nand_check_wp(mtd))
return -EIO;
- chipnr = (int)(to >> chip->chip_shift);
- chip->select_chip(mtd, chipnr);
-
realpage = (int)(to >> chip->page_shift);
page = realpage & chip->pagemask;
blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index fe8d38514ba655..e5bd88f2d560ae 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -61,15 +61,15 @@ static void ndfc_select_chip(struct mtd_info *mtd, int chip)
static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
- struct nand_chip *chip = mtd->priv;
+ struct ndfc_controller *ndfc = &ndfc_ctrl;
if (cmd == NAND_CMD_NONE)
return;
if (ctrl & NAND_CLE)
- writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_CMD);
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
else
- writel(cmd & 0xFF, chip->IO_ADDR_W + NDFC_ALE);
+ writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
}
static int ndfc_ready(struct mtd_info *mtd)
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
index 2c262fe03d8af6..ff5cef24d5bb3c 100644
--- a/drivers/mtd/nand/s3c2410.c
+++ b/drivers/mtd/nand/s3c2410.c
@@ -63,8 +63,6 @@
#include <asm/arch/regs-nand.h>
#include <asm/arch/nand.h>
-#define PFX "s3c2410-nand: "
-
#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
static int hardware_ecc = 1;
#else
@@ -99,6 +97,12 @@ struct s3c2410_nand_mtd {
int scan_res;
};
+enum s3c_cpu_type {
+ TYPE_S3C2410,
+ TYPE_S3C2412,
+ TYPE_S3C2440,
+};
+
/* overview of the s3c2410 nand state */
struct s3c2410_nand_info {
@@ -112,9 +116,11 @@ struct s3c2410_nand_info {
struct resource *area;
struct clk *clk;
void __iomem *regs;
+ void __iomem *sel_reg;
+ int sel_bit;
int mtd_count;
- unsigned char is_s3c2440;
+ enum s3c_cpu_type cpu_type;
};
/* conversion functions */
@@ -148,7 +154,7 @@ static inline int allow_clk_stop(struct s3c2410_nand_info *info)
#define NS_IN_KHZ 1000000
-static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
{
int result;
@@ -172,53 +178,58 @@ static int s3c2410_nand_calc_rate(int wanted, unsigned long clk, int max)
/* controller setup */
-static int s3c2410_nand_inithw(struct s3c2410_nand_info *info, struct platform_device *pdev)
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info,
+ struct platform_device *pdev)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
unsigned long clkrate = clk_get_rate(info->clk);
+ int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
int tacls, twrph0, twrph1;
- unsigned long cfg;
+ unsigned long cfg = 0;
/* calculate the timing information for the controller */
clkrate /= 1000; /* turn clock into kHz for ease of use */
if (plat != NULL) {
- tacls = s3c2410_nand_calc_rate(plat->tacls, clkrate, 4);
- twrph0 = s3c2410_nand_calc_rate(plat->twrph0, clkrate, 8);
- twrph1 = s3c2410_nand_calc_rate(plat->twrph1, clkrate, 8);
+ tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+ twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+ twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
} else {
/* default timings */
- tacls = 4;
+ tacls = tacls_max;
twrph0 = 8;
twrph1 = 8;
}
if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
- printk(KERN_ERR PFX "cannot get timings suitable for board\n");
+ dev_err(info->device, "cannot get suitable timings\n");
return -EINVAL;
}
- printk(KERN_INFO PFX "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+ dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate));
- if (!info->is_s3c2440) {
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
cfg = S3C2410_NFCONF_EN;
cfg |= S3C2410_NFCONF_TACLS(tacls - 1);
cfg |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
cfg |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
- } else {
+ break;
+
+ case TYPE_S3C2440:
+ case TYPE_S3C2412:
cfg = S3C2440_NFCONF_TACLS(tacls - 1);
cfg |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
cfg |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
/* enable the controller and de-assert nFCE */
- writel(S3C2440_NFCONT_ENABLE | S3C2440_NFCONT_ENABLE,
- info->regs + S3C2440_NFCONT);
+ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
}
- pr_debug(PFX "NF_CONF is 0x%lx\n", cfg);
+ dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
writel(cfg, info->regs + S3C2410_NFCONF);
return 0;
@@ -231,26 +242,21 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
struct s3c2410_nand_info *info;
struct s3c2410_nand_mtd *nmtd;
struct nand_chip *this = mtd->priv;
- void __iomem *reg;
unsigned long cur;
- unsigned long bit;
nmtd = this->priv;
info = nmtd->info;
- bit = (info->is_s3c2440) ? S3C2440_NFCONT_nFCE : S3C2410_NFCONF_nFCE;
- reg = info->regs + ((info->is_s3c2440) ? S3C2440_NFCONT : S3C2410_NFCONF);
-
if (chip != -1 && allow_clk_stop(info))
clk_enable(info->clk);
- cur = readl(reg);
+ cur = readl(info->sel_reg);
if (chip == -1) {
- cur |= bit;
+ cur |= info->sel_bit;
} else {
if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
- printk(KERN_ERR PFX "chip %d out of range\n", chip);
+ dev_err(info->device, "invalid chip %d\n", chip);
return;
}
@@ -259,10 +265,10 @@ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
(info->platform->select_chip) (nmtd->set, chip);
}
- cur &= ~bit;
+ cur &= ~info->sel_bit;
}
- writel(cur, reg);
+ writel(cur, info->sel_reg);
if (chip == -1 && allow_clk_stop(info))
clk_disable(info->clk);
@@ -311,15 +317,25 @@ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
static int s3c2410_nand_devready(struct mtd_info *mtd)
{
struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
- if (info->is_s3c2440)
- return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
}
+static int s3c2440_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct mtd_info *mtd)
+{
+ struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+ return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
/* ECC handling functions */
-static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc)
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc)
{
pr_debug("s3c2410_nand_correct_data(%p,%p,%p,%p)\n", mtd, dat, read_ecc, calc_ecc);
@@ -487,11 +503,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
struct s3c2410_nand_set *set)
{
struct nand_chip *chip = &nmtd->chip;
+ void __iomem *regs = info->regs;
- chip->IO_ADDR_R = info->regs + S3C2410_NFDATA;
- chip->IO_ADDR_W = info->regs + S3C2410_NFDATA;
- chip->cmd_ctrl = s3c2410_nand_hwcontrol;
- chip->dev_ready = s3c2410_nand_devready;
chip->write_buf = s3c2410_nand_write_buf;
chip->read_buf = s3c2410_nand_read_buf;
chip->select_chip = s3c2410_nand_select_chip;
@@ -500,11 +513,37 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
chip->options = 0;
chip->controller = &info->controller;
- if (info->is_s3c2440) {
- chip->IO_ADDR_R = info->regs + S3C2440_NFDATA;
- chip->IO_ADDR_W = info->regs + S3C2440_NFDATA;
- chip->cmd_ctrl = s3c2440_nand_hwcontrol;
- }
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->IO_ADDR_W = regs + S3C2410_NFDATA;
+ info->sel_reg = regs + S3C2410_NFCONF;
+ info->sel_bit = S3C2410_NFCONF_nFCE;
+ chip->cmd_ctrl = s3c2410_nand_hwcontrol;
+ chip->dev_ready = s3c2410_nand_devready;
+ break;
+
+ case TYPE_S3C2440:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2440_NFCONT_nFCE;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2440_nand_devready;
+ break;
+
+ case TYPE_S3C2412:
+ chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+ info->sel_reg = regs + S3C2440_NFCONT;
+ info->sel_bit = S3C2412_NFCONT_nFCE0;
+ chip->cmd_ctrl = s3c2440_nand_hwcontrol;
+ chip->dev_ready = s3c2412_nand_devready;
+
+ if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+ dev_info(info->device, "System booted from NAND\n");
+
+ break;
+ }
+
+ chip->IO_ADDR_R = chip->IO_ADDR_W;
nmtd->info = info;
nmtd->mtd.priv = chip;
@@ -512,17 +551,25 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
nmtd->set = set;
if (hardware_ecc) {
- chip->ecc.correct = s3c2410_nand_correct_data;
- chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ chip->ecc.correct = s3c2410_nand_correct_data;
chip->ecc.mode = NAND_ECC_HW;
chip->ecc.size = 512;
chip->ecc.bytes = 3;
chip->ecc.layout = &nand_hw_eccoob;
- if (info->is_s3c2440) {
- chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
- chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ switch (info->cpu_type) {
+ case TYPE_S3C2410:
+ chip->ecc.hwctl = s3c2410_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+ break;
+
+ case TYPE_S3C2412:
+ case TYPE_S3C2440:
+ chip->ecc.hwctl = s3c2440_nand_enable_hwecc;
+ chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+ break;
+
}
} else {
chip->ecc.mode = NAND_ECC_SOFT;
@@ -537,7 +584,8 @@ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
* nand layer to look for devices
*/
-static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
+static int s3c24xx_nand_probe(struct platform_device *pdev,
+ enum s3c_cpu_type cpu_type)
{
struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
struct s3c2410_nand_info *info;
@@ -592,7 +640,7 @@ static int s3c24xx_nand_probe(struct platform_device *pdev, int is_s3c2440)
info->device = &pdev->dev;
info->platform = plat;
info->regs = ioremap(res->start, size);
- info->is_s3c2440 = is_s3c2440;
+ info->cpu_type = cpu_type;
if (info->regs == NULL) {
dev_err(&pdev->dev, "cannot reserve register region\n");
@@ -699,12 +747,17 @@ static int s3c24xx_nand_resume(struct platform_device *dev)
static int s3c2410_nand_probe(struct platform_device *dev)
{
- return s3c24xx_nand_probe(dev, 0);
+ return s3c24xx_nand_probe(dev, TYPE_S3C2410);
}
static int s3c2440_nand_probe(struct platform_device *dev)
{
- return s3c24xx_nand_probe(dev, 1);
+ return s3c24xx_nand_probe(dev, TYPE_S3C2440);
+}
+
+static int s3c2412_nand_probe(struct platform_device *dev)
+{
+ return s3c24xx_nand_probe(dev, TYPE_S3C2412);
}
static struct platform_driver s3c2410_nand_driver = {
@@ -729,16 +782,29 @@ static struct platform_driver s3c2440_nand_driver = {
},
};
+static struct platform_driver s3c2412_nand_driver = {
+ .probe = s3c2412_nand_probe,
+ .remove = s3c2410_nand_remove,
+ .suspend = s3c24xx_nand_suspend,
+ .resume = s3c24xx_nand_resume,
+ .driver = {
+ .name = "s3c2412-nand",
+ .owner = THIS_MODULE,
+ },
+};
+
static int __init s3c2410_nand_init(void)
{
printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n");
+ platform_driver_register(&s3c2412_nand_driver);
platform_driver_register(&s3c2440_nand_driver);
return platform_driver_register(&s3c2410_nand_driver);
}
static void __exit s3c2410_nand_exit(void)
{
+ platform_driver_unregister(&s3c2412_nand_driver);
platform_driver_unregister(&s3c2440_nand_driver);
platform_driver_unregister(&s3c2410_nand_driver);
}
diff --git a/drivers/mtd/nand/ts7250.c b/drivers/mtd/nand/ts7250.c
index a0b4b1edcb0d10..f40081069ab2c6 100644
--- a/drivers/mtd/nand/ts7250.c
+++ b/drivers/mtd/nand/ts7250.c
@@ -97,7 +97,7 @@ static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
unsigned char bits;
- bits = (ctrl & NAND_CNE) << 2;
+ bits = (ctrl & NAND_NCE) << 2;
bits |= ctrl & NAND_CLE;
bits |= (ctrl & NAND_ALE) >> 2;
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index bd6983d1afbac7..db694c83298961 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -22,7 +22,7 @@
* Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
*
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
- * Copyright (c) 2004-2005 Macq Electronique SA.
+ * Copyright (c) 2004-2006 Macq Electronique SA.
*/
#include <linux/config.h>
@@ -51,7 +51,7 @@
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || \
defined(CONFIG_M5272) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include "fec.h"
@@ -80,6 +80,8 @@ static unsigned int fec_hw[] = {
(MCF_MBAR + 0x1000),
#elif defined(CONFIG_M520x)
(MCF_MBAR+0x30000),
+#elif defined(CONFIG_M532x)
+ (MCF_MBAR+0xfc030000),
#else
&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec),
#endif
@@ -143,7 +145,7 @@ typedef struct {
#define TX_RING_MOD_MASK 15 /* for this to work */
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
-#error "FEC: descriptor ring size contants too large"
+#error "FEC: descriptor ring size constants too large"
#endif
/* Interrupt events/masks.
@@ -167,12 +169,12 @@ typedef struct {
/*
- * The 5270/5271/5280/5282 RX control register also contains maximum frame
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
* size bits. Other FEC hardware does not, so we need to take that into
* account when setting it.
*/
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
- defined(CONFIG_M520x)
+ defined(CONFIG_M520x) || defined(CONFIG_M532x)
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
#else
#define OPT_FRAME_SIZE 0
@@ -308,6 +310,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct fec_enet_private *fep;
volatile fec_t *fecp;
volatile cbd_t *bdp;
+ unsigned short status;
fep = netdev_priv(dev);
fecp = (volatile fec_t*)dev->base_addr;
@@ -320,8 +323,9 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
+ status = bdp->cbd_sc;
#ifndef final_version
- if (bdp->cbd_sc & BD_ENET_TX_READY) {
+ if (status & BD_ENET_TX_READY) {
/* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since dev->tbusy should be set.
*/
@@ -332,7 +336,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Clear all of the status flags.
*/
- bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+ status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer.
*/
@@ -366,21 +370,22 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_lock_irq(&fep->lock);
- /* Send it on its way. Tell FEC its ready, interrupt when done,
- * its the last BD of the frame, and to put the CRC on the end.
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
*/
- bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
dev->trans_start = jiffies;
/* Trigger transmission start */
- fecp->fec_x_des_active = 0x01000000;
+ fecp->fec_x_des_active = 0;
/* If this was the last BD in the ring, start at the beginning again.
*/
- if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+ if (status & BD_ENET_TX_WRAP) {
bdp = fep->tx_bd_base;
} else {
bdp++;
@@ -491,43 +496,44 @@ fec_enet_tx(struct net_device *dev)
{
struct fec_enet_private *fep;
volatile cbd_t *bdp;
+ unsigned short status;
struct sk_buff *skb;
fep = netdev_priv(dev);
spin_lock(&fep->lock);
bdp = fep->dirty_tx;
- while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+ while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
if (bdp == fep->cur_tx && fep->tx_full == 0) break;
skb = fep->tx_skbuff[fep->skb_dirty];
/* Check for errors. */
- if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+ if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
BD_ENET_TX_RL | BD_ENET_TX_UN |
BD_ENET_TX_CSL)) {
fep->stats.tx_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_HB) /* No heartbeat */
+ if (status & BD_ENET_TX_HB) /* No heartbeat */
fep->stats.tx_heartbeat_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_LC) /* Late collision */
+ if (status & BD_ENET_TX_LC) /* Late collision */
fep->stats.tx_window_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_RL) /* Retrans limit */
+ if (status & BD_ENET_TX_RL) /* Retrans limit */
fep->stats.tx_aborted_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_UN) /* Underrun */
+ if (status & BD_ENET_TX_UN) /* Underrun */
fep->stats.tx_fifo_errors++;
- if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
+ if (status & BD_ENET_TX_CSL) /* Carrier lost */
fep->stats.tx_carrier_errors++;
} else {
fep->stats.tx_packets++;
}
#ifndef final_version
- if (bdp->cbd_sc & BD_ENET_TX_READY)
+ if (status & BD_ENET_TX_READY)
printk("HEY! Enet xmit interrupt and TX_READY.\n");
#endif
/* Deferred means some collisions occurred during transmit,
* but we eventually sent the packet OK.
*/
- if (bdp->cbd_sc & BD_ENET_TX_DEF)
+ if (status & BD_ENET_TX_DEF)
fep->stats.collisions++;
/* Free the sk buffer associated with this last transmit.
@@ -538,7 +544,7 @@ fec_enet_tx(struct net_device *dev)
/* Update pointer to next buffer descriptor to be transmitted.
*/
- if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+ if (status & BD_ENET_TX_WRAP)
bdp = fep->tx_bd_base;
else
bdp++;
@@ -568,9 +574,14 @@ fec_enet_rx(struct net_device *dev)
struct fec_enet_private *fep;
volatile fec_t *fecp;
volatile cbd_t *bdp;
+ unsigned short status;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
+
+#ifdef CONFIG_M532x
+ flush_cache_all();
+#endif
fep = netdev_priv(dev);
fecp = (volatile fec_t*)dev->base_addr;
@@ -580,13 +591,13 @@ fec_enet_rx(struct net_device *dev)
*/
bdp = fep->cur_rx;
-while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
#ifndef final_version
/* Since we have allocated space to hold a complete frame,
* the last indicator should be set.
*/
- if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
+ if ((status & BD_ENET_RX_LAST) == 0)
printk("FEC ENET: rcv is not +last\n");
#endif
@@ -594,26 +605,26 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
goto rx_processing_done;
/* Check for errors. */
- if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
fep->stats.rx_errors++;
- if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+ if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
/* Frame too long or too short. */
fep->stats.rx_length_errors++;
}
- if (bdp->cbd_sc & BD_ENET_RX_NO) /* Frame alignment */
+ if (status & BD_ENET_RX_NO) /* Frame alignment */
fep->stats.rx_frame_errors++;
- if (bdp->cbd_sc & BD_ENET_RX_CR) /* CRC Error */
- fep->stats.rx_crc_errors++;
- if (bdp->cbd_sc & BD_ENET_RX_OV) /* FIFO overrun */
+ if (status & BD_ENET_RX_CR) /* CRC Error */
fep->stats.rx_crc_errors++;
+ if (status & BD_ENET_RX_OV) /* FIFO overrun */
+ fep->stats.rx_fifo_errors++;
}
/* Report late collisions as a frame error.
* On this error, the BD is closed, but we don't know what we
* have in the buffer. So, just drop this frame on the floor.
*/
- if (bdp->cbd_sc & BD_ENET_RX_CL) {
+ if (status & BD_ENET_RX_CL) {
fep->stats.rx_errors++;
fep->stats.rx_frame_errors++;
goto rx_processing_done;
@@ -639,9 +650,7 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
} else {
skb->dev = dev;
skb_put(skb,pkt_len-4); /* Make room */
- eth_copy_and_sum(skb,
- (unsigned char *)__va(bdp->cbd_bufaddr),
- pkt_len-4, 0);
+ eth_copy_and_sum(skb, data, pkt_len-4, 0);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
@@ -649,15 +658,16 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
/* Clear the status flags for this buffer.
*/
- bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+ status &= ~BD_ENET_RX_STATS;
/* Mark the buffer empty.
*/
- bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+ status |= BD_ENET_RX_EMPTY;
+ bdp->cbd_sc = status;
/* Update BD pointer to next entry.
*/
- if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+ if (status & BD_ENET_RX_WRAP)
bdp = fep->rx_bd_base;
else
bdp++;
@@ -667,9 +677,9 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
*/
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
#endif
- } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
+ } /* while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) */
fep->cur_rx = (cbd_t *)bdp;
#if 0
@@ -680,11 +690,12 @@ while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
* our way back to the interrupt return only to come right back
* here.
*/
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
#endif
}
+/* called from interrupt context */
static void
fec_enet_mii(struct net_device *dev)
{
@@ -696,10 +707,12 @@ fec_enet_mii(struct net_device *dev)
fep = netdev_priv(dev);
ep = fep->hwp;
mii_reg = ep->fec_mii_data;
+
+ spin_lock(&fep->lock);
if ((mip = mii_head) == NULL) {
printk("MII and no head!\n");
- return;
+ goto unlock;
}
if (mip->mii_func != NULL)
@@ -711,6 +724,9 @@ fec_enet_mii(struct net_device *dev)
if ((mip = mii_head) != NULL)
ep->fec_mii_data = mip->mii_regval;
+
+unlock:
+ spin_unlock(&fep->lock);
}
static int
@@ -728,8 +744,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
retval = 0;
- save_flags(flags);
- cli();
+ spin_lock_irqsave(&fep->lock,flags);
if ((mip = mii_free) != NULL) {
mii_free = mip->mii_next;
@@ -749,7 +764,7 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
retval = 1;
}
- restore_flags(flags);
+ spin_unlock_irqrestore(&fep->lock,flags);
return(retval);
}
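
The two locking changes above work as a pair: fec_enet_mii() runs in interrupt context (per the new comment) and takes the lock with plain spin_lock(), while the process-context mii_queue() drops the legacy save_flags()/cli() pair in favour of spin_lock_irqsave(). A minimal sketch of that split, with a hypothetical lock and function names (not the driver's):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);		/* hypothetical lock */

/* process-context side: disable local interrupts while holding the lock */
static void example_queue(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... manipulate the request list shared with the ISR ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

/* interrupt-context side: interrupts are already off, a plain lock suffices */
static void example_isr_work(void)
{
	spin_lock(&example_lock);
	/* ... consume the request list ... */
	spin_unlock(&example_lock);
}
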
@@ -1216,7 +1231,7 @@ static phy_info_t const * const phy_info[] = {
};
/* ------------------------------------------------------------------------- */
-
+#if !defined(CONFIG_M532x)
#ifdef CONFIG_RPXCLASSIC
static void
mii_link_interrupt(void *dev_id);
@@ -1224,6 +1239,7 @@ mii_link_interrupt(void *dev_id);
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs);
#endif
+#endif
#if defined(CONFIG_M5272)
@@ -1384,13 +1400,13 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
{
volatile unsigned char *icrp;
volatile unsigned long *imrp;
- int i;
+ int i, ilip;
b = (fep->index) ? MCFICM_INTC1 : MCFICM_INTC0;
icrp = (volatile unsigned char *) (MCF_IPSBAR + b +
MCFINTC_ICR0);
- for (i = 23; (i < 36); i++)
- icrp[i] = 0x23;
+ for (i = 23, ilip = 0x28; (i < 36); i++)
+ icrp[i] = ilip--;
imrp = (volatile unsigned long *) (MCF_IPSBAR + b +
MCFINTC_IMRH);
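
The changed loop above now gives each of the thirteen FEC interrupt sources (ICR23..ICR35) its own descending level/priority byte, 0x28 down to 0x1c, instead of a shared 0x23. A standalone check of the values the loop produces (plain user-space C, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned char icr[64] = { 0 };
	int i, ilip;

	for (i = 23, ilip = 0x28; i < 36; i++)
		icr[i] = ilip--;

	for (i = 23; i < 36; i++)
		printf("ICR%d = 0x%02x\n", i, icr[i]);	/* 0x28 .. 0x1c */
	return 0;
}
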
@@ -1618,6 +1634,159 @@ static void __inline__ fec_uncache(unsigned long addr)
/* ------------------------------------------------------------------------- */
+#elif defined(CONFIG_M532x)
+/*
+ * Code specific for M532x
+ */
+static void __inline__ fec_request_intrs(struct net_device *dev)
+{
+ struct fec_enet_private *fep;
+ int b;
+ static const struct idesc {
+ char *name;
+ unsigned short irq;
+ } *idp, id[] = {
+ { "fec(TXF)", 36 },
+ { "fec(TXB)", 37 },
+ { "fec(TXFIFO)", 38 },
+ { "fec(TXCR)", 39 },
+ { "fec(RXF)", 40 },
+ { "fec(RXB)", 41 },
+ { "fec(MII)", 42 },
+ { "fec(LC)", 43 },
+ { "fec(HBERR)", 44 },
+ { "fec(GRA)", 45 },
+ { "fec(EBERR)", 46 },
+ { "fec(BABT)", 47 },
+ { "fec(BABR)", 48 },
+ { NULL },
+ };
+
+ fep = netdev_priv(dev);
+ b = (fep->index) ? 128 : 64;
+
+ /* Setup interrupt handlers. */
+ for (idp = id; idp->name; idp++) {
+ if (request_irq(b+idp->irq,fec_enet_interrupt,0,idp->name,dev)!=0)
+ printk("FEC: Could not allocate %s IRQ(%d)!\n",
+ idp->name, b+idp->irq);
+ }
+
+ /* Unmask interrupts */
+ MCF_INTC0_ICR36 = 0x2;
+ MCF_INTC0_ICR37 = 0x2;
+ MCF_INTC0_ICR38 = 0x2;
+ MCF_INTC0_ICR39 = 0x2;
+ MCF_INTC0_ICR40 = 0x2;
+ MCF_INTC0_ICR41 = 0x2;
+ MCF_INTC0_ICR42 = 0x2;
+ MCF_INTC0_ICR43 = 0x2;
+ MCF_INTC0_ICR44 = 0x2;
+ MCF_INTC0_ICR45 = 0x2;
+ MCF_INTC0_ICR46 = 0x2;
+ MCF_INTC0_ICR47 = 0x2;
+ MCF_INTC0_ICR48 = 0x2;
+
+ MCF_INTC0_IMRH &= ~(
+ MCF_INTC_IMRH_INT_MASK36 |
+ MCF_INTC_IMRH_INT_MASK37 |
+ MCF_INTC_IMRH_INT_MASK38 |
+ MCF_INTC_IMRH_INT_MASK39 |
+ MCF_INTC_IMRH_INT_MASK40 |
+ MCF_INTC_IMRH_INT_MASK41 |
+ MCF_INTC_IMRH_INT_MASK42 |
+ MCF_INTC_IMRH_INT_MASK43 |
+ MCF_INTC_IMRH_INT_MASK44 |
+ MCF_INTC_IMRH_INT_MASK45 |
+ MCF_INTC_IMRH_INT_MASK46 |
+ MCF_INTC_IMRH_INT_MASK47 |
+ MCF_INTC_IMRH_INT_MASK48 );
+
+ /* Set up gpio outputs for MII lines */
+ MCF_GPIO_PAR_FECI2C |= (0 |
+ MCF_GPIO_PAR_FECI2C_PAR_MDC_EMDC |
+ MCF_GPIO_PAR_FECI2C_PAR_MDIO_EMDIO);
+ MCF_GPIO_PAR_FEC = (0 |
+ MCF_GPIO_PAR_FEC_PAR_FEC_7W_FEC |
+ MCF_GPIO_PAR_FEC_PAR_FEC_MII_FEC);
+}
+
+static void __inline__ fec_set_mii(struct net_device *dev, struct fec_enet_private *fep)
+{
+ volatile fec_t *fecp;
+
+ fecp = fep->hwp;
+ fecp->fec_r_cntrl = OPT_FRAME_SIZE | 0x04;
+ fecp->fec_x_cntrl = 0x00;
+
+ /*
+ * Set MII speed to 2.5 MHz
+ */
+ fep->phy_speed = ((((MCF_CLK / 2) / (2500000 / 10)) + 5) / 10) * 2;
+ fecp->fec_mii_speed = fep->phy_speed;
+
+ fec_restart(dev, 0);
+}
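
The phy_speed expression above derives the MII management clock divider from the CPU clock so that the MII speed comes out near the 2.5 MHz noted in the comment. A quick standalone check of the arithmetic, assuming a hypothetical 80 MHz MCF_CLK (the real value depends on the board):

#include <stdio.h>

int main(void)
{
	unsigned long mcf_clk = 80000000UL;	/* hypothetical CPU clock */
	unsigned long speed;

	/* (80 MHz / 2) / 250 kHz = 160; (160 + 5) / 10 = 16; 16 * 2 = 32 */
	speed = ((((mcf_clk / 2) / (2500000 / 10)) + 5) / 10) * 2;
	printf("phy_speed = %lu\n", speed);	/* prints 32 */
	return 0;
}
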
+
+static void __inline__ fec_get_mac(struct net_device *dev)
+{
+ struct fec_enet_private *fep = netdev_priv(dev);
+ volatile fec_t *fecp;
+ unsigned char *iap, tmpaddr[ETH_ALEN];
+
+ fecp = fep->hwp;
+
+ if (FEC_FLASHMAC) {
+ /*
+ * Get MAC address from FLASH.
+ * If it is all 1's or 0's, use the default.
+ */
+ iap = FEC_FLASHMAC;
+ if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+ (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+ iap = fec_mac_default;
+ if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+ (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+ iap = fec_mac_default;
+ } else {
+ *((unsigned long *) &tmpaddr[0]) = fecp->fec_addr_low;
+ *((unsigned short *) &tmpaddr[4]) = (fecp->fec_addr_high >> 16);
+ iap = &tmpaddr[0];
+ }
+
+ memcpy(dev->dev_addr, iap, ETH_ALEN);
+
+ /* Adjust MAC if using default MAC address */
+ if (iap == fec_mac_default)
+ dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
+}
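
fec_get_mac() above falls back to fec_mac_default when the flash copy of the address is all zeros or all ones. A compact standalone restatement of that sanity check (hypothetical helper name, plain C, not part of the patch):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Returns nonzero when the flash MAC is unusable (all 0x00 or all 0xff). */
static int flash_mac_is_bogus(const unsigned char *mac)
{
	static const unsigned char zeros[ETH_ALEN];
	static const unsigned char ones[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return !memcmp(mac, zeros, ETH_ALEN) || !memcmp(mac, ones, ETH_ALEN);
}

int main(void)
{
	const unsigned char from_flash[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("use default MAC: %d\n", flash_mac_is_bogus(from_flash)); /* 1 */
	return 0;
}
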
+
+static void __inline__ fec_enable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_disable_phy_intr(void)
+{
+}
+
+static void __inline__ fec_phy_ack_intr(void)
+{
+}
+
+static void __inline__ fec_localhw_setup(void)
+{
+}
+
+/*
+ * Do not need to make region uncached on 532x.
+ */
+static void __inline__ fec_uncache(unsigned long addr)
+{
+}
+
+/* ------------------------------------------------------------------------- */
+
+
#else
/*
@@ -1985,9 +2154,12 @@ fec_enet_open(struct net_device *dev)
mii_do_cmd(dev, fep->phy->config);
mii_do_cmd(dev, phy_cmd_config); /* display configuration */
- /* FIXME: use netif_carrier_{on,off} ; this polls
- * until link is up which is wrong... could be
- * 30 seconds or more we are trapped in here. -jgarzik
+ /* Poll until the PHY tells us its configuration
+ * (not link state).
+ * Request is initiated by mii_do_cmd above, but answer
+ * comes by interrupt.
+ * This should take about 25 usec per register at 2.5 MHz,
+ * and we read approximately 5 registers.
*/
while(!fep->sequence_done)
schedule();
@@ -2253,15 +2425,11 @@ int __init fec_enet_init(struct net_device *dev)
*/
fec_request_intrs(dev);
- /* Clear and enable interrupts */
- fecp->fec_ievent = 0xffc00000;
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
- FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
fecp->fec_hash_table_high = 0;
fecp->fec_hash_table_low = 0;
fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
dev->base_addr = (unsigned long)fecp;
@@ -2281,6 +2449,11 @@ int __init fec_enet_init(struct net_device *dev)
/* setup MII interface */
fec_set_mii(dev, fep);
+ /* Clear and enable interrupts */
+ fecp->fec_ievent = 0xffc00000;
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+
/* Queue up command to detect the PHY and initialize the
* remainder of the interface.
*/
@@ -2312,11 +2485,6 @@ fec_restart(struct net_device *dev, int duplex)
fecp->fec_ecntrl = 1;
udelay(10);
- /* Enable interrupts we wish to service.
- */
- fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
- FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
-
/* Clear any outstanding interrupt.
*/
fecp->fec_ievent = 0xffc00000;
@@ -2408,7 +2576,12 @@ fec_restart(struct net_device *dev, int duplex)
/* And last, enable the transmit and receive processing.
*/
fecp->fec_ecntrl = 2;
- fecp->fec_r_des_active = 0x01000000;
+ fecp->fec_r_des_active = 0;
+
+ /* Enable interrupts we wish to service.
+ */
+ fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
+ FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
}
static void
@@ -2420,9 +2593,16 @@ fec_stop(struct net_device *dev)
fep = netdev_priv(dev);
fecp = fep->hwp;
- fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
-
- while(!(fecp->fec_ievent & FEC_ENET_GRA));
+ /*
+	 * We cannot expect a graceful transmit stop without link.
+ */
+ if (fep->link)
+ {
+ fecp->fec_x_cntrl = 0x01; /* Graceful transmit stop */
+ udelay(10);
+ if (!(fecp->fec_ievent & FEC_ENET_GRA))
+ printk("fec_stop : Graceful transmit stop did not complete !\n");
+ }
/* Whack a reset. We should wait for this.
*/
diff --git a/drivers/net/fs_enet/fs_enet-mii.c b/drivers/net/fs_enet/fs_enet-mii.c
index c6770377ef8746..0cd07150bf4aee 100644
--- a/drivers/net/fs_enet/fs_enet-mii.c
+++ b/drivers/net/fs_enet/fs_enet-mii.c
@@ -431,8 +431,7 @@ static struct fs_enet_mii_bus *create_bus(const struct fs_mii_bus_info *bi)
return bus;
err:
- if (bus)
- kfree(bus);
+ kfree(bus);
return ERR_PTR(ret);
}
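
This hunk, like the ipw2200, libata-scsi and au1100fb cleanups further down, relies on kfree(NULL) being a no-op, so the surrounding NULL checks are redundant. Minimal sketch with a hypothetical function:

#include <linux/slab.h>

static void release_example(char *buf)
{
	/* kfree() ignores a NULL pointer, so no guard is required */
	kfree(buf);
}
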
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index 081a8999666e7e..a8a8f975432fe1 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -1229,12 +1229,6 @@ static struct ipw_fw_error *ipw_alloc_error_log(struct ipw_priv *priv)
return error;
}
-static void ipw_free_error_log(struct ipw_fw_error *error)
-{
- if (error)
- kfree(error);
-}
-
static ssize_t show_event_log(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -1296,10 +1290,9 @@ static ssize_t clear_error(struct device *d,
const char *buf, size_t count)
{
struct ipw_priv *priv = dev_get_drvdata(d);
- if (priv->error) {
- ipw_free_error_log(priv->error);
- priv->error = NULL;
- }
+
+ kfree(priv->error);
+ priv->error = NULL;
return count;
}
@@ -1970,8 +1963,7 @@ static void ipw_irq_tasklet(struct ipw_priv *priv)
struct ipw_fw_error *error =
ipw_alloc_error_log(priv);
ipw_dump_error_log(priv, error);
- if (error)
- ipw_free_error_log(error);
+ kfree(error);
}
#endif
} else {
@@ -11693,10 +11685,8 @@ static void ipw_pci_remove(struct pci_dev *pdev)
}
}
- if (priv->error) {
- ipw_free_error_log(priv->error);
- priv->error = NULL;
- }
+ kfree(priv->error);
+ priv->error = NULL;
#ifdef CONFIG_IPW2200_PROMISCUOUS
ipw_prom_free(priv);
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 0e07d9535116db..d0f68ab8f04119 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -157,7 +157,7 @@ MODULE_LICENSE("Dual MPL/GPL");
static int pcmcia_schlvl = PCMCIA_SCHLVL;
-static spinlock_t events_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(events_lock);
#define PCMCIA_SOCKET_KEY_5V 1
@@ -644,7 +644,7 @@ static struct platform_device m8xx_device = {
};
static u32 pending_events[PCMCIA_SOCKETS_NO];
-static spinlock_t pending_event_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(pending_event_lock);
static irqreturn_t m8xx_interrupt(int irq, void *dev, struct pt_regs *regs)
{
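
The m8xx_pcmcia hunks above, like the rio-access, rtc, dasd_eer, hp680_bl, ioc3 and libata hunks later in this patch, replace the deprecated SPIN_LOCK_UNLOCKED / RW_LOCK_UNLOCKED static initializers with the definition macros. A minimal sketch with hypothetical lock names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* was: spinlock_t example_lock = SPIN_LOCK_UNLOCKED; */
static DEFINE_RWLOCK(example_rwlock);	/* was: rwlock_t example_rwlock = RW_LOCK_UNLOCKED;   */
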
diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c
index b9fab2ae3a3610..8b56bbdd011ec0 100644
--- a/drivers/rapidio/rio-access.c
+++ b/drivers/rapidio/rio-access.c
@@ -17,8 +17,8 @@
* These interrupt-safe spinlocks protect all accesses to RIO
* configuration space and doorbell access.
*/
-static spinlock_t rio_config_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t rio_doorbell_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rio_config_lock);
+static DEFINE_SPINLOCK(rio_doorbell_lock);
/*
* Wrappers for all RIO configuration access functions. They just check
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index 5396beec30d0ca..1cb61a761cb284 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -94,7 +94,9 @@ exit_kfree:
kfree(rtc);
exit_idr:
+ mutex_lock(&idr_lock);
idr_remove(&rtc_idr, id);
+ mutex_unlock(&idr_lock);
exit:
dev_err(dev, "rtc core: unable to register %s, err = %d\n",
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index ecafbad41a242c..762521a1419cf2 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -226,7 +226,7 @@ static int ds1553_rtc_ioctl(struct device *dev, unsigned int cmd,
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
if (pdata->irq < 0)
- return -ENOIOCTLCMD;
+ return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */
switch (cmd) {
case RTC_AIE_OFF:
pdata->irqen &= ~RTC_AF;
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index ab486fbc828dab..9cd1cb304bb2a7 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -45,7 +45,7 @@
static unsigned long rtc_freq = 1024;
static struct rtc_time rtc_alarm;
-static spinlock_t sa1100_rtc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(sa1100_rtc_lock);
static int rtc_update_alarm(struct rtc_time *alrm)
{
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index 33e029207e261c..4b9291dd444354 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -93,7 +93,7 @@ static void __iomem *rtc2_base;
static unsigned long epoch = 1970; /* Jan 1 1970 00:00:00 */
-static spinlock_t rtc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(rtc_lock);
static char rtc_name[] = "RTC";
static unsigned long periodic_frequency;
static unsigned long periodic_count;
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 2d946b6ca074cd..2d8af709947fb0 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -89,7 +89,7 @@ struct eerbuffer {
};
static LIST_HEAD(bufferlist);
-static spinlock_t bufferlock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
/*
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index c84b02aec1f35f..96a81cd17617aa 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -501,7 +501,7 @@ config SCSI_ATA_PIIX
tristate "Intel PIIX/ICH SATA support"
depends on SCSI_SATA && PCI
help
- This option enables support for ICH5 Serial ATA.
+ This option enables support for ICH5/6/7/8 Serial ATA.
If PATA support was enabled previously, this enables
support for select Intel PIIX/ICH PATA host controllers.
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 4bb77f62b3b9be..f059467777183f 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -48,7 +48,7 @@
#include <asm/io.h>
#define DRV_NAME "ahci"
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "2.0"
enum {
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index 521b718763f6c9..94b1261a259d93 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -93,7 +93,7 @@
#include <linux/libata.h>
#define DRV_NAME "ata_piix"
-#define DRV_VERSION "1.10"
+#define DRV_VERSION "2.00"
enum {
PIIX_IOCFG = 0x54, /* IDE I/O configuration register */
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 6c66877be2bffe..d1c1c30d123f36 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -88,6 +88,10 @@ int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
+static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
+module_param(ata_probe_timeout, int, 0444);
+MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
+
MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
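
The new ata_probe_timeout parameter above is taken in seconds and, as shown further down in the ata_init() hunk, converted to jiffies once at init time before being handed to wait_for_completion_timeout(). A sketch of that load-time conversion pattern with hypothetical names (not libata's):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/module.h>

static int example_timeout = 30;	/* hypothetical default, in seconds */
module_param(example_timeout, int, 0444);
MODULE_PARM_DESC(example_timeout, "Example timeout (seconds)");

static int __init example_init(void)
{
	example_timeout *= HZ;	/* convert once; now usable as a jiffies count */
	return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");
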
@@ -777,11 +781,9 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep)
{
- if (ata_msg_probe(ap)) {
+ if (ata_msg_probe(ap))
ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
- "device %u, wait %u\n",
- ap->id, device, wait);
- }
+ "device %u, wait %u\n", ap->id, device, wait);
if (wait)
ata_wait_idle(ap);
@@ -950,7 +952,8 @@ void ata_port_flush_task(struct ata_port *ap)
*/
if (!cancel_delayed_work(&ap->port_task)) {
if (ata_msg_ctl(ap))
- ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n", __FUNCTION__);
+ ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
+ __FUNCTION__);
flush_workqueue(ata_wq);
}
@@ -1059,7 +1062,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
- rc = wait_for_completion_timeout(&wait, ATA_TMOUT_INTERNAL);
+ rc = wait_for_completion_timeout(&wait, ata_probe_timeout);
ata_port_flush_task(ap);
@@ -1081,7 +1084,7 @@ unsigned ata_exec_internal(struct ata_device *dev,
if (ata_msg_warn(ap))
ata_dev_printk(dev, KERN_WARNING,
- "qc timeout (cmd 0x%x)\n", command);
+ "qc timeout (cmd 0x%x)\n", command);
}
spin_unlock_irqrestore(ap->lock, flags);
@@ -1093,9 +1096,9 @@ unsigned ata_exec_internal(struct ata_device *dev,
if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
+ ata_dev_printk(dev, KERN_WARNING,
"zero err_mask for failed "
- "internal command, assuming AC_ERR_OTHER\n");
+ "internal command, assuming AC_ERR_OTHER\n");
qc->err_mask |= AC_ERR_OTHER;
}
@@ -1132,6 +1135,33 @@ unsigned ata_exec_internal(struct ata_device *dev,
}
/**
+ * ata_do_simple_cmd - execute simple internal command
+ * @dev: Device to which the command is sent
+ * @cmd: Opcode to execute
+ *
+ *	Execute a 'simple' command that consists only of the opcode
+ *	'cmd' itself, without filling in any other registers.
+ *
+ * LOCKING:
+ * Kernel thread context (may sleep).
+ *
+ * RETURNS:
+ * Zero on success, AC_ERR_* mask on failure
+ */
+unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
+{
+ struct ata_taskfile tf;
+
+ ata_tf_init(dev, &tf);
+
+ tf.command = cmd;
+ tf.flags |= ATA_TFLAG_DEVICE;
+ tf.protocol = ATA_PROT_NODATA;
+
+ return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
+}
+
+/**
* ata_pio_need_iordy - check if iordy needed
* @adev: ATA device
*
@@ -1193,8 +1223,8 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int rc;
if (ata_msg_ctl(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+ __FUNCTION__, ap->id, dev->devno);
ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
@@ -1263,9 +1293,9 @@ int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
return 0;
err_out:
- if (ata_msg_warn(ap))
+ if (ata_msg_warn(ap))
ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
- "(%s, err_mask=0x%x)\n", reason, err_mask);
+ "(%s, err_mask=0x%x)\n", reason, err_mask);
return rc;
}
@@ -1318,19 +1348,21 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
int i, rc;
if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
- ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_INFO,
+ "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
+ __FUNCTION__, ap->id, dev->devno);
return 0;
}
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
- __FUNCTION__, ap->id, dev->devno);
+ ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
+ __FUNCTION__, ap->id, dev->devno);
/* print device capabilities */
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG, "%s: cfg 49:%04x 82:%04x 83:%04x "
- "84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
+ "85:%04x 86:%04x 87:%04x 88:%04x\n",
__FUNCTION__,
id[49], id[82], id[83], id[84],
id[85], id[86], id[87], id[88]);
@@ -1402,14 +1434,16 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
ata_id_major_version(id),
ata_mode_string(xfer_mask),
(unsigned long long)dev->n_sectors,
- dev->cylinders, dev->heads, dev->sectors);
+ dev->cylinders, dev->heads,
+ dev->sectors);
}
if (dev->id[59] & 0x100) {
dev->multi_count = dev->id[59] & 0xff;
if (ata_msg_info(ap))
- ata_dev_printk(dev, KERN_INFO, "ata%u: dev %u multi count %u\n",
- ap->id, dev->devno, dev->multi_count);
+ ata_dev_printk(dev, KERN_INFO,
+ "ata%u: dev %u multi count %u\n",
+ ap->id, dev->devno, dev->multi_count);
}
dev->cdb_len = 16;
@@ -1422,8 +1456,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
rc = atapi_cdb_len(id);
if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
if (ata_msg_warn(ap))
- ata_dev_printk(dev, KERN_WARNING,
- "unsupported CDB len\n");
+ ata_dev_printk(dev, KERN_WARNING,
+ "unsupported CDB len\n");
rc = -EINVAL;
goto err_out_nosup;
}
@@ -1466,8 +1500,8 @@ int ata_dev_configure(struct ata_device *dev, int print_info)
err_out_nosup:
if (ata_msg_probe(ap))
- ata_dev_printk(dev, KERN_DEBUG,
- "%s: EXIT, err\n", __FUNCTION__);
+ ata_dev_printk(dev, KERN_DEBUG,
+ "%s: EXIT, err\n", __FUNCTION__);
return rc;
}
@@ -3527,7 +3561,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
* Inherited from caller.
*/
-void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
+void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data)
{
struct ata_port *ap = adev->ap;
@@ -3573,7 +3607,7 @@ void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
* Inherited from caller.
*/
-void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
+void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data)
{
struct ata_port *ap = adev->ap;
@@ -3607,7 +3641,7 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
* @buflen: buffer length
* @write_data: read/write
*
- * Transfer data from/to the device data register by PIO. Do the
+ * Transfer data from/to the device data register by PIO. Do the
* transfer with interrupts disabled.
*
* LOCKING:
@@ -4946,31 +4980,9 @@ int ata_port_offline(struct ata_port *ap)
return 0;
}
-/*
- * Execute a 'simple' command, that only consists of the opcode 'cmd' itself,
- * without filling any other registers
- */
-static int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
-{
- struct ata_taskfile tf;
- int err;
-
- ata_tf_init(dev, &tf);
-
- tf.command = cmd;
- tf.flags |= ATA_TFLAG_DEVICE;
- tf.protocol = ATA_PROT_NODATA;
-
- err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
- if (err)
- ata_dev_printk(dev, KERN_ERR, "%s: ata command failed: %d\n",
- __FUNCTION__, err);
-
- return err;
-}
-
-static int ata_flush_cache(struct ata_device *dev)
+int ata_flush_cache(struct ata_device *dev)
{
+ unsigned int err_mask;
u8 cmd;
if (!ata_try_flush_cache(dev))
@@ -4981,17 +4993,41 @@ static int ata_flush_cache(struct ata_device *dev)
else
cmd = ATA_CMD_FLUSH;
- return ata_do_simple_cmd(dev, cmd);
+ err_mask = ata_do_simple_cmd(dev, cmd);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
+ return -EIO;
+ }
+
+ return 0;
}
static int ata_standby_drive(struct ata_device *dev)
{
- return ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+ unsigned int err_mask;
+
+ err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to standby drive "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ return 0;
}
static int ata_start_drive(struct ata_device *dev)
{
- return ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
+ unsigned int err_mask;
+
+ err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
+ if (err_mask) {
+ ata_dev_printk(dev, KERN_ERR, "failed to start drive "
+ "(err_mask=0x%x)\n", err_mask);
+ return -EIO;
+ }
+
+ return 0;
}
/**
@@ -5212,7 +5248,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
-#else
+#else
ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif
@@ -5709,6 +5745,7 @@ int ata_pci_device_resume(struct pci_dev *pdev)
static int __init ata_init(void)
{
+ ata_probe_timeout *= HZ;
ata_wq = create_workqueue("ata");
if (!ata_wq)
return -ENOMEM;
@@ -5733,7 +5770,7 @@ module_init(ata_init);
module_exit(ata_exit);
static unsigned long ratelimit_time;
-static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ata_ratelimit_lock);
int ata_ratelimit(void)
{
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
index 823385981a7a59..bf5a72aca8a497 100644
--- a/drivers/scsi/libata-eh.c
+++ b/drivers/scsi/libata-eh.c
@@ -93,6 +93,38 @@ static int ata_ering_map(struct ata_ering *ering,
return rc;
}
+static unsigned int ata_eh_dev_action(struct ata_device *dev)
+{
+ struct ata_eh_context *ehc = &dev->ap->eh_context;
+
+ return ehc->i.action | ehc->i.dev_action[dev->devno];
+}
+
+static void ata_eh_clear_action(struct ata_device *dev,
+ struct ata_eh_info *ehi, unsigned int action)
+{
+ int i;
+
+ if (!dev) {
+ ehi->action &= ~action;
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ehi->dev_action[i] &= ~action;
+ } else {
+ /* doesn't make sense for port-wide EH actions */
+ WARN_ON(!(action & ATA_EH_PERDEV_MASK));
+
+ /* break ehi->action into ehi->dev_action */
+ if (ehi->action & action) {
+ for (i = 0; i < ATA_MAX_DEVICES; i++)
+ ehi->dev_action[i] |= ehi->action & action;
+ ehi->action &= ~action;
+ }
+
+ /* turn off the specified per-dev action */
+ ehi->dev_action[dev->devno] &= ~action;
+ }
+}
+
/**
* ata_scsi_timed_out - SCSI layer time out callback
* @cmd: timed out SCSI command
@@ -702,32 +734,11 @@ static void ata_eh_detach_dev(struct ata_device *dev)
ap->flags |= ATA_FLAG_SCSI_HOTPLUG;
}
- spin_unlock_irqrestore(ap->lock, flags);
-}
-
-static void ata_eh_clear_action(struct ata_device *dev,
- struct ata_eh_info *ehi, unsigned int action)
-{
- int i;
+ /* clear per-dev EH actions */
+ ata_eh_clear_action(dev, &ap->eh_info, ATA_EH_PERDEV_MASK);
+ ata_eh_clear_action(dev, &ap->eh_context.i, ATA_EH_PERDEV_MASK);
- if (!dev) {
- ehi->action &= ~action;
- for (i = 0; i < ATA_MAX_DEVICES; i++)
- ehi->dev_action[i] &= ~action;
- } else {
- /* doesn't make sense for port-wide EH actions */
- WARN_ON(!(action & ATA_EH_PERDEV_MASK));
-
- /* break ehi->action into ehi->dev_action */
- if (ehi->action & action) {
- for (i = 0; i < ATA_MAX_DEVICES; i++)
- ehi->dev_action[i] |= ehi->action & action;
- ehi->action &= ~action;
- }
-
- /* turn off the specified per-dev action */
- ehi->dev_action[dev->devno] &= ~action;
- }
+ spin_unlock_irqrestore(ap->lock, flags);
}
/**
@@ -1592,7 +1603,7 @@ static int ata_eh_revalidate_and_attach(struct ata_port *ap,
unsigned int action;
dev = &ap->device[i];
- action = ehc->i.action | ehc->i.dev_action[dev->devno];
+ action = ata_eh_dev_action(dev);
if (action & ATA_EH_REVALIDATE && ata_dev_enabled(dev)) {
if (ata_port_offline(ap)) {
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index 93d18a74c401c3..2915bca691e8e9 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -222,9 +222,7 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
&& copy_to_user(arg + sizeof(args), argbuf, argsize))
rc = -EFAULT;
error:
- if (argbuf)
- kfree(argbuf);
-
+ kfree(argbuf);
return rc;
}
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index bdd48889709678..c325679d9b545d 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -29,7 +29,7 @@
#define __LIBATA_H__
#define DRV_NAME "libata"
-#define DRV_VERSION "1.30" /* must be exactly four chars */
+#define DRV_VERSION "2.00" /* must be exactly four chars */
struct ata_scsi_args {
struct ata_device *dev;
@@ -50,6 +50,7 @@ extern void ata_port_flush_task(struct ata_port *ap);
extern unsigned ata_exec_internal(struct ata_device *dev,
struct ata_taskfile *tf, const u8 *cdb,
int dma_dir, void *buf, unsigned int buflen);
+extern unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd);
extern int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
int post_reset, u16 *id);
extern int ata_dev_configure(struct ata_device *dev, int print_info);
@@ -64,6 +65,7 @@ extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
extern void ata_dev_select(struct ata_port *ap, unsigned int device,
unsigned int wait, unsigned int can_sleep);
extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
+extern int ata_flush_cache(struct ata_device *dev);
extern void ata_dev_init(struct ata_device *dev);
extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg);
extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index d18e7e0932effc..5cc42c6054eb66 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -44,7 +44,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_nv"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "2.0"
enum {
NV_PORTS = 2,
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index bc9f918a7f286e..51d86d750e84f6 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -46,12 +46,13 @@
#include <linux/libata.h>
#define DRV_NAME "sata_sil"
-#define DRV_VERSION "1.0"
+#define DRV_VERSION "2.0"
enum {
/*
* host flags
*/
+ SIL_FLAG_NO_SATA_IRQ = (1 << 28),
SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29),
SIL_FLAG_MOD15WRITE = (1 << 30),
@@ -62,8 +63,9 @@ enum {
* Controller IDs
*/
sil_3112 = 0,
- sil_3512 = 1,
- sil_3114 = 2,
+ sil_3112_no_sata_irq = 1,
+ sil_3512 = 2,
+ sil_3114 = 3,
/*
* Register offsets
@@ -123,8 +125,8 @@ static const struct pci_device_id sil_pci_tbl[] = {
{ 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3512 },
{ 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 },
{ 0x1002, 0x436e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
- { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 },
+ { 0x1002, 0x4379, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
+ { 0x1002, 0x437a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_no_sata_irq },
{ } /* terminate list */
};
@@ -217,6 +219,16 @@ static const struct ata_port_info sil_port_info[] = {
.udma_mask = 0x3f, /* udma0-5 */
.port_ops = &sil_ops,
},
+ /* sil_3112_no_sata_irq */
+ {
+ .sht = &sil_sht,
+ .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE |
+ SIL_FLAG_NO_SATA_IRQ,
+ .pio_mask = 0x1f, /* pio0-4 */
+ .mwdma_mask = 0x07, /* mwdma0-2 */
+ .udma_mask = 0x3f, /* udma0-5 */
+ .port_ops = &sil_ops,
+ },
/* sil_3512 */
{
.sht = &sil_sht,
@@ -437,6 +449,10 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance,
if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED))
continue;
+ /* turn off SATA_IRQ if not supported */
+ if (ap->flags & SIL_FLAG_NO_SATA_IRQ)
+ bmdma2 &= ~SIL_DMA_SATA_IRQ;
+
if (bmdma2 == 0xffffffff ||
!(bmdma2 & (SIL_DMA_COMPLETE | SIL_DMA_SATA_IRQ)))
continue;
@@ -474,8 +490,9 @@ static void sil_thaw(struct ata_port *ap)
ata_chk_status(ap);
ata_bmdma_irq_clear(ap);
- /* turn on SATA IRQ */
- writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
+ /* turn on SATA IRQ if supported */
+ if (!(ap->flags & SIL_FLAG_NO_SATA_IRQ))
+ writel(SIL_SIEN_N, mmio_base + sil_port[ap->port_no].sien);
/* turn on IRQ */
tmp = readl(mmio_base + SIL_SYSCFG);
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index c8b477c672475d..b5f8fa95567906 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -31,7 +31,7 @@
#include <asm/io.h>
#define DRV_NAME "sata_sil24"
-#define DRV_VERSION "0.24"
+#define DRV_VERSION "0.3"
/*
* Port request block (PRB) 32 bytes
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index c94b870cf378af..7566c2cabaf725 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -54,7 +54,7 @@
#endif /* CONFIG_PPC_OF */
#define DRV_NAME "sata_svw"
-#define DRV_VERSION "1.8"
+#define DRV_VERSION "2.0"
enum {
/* Taskfile registers offsets */
diff --git a/drivers/scsi/sata_uli.c b/drivers/scsi/sata_uli.c
index f668c997e9af0b..64f3c1aeed217d 100644
--- a/drivers/scsi/sata_uli.c
+++ b/drivers/scsi/sata_uli.c
@@ -37,7 +37,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_uli"
-#define DRV_VERSION "0.6"
+#define DRV_VERSION "1.0"
enum {
uli_5289 = 0,
diff --git a/drivers/scsi/sata_via.c b/drivers/scsi/sata_via.c
index 322890b400a600..67c3d299977582 100644
--- a/drivers/scsi/sata_via.c
+++ b/drivers/scsi/sata_via.c
@@ -47,7 +47,7 @@
#include <asm/io.h>
#define DRV_NAME "sata_via"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "2.0"
enum board_ids_enum {
vt6420,
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 6d0c4f18e652ed..616fd9634b4b0d 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -47,7 +47,7 @@
#include <linux/libata.h>
#define DRV_NAME "sata_vsc"
-#define DRV_VERSION "1.2"
+#define DRV_VERSION "2.0"
enum {
/* Interrupt register offsets (from chip base address) */
@@ -443,16 +443,12 @@ err_out:
}
-/*
- * Intel 31244 is supposed to be identical.
- * Compatibility is untested as of yet.
- */
static const struct pci_device_id vsc_sata_pci_tbl[] = {
- { PCI_VENDOR_ID_VITESSE, PCI_DEVICE_ID_VITESSE_VSC7174,
+ { PCI_VENDOR_ID_VITESSE, 0x7174,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
- { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GD31244,
+ { PCI_VENDOR_ID_INTEL, 0x3200,
PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 },
- { }
+ { } /* terminate list */
};
diff --git a/drivers/sn/ioc3.c b/drivers/sn/ioc3.c
index 501316b198e511..ed946311d3a47e 100644
--- a/drivers/sn/ioc3.c
+++ b/drivers/sn/ioc3.c
@@ -26,7 +26,7 @@ static DECLARE_RWSEM(ioc3_devices_rwsem);
static struct ioc3_submodule *ioc3_submodules[IOC3_MAX_SUBMODULES];
static struct ioc3_submodule *ioc3_ethernet;
-static rwlock_t ioc3_submodules_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ioc3_submodules_lock);
/* NIC probing code */
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index d63c3f485853e5..9ef68cd83bb4d0 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -743,8 +743,7 @@ void __exit au1100fb_cleanup(void)
{
driver_unregister(&au1100fb_driver);
- if (drv_info.opt_mode)
- kfree(drv_info.opt_mode);
+ kfree(drv_info.opt_mode);
}
module_init(au1100fb_init);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index a71e984c93d47a..ffc72ae3ada80a 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -27,7 +27,7 @@
static int hp680bl_suspended;
static int current_intensity = 0;
-static spinlock_t bl_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bl_lock);
static struct backlight_device *hp680_backlight_device;
static void hp680bl_send_intensity(struct backlight_device *bd)
diff --git a/fs/Kconfig b/fs/Kconfig
index 6c5051802bd285..6dc8cfd6d80cf7 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1116,7 +1116,7 @@ config JFFS2_SUMMARY
config JFFS2_FS_XATTR
bool "JFFS2 XATTR support (EXPERIMENTAL)"
- depends on JFFS2_FS && EXPERIMENTAL && !JFFS2_FS_WRITEBUFFER
+ depends on JFFS2_FS && EXPERIMENTAL
default n
help
Extended attributes are name:value pairs associated with inodes by
@@ -1722,7 +1722,7 @@ config CIFS_STATS
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
config CIFS_STATS2
- bool "CIFS extended statistics"
+ bool "Extended statistics"
depends on CIFS_STATS
help
Enabling this option will allow more detailed statistics on SMB
@@ -1735,6 +1735,32 @@ config CIFS_STATS2
Unless you are a developer or are doing network performance analysis
or tuning, say N.
+config CIFS_WEAK_PW_HASH
+ bool "Support legacy servers which use weaker LANMAN security"
+ depends on CIFS
+ help
+ Modern CIFS servers including Samba and most Windows versions
+ (since 1997) support stronger NTLM (and even NTLMv2 and Kerberos)
+ security mechanisms. These hash the password more securely
+	  than the mechanisms used in the older LANMAN version of the
+	  SMB protocol, which is needed to establish sessions with old
+	  SMB servers.
+
+ Enabling this option allows the cifs module to mount to older
+ LANMAN based servers such as OS/2 and Windows 95, but such
+ mounts may be less secure than mounts using NTLM or more recent
+ security mechanisms if you are on a public network. Unless you
+ have a need to access old SMB servers (and are on a private
+ network) you probably want to say N. Even if this support
+	  is enabled in the kernel build, it will not be used
+ automatically. At runtime LANMAN mounts are disabled but
+ can be set to required (or optional) either in
+ /proc/fs/cifs (see fs/cifs/README for more detail) or via an
+ option on the mount command. This support is disabled by
+ default in order to reduce the possibility of a downgrade
+ attack.
+
+ If unsure, say N.
+
config CIFS_XATTR
bool "CIFS extended attributes"
depends on CIFS
@@ -1763,6 +1789,16 @@ config CIFS_POSIX
(such as Samba 3.10 and later) which can negotiate
CIFS POSIX ACL support. If unsure, say N.
+config CIFS_DEBUG2
+ bool "Enable additional CIFS debugging routines"
+ help
+ Enabling this option adds a few more debugging routines
+ to the cifs code which slightly increases the size of
+ the cifs module and can cause additional logging of debug
+ messages in some error paths, slowing performance. This
+ option can be turned off unless you are debugging
+ cifs problems. If unsure, say N.
+
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
depends on CIFS && EXPERIMENTAL
@@ -1778,7 +1814,7 @@ config CIFS_EXPERIMENTAL
If unsure, say N.
config CIFS_UPCALL
- bool "CIFS Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
+ bool "Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
depends on CIFS_EXPERIMENTAL
select CONNECTOR
help
diff --git a/fs/buffer.c b/fs/buffer.c
index 373bb6292bdc13..f23bb647db470a 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -564,7 +564,7 @@ still_busy:
* Completion handler for block_write_full_page() - pages which are unlocked
* during I/O, and which have PageWriteback cleared upon I/O completion.
*/
-void end_buffer_async_write(struct buffer_head *bh, int uptodate)
+static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
{
char b[BDEVNAME_SIZE];
unsigned long flags;
@@ -3166,7 +3166,6 @@ EXPORT_SYMBOL(block_sync_page);
EXPORT_SYMBOL(block_truncate_page);
EXPORT_SYMBOL(block_write_full_page);
EXPORT_SYMBOL(cont_prepare_write);
-EXPORT_SYMBOL(end_buffer_async_write);
EXPORT_SYMBOL(end_buffer_read_sync);
EXPORT_SYMBOL(end_buffer_write_sync);
EXPORT_SYMBOL(file_fsync);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 7271bb0257f64f..a61d17ed1827e3 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,9 +1,24 @@
+Version 1.44
+------------
+Rewritten sessionsetup support, including support for legacy SMB
+session setup needed for OS/2 and older servers such as Windows 95 and 98.
+Fix oops on ls to OS/2 servers. Add support for level 1 FindFirst
+so we can do search (ls etc.) to OS/2. Do not send NTCreateX
+or recent levels of FindFirst unless server says it supports NT SMBs
+(instead use legacy equivalents from LANMAN dialect). Fix to allow
+NTLMv2 authentication support (stronger password hashing can now be used on
+mount if the corresponding /proc/fs/cifs/SecurityFlags value, 0x4004, is set).
+Allow override of global cifs security flags on mount via "sec=" option(s).
+
Version 1.43
------------
POSIX locking to servers which support CIFS POSIX Extensions
(disabled by default controlled by proc/fs/cifs/Experimental).
Handle conversion of long share names (especially Asian languages)
-to Unicode during mount.
+to Unicode during mount. Fix memory leak in sess struct on reconnect.
+Fix rare oops after acpi suspend. Fix O_TRUNC opens to overwrite on
+cifs open which helps rare case when setpathinfo fails or server does
+not support it.
Version 1.42
------------
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 58c77254a23b3d..a26f26ed5a17e7 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -3,4 +3,4 @@
#
obj-$(CONFIG_CIFS) += cifs.o
-cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o
+cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o sess.o
diff --git a/fs/cifs/README b/fs/cifs/README
index 0355003f4f0a3b..7986d0d97ace4f 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -443,7 +443,10 @@ A partial list of the supported mount options follows:
SFU does). In the future the bottom 9 bits of the mode
mode also will be emulated using queries of the security
descriptor (ACL).
-sec Security mode. Allowed values are:
+ sign Must use packet signing (helps avoid unwanted data modification
+ by intermediate systems in the route). Note that signing
+ does not work with lanman or plaintext authentication.
+ sec Security mode. Allowed values are:
none attempt to connection as a null user (no name)
krb5 Use Kerberos version 5 authentication
krb5i Use Kerberos authentication and packet signing
@@ -453,6 +456,8 @@ sec Security mode. Allowed values are:
server requires signing also can be the default)
ntlmv2 Use NTLMv2 password hashing
ntlmv2i Use NTLMv2 password hashing with packet signing
+ lanman (if configured in kernel config) use older
+ lanman hash
The mount.cifs mount helper also accepts a few mount options before -o
including:
@@ -485,14 +490,34 @@ PacketSigningEnabled If set to one, cifs packet signing is enabled
it. If set to two, cifs packet signing is
required even if the server considers packet
signing optional. (default 1)
+SecurityFlags Flags which control security negotiation and
+ also packet signing. Authentication (may/must)
+ flags (e.g. for NTLM and/or NTLMv2) may be combined with
+ the signing flags. Specifying two different password
+ hashing mechanisms (as "must use") on the other hand
+ does not make much sense. Default flags are
+ 0x07007
+ (NTLM, NTLMv2 and packet signing allowed). Maximum
+ allowable flags if you want to allow mounts to servers
+ using weaker password hashes is 0x37037 (lanman,
+ plaintext, ntlm, ntlmv2, signing allowed):
+
+ may use packet signing 0x00001
+ must use packet signing 0x01001
+ may use NTLM (most common password hash) 0x00002
+ must use NTLM 0x02002
+ may use NTLMv2 0x00004
+ must use NTLMv2 0x04004
+ may use Kerberos security (not implemented yet) 0x00008
+ must use Kerberos (not implemented yet) 0x08008
+ may use lanman (weak) password hash 0x00010
+ must use lanman password hash 0x10010
+ may use plaintext passwords 0x00020
+ must use plaintext passwords 0x20020
+ (reserved for future packet encryption) 0x00040
+
cifsFYI If set to one, additional debug information is
logged to the system error log. (default 0)
-ExtendedSecurity If set to one, SPNEGO session establishment
- is allowed which enables more advanced
- secure CIFS session establishment (default 0)
-NTLMV2Enabled If set to one, more secure password hashes
- are used when the server supports them and
- when kerberos is not negotiated (default 0)
traceSMB If set to one, debug information is logged to the
system error log with the start of smb requests
and responses (default 0)
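
The SecurityFlags values documented above are plain bit flags and combine with a bitwise OR; a small worked example of one such combination (the particular choice is only an illustration, plain user-space C):

#include <stdio.h>

int main(void)
{
	unsigned int flags = 0x04004	/* must use NTLMv2 */
			   | 0x01001;	/* must use packet signing */

	printf("SecurityFlags = 0x%05x\n", flags);	/* prints 0x05005 */
	return 0;
}
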
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index 086ae8f4a207a2..031cdf2932568a 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -467,7 +467,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
asn1_open(&ctx, security_blob, length);
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit header "));
+ cFYI(1, ("Error decoding negTokenInit header"));
return 0;
} else if ((cls != ASN1_APL) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
@@ -495,7 +495,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit "));
+ cFYI(1, ("Error decoding negTokenInit"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
@@ -505,7 +505,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding negTokenInit "));
+ cFYI(1, ("Error decoding negTokenInit"));
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
@@ -515,7 +515,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
}
if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit "));
+ cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
} else if ((cls != ASN1_CTX) || (con != ASN1_CON)
|| (tag != ASN1_EOC)) {
@@ -527,7 +527,7 @@ decode_negTokenInit(unsigned char *security_blob, int length,
if (asn1_header_decode
(&ctx, &sequence_end, &cls, &con, &tag) == 0) {
- cFYI(1, ("Error decoding 2nd part of negTokenInit "));
+ cFYI(1, ("Error decoding 2nd part of negTokenInit"));
return 0;
} else if ((cls != ASN1_UNI) || (con != ASN1_CON)
|| (tag != ASN1_SEQ)) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index f4124a32bef897..96abeb7389784d 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -39,7 +39,7 @@ cifs_dump_mem(char *label, void *data, int length)
char *charptr = data;
char buf[10], line[80];
- printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n\n",
+ printk(KERN_DEBUG "%s: dump of %d bytes of data at 0x%p\n",
label, length, data);
for (i = 0; i < length; i += 16) {
line[0] = 0;
@@ -57,6 +57,57 @@ cifs_dump_mem(char *label, void *data, int length)
}
}
+#ifdef CONFIG_CIFS_DEBUG2
+void cifs_dump_detail(struct smb_hdr * smb)
+{
+ cERROR(1,("Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
+ smb->Command, smb->Status.CifsError,
+ smb->Flags, smb->Flags2, smb->Mid, smb->Pid));
+ cERROR(1,("smb buf %p len %d", smb, smbCalcSize_LE(smb)));
+}
+
+
+void cifs_dump_mids(struct TCP_Server_Info * server)
+{
+ struct list_head *tmp;
+ struct mid_q_entry * mid_entry;
+
+ if(server == NULL)
+ return;
+
+ cERROR(1,("Dump pending requests:"));
+ spin_lock(&GlobalMid_Lock);
+ list_for_each(tmp, &server->pending_mid_q) {
+ mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
+ if(mid_entry) {
+ cERROR(1,("State: %d Cmd: %d Pid: %d Tsk: %p Mid %d",
+ mid_entry->midState,
+ (int)mid_entry->command,
+ mid_entry->pid,
+ mid_entry->tsk,
+ mid_entry->mid));
+#ifdef CONFIG_CIFS_STATS2
+ cERROR(1,("IsLarge: %d buf: %p time rcv: %ld now: %ld",
+ mid_entry->largeBuf,
+ mid_entry->resp_buf,
+ mid_entry->when_received,
+ jiffies));
+#endif /* STATS2 */
+ cERROR(1,("IsMult: %d IsEnd: %d", mid_entry->multiRsp,
+ mid_entry->multiEnd));
+ if(mid_entry->resp_buf) {
+ cifs_dump_detail(mid_entry->resp_buf);
+ cifs_dump_mem("existing buf: ",
+ mid_entry->resp_buf,
+ 62 /* fixme */);
+ }
+
+ }
+ }
+ spin_unlock(&GlobalMid_Lock);
+}
+#endif /* CONFIG_CIFS_DEBUG2 */
+
#ifdef CONFIG_PROC_FS
static int
cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
@@ -73,7 +124,6 @@ cifs_debug_data_read(char *buf, char **beginBuffer, off_t offset,
*beginBuffer = buf + offset;
-
length =
sprintf(buf,
"Display Internal CIFS Data Structures for Debugging\n"
@@ -395,12 +445,12 @@ static read_proc_t traceSMB_read;
static write_proc_t traceSMB_write;
static read_proc_t multiuser_mount_read;
static write_proc_t multiuser_mount_write;
-static read_proc_t extended_security_read;
-static write_proc_t extended_security_write;
-static read_proc_t ntlmv2_enabled_read;
+static read_proc_t security_flags_read;
+static write_proc_t security_flags_write;
+/* static read_proc_t ntlmv2_enabled_read;
static write_proc_t ntlmv2_enabled_write;
static read_proc_t packet_signing_enabled_read;
-static write_proc_t packet_signing_enabled_write;
+static write_proc_t packet_signing_enabled_write;*/
static read_proc_t experimEnabled_read;
static write_proc_t experimEnabled_write;
static read_proc_t linuxExtensionsEnabled_read;
@@ -458,10 +508,10 @@ cifs_proc_init(void)
pde->write_proc = multiuser_mount_write;
pde =
- create_proc_read_entry("ExtendedSecurity", 0, proc_fs_cifs,
- extended_security_read, NULL);
+ create_proc_read_entry("SecurityFlags", 0, proc_fs_cifs,
+ security_flags_read, NULL);
if (pde)
- pde->write_proc = extended_security_write;
+ pde->write_proc = security_flags_write;
pde =
create_proc_read_entry("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -469,7 +519,7 @@ cifs_proc_init(void)
if (pde)
pde->write_proc = lookupFlag_write;
- pde =
+/* pde =
create_proc_read_entry("NTLMV2Enabled", 0, proc_fs_cifs,
ntlmv2_enabled_read, NULL);
if (pde)
@@ -479,7 +529,7 @@ cifs_proc_init(void)
create_proc_read_entry("PacketSigningEnabled", 0, proc_fs_cifs,
packet_signing_enabled_read, NULL);
if (pde)
- pde->write_proc = packet_signing_enabled_write;
+ pde->write_proc = packet_signing_enabled_write;*/
}
void
@@ -496,9 +546,9 @@ cifs_proc_clean(void)
#endif
remove_proc_entry("MultiuserMount", proc_fs_cifs);
remove_proc_entry("OplockEnabled", proc_fs_cifs);
- remove_proc_entry("NTLMV2Enabled",proc_fs_cifs);
- remove_proc_entry("ExtendedSecurity",proc_fs_cifs);
- remove_proc_entry("PacketSigningEnabled",proc_fs_cifs);
+/* remove_proc_entry("NTLMV2Enabled",proc_fs_cifs); */
+ remove_proc_entry("SecurityFlags",proc_fs_cifs);
+/* remove_proc_entry("PacketSigningEnabled",proc_fs_cifs); */
remove_proc_entry("LinuxExtensionsEnabled",proc_fs_cifs);
remove_proc_entry("Experimental",proc_fs_cifs);
remove_proc_entry("LookupCacheEnabled",proc_fs_cifs);
@@ -782,12 +832,12 @@ multiuser_mount_write(struct file *file, const char __user *buffer,
}
static int
-extended_security_read(char *page, char **start, off_t off,
+security_flags_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len;
- len = sprintf(page, "%d\n", extended_security);
+ len = sprintf(page, "0x%x\n", extended_security);
len -= off;
*start = page + off;
@@ -803,24 +853,52 @@ extended_security_read(char *page, char **start, off_t off,
return len;
}
static int
-extended_security_write(struct file *file, const char __user *buffer,
+security_flags_write(struct file *file, const char __user *buffer,
unsigned long count, void *data)
{
+ unsigned int flags;
+ char flags_string[12];
char c;
- int rc;
- rc = get_user(c, buffer);
- if (rc)
- return rc;
- if (c == '0' || c == 'n' || c == 'N')
- extended_security = 0;
- else if (c == '1' || c == 'y' || c == 'Y')
- extended_security = 1;
+ if((count < 1) || (count > 11))
+ return -EINVAL;
+
+ memset(flags_string, 0, 12);
+
+ if(copy_from_user(flags_string, buffer, count))
+ return -EFAULT;
+
+ if(count < 3) {
+ /* single char or single char followed by null */
+ c = flags_string[0];
+ if (c == '0' || c == 'n' || c == 'N')
+ extended_security = CIFSSEC_DEF; /* default */
+ else if (c == '1' || c == 'y' || c == 'Y')
+ extended_security = CIFSSEC_MAX;
+ return count;
+ }
+ /* else we have a number */
+
+ flags = simple_strtoul(flags_string, NULL, 0);
+
+ cFYI(1,("sec flags 0x%x", flags));
+
+ if(flags <= 0) {
+ cERROR(1,("invalid security flags %s",flags_string));
+ return -EINVAL;
+ }
+ if(flags & ~CIFSSEC_MASK) {
+ cERROR(1,("attempt to set unsupported security flags 0x%x",
+ flags & ~CIFSSEC_MASK));
+ return -EINVAL;
+ }
+ /* flags look ok - update the global security flags for cifs module */
+ extended_security = flags;
return count;
}
-static int
+/* static int
ntlmv2_enabled_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
@@ -855,6 +933,8 @@ ntlmv2_enabled_write(struct file *file, const char __user *buffer,
ntlmv2_support = 0;
else if (c == '1' || c == 'y' || c == 'Y')
ntlmv2_support = 1;
+ else if (c == '2')
+ ntlmv2_support = 2;
return count;
}
@@ -898,7 +978,7 @@ packet_signing_enabled_write(struct file *file, const char __user *buffer,
sign_CIFS_PDUs = 2;
return count;
-}
+} */
#endif
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 4304d9dcfb6c65..c26cd0d2c6d525 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,6 +24,10 @@
#define _H_CIFS_DEBUG
void cifs_dump_mem(char *label, void *data, int length);
+#ifdef CONFIG_CIFS_DEBUG2
+void cifs_dump_detail(struct smb_hdr *);
+void cifs_dump_mids(struct TCP_Server_Info *);
+#endif
extern int traceSMB; /* flag which enables the function below */
void dump_smb(struct smb_hdr *, int);
#define CIFS_INFO 0x01
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index d2b12825594426..d2a8b2941fc26a 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -22,6 +22,7 @@
#include "cifs_unicode.h"
#include "cifs_uniupr.h"
#include "cifspdu.h"
+#include "cifsglob.h"
#include "cifs_debug.h"
/*
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index e7d63737e65110..a89efaf78a266e 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -26,6 +26,8 @@
#include "md5.h"
#include "cifs_unicode.h"
#include "cifsproto.h"
+#include <linux/ctype.h>
+#include <linux/random.h>
/* Calculate and return the CIFS signature based on the mac key and the smb pdu */
/* the 16 byte signature must be allocated by the caller */
@@ -35,6 +37,8 @@
extern void mdfour(unsigned char *out, unsigned char *in, int n);
extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
+extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
+ unsigned char *p24);
static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
const char * key, char * signature)
@@ -45,7 +49,7 @@ static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
return -EINVAL;
MD5Init(&context);
- MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+ MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length);
MD5Final(signature,&context);
return 0;
@@ -90,7 +94,7 @@ static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
return -EINVAL;
MD5Init(&context);
- MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
+ MD5Update(&context,key,CIFS_SESS_KEY_SIZE+16);
for(i=0;i<n_vec;i++) {
if(iov[i].iov_base == NULL) {
cERROR(1,("null iovec entry"));
@@ -204,11 +208,12 @@ int cifs_calculate_mac_key(char * key, const char * rn, const char * password)
E_md4hash(password, temp_key);
mdfour(key,temp_key,16);
- memcpy(key+16,rn, CIFS_SESSION_KEY_SIZE);
+ memcpy(key+16,rn, CIFS_SESS_KEY_SIZE);
return 0;
}
-int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_info)
+int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses,
+ const struct nls_table * nls_info)
{
char temp_hash[16];
struct HMACMD5Context ctx;
@@ -225,6 +230,8 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_
user_name_len = strlen(ses->userName);
if(user_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
+ if(ses->domainName == NULL)
+ return -EINVAL; /* BB should we use CIFS_LINUX_DOM */
dom_name_len = strlen(ses->domainName);
if(dom_name_len > MAX_USERNAME_SIZE)
return -EINVAL;
@@ -259,16 +266,131 @@ int CalcNTLMv2_partial_mac_key(struct cifsSesInfo * ses, struct nls_table * nls_
kfree(unicode_buf);
return 0;
}
-void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_response)
+
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key)
+{
+ int i;
+ char password_with_pad[CIFS_ENCPWD_SIZE];
+
+ if(ses->server == NULL)
+ return;
+
+ memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
+ strncpy(password_with_pad, ses->password, CIFS_ENCPWD_SIZE);
+
+ if((ses->server->secMode & SECMODE_PW_ENCRYPT) == 0)
+ if(extended_security & CIFSSEC_MAY_PLNTXT) {
+ memcpy(lnm_session_key, password_with_pad, CIFS_ENCPWD_SIZE);
+ return;
+ }
+
+ /* calculate old style session key */
+ /* calling toupper is less broken than repeatedly calling
+ nls_toupper would be, since that will never work for UTF8;
+ neither handles multibyte code pages. The only alternative
+ would be converting to UCS-16 (Unicode) (using a routine
+ something like UniStrupr), uppercasing, and converting back
+ from Unicode - which would only be worth doing if we knew it
+ were utf8. Basically utf8 and other multibyte codepages each
+ need their own strupper function since a byte at a time will
+ not work. */
+
+ for(i = 0; i < CIFS_ENCPWD_SIZE; i++) {
+ password_with_pad[i] = toupper(password_with_pad[i]);
+ }
+
+ SMBencrypt(password_with_pad, ses->server->cryptKey, lnm_session_key);
+ /* clear password before we return/free memory */
+ memset(password_with_pad, 0, CIFS_ENCPWD_SIZE);
+}
+#endif /* CIFS_WEAK_PW_HASH */
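
calc_lanman_hash() above pads the password to CIFS_ENCPWD_SIZE, upper-cases it one byte at a time, and (unless plain text is allowed and the server skips password encryption) passes the result to SMBencrypt() to encrypt the server challenge. A standalone sketch of just the pad-and-uppercase step is below, assuming CIFS_ENCPWD_SIZE is 16; the DES step (SMBencrypt) is assumed to be provided elsewhere and is not reproduced, and lanman_prepare()/ENCPWD_SIZE are illustrative names.

/* Sketch of the LANMAN pre-processing used above: zero-pad the password
 * to a fixed size and upper-case it byte-wise (which, as the comment in
 * calc_lanman_hash notes, is wrong for multibyte codepages). The actual
 * challenge encryption (SMBencrypt) is assumed to exist elsewhere. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

#define ENCPWD_SIZE 16	/* stand-in for CIFS_ENCPWD_SIZE */

static void lanman_prepare(const char *password, unsigned char out[ENCPWD_SIZE])
{
	memset(out, 0, ENCPWD_SIZE);
	strncpy((char *)out, password, ENCPWD_SIZE);
	for (int i = 0; i < ENCPWD_SIZE; i++)
		out[i] = toupper(out[i]);
	/* SMBencrypt(out, server_challenge, lnm_session_key) would follow */
}

int main(void)
{
	unsigned char padded[ENCPWD_SIZE];

	lanman_prepare("secret", padded);
	for (int i = 0; i < ENCPWD_SIZE; i++)
		printf("%02x ", padded[i]);
	printf("\n");
	return 0;
}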
+
+static int calc_ntlmv2_hash(struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ int rc = 0;
+ int len;
+ char nt_hash[16];
+ struct HMACMD5Context * pctxt;
+ wchar_t * user;
+ wchar_t * domain;
+
+ pctxt = kmalloc(sizeof(struct HMACMD5Context), GFP_KERNEL);
+
+ if(pctxt == NULL)
+ return -ENOMEM;
+
+ /* calculate md4 hash of password */
+ E_md4hash(ses->password, nt_hash);
+
+ /* key the HMAC-MD5 context with the md4 hash of the password */
+ hmac_md5_init_limK_to_64(nt_hash, 16, pctxt);
+
+ /* convert ses->userName to unicode and uppercase */
+ len = strlen(ses->userName);
+ user = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if(user == NULL)
+ goto calc_exit_2;
+ len = cifs_strtoUCS(user, ses->userName, len, nls_cp);
+ UniStrupr(user);
+ hmac_md5_update((char *)user, 2*len, pctxt);
+
+ /* convert ses->domainName to unicode and uppercase */
+ if(ses->domainName) {
+ len = strlen(ses->domainName);
+
+ domain = kmalloc(2 + (len * 2), GFP_KERNEL);
+ if(domain == NULL)
+ goto calc_exit_1;
+ len = cifs_strtoUCS(domain, ses->domainName, len, nls_cp);
+ UniStrupr(domain);
+
+ hmac_md5_update((char *)domain, 2*len, pctxt);
+
+ kfree(domain);
+ }
+calc_exit_1:
+ kfree(user);
+calc_exit_2:
+ /* BB FIXME what about bytes 24 through 40 of the signing key?
+ compare with the NTLM example */
+ hmac_md5_final(ses->server->mac_signing_key, pctxt);
+
+ return rc;
+}
+
+void setup_ntlmv2_rsp(struct cifsSesInfo * ses, char * resp_buf,
+ const struct nls_table * nls_cp)
+{
+ int rc;
+ struct ntlmv2_resp * buf = (struct ntlmv2_resp *)resp_buf;
+
+ buf->blob_signature = cpu_to_le32(0x00000101);
+ buf->reserved = 0;
+ buf->time = cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ get_random_bytes(&buf->client_chal, sizeof(buf->client_chal));
+ buf->reserved2 = 0;
+ buf->names[0].type = 0;
+ buf->names[0].length = 0;
+
+ /* calculate buf->ntlmv2_hash */
+ rc = calc_ntlmv2_hash(ses, nls_cp);
+ if(rc)
+ cERROR(1,("could not get v2 hash rc %d",rc));
+ CalcNTLMv2_response(ses, resp_buf);
+}
+
+void CalcNTLMv2_response(const struct cifsSesInfo * ses, char * v2_session_response)
{
struct HMACMD5Context context;
+ /* rest of v2 struct already generated */
memcpy(v2_session_response + 8, ses->server->cryptKey,8);
- /* gen_blob(v2_session_response + 16); */
hmac_md5_init_limK_to_64(ses->server->mac_signing_key, 16, &context);
- hmac_md5_update(ses->server->cryptKey,8,&context);
-/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
+ hmac_md5_update(v2_session_response+8,
+ sizeof(struct ntlmv2_resp) - 8, &context);
hmac_md5_final(v2_session_response,&context);
- cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */
+/* cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); */
}
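
Taken together, calc_ntlmv2_hash(), setup_ntlmv2_rsp() and the reworked CalcNTLMv2_response() implement the usual NTLMv2 flow: an HMAC-MD5 keyed with the NT (md4) hash over the upper-cased UTF-16LE user and domain names yields the v2 hash, and a second HMAC-MD5 keyed with that hash over the 8-byte server challenge followed by the client blob yields the 16-byte response stored at the front of the buffer. The sketch below shows only that second step, using OpenSSL's HMAC() purely for illustration rather than the kernel's hmac_md5_* helpers; the buffer names are hypothetical.

/* Illustration of the final NTLMv2 step done by CalcNTLMv2_response():
 * response = HMAC-MD5(v2_hash, server_challenge || client_blob).
 * Build with: cc ntlmv2.c -lcrypto */
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

static void ntlmv2_response(const unsigned char v2_hash[16],
			    const unsigned char challenge[8],
			    const unsigned char *blob, size_t blob_len,
			    unsigned char response[16])
{
	unsigned char msg[8 + 512];
	unsigned int out_len = 16;

	if (blob_len > sizeof(msg) - 8)
		return;
	/* message is the server challenge followed by the client blob */
	memcpy(msg, challenge, 8);
	memcpy(msg + 8, blob, blob_len);

	HMAC(EVP_md5(), v2_hash, 16, msg, 8 + blob_len, response, &out_len);
}

int main(void)
{
	unsigned char hash[16] = {0}, chal[8] = {0}, blob[32] = {0};
	unsigned char resp[16];

	ntlmv2_response(hash, chal, blob, sizeof(blob), resp);
	for (int i = 0; i < 16; i++)
		printf("%02x", resp[i]);
	printf("\n");
	return 0;
}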
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 8b4de6eaabd0ee..c28ede59994684 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,8 +56,8 @@ unsigned int experimEnabled = 0;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
-unsigned int extended_security = 0;
-unsigned int ntlmv2_support = 0;
+unsigned int extended_security = CIFSSEC_DEF;
+/* unsigned int ntlmv2_support = 0; */
unsigned int sign_CIFS_PDUs = 1;
extern struct task_struct * oplockThread; /* remove sparse warning */
struct task_struct * oplockThread = NULL;
@@ -908,7 +908,7 @@ static int cifs_dnotify_thread(void * dummyarg)
struct cifsSesInfo *ses;
do {
- if(try_to_freeze())
+ if (try_to_freeze())
continue;
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(15*HZ);
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index d56c0577c7103f..a6384d83fdefb0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -33,6 +33,7 @@
#endif
extern struct address_space_operations cifs_addr_ops;
+extern struct address_space_operations cifs_addr_ops_smallbuf;
/* Functions related to super block operations */
extern struct super_operations cifs_super_ops;
@@ -99,5 +100,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
extern int cifs_ioctl (struct inode * inode, struct file * filep,
unsigned int command, unsigned long arg);
-#define CIFS_VERSION "1.43"
+#define CIFS_VERSION "1.44"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 006eb33bff5ff7..6d7cf5f3bc0bdb 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -88,7 +88,8 @@ enum statusEnum {
};
enum securityEnum {
- NTLM = 0, /* Legacy NTLM012 auth with NTLM hash */
+ LANMAN = 0, /* Legacy LANMAN auth */
+ NTLM, /* Legacy NTLM012 auth with NTLM hash */
NTLMv2, /* Legacy NTLM auth with NTLMv2 hash */
RawNTLMSSP, /* NTLMSSP without SPNEGO */
NTLMSSP, /* NTLMSSP via SPNEGO */
@@ -157,7 +158,7 @@ struct TCP_Server_Info {
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[SERVER_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* needed for CIFS PDU signature */
- char mac_signing_key[CIFS_SESSION_KEY_SIZE + 16];
+ char mac_signing_key[CIFS_SESS_KEY_SIZE + 16];
};
/*
@@ -179,10 +180,13 @@ struct cifsUidInfo {
struct cifsSesInfo {
struct list_head cifsSessionList;
struct semaphore sesSem;
+#if 0
struct cifsUidInfo *uidInfo; /* pointer to user info */
+#endif
struct TCP_Server_Info *server; /* pointer to server info */
atomic_t inUse; /* # of mounts (tree connections) on this ses */
enum statusEnum status;
+ unsigned overrideSecFlg; /* if non-zero override global sec flags */
__u16 ipc_tid; /* special tid for connection to IPC share */
__u16 flags;
char *serverOS; /* name of operating system underlying server */
@@ -194,7 +198,7 @@ struct cifsSesInfo {
char serverName[SERVER_NAME_LEN_WITH_NULL * 2]; /* BB make bigger for
TCP names - will ipv6 and sctp addresses fit? */
char userName[MAX_USERNAME_SIZE + 1];
- char domainName[MAX_USERNAME_SIZE + 1];
+ char * domainName;
char * password;
};
/* session flags */
@@ -209,12 +213,12 @@ struct cifsTconInfo {
struct list_head openFileList;
struct semaphore tconSem;
struct cifsSesInfo *ses; /* pointer to session associated with */
- char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource (in ASCII not UTF) */
+ char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */
char *nativeFileSystem;
__u16 tid; /* The 2 byte tree id */
__u16 Flags; /* optional support bits */
enum statusEnum tidStatus;
- atomic_t useCount; /* how many mounts (explicit or implicit) to this share */
+ atomic_t useCount; /* how many explicit/implicit mounts to share */
#ifdef CONFIG_CIFS_STATS
atomic_t num_smbs_sent;
atomic_t num_writes;
@@ -254,7 +258,7 @@ struct cifsTconInfo {
spinlock_t stat_lock;
#endif /* CONFIG_CIFS_STATS */
FILE_SYSTEM_DEVICE_INFO fsDevInfo;
- FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if file system name truncated */
+ FILE_SYSTEM_ATTRIBUTE_INFO fsAttrInfo; /* ok if fs name truncated */
FILE_SYSTEM_UNIX_INFO fsUnixInfo;
unsigned retry:1;
unsigned nocase:1;
@@ -305,7 +309,6 @@ struct cifsFileInfo {
atomic_t wrtPending; /* handle in use - defer close */
struct semaphore fh_sem; /* prevents reopen race after dead ses*/
char * search_resume_name; /* BB removeme BB */
- unsigned int resume_name_length; /* BB removeme - field renamed and moved BB */
struct cifs_search_info srch_inf;
};
@@ -391,9 +394,9 @@ struct mid_q_entry {
struct smb_hdr *resp_buf; /* response buffer */
int midState; /* wish this were enum but can not pass to wait_event */
__u8 command; /* smb command code */
- unsigned multiPart:1; /* multiple responses to one SMB request */
unsigned largeBuf:1; /* if valid response, is pointer to large buf */
- unsigned multiResp:1; /* multiple trans2 responses for one request */
+ unsigned multiRsp:1; /* multiple trans2 responses for one request */
+ unsigned multiEnd:1; /* both received */
};
struct oplock_q_entry {
@@ -430,15 +433,35 @@ struct dir_notify_req {
#define CIFS_LARGE_BUFFER 2
#define CIFS_IOVEC 4 /* array of response buffers */
-/* Type of session setup needed */
-#define CIFS_PLAINTEXT 0
-#define CIFS_LANMAN 1
-#define CIFS_NTLM 2
-#define CIFS_NTLMSSP_NEG 3
-#define CIFS_NTLMSSP_AUTH 4
-#define CIFS_SPNEGO_INIT 5
-#define CIFS_SPNEGO_TARG 6
-
+/* Security Flags: indicate type of session setup needed */
+#define CIFSSEC_MAY_SIGN 0x00001
+#define CIFSSEC_MAY_NTLM 0x00002
+#define CIFSSEC_MAY_NTLMV2 0x00004
+#define CIFSSEC_MAY_KRB5 0x00008
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFSSEC_MAY_LANMAN 0x00010
+#define CIFSSEC_MAY_PLNTXT 0x00020
+#endif /* weak passwords */
+#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
+
+#define CIFSSEC_MUST_SIGN 0x01001
+/* note that only one of the following MUST flags should be set at a
+time; setting more than one has the effect of requiring use of the
+stronger protocol */
+#define CIFSSEC_MUST_NTLM 0x02002
+#define CIFSSEC_MUST_NTLMV2 0x04004
+#define CIFSSEC_MUST_KRB5 0x08008
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFSSEC_MUST_LANMAN 0x10010
+#define CIFSSEC_MUST_PLNTXT 0x20020
+#define CIFSSEC_MASK 0x37037 /* current flags supported if weak */
+#else
+#define CIFSSEC_MASK 0x07007 /* flags supported if no weak config */
+#endif /* WEAK_PW_HASH */
+#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
+
+#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLM | CIFSSEC_MAY_NTLMV2)
+#define CIFSSEC_MAX (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_NTLMV2)
/*
*****************************************************************
* All constants go here
@@ -500,16 +523,16 @@ GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
GLOBAL_EXTERN struct list_head GlobalOplock_Q;
GLOBAL_EXTERN struct list_head GlobalDnotifyReqList; /* Outstanding dir notify requests */
-GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q; /* Dir notify response queue */
+GLOBAL_EXTERN struct list_head GlobalDnotifyRsp_Q;/* DirNotify response queue */
/*
* Global transaction id (XID) information
*/
GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
-GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
+GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
-GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above and list operations */
- /* on midQ entries */
+GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */
+ /* on midQ entries */
GLOBAL_EXTERN char Local_System_Name[15];
/*
@@ -531,7 +554,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
GLOBAL_EXTERN atomic_t midCount;
/* Misc globals */
-GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
+GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
to be established on existing mount if we
have the uid/password or Kerberos credential
or equivalent for current user */
@@ -540,8 +563,8 @@ GLOBAL_EXTERN unsigned int experimEnabled;
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int extended_security; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */
-GLOBAL_EXTERN unsigned int ntlmv2_support; /* better optional password hash */
GLOBAL_EXTERN unsigned int sign_CIFS_PDUs; /* enable smb packet signing */
+GLOBAL_EXTERN unsigned int secFlags;
GLOBAL_EXTERN unsigned int linuxExtEnabled;/*enable Linux/Unix CIFS extensions*/
GLOBAL_EXTERN unsigned int CIFSMaxBufSize; /* max size not including hdr */
GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
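
The CIFSSEC_* values added to cifsglob.h encode each mechanism twice: a MAY bit in the low bits, and a MUST value that is the MAY bit shifted left by twelve with the MAY bit still set, so a test of the form (flags & MUST_X) == MUST_X only matches when the mechanism is actually required. The standalone sketch below mirrors the selection order the reworked CIFSSMBNegotiate() uses later in this patch (MUST_NTLMV2 first, then MAY_NTLM, then MAY_NTLMV2); pick_sec_type() and the enum names are illustrative, while the flag values are copied from the hunk above.

/* Sketch of the MAY/MUST security-flag scheme and of the secType
 * selection order used in CIFSSMBNegotiate(). Helper names are
 * hypothetical; flag values match the new cifsglob.h defines. */
#include <stdio.h>

#define MAY_SIGN    0x00001
#define MAY_NTLM    0x00002
#define MAY_NTLMV2  0x00004
#define MAY_KRB5    0x00008
#define MUST_SIGN   0x01001
#define MUST_NTLM   0x02002
#define MUST_NTLMV2 0x04004
#define MUST_KRB5   0x08008

enum sec_type { SEC_NONE, SEC_NTLM, SEC_NTLMV2 };

static enum sec_type pick_sec_type(unsigned int flags)
{
	if ((flags & MUST_NTLMV2) == MUST_NTLMV2)
		return SEC_NTLMV2;		/* required, checked first */
	else if (flags & MAY_NTLM)
		return SEC_NTLM;
	else if (flags & MAY_NTLMV2)
		return SEC_NTLMV2;
	return SEC_NONE;			/* krb5 etc. handled separately */
}

int main(void)
{
	unsigned int def = MAY_SIGN | MAY_NTLM | MAY_NTLMV2;	/* CIFSSEC_DEF */
	unsigned int max = MUST_SIGN | MUST_NTLMV2;		/* CIFSSEC_MAX */

	printf("default 0x%05x -> %d\n", def, pick_sec_type(def));
	printf("max     0x%05x -> %d\n", max, pick_sec_type(max));
	/* a MUST value always implies the corresponding MAY bit */
	printf("MUST_NTLMV2 implies MAY_NTLMV2: %d\n",
	       (MUST_NTLMV2 & MAY_NTLMV2) == MAY_NTLMV2);
	return 0;
}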
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index b2233ac05bd279..86239023545b2d 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -16,7 +16,7 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _CIFSPDU_H
@@ -24,8 +24,14 @@
#include <net/sock.h>
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define LANMAN_PROT 0
+#define CIFS_PROT 1
+#else
#define CIFS_PROT 0
-#define BAD_PROT CIFS_PROT+1
+#endif
+#define POSIX_PROT CIFS_PROT+1
+#define BAD_PROT 0xFFFF
/* SMB command codes */
/* Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
@@ -110,7 +116,7 @@
/*
* Size of the session key (crypto key encrypted with the password
*/
-#define CIFS_SESSION_KEY_SIZE (24)
+#define CIFS_SESS_KEY_SIZE (24)
/*
* Maximum user name length
@@ -400,6 +406,29 @@ typedef struct negotiate_req {
unsigned char DialectsArray[1];
} __attribute__((packed)) NEGOTIATE_REQ;
+/* Dialect index is 13 for LANMAN */
+
+typedef struct lanman_neg_rsp {
+ struct smb_hdr hdr; /* wct = 13 */
+ __le16 DialectIndex;
+ __le16 SecurityMode;
+ __le16 MaxBufSize;
+ __le16 MaxMpxCount;
+ __le16 MaxNumberVcs;
+ __le16 RawMode;
+ __le32 SessionKey;
+ __le32 ServerTime;
+ __le16 ServerTimeZone;
+ __le16 EncryptionKeyLength;
+ __le16 Reserved;
+ __u16 ByteCount;
+ unsigned char EncryptionKey[1];
+} __attribute__((packed)) LANMAN_NEG_RSP;
+
+#define READ_RAW_ENABLE 1
+#define WRITE_RAW_ENABLE 2
+#define RAW_ENABLE (READ_RAW_ENABLE | WRITE_RAW_ENABLE)
+
typedef struct negotiate_rsp {
struct smb_hdr hdr; /* wct = 17 */
__le16 DialectIndex;
@@ -509,7 +538,7 @@ typedef union smb_com_session_setup_andx {
/* unsigned char * NativeOS; */
/* unsigned char * NativeLanMan; */
/* unsigned char * PrimaryDomain; */
- } __attribute__((packed)) resp; /* NTLM response format (with or without extended security */
+ } __attribute__((packed)) resp; /* NTLM response with or without extended sec*/
struct { /* request format */
struct smb_hdr hdr; /* wct = 10 */
@@ -520,8 +549,8 @@ typedef union smb_com_session_setup_andx {
__le16 MaxMpxCount;
__le16 VcNumber;
__u32 SessionKey;
- __le16 PassswordLength;
- __u32 Reserved;
+ __le16 PasswordLength;
+ __u32 Reserved; /* encrypt key len and offset */
__le16 ByteCount;
unsigned char AccountPassword[1]; /* followed by */
/* STRING AccountName */
@@ -543,6 +572,26 @@ typedef union smb_com_session_setup_andx {
} __attribute__((packed)) old_resp; /* pre-NTLM (LANMAN2.1) response */
} __attribute__((packed)) SESSION_SETUP_ANDX;
+/* format of NTLMv2 Response, i.e. "case sensitive password" hash when NTLMv2 */
+
+struct ntlmssp2_name {
+ __le16 type;
+ __le16 length;
+/* char name[length]; */
+} __attribute__((packed));
+
+struct ntlmv2_resp {
+ char ntlmv2_hash[CIFS_ENCPWD_SIZE];
+ __le32 blob_signature;
+ __u32 reserved;
+ __le64 time;
+ __u64 client_chal; /* random */
+ __u32 reserved2;
+ struct ntlmssp2_name names[1];
+ /* array of name entries could follow ending in minimum 4 byte struct */
+} __attribute__((packed));
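
setup_ntlmv2_rsp() treats the response buffer as this packed struct: the first 16 bytes (the ntlmv2_hash field) end up holding the HMAC result, and the remaining 32 bytes form the client blob sent on the wire (signature 0x00000101, timestamp, random client challenge, empty name list). The check below simply re-declares the layout with standard fixed-width types as stand-ins for __le16/__le32/__le64 and prints the field offsets, assuming CIFS_ENCPWD_SIZE is 16.

/* Layout check for the on-the-wire NTLMv2 response added above.
 * uint16_t/uint32_t/uint64_t stand in for __le16/__le32/__le64, and
 * ENCPWD_SIZE for CIFS_ENCPWD_SIZE (assumed 16 here). */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ENCPWD_SIZE 16

struct name_entry {
	uint16_t type;
	uint16_t length;
} __attribute__((packed));

struct ntlmv2_resp_sketch {
	char ntlmv2_hash[ENCPWD_SIZE];	/* HMAC result goes here */
	uint32_t blob_signature;	/* 0x00000101 */
	uint32_t reserved;
	uint64_t time;			/* NT time of the request */
	uint64_t client_chal;		/* random client challenge */
	uint32_t reserved2;
	struct name_entry names[1];	/* terminating empty AV pair */
} __attribute__((packed));

int main(void)
{
	printf("blob_signature at %zu\n",
	       offsetof(struct ntlmv2_resp_sketch, blob_signature));
	printf("time           at %zu\n", offsetof(struct ntlmv2_resp_sketch, time));
	printf("client_chal    at %zu\n",
	       offsetof(struct ntlmv2_resp_sketch, client_chal));
	printf("names          at %zu\n", offsetof(struct ntlmv2_resp_sketch, names));
	printf("sizeof         =  %zu\n", sizeof(struct ntlmv2_resp_sketch));
	return 0;
}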
+
+
#define CIFS_NETWORK_OPSYS "CIFS VFS Client for Linux"
/* Capabilities bits (for NTLM SessSetup request) */
@@ -573,7 +622,9 @@ typedef struct smb_com_tconx_req {
} __attribute__((packed)) TCONX_REQ;
typedef struct smb_com_tconx_rsp {
- struct smb_hdr hdr; /* wct = 3 *//* note that Win2000 has sent wct=7 in some cases on responses. Four unspecified words followed OptionalSupport */
+ struct smb_hdr hdr; /* wct = 3 note that Win2000 has sent wct = 7
+ in some cases on responses. Four unspecified
+ words followed OptionalSupport */
__u8 AndXCommand;
__u8 AndXReserved;
__le16 AndXOffset;
@@ -1323,6 +1374,9 @@ struct smb_t2_rsp {
#define SMB_FILE_MAXIMUM_INFO 0x40d
/* Find File infolevels */
+#define SMB_FIND_FILE_INFO_STANDARD 0x001
+#define SMB_FIND_FILE_QUERY_EA_SIZE 0x002
+#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
#define SMB_FIND_FILE_DIRECTORY_INFO 0x101
#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
#define SMB_FIND_FILE_NAMES_INFO 0x103
@@ -1844,13 +1898,13 @@ typedef struct {
typedef struct {
__le32 DeviceType;
__le32 DeviceCharacteristics;
-} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info, level 0x104 */
+} __attribute__((packed)) FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */
typedef struct {
__le32 Attributes;
__le32 MaxPathNameComponentLength;
__le32 FileSystemNameLen;
- char FileSystemName[52]; /* do not really need to save this - so potentially get only subset of name */
+ char FileSystemName[52]; /* do not have to save this - get subset? */
} __attribute__((packed)) FILE_SYSTEM_ATTRIBUTE_INFO;
/******************************************************************************/
@@ -1947,7 +2001,8 @@ typedef struct {
struct file_allocation_info {
__le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
-} __attribute__((packed)); /* size used on disk, level 0x103 for set, 0x105 for query */
+} __attribute__((packed)); /* size used on disk, level 0x103 for set,
+ 0x105 for query */
struct file_end_of_file_info {
__le64 FileSize; /* offset to end of file */
@@ -2054,7 +2109,7 @@ typedef struct {
__le32 ExtFileAttributes;
__le32 FileNameLength;
char FileName[1];
-} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF response data area */
+} __attribute__((packed)) FILE_DIRECTORY_INFO; /* level 0x101 FF resp data */
typedef struct {
__le32 NextEntryOffset;
@@ -2069,7 +2124,7 @@ typedef struct {
__le32 FileNameLength;
__le32 EaSize; /* length of the xattrs */
char FileName[1];
-} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 FF response data area */
+} __attribute__((packed)) FILE_FULL_DIRECTORY_INFO; /* level 0x102 rsp data */
typedef struct {
__le32 NextEntryOffset;
@@ -2086,7 +2141,7 @@ typedef struct {
__le32 Reserved;
__u64 UniqueId; /* inode num - le since Samba puts ino in low 32 bit*/
char FileName[1];
-} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF response data area */
+} __attribute__((packed)) SEARCH_ID_FULL_DIR_INFO; /* level 0x105 FF rsp data */
typedef struct {
__le32 NextEntryOffset;
@@ -2104,7 +2159,22 @@ typedef struct {
__u8 Reserved;
__u8 ShortName[12];
char FileName[1];
-} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FF response data area */
+} __attribute__((packed)) FILE_BOTH_DIRECTORY_INFO; /* level 0x104 FF rsp data */
+
+typedef struct {
+ __u32 ResumeKey;
+ __le16 CreationDate; /* SMB Date */
+ __le16 CreationTime; /* SMB Time */
+ __le16 LastAccessDate;
+ __le16 LastAccessTime;
+ __le16 LastWriteDate;
+ __le16 LastWriteTime;
+ __le32 DataSize; /* File Size (EOF) */
+ __le32 AllocationSize;
+ __le16 Attributes; /* verify not u32 */
+ __u8 FileNameLength;
+ char FileName[1];
+} __attribute__((packed)) FIND_FILE_STANDARD_INFO; /* level 0x1 FF resp data */
struct win_dev {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 310ea2f0e0bfd2..a5ddc62d6fe6e2 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -64,14 +64,12 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
fixed section (word count) in two byte units */);
-#ifdef CONFIG_CIFS_EXPERIMENTAL
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifsSesInfo *ses,
void ** request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
- const int stage, int * pNTLMv2_flg,
+ const int stage,
const struct nls_table *nls_cp);
-#endif
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
@@ -285,8 +283,14 @@ extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
extern int cifs_verify_signature(struct smb_hdr *, const char * mac_key,
__u32 expected_sequence_number);
extern int cifs_calculate_mac_key(char * key,const char * rn,const char * pass);
-extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *, struct nls_table *);
-extern void CalcNTLMv2_response(const struct cifsSesInfo *,char * );
+extern int CalcNTLMv2_partial_mac_key(struct cifsSesInfo *,
+ const struct nls_table *);
+extern void CalcNTLMv2_response(const struct cifsSesInfo *, char * );
+extern void setup_ntlmv2_rsp(struct cifsSesInfo *, char *,
+ const struct nls_table *);
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+extern void calc_lanman_hash(struct cifsSesInfo * ses, char * lnm_session_key);
+#endif /* CIFS_WEAK_PW_HASH */
extern int CIFSSMBCopy(int xid,
struct cifsTconInfo *source_tcon,
const char *fromName,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 925881e00ff210..19678c575dfc4b 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -44,8 +44,11 @@ static struct {
int index;
char *name;
} protocols[] = {
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ {LANMAN_PROT, "\2LM1.2X002"},
+#endif /* weak password hashing for legacy clients */
{CIFS_PROT, "\2NT LM 0.12"},
- {CIFS_PROT, "\2POSIX 2"},
+ {POSIX_PROT, "\2POSIX 2"},
{BAD_PROT, "\2"}
};
#else
@@ -53,11 +56,29 @@ static struct {
int index;
char *name;
} protocols[] = {
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ {LANMAN_PROT, "\2LM1.2X002"},
+#endif /* weak password hashing for legacy clients */
{CIFS_PROT, "\2NT LM 0.12"},
{BAD_PROT, "\2"}
};
#endif
+/* define the number of elements in the cifs dialect array */
+#ifdef CONFIG_CIFS_POSIX
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFS_NUM_PROT 3
+#else
+#define CIFS_NUM_PROT 2
+#endif /* CIFS_WEAK_PW_HASH */
+#else /* not posix */
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+#define CIFS_NUM_PROT 2
+#else
+#define CIFS_NUM_PROT 1
+#endif /* CONFIG_CIFS_WEAK_PW_HASH */
+#endif /* CIFS_POSIX */
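
CIFS_NUM_PROT counts how many entries of the protocols[] table get sent, depending on whether the LANMAN and POSIX dialects are compiled in. The negotiate request's DialectsArray is then the concatenation of those NUL-terminated strings, each beginning with the 0x02 dialect-marker byte, and ByteCount is the total length; the for loop over CIFS_NUM_PROT later in this hunk does exactly that. A minimal standalone version of that packing loop is shown here; pack_dialects() is an illustrative name, and the table always includes all three dialects rather than varying with kernel config.

/* Standalone version of the dialect-array packing done in
 * CIFSSMBNegotiate(): concatenate the NUL-terminated dialect strings
 * (each starting with the 0x02 marker byte) and return the byte count. */
#include <stdio.h>
#include <string.h>

static const char *dialects[] = {
	"\2LM1.2X002",		/* only sent when weak password hashes are built in */
	"\2NT LM 0.12",
	"\2POSIX 2",		/* only sent when CONFIG_CIFS_POSIX is set */
};
#define NUM_PROT (sizeof(dialects) / sizeof(dialects[0]))

static size_t pack_dialects(char *dst, size_t dst_len)
{
	size_t count = 0;

	for (size_t i = 0; i < NUM_PROT; i++) {
		size_t len = strlen(dialects[i]) + 1;	/* keep the NUL */
		if (count + len > dst_len)
			break;
		memcpy(dst + count, dialects[i], len);
		count += len;
	}
	return count;		/* becomes pSMB->ByteCount in the real code */
}

int main(void)
{
	char buf[64];
	size_t n = pack_dialects(buf, sizeof(buf));

	printf("packed %zu bytes:", n);
	for (size_t i = 0; i < n; i++)
		printf(" %02x", (unsigned char)buf[i]);
	printf("\n");
	return 0;
}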
+
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -188,7 +209,6 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
return rc;
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
int
small_smb_init_no_tc(const int smb_command, const int wct,
struct cifsSesInfo *ses, void **request_buf)
@@ -214,7 +234,6 @@ small_smb_init_no_tc(const int smb_command, const int wct,
return rc;
}
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* If the return code is zero, this function must fill in request_buf pointer */
static int
@@ -322,7 +341,8 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
/* potential retries of smb operations it turns out we can determine */
/* from the mid flags when the request buffer can be resent without */
/* having to use a second distinct buffer for the response */
- *response_buf = *request_buf;
+ if(response_buf)
+ *response_buf = *request_buf;
header_assemble((struct smb_hdr *) *request_buf, smb_command, tcon,
wct /*wct */ );
@@ -373,8 +393,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
NEGOTIATE_RSP *pSMBr;
int rc = 0;
int bytes_returned;
+ int i;
struct TCP_Server_Info * server;
u16 count;
+ unsigned int secFlags;
if(ses->server)
server = ses->server;
@@ -386,101 +408,200 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses)
(void **) &pSMB, (void **) &pSMBr);
if (rc)
return rc;
+
+ /* if any of the auth flags (i.e. not sign or seal) are overridden, use them */
+ if(ses->overrideSecFlg & (~(CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL)))
+ secFlags = ses->overrideSecFlg;
+ else /* if override flags set only sign/seal, OR them with the global auth flags */
+ secFlags = extended_security | ses->overrideSecFlg;
+
+ cFYI(1,("secFlags 0x%x",secFlags));
+
pSMB->hdr.Mid = GetNextMid(server);
pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
- if (extended_security)
+ if((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
pSMB->hdr.Flags2 |= SMBFLG2_EXT_SEC;
-
- count = strlen(protocols[0].name) + 1;
- strncpy(pSMB->DialectsArray, protocols[0].name, 30);
- /* null guaranteed to be at end of source and target buffers anyway */
-
+
+ count = 0;
+ for(i=0;i<CIFS_NUM_PROT;i++) {
+ strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
+ count += strlen(protocols[i].name) + 1;
+ /* null at end of source and target buffers anyway */
+ }
pSMB->hdr.smb_buf_length += count;
pSMB->ByteCount = cpu_to_le16(count);
rc = SendReceive(xid, ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
- if (rc == 0) {
- server->secMode = pSMBr->SecurityMode;
- if((server->secMode & SECMODE_USER) == 0)
- cFYI(1,("share mode security"));
- server->secType = NTLM; /* BB override default for
- NTLMv2 or kerberos v5 */
- /* one byte - no need to convert this or EncryptionKeyLen
- from little endian */
- server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
- /* probably no need to store and check maxvcs */
- server->maxBuf =
- min(le32_to_cpu(pSMBr->MaxBufferSize),
+ if (rc != 0)
+ goto neg_err_exit;
+
+ cFYI(1,("Dialect: %d", pSMBr->DialectIndex));
+ /* Check wct = 1 error case */
+ if((pSMBr->hdr.WordCount < 13) || (pSMBr->DialectIndex == BAD_PROT)) {
+ /* core returns wct = 1, but we do not ask for core - otherwise
+ a small wct just means the dialect index is -1, indicating we
+ could not negotiate a common dialect */
+ rc = -EOPNOTSUPP;
+ goto neg_err_exit;
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ } else if((pSMBr->hdr.WordCount == 13)
+ && (pSMBr->DialectIndex == LANMAN_PROT)) {
+ struct lanman_neg_rsp * rsp = (struct lanman_neg_rsp *)pSMBr;
+
+ if((secFlags & CIFSSEC_MAY_LANMAN) ||
+ (secFlags & CIFSSEC_MAY_PLNTXT))
+ server->secType = LANMAN;
+ else {
+ cERROR(1, ("mount failed weak security disabled"
+ " in /proc/fs/cifs/SecurityFlags"));
+ rc = -EOPNOTSUPP;
+ goto neg_err_exit;
+ }
+ server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode);
+ server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
+ server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
+ (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
+ GETU32(server->sessid) = le32_to_cpu(rsp->SessionKey);
+ /* even though we do not use raw we might as well set this
+ accurately, in case we ever find a need for it */
+ if((le16_to_cpu(rsp->RawMode) & RAW_ENABLE) == RAW_ENABLE) {
+ server->maxRw = 0xFF00;
+ server->capabilities = CAP_MPX_MODE | CAP_RAW_MODE;
+ } else {
+ server->maxRw = 0;/* we do not need to use raw anyway */
+ server->capabilities = CAP_MPX_MODE;
+ }
+ server->timeZone = le16_to_cpu(rsp->ServerTimeZone);
+
+ /* BB get server time for time conversions and add
+ code to use it and timezone since this is not UTC */
+
+ if (rsp->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
+ memcpy(server->cryptKey, rsp->EncryptionKey,
+ CIFS_CRYPTO_KEY_SIZE);
+ } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ rc = -EIO; /* need cryptkey unless plain text */
+ goto neg_err_exit;
+ }
+
+ cFYI(1,("LANMAN negotiated"));
+ /* we will not end up setting signing flags - no signing was
+ available in LANMAN and the response did not return the signing flags */
+ goto signing_check;
+#else /* weak security disabled */
+ } else if(pSMBr->hdr.WordCount == 13) {
+ cERROR(1,("mount failed, cifs module not built "
+ "with CIFS_WEAK_PW_HASH support"));
+ rc = -EOPNOTSUPP;
+#endif /* WEAK_PW_HASH */
+ goto neg_err_exit;
+ } else if(pSMBr->hdr.WordCount != 17) {
+ /* unknown wct */
+ rc = -EOPNOTSUPP;
+ goto neg_err_exit;
+ }
+ /* else wct == 17 NTLM */
+ server->secMode = pSMBr->SecurityMode;
+ if((server->secMode & SECMODE_USER) == 0)
+ cFYI(1,("share mode security"));
+
+ if((server->secMode & SECMODE_PW_ENCRYPT) == 0)
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0)
+#endif /* CIFS_WEAK_PW_HASH */
+ cERROR(1,("Server requests plain text password"
+ " but client support disabled"));
+
+ if((secFlags & CIFSSEC_MUST_NTLMV2) == CIFSSEC_MUST_NTLMV2)
+ server->secType = NTLMv2;
+ else if(secFlags & CIFSSEC_MAY_NTLM)
+ server->secType = NTLM;
+ else if(secFlags & CIFSSEC_MAY_NTLMV2)
+ server->secType = NTLMv2;
+ /* else krb5 ... any others ... */
+
+ /* one byte, so no need to convert this or EncryptionKeyLen from
+ little endian */
+ server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
+ /* probably no need to store and check maxvcs */
+ server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
- server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
- cFYI(0, ("Max buf = %d", ses->server->maxBuf));
- GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
- server->capabilities = le32_to_cpu(pSMBr->Capabilities);
- server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);
- /* BB with UTC do we ever need to be using srvr timezone? */
- if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
- memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
- CIFS_CRYPTO_KEY_SIZE);
- } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
- && (pSMBr->EncryptionKeyLength == 0)) {
- /* decode security blob */
- } else
- rc = -EIO;
+ server->maxRw = le32_to_cpu(pSMBr->MaxRawSize);
+ cFYI(0, ("Max buf = %d", ses->server->maxBuf));
+ GETU32(ses->server->sessid) = le32_to_cpu(pSMBr->SessionKey);
+ server->capabilities = le32_to_cpu(pSMBr->Capabilities);
+ server->timeZone = le16_to_cpu(pSMBr->ServerTimeZone);
+ if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) {
+ memcpy(server->cryptKey, pSMBr->u.EncryptionKey,
+ CIFS_CRYPTO_KEY_SIZE);
+ } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC)
+ && (pSMBr->EncryptionKeyLength == 0)) {
+ /* decode security blob */
+ } else if (server->secMode & SECMODE_PW_ENCRYPT) {
+ rc = -EIO; /* no crypt key only if plain text pwd */
+ goto neg_err_exit;
+ }
- /* BB might be helpful to save off the domain of server here */
+ /* BB might be helpful to save off the domain of server here */
- if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
- (server->capabilities & CAP_EXTENDED_SECURITY)) {
- count = pSMBr->ByteCount;
- if (count < 16)
- rc = -EIO;
- else if (count == 16) {
- server->secType = RawNTLMSSP;
- if (server->socketUseCount.counter > 1) {
- if (memcmp
- (server->server_GUID,
- pSMBr->u.extended_response.
- GUID, 16) != 0) {
- cFYI(1, ("server UID changed"));
- memcpy(server->
- server_GUID,
- pSMBr->u.
- extended_response.
- GUID, 16);
- }
- } else
+ if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) &&
+ (server->capabilities & CAP_EXTENDED_SECURITY)) {
+ count = pSMBr->ByteCount;
+ if (count < 16)
+ rc = -EIO;
+ else if (count == 16) {
+ server->secType = RawNTLMSSP;
+ if (server->socketUseCount.counter > 1) {
+ if (memcmp(server->server_GUID,
+ pSMBr->u.extended_response.
+ GUID, 16) != 0) {
+ cFYI(1, ("server UID changed"));
memcpy(server->server_GUID,
- pSMBr->u.extended_response.
- GUID, 16);
- } else {
- rc = decode_negTokenInit(pSMBr->u.
- extended_response.
- SecurityBlob,
- count - 16,
- &server->secType);
- if(rc == 1) {
- /* BB Need to fill struct for sessetup here */
- rc = -EOPNOTSUPP;
- } else {
- rc = -EINVAL;
+ pSMBr->u.extended_response.GUID,
+ 16);
}
+ } else
+ memcpy(server->server_GUID,
+ pSMBr->u.extended_response.GUID, 16);
+ } else {
+ rc = decode_negTokenInit(pSMBr->u.extended_response.
+ SecurityBlob,
+ count - 16,
+ &server->secType);
+ if(rc == 1) {
+ /* BB Need to fill struct for sessetup here */
+ rc = -EOPNOTSUPP;
+ } else {
+ rc = -EINVAL;
}
- } else
- server->capabilities &= ~CAP_EXTENDED_SECURITY;
- if(sign_CIFS_PDUs == FALSE) {
- if(server->secMode & SECMODE_SIGN_REQUIRED)
- cERROR(1,
- ("Server requires /proc/fs/cifs/PacketSigningEnabled"));
- server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
- } else if(sign_CIFS_PDUs == 1) {
- if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
- server->secMode &= ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
}
-
+ } else
+ server->capabilities &= ~CAP_EXTENDED_SECURITY;
+
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+signing_check:
+#endif
+ if(sign_CIFS_PDUs == FALSE) {
+ if(server->secMode & SECMODE_SIGN_REQUIRED)
+ cERROR(1,("Server requires "
+ "/proc/fs/cifs/PacketSigningEnabled to be on"));
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+ } else if(sign_CIFS_PDUs == 1) {
+ if((server->secMode & SECMODE_SIGN_REQUIRED) == 0)
+ server->secMode &=
+ ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED);
+ } else if(sign_CIFS_PDUs == 2) {
+ if((server->secMode &
+ (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) {
+ cERROR(1,("signing required but server lacks support"));
+ }
}
-
+neg_err_exit:
cifs_buf_release(pSMB);
+
+ cFYI(1,("negprot rc %d",rc));
return rc;
}
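
At the end of the rewritten negotiate path the server's announced signing bits are reconciled with the local sign_CIFS_PDUs setting: 0 disables signing (warning if the server requires it), 1 keeps signing only when the server requires it, and 2 insists on it and complains when the server cannot sign. A compact sketch of that decision follows; resolve_signing() is an illustrative name and the SIGN_* bit values are placeholders, not the kernel SECMODE_* constants.

/* Sketch of the signing negotiation at the tail of CIFSSMBNegotiate():
 * local policy 0 = never sign, 1 = sign only if the server requires it,
 * 2 = always require signing. */
#include <stdio.h>

#define SIGN_ENABLED  0x01	/* placeholder for SECMODE_SIGN_ENABLED */
#define SIGN_REQUIRED 0x02	/* placeholder for SECMODE_SIGN_REQUIRED */

static unsigned char resolve_signing(unsigned char server_mode, int policy)
{
	if (policy == 0) {
		if (server_mode & SIGN_REQUIRED)
			fprintf(stderr, "server requires signing but it is disabled locally\n");
		server_mode &= ~(SIGN_ENABLED | SIGN_REQUIRED);
	} else if (policy == 1) {
		if ((server_mode & SIGN_REQUIRED) == 0)
			server_mode &= ~(SIGN_ENABLED | SIGN_REQUIRED);
	} else if (policy == 2) {
		if ((server_mode & (SIGN_ENABLED | SIGN_REQUIRED)) == 0)
			fprintf(stderr, "signing required but server lacks support\n");
	}
	return server_mode;
}

int main(void)
{
	printf("policy 1, server enabled only -> 0x%x\n",
	       resolve_signing(SIGN_ENABLED, 1));
	printf("policy 1, server requires     -> 0x%x\n",
	       resolve_signing(SIGN_ENABLED | SIGN_REQUIRED, 1));
	return 0;
}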
@@ -2239,7 +2360,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon,
}
symlinkinfo[buflen] = 0; /* just in case so the caller
does not go off the end of the buffer */
- cFYI(1,("readlink result - %s ",symlinkinfo));
+ cFYI(1,("readlink result - %s",symlinkinfo));
}
}
qreparse_out:
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index bae1479318d10f..876eb9ef85fecd 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -49,8 +49,6 @@
static DECLARE_COMPLETION(cifsd_complete);
-extern void SMBencrypt(unsigned char *passwd, unsigned char *c8,
- unsigned char *p24);
extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
unsigned char *p24);
@@ -70,6 +68,7 @@ struct smb_vol {
gid_t linux_gid;
mode_t file_mode;
mode_t dir_mode;
+ unsigned secFlg;
unsigned rw:1;
unsigned retry:1;
unsigned intr:1;
@@ -83,12 +82,7 @@ struct smb_vol {
unsigned remap:1; /* set to remap seven reserved chars in filenames */
unsigned posix_paths:1; /* unset to not ask for posix pathnames. */
unsigned sfu_emul:1;
- unsigned krb5:1;
- unsigned ntlm:1;
- unsigned ntlmv2:1;
unsigned nullauth:1; /* attempt to authenticate with null user */
- unsigned sign:1;
- unsigned seal:1; /* encrypt */
unsigned nocase; /* request case insensitive filenames */
unsigned nobrl; /* disable sending byte range locks to srv */
unsigned int rsize;
@@ -369,21 +363,21 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
continue;
if (bigbuf == NULL) {
bigbuf = cifs_buf_get();
- if(bigbuf == NULL) {
- cERROR(1,("No memory for large SMB response"));
+ if (!bigbuf) {
+ cERROR(1, ("No memory for large SMB response"));
msleep(3000);
/* retry will check if exiting */
continue;
}
- } else if(isLargeBuf) {
- /* we are reusing a dirtry large buf, clear its start */
+ } else if (isLargeBuf) {
+ /* we are reusing a dirty large buf, clear its start */
memset(bigbuf, 0, sizeof (struct smb_hdr));
}
if (smallbuf == NULL) {
smallbuf = cifs_small_buf_get();
- if(smallbuf == NULL) {
- cERROR(1,("No memory for SMB response"));
+ if (!smallbuf) {
+ cERROR(1, ("No memory for SMB response"));
msleep(1000);
/* retry will check if exiting */
continue;
@@ -403,12 +397,12 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
kernel_recvmsg(csocket, &smb_msg,
&iov, 1, 4, 0 /* BB see socket.h flags */);
- if(server->tcpStatus == CifsExiting) {
+ if (server->tcpStatus == CifsExiting) {
break;
} else if (server->tcpStatus == CifsNeedReconnect) {
- cFYI(1,("Reconnect after server stopped responding"));
+ cFYI(1, ("Reconnect after server stopped responding"));
cifs_reconnect(server);
- cFYI(1,("call to reconnect done"));
+ cFYI(1, ("call to reconnect done"));
csocket = server->ssocket;
continue;
} else if ((length == -ERESTARTSYS) || (length == -EAGAIN)) {
@@ -417,15 +411,15 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
tcpStatus CifsNeedReconnect if server hung */
continue;
} else if (length <= 0) {
- if(server->tcpStatus == CifsNew) {
- cFYI(1,("tcp session abend after SMBnegprot"));
+ if (server->tcpStatus == CifsNew) {
+ cFYI(1, ("tcp session abend after SMBnegprot"));
/* some servers kill the TCP session rather than
returning an SMB negprot error, in which
case reconnecting here is not going to help,
and so simply return error to mount */
break;
}
- if(length == -EINTR) {
+ if (!try_to_freeze() && (length == -EINTR)) {
cFYI(1,("cifsd thread killed"));
break;
}
@@ -585,9 +579,11 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
/* merge response - fix up 1st*/
if(coalesce_t2(smb_buffer,
mid_entry->resp_buf)) {
+ mid_entry->multiRsp = 1;
break;
} else {
/* all parts received */
+ mid_entry->multiEnd = 1;
goto multi_t2_fnd;
}
} else {
@@ -632,9 +628,14 @@ multi_t2_fnd:
wake_up_process(task_to_wake);
} else if ((is_valid_oplock_break(smb_buffer, server) == FALSE)
&& (isMultiRsp == FALSE)) {
- cERROR(1, ("No task to wake, unknown frame rcvd!"));
+ cERROR(1, ("No task to wake, unknown frame rcvd! NumMids %d", midCount.counter));
cifs_dump_mem("Received Data is: ",(char *)smb_buffer,
sizeof(struct smb_hdr));
+#ifdef CONFIG_CIFS_DEBUG2
+ cifs_dump_detail(smb_buffer);
+ cifs_dump_mids(server);
+#endif /* CIFS_DEBUG2 */
+
}
} /* end while !EXITING */
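
The demultiplex-thread changes above add two mid-queue bits, multiRsp and multiEnd, so a mid entry can record both that partial trans2 responses have been coalesced and, separately, that the final part has arrived; the unknown-frame branch now also dumps the offending header and the mid queue when CONFIG_CIFS_DEBUG2 is set. A tiny state sketch of that two-bit bookkeeping is below; coalesce_part() is a hypothetical stand-in for coalesce_t2().

/* Sketch of the multiRsp/multiEnd bookkeeping added to struct mid_q_entry:
 * each partial trans2 response sets multiRsp, and the final part sets
 * multiEnd, at which point the waiting caller can be woken. */
#include <stdbool.h>
#include <stdio.h>

struct mid_entry {
	unsigned largeBuf:1;
	unsigned multiRsp:1;	/* at least one partial response merged */
	unsigned multiEnd:1;	/* all parts received */
};

/* hypothetical stand-in for coalesce_t2(): returns true while more
 * response parts are still expected */
static bool coalesce_part(struct mid_entry *mid, bool more_expected)
{
	if (more_expected) {
		mid->multiRsp = 1;
		return true;
	}
	mid->multiEnd = 1;	/* last part: caller may be woken now */
	return false;
}

int main(void)
{
	struct mid_entry mid = {0};

	coalesce_part(&mid, true);
	coalesce_part(&mid, true);
	coalesce_part(&mid, false);
	printf("multiRsp=%u multiEnd=%u\n", mid.multiRsp, mid.multiEnd);
	return 0;
}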
@@ -784,7 +785,6 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
/* vol->retry default is 0 (i.e. "soft" limited retry not hard retry) */
vol->rw = TRUE;
- vol->ntlm = TRUE;
/* default is always to request posix paths. */
vol->posix_paths = 1;
@@ -915,30 +915,35 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
cERROR(1,("no security value specified"));
continue;
} else if (strnicmp(value, "krb5i", 5) == 0) {
- vol->sign = 1;
- vol->krb5 = 1;
+ vol->secFlg |= CIFSSEC_MAY_KRB5 |
+ CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "krb5p", 5) == 0) {
- /* vol->seal = 1;
- vol->krb5 = 1; */
+ /* vol->secFlg |= CIFSSEC_MUST_SEAL |
+ CIFSSEC_MAY_KRB5; */
cERROR(1,("Krb5 cifs privacy not supported"));
return 1;
} else if (strnicmp(value, "krb5", 4) == 0) {
- vol->krb5 = 1;
+ vol->secFlg |= CIFSSEC_MAY_KRB5;
} else if (strnicmp(value, "ntlmv2i", 7) == 0) {
- vol->ntlmv2 = 1;
- vol->sign = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2 |
+ CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "ntlmv2", 6) == 0) {
- vol->ntlmv2 = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2;
} else if (strnicmp(value, "ntlmi", 5) == 0) {
- vol->ntlm = 1;
- vol->sign = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLM |
+ CIFSSEC_MUST_SIGN;
} else if (strnicmp(value, "ntlm", 4) == 0) {
/* ntlm is default so can be turned off too */
- vol->ntlm = 1;
+ vol->secFlg |= CIFSSEC_MAY_NTLM;
} else if (strnicmp(value, "nontlm", 6) == 0) {
- vol->ntlm = 0;
+ /* BB is there a better way to do this? */
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2;
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ } else if (strnicmp(value, "lanman", 6) == 0) {
+ vol->secFlg |= CIFSSEC_MAY_LANMAN;
+#endif
} else if (strnicmp(value, "none", 4) == 0) {
- vol->nullauth = 1;
+ vol->nullauth = 1;
} else {
cERROR(1,("bad security option: %s", value));
return 1;
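
The sec= mount-option parsing now maps each keyword onto combinations of the new CIFSSEC_* bits instead of separate booleans: krb5i, ntlmv2i and ntlmi add CIFSSEC_MUST_SIGN on top of the corresponding MAY bit, and nontlm simply prefers NTLMv2. A table-driven sketch of that mapping follows; the keywords and flag combinations mirror the cases above (krb5p, which is rejected as unsupported, and none, which sets nullauth, are left out), the flag values come from cifsglob.h, and parse_sec_opt() itself is an illustrative helper using case-sensitive matching rather than the kernel's strnicmp.

/* Table-driven sketch of the sec= keyword to security-flag mapping now
 * done in cifs_parse_mount_options(); lanman only exists when weak
 * password hashes are compiled in. */
#include <stdio.h>
#include <string.h>

#define MAY_NTLM    0x00002
#define MAY_NTLMV2  0x00004
#define MAY_KRB5    0x00008
#define MAY_LANMAN  0x00010
#define MUST_SIGN   0x01001

/* longest keywords first so prefix matching stays unambiguous */
static const struct {
	const char *keyword;
	unsigned int flags;
} sec_opts[] = {
	{ "krb5i",   MAY_KRB5 | MUST_SIGN },
	{ "krb5",    MAY_KRB5 },
	{ "ntlmv2i", MAY_NTLMV2 | MUST_SIGN },
	{ "ntlmv2",  MAY_NTLMV2 },
	{ "ntlmi",   MAY_NTLM | MUST_SIGN },
	{ "ntlm",    MAY_NTLM },
	{ "nontlm",  MAY_NTLMV2 },
	{ "lanman",  MAY_LANMAN },
};

static int parse_sec_opt(const char *value, unsigned int *sec_flags)
{
	for (size_t i = 0; i < sizeof(sec_opts) / sizeof(sec_opts[0]); i++) {
		if (strncmp(value, sec_opts[i].keyword,
			    strlen(sec_opts[i].keyword)) == 0) {
			*sec_flags |= sec_opts[i].flags;
			return 0;
		}
	}
	return -1;	/* bad security option */
}

int main(void)
{
	unsigned int flags = 0;

	if (parse_sec_opt("ntlmv2i", &flags) == 0)
		printf("ntlmv2i -> 0x%05x\n", flags);
	return 0;
}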
@@ -976,7 +981,7 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
}
/* BB are there cases in which a comma can be valid in
a domain name and need special handling? */
- if (strnlen(value, 65) < 65) {
+ if (strnlen(value, 256) < 256) {
vol->domainname = value;
cFYI(1, ("Domain name set"));
} else {
@@ -1168,6 +1173,10 @@ cifs_parse_mount_options(char *options, const char *devname,struct smb_vol *vol)
vol->no_psx_acl = 0;
} else if (strnicmp(data, "noacl",5) == 0) {
vol->no_psx_acl = 1;
+ } else if (strnicmp(data, "sign",4) == 0) {
+ vol->secFlg |= CIFSSEC_MUST_SIGN;
+/* } else if (strnicmp(data, "seal",4) == 0) {
+ vol->secFlg |= CIFSSEC_MUST_SEAL; */
} else if (strnicmp(data, "direct",6) == 0) {
vol->direct_io = 1;
} else if (strnicmp(data, "forcedirectio",13) == 0) {
@@ -1762,11 +1771,18 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if (volume_info.username)
strncpy(pSesInfo->userName,
volume_info.username,MAX_USERNAME_SIZE);
- if (volume_info.domainname)
- strncpy(pSesInfo->domainName,
- volume_info.domainname,MAX_USERNAME_SIZE);
+ if (volume_info.domainname) {
+ int len = strlen(volume_info.domainname);
+ pSesInfo->domainName =
+ kmalloc(len + 1, GFP_KERNEL);
+ if(pSesInfo->domainName)
+ strcpy(pSesInfo->domainName,
+ volume_info.domainname);
+ }
pSesInfo->linux_uid = volume_info.linux_uid;
+ pSesInfo->overrideSecFlg = volume_info.secFlg;
down(&pSesInfo->sesSem);
+ /* BB FIXME need to pass vol->secFlgs BB */
rc = cifs_setup_session(xid,pSesInfo, cifs_sb->local_nls);
up(&pSesInfo->sesSem);
if(!rc)
@@ -1980,7 +1996,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
static int
CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
- char session_key[CIFS_SESSION_KEY_SIZE],
+ char session_key[CIFS_SESS_KEY_SIZE],
const struct nls_table *nls_codepage)
{
struct smb_hdr *smb_buffer;
@@ -2038,15 +2054,15 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
pSMB->req_no_secext.CaseInsensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
bcc_ptr = pByteArea(smb_buffer);
- memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE);
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
- memcpy(bcc_ptr, (char *) session_key, CIFS_SESSION_KEY_SIZE);
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
+ memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ memcpy(bcc_ptr, (char *) session_key, CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
if (ses->capabilities & CAP_UNICODE) {
if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode */
@@ -2054,7 +2070,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
}
if(user == NULL)
- bytes_returned = 0; /* skill null user */
+ bytes_returned = 0; /* skip null user */
else
bytes_returned =
cifs_strtoUCS((__le16 *) bcc_ptr, user, 100,
@@ -2162,8 +2178,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if (remaining_words > 0) {
len = UniStrnlen((wchar_t *)bcc_ptr,
remaining_words-1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(2 * (len + 1),GFP_KERNEL);
if(ses->serverNOS == NULL)
goto sesssetup_nomem;
@@ -2203,12 +2218,10 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
/* if these kcallocs fail not much we
can do, but better to not fail the
sesssetup itself */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2, GFP_KERNEL);
}
@@ -2217,8 +2230,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if (((long) bcc_ptr + len) - (long)
pByteArea(smb_buffer_response)
<= BCC(smb_buffer_response)) {
- if(ses->serverOS)
- kfree(ses->serverOS);
+ kfree(ses->serverOS);
ses->serverOS = kzalloc(len + 1,GFP_KERNEL);
if(ses->serverOS == NULL)
goto sesssetup_nomem;
@@ -2229,8 +2241,7 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
if(ses->serverNOS == NULL)
goto sesssetup_nomem;
@@ -2274,292 +2285,6 @@ sesssetup_nomem: /* do not return an error on nomem for the info strings,
}
static int
-CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
- char *SecurityBlob,int SecurityBlobLength,
- const struct nls_table *nls_codepage)
-{
- struct smb_hdr *smb_buffer;
- struct smb_hdr *smb_buffer_response;
- SESSION_SETUP_ANDX *pSMB;
- SESSION_SETUP_ANDX *pSMBr;
- char *bcc_ptr;
- char *user;
- char *domain;
- int rc = 0;
- int remaining_words = 0;
- int bytes_returned = 0;
- int len;
- __u32 capabilities;
- __u16 count;
-
- cFYI(1, ("In spnego sesssetup "));
- if(ses == NULL)
- return -EINVAL;
- user = ses->userName;
- domain = ses->domainName;
-
- smb_buffer = cifs_buf_get();
- if (smb_buffer == NULL) {
- return -ENOMEM;
- }
- smb_buffer_response = smb_buffer;
- pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
-
- /* send SMBsessionSetup here */
- header_assemble(smb_buffer, SMB_COM_SESSION_SETUP_ANDX,
- NULL /* no tCon exists yet */ , 12 /* wct */ );
-
- smb_buffer->Mid = GetNextMid(ses->server);
- pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
- pSMB->req.AndXCommand = 0xFF;
- if(ses->server->maxBuf > 64*1024)
- ses->server->maxBuf = (64*1023);
- pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
- pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
-
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
- capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
- CAP_EXTENDED_SECURITY;
- if (ses->capabilities & CAP_UNICODE) {
- smb_buffer->Flags2 |= SMBFLG2_UNICODE;
- capabilities |= CAP_UNICODE;
- }
- if (ses->capabilities & CAP_STATUS32) {
- smb_buffer->Flags2 |= SMBFLG2_ERR_STATUS;
- capabilities |= CAP_STATUS32;
- }
- if (ses->capabilities & CAP_DFS) {
- smb_buffer->Flags2 |= SMBFLG2_DFS;
- capabilities |= CAP_DFS;
- }
- pSMB->req.Capabilities = cpu_to_le32(capabilities);
-
- pSMB->req.SecurityBlobLength = cpu_to_le16(SecurityBlobLength);
- bcc_ptr = pByteArea(smb_buffer);
- memcpy(bcc_ptr, SecurityBlob, SecurityBlobLength);
- bcc_ptr += SecurityBlobLength;
-
- if (ses->capabilities & CAP_UNICODE) {
- if ((long) bcc_ptr % 2) { /* must be word aligned for Unicode strings */
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, user, 100, nls_codepage);
- bcc_ptr += 2 * bytes_returned; /* convert num of 16 bit words to bytes */
- bcc_ptr += 2; /* trailing null */
- if (domain == NULL)
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr,
- "CIFS_LINUX_DOM", 32, nls_codepage);
- else
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, domain, 64,
- nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bcc_ptr += 2;
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, "Linux version ",
- 32, nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release, 32,
- nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bcc_ptr += 2;
- bytes_returned =
- cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
- 64, nls_codepage);
- bcc_ptr += 2 * bytes_returned;
- bcc_ptr += 2;
- } else {
- strncpy(bcc_ptr, user, 200);
- bcc_ptr += strnlen(user, 200);
- *bcc_ptr = 0;
- bcc_ptr++;
- if (domain == NULL) {
- strcpy(bcc_ptr, "CIFS_LINUX_DOM");
- bcc_ptr += strlen("CIFS_LINUX_DOM") + 1;
- } else {
- strncpy(bcc_ptr, domain, 64);
- bcc_ptr += strnlen(domain, 64);
- *bcc_ptr = 0;
- bcc_ptr++;
- }
- strcpy(bcc_ptr, "Linux version ");
- bcc_ptr += strlen("Linux version ");
- strcpy(bcc_ptr, system_utsname.release);
- bcc_ptr += strlen(system_utsname.release) + 1;
- strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
- bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
- }
- count = (long) bcc_ptr - (long) pByteArea(smb_buffer);
- smb_buffer->smb_buf_length += count;
- pSMB->req.ByteCount = cpu_to_le16(count);
-
- rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response,
- &bytes_returned, 1);
- if (rc) {
-/* rc = map_smb_to_linux_error(smb_buffer_response); *//* done in SendReceive now */
- } else if ((smb_buffer_response->WordCount == 3)
- || (smb_buffer_response->WordCount == 4)) {
- __u16 action = le16_to_cpu(pSMBr->resp.Action);
- __u16 blob_len =
- le16_to_cpu(pSMBr->resp.SecurityBlobLength);
- if (action & GUEST_LOGIN)
- cFYI(1, (" Guest login")); /* BB do we want to set anything in SesInfo struct ? */
- if (ses) {
- ses->Suid = smb_buffer_response->Uid; /* UID left in wire format (le) */
- cFYI(1, ("UID = %d ", ses->Suid));
- bcc_ptr = pByteArea(smb_buffer_response); /* response can have either 3 or 4 word count - Samba sends 3 */
-
- /* BB Fix below to make endian neutral !! */
-
- if ((pSMBr->resp.hdr.WordCount == 3)
- || ((pSMBr->resp.hdr.WordCount == 4)
- && (blob_len <
- pSMBr->resp.ByteCount))) {
- if (pSMBr->resp.hdr.WordCount == 4) {
- bcc_ptr +=
- blob_len;
- cFYI(1,
- ("Security Blob Length %d ",
- blob_len));
- }
-
- if (smb_buffer->Flags2 & SMBFLG2_UNICODE) {
- if ((long) (bcc_ptr) % 2) {
- remaining_words =
- (BCC(smb_buffer_response)
- - 1) / 2;
- bcc_ptr++; /* Unicode strings must be word aligned */
- } else {
- remaining_words =
- BCC
- (smb_buffer_response) / 2;
- }
- len =
- UniStrnlen((wchar_t *) bcc_ptr,
- remaining_words - 1);
-/* We look for obvious messed up bcc or strings in response so we do not go off
- the end since (at least) WIN2K and Windows XP have a major bug in not null
- terminating last Unicode string in response */
- if(ses->serverOS)
- kfree(ses->serverOS);
- ses->serverOS =
- kzalloc(2 * (len + 1), GFP_KERNEL);
- cifs_strfromUCS_le(ses->serverOS,
- (__le16 *)
- bcc_ptr, len,
- nls_codepage);
- bcc_ptr += 2 * (len + 1);
- remaining_words -= len + 1;
- ses->serverOS[2 * len] = 0;
- ses->serverOS[1 + (2 * len)] = 0;
- if (remaining_words > 0) {
- len = UniStrnlen((wchar_t *)bcc_ptr,
- remaining_words
- - 1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
- ses->serverNOS =
- kzalloc(2 * (len + 1),
- GFP_KERNEL);
- cifs_strfromUCS_le(ses->serverNOS,
- (__le16 *)bcc_ptr,
- len,
- nls_codepage);
- bcc_ptr += 2 * (len + 1);
- ses->serverNOS[2 * len] = 0;
- ses->serverNOS[1 + (2 * len)] = 0;
- remaining_words -= len + 1;
- if (remaining_words > 0) {
- len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
- /* last string not null terminated (e.g.Windows XP/2000) */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(2*(len+1),GFP_KERNEL);
- cifs_strfromUCS_le(ses->serverDomain,
- (__le16 *)bcc_ptr,
- len, nls_codepage);
- bcc_ptr += 2*(len+1);
- ses->serverDomain[2*len] = 0;
- ses->serverDomain[1+(2*len)] = 0;
- } /* else no more room so create dummy domain string */
- else {
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain =
- kzalloc(2,GFP_KERNEL);
- }
- } else {/* no room use dummy domain&NOS */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(2, GFP_KERNEL);
- }
- } else { /* ASCII */
-
- len = strnlen(bcc_ptr, 1024);
- if (((long) bcc_ptr + len) - (long)
- pByteArea(smb_buffer_response)
- <= BCC(smb_buffer_response)) {
- if(ses->serverOS)
- kfree(ses->serverOS);
- ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
- strncpy(ses->serverOS, bcc_ptr, len);
-
- bcc_ptr += len;
- bcc_ptr[0] = 0; /* null terminate the string */
- bcc_ptr++;
-
- len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len + 1,GFP_KERNEL);
- strncpy(ses->serverNOS, bcc_ptr, len);
- bcc_ptr += len;
- bcc_ptr[0] = 0;
- bcc_ptr++;
-
- len = strnlen(bcc_ptr, 1024);
- if(ses->serverDomain)
- kfree(ses->serverDomain);
- ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
- strncpy(ses->serverDomain, bcc_ptr, len);
- bcc_ptr += len;
- bcc_ptr[0] = 0;
- bcc_ptr++;
- } else
- cFYI(1,
- ("Variable field of length %d extends beyond end of smb ",
- len));
- }
- } else {
- cERROR(1,
- (" Security Blob Length extends beyond end of SMB"));
- }
- } else {
- cERROR(1, ("No session structure passed in."));
- }
- } else {
- cERROR(1,
- (" Invalid Word count %d: ",
- smb_buffer_response->WordCount));
- rc = -EIO;
- }
-
- if (smb_buffer)
- cifs_buf_release(smb_buffer);
-
- return rc;
-}
-
-static int
CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
struct cifsSesInfo *ses, int * pNTLMv2_flag,
const struct nls_table *nls_codepage)
@@ -2635,8 +2360,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
/* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
if(sign_CIFS_PDUs)
negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
- if(ntlmv2_support)
- negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;
+/* if(ntlmv2_support)
+ negotiate_flags |= NTLMSSP_NEGOTIATE_NTLMV2;*/
/* setup pointers to domain name and workstation name */
bcc_ptr += SecurityBlobLength;
@@ -2783,8 +2508,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr,
remaining_words
- 1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2 * (len + 1),
GFP_KERNEL);
@@ -2802,8 +2526,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
if (remaining_words > 0) {
len = UniStrnlen((wchar_t *) bcc_ptr, remaining_words);
/* last string is not always null terminated (for e.g. for Windows XP & 2000) */
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2 *
(len +
@@ -2822,19 +2545,16 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
= 0;
} /* else no more room so create dummy domain string */
else {
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2,
GFP_KERNEL);
}
} else { /* no room so create dummy domain and NOS string */
- if(ses->serverDomain);
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2, GFP_KERNEL);
}
@@ -2856,8 +2576,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(len + 1,
GFP_KERNEL);
@@ -2867,8 +2586,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverDomain)
- kfree(ses->serverDomain);
+ kfree(ses->serverDomain);
ses->serverDomain =
kzalloc(len + 1,
GFP_KERNEL);
@@ -2994,14 +2712,14 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
SecurityBlob->LmChallengeResponse.Buffer = 0;
SecurityBlob->NtChallengeResponse.Length =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
SecurityBlob->NtChallengeResponse.MaximumLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
- memcpy(bcc_ptr, ntlm_session_key, CIFS_SESSION_KEY_SIZE);
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ memcpy(bcc_ptr, ntlm_session_key, CIFS_SESS_KEY_SIZE);
SecurityBlob->NtChallengeResponse.Buffer =
cpu_to_le32(SecurityBlobLength);
- SecurityBlobLength += CIFS_SESSION_KEY_SIZE;
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
+ SecurityBlobLength += CIFS_SESS_KEY_SIZE;
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
if (ses->capabilities & CAP_UNICODE) {
if (domain == NULL) {
@@ -3190,8 +2908,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr,
remaining_words
- 1);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS =
kzalloc(2 * (len + 1),
GFP_KERNEL);
@@ -3244,8 +2961,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
if(ses->serverDomain)
kfree(ses->serverDomain);
ses->serverDomain = kzalloc(2, GFP_KERNEL);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(2, GFP_KERNEL);
}
} else { /* ASCII */
@@ -3263,8 +2979,7 @@ CIFSNTLMSSPAuthSessSetup(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr++;
len = strnlen(bcc_ptr, 1024);
- if(ses->serverNOS)
- kfree(ses->serverNOS);
+ kfree(ses->serverNOS);
ses->serverNOS = kzalloc(len+1,GFP_KERNEL);
strncpy(ses->serverNOS, bcc_ptr, len);
bcc_ptr += len;
@@ -3340,22 +3055,33 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
bcc_ptr = &pSMB->Password[0];
if((ses->server->secMode) & SECMODE_USER) {
pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
+ *bcc_ptr = 0; /* password is null byte */
bcc_ptr++; /* skip password */
+ /* already aligned so no need to do it below */
} else {
- pSMB->PasswordLength = cpu_to_le16(CIFS_SESSION_KEY_SIZE);
+ pSMB->PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE);
/* BB FIXME add code to fail this if NTLMv2 or Kerberos
specified as required (when that support is added to
the vfs in the future) as only NTLM or the much
- weaker LANMAN (which we do not send) is accepted
+ weaker LANMAN (which we do not send by default) is accepted
by Samba (not sure whether other servers allow
NTLMv2 password here) */
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ if((extended_security & CIFSSEC_MAY_LANMAN) &&
+ (ses->server->secType == LANMAN))
+ calc_lanman_hash(ses, bcc_ptr);
+ else
+#endif /* CIFS_WEAK_PW_HASH */
SMBNTencrypt(ses->password,
ses->server->cryptKey,
bcc_ptr);
- bcc_ptr += CIFS_SESSION_KEY_SIZE;
- *bcc_ptr = 0;
- bcc_ptr++; /* align */
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ if(ses->capabilities & CAP_UNICODE) {
+ /* must align unicode strings */
+ *bcc_ptr = 0; /* null byte password */
+ bcc_ptr++;
+ }
}
if(ses->server->secMode &
@@ -3429,7 +3155,10 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses,
}
/* else do not bother copying these informational fields */
}
- tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
+ if(smb_buffer_response->WordCount == 3)
+ tcon->Flags = le16_to_cpu(pSMBr->OptionalSupport);
+ else
+ tcon->Flags = 0;
cFYI(1, ("Tcon flags: 0x%x ", tcon->Flags));
} else if ((rc == 0) && tcon == NULL) {
/* all we need to save for IPC$ connection */
@@ -3494,7 +3223,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
struct nls_table * nls_info)
{
int rc = 0;
- char ntlm_session_key[CIFS_SESSION_KEY_SIZE];
+ char ntlm_session_key[CIFS_SESS_KEY_SIZE];
int ntlmv2_flag = FALSE;
int first_time = 0;
@@ -3526,20 +3255,13 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
pSesInfo->server->secMode,
pSesInfo->server->capabilities,
pSesInfo->server->timeZone));
-#ifdef CONFIG_CIFS_EXPERIMENTAL
- if(experimEnabled > 1)
- rc = CIFS_SessSetup(xid, pSesInfo, CIFS_NTLM /* type */,
- &ntlmv2_flag, nls_info);
- else
-#endif
- if (extended_security
+ if(experimEnabled < 2)
+ rc = CIFS_SessSetup(xid, pSesInfo,
+ first_time, nls_info);
+ else if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == NTLMSSP)) {
- cFYI(1, ("New style sesssetup"));
- rc = CIFSSpnegoSessSetup(xid, pSesInfo,
- NULL /* security blob */,
- 0 /* blob length */,
- nls_info);
+ rc = -EOPNOTSUPP;
} else if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == RawNTLMSSP)) {
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 82315edc77d7da..ba4cbe9b0684dd 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -113,7 +113,7 @@ cifs_bp_rename_retry:
full_path[namelen+2] = 0;
BB remove above eight lines BB */
-/* Inode operations in similar order to how they appear in the Linux file fs.h */
+/* Inode operations in similar order to how they appear in Linux file fs.h */
int
cifs_create(struct inode *inode, struct dentry *direntry, int mode,
@@ -178,11 +178,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
FreeXid(xid);
return -ENOMEM;
}
-
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
+ rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+ else
+		rc = -EIO; /* no NT SMB support, fall into legacy open below */
+
if(rc == -EIO) {
/* old server, retry the open legacy style */
rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
@@ -191,7 +194,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_create returned 0x%x ", rc));
+ cFYI(1, ("cifs_create returned 0x%x", rc));
} else {
/* If Open reported that we actually created a file
then we now have to set the mode if possible */
@@ -369,6 +372,10 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ /* BB FIXME - add handling for backlevel servers
+ which need legacy open and check for all
+ calls to SMBOpen for fallback to
+		  SMBLegacyOpen */
if(!rc) {
/* BB Do not bother to decode buf since no
local inode yet to put timestamps in,
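
The cifs_create()/cifs_mknod() hunks above gate the NT-style CIFSSMBOpen() on CAP_NT_SMBS and fall back to the OpenX-style SMBLegacyOpen() whenever the first attempt returns -EIO. A minimal standalone sketch of that fallback pattern follows; the struct, helper functions and the CAP_NT_SMBS value are illustrative stand-ins, not the kernel's own definitions.

/* Standalone sketch of the "NT open, else legacy open" fallback; all names
 * and the CAP_NT_SMBS value are placeholders for illustration only. */
#include <errno.h>
#include <stdio.h>

#define CAP_NT_SMBS 0x0010			/* assumed capability bit */

struct fake_server { unsigned int capabilities; };

static int nt_open(const char *path)     { (void)path; return 0; }
static int legacy_open(const char *path) { (void)path; return 0; }

static int open_with_fallback(struct fake_server *srv, const char *path)
{
	int rc;

	if (srv->capabilities & CAP_NT_SMBS)
		rc = nt_open(path);	/* modern NT-style open */
	else
		rc = -EIO;		/* old server, force the legacy path */

	if (rc == -EIO)			/* retry the open legacy (OpenX) style */
		rc = legacy_open(path);

	return rc;
}

int main(void)
{
	struct fake_server old_server = { 0 };

	printf("rc = %d\n", open_with_fallback(&old_server, "/share/file"));
	return 0;
}
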
diff --git a/fs/cifs/fcntl.c b/fs/cifs/fcntl.c
index 633a938113287f..d91a3d44e9e30c 100644
--- a/fs/cifs/fcntl.c
+++ b/fs/cifs/fcntl.c
@@ -91,14 +91,14 @@ int cifs_dir_notify(struct file * file, unsigned long arg)
if(full_path == NULL) {
rc = -ENOMEM;
} else {
- cERROR(1,("cifs dir notify on file %s with arg 0x%lx",full_path,arg)); /* BB removeme BB */
+ cFYI(1,("dir notify on file %s Arg 0x%lx",full_path,arg));
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_OPEN,
GENERIC_READ | SYNCHRONIZE, 0 /* create options */,
&netfid, &oplock,NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
/* BB fixme - add this handle to a notify handle list */
if(rc) {
- cERROR(1,("Could not open directory for notify")); /* BB remove BB */
+ cFYI(1,("Could not open directory for notify"));
} else {
filter = convert_to_cifs_notify_flags(arg);
if(filter != 0) {
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index b4a18c1cab0a87..e9c1573f6aa739 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -110,7 +110,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
&pCifsInode->openFileList);
}
write_unlock(&GlobalSMBSeslock);
- write_unlock(&file->f_owner.lock);
if (pCifsInode->clientCanCacheRead) {
/* we have the inode open somewhere else
no need to discard cache data */
@@ -201,7 +200,7 @@ int cifs_open(struct inode *inode, struct file *file)
} else {
if (file->f_flags & O_EXCL)
cERROR(1, ("could not find file instance for "
- "new file %p ", file));
+ "new file %p", file));
}
}
@@ -260,10 +259,15 @@ int cifs_open(struct inode *inode, struct file *file)
rc = -ENOMEM;
goto out;
}
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition, desiredAccess,
- CREATE_NOT_DIR, &netfid, &oplock, buf,
+
+ if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
+ rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ desiredAccess, CREATE_NOT_DIR, &netfid, &oplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR);
+ else
+		rc = -EIO; /* no NT SMB support, fall into legacy open below */
+
if (rc == -EIO) {
/* Old server, try legacy style OpenX */
rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
@@ -272,7 +276,7 @@ int cifs_open(struct inode *inode, struct file *file)
& CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (rc) {
- cFYI(1, ("cifs_open returned 0x%x ", rc));
+ cFYI(1, ("cifs_open returned 0x%x", rc));
goto out;
}
file->private_data =
@@ -282,7 +286,6 @@ int cifs_open(struct inode *inode, struct file *file)
goto out;
}
pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
- write_lock(&file->f_owner.lock);
write_lock(&GlobalSMBSeslock);
list_add(&pCifsFile->tlist, &pTcon->openFileList);
@@ -293,7 +296,6 @@ int cifs_open(struct inode *inode, struct file *file)
&oplock, buf, full_path, xid);
} else {
write_unlock(&GlobalSMBSeslock);
- write_unlock(&file->f_owner.lock);
}
if (oplock & CIFS_CREATE_ACTION) {
@@ -409,8 +411,8 @@ static int cifs_reopen_file(struct inode *inode, struct file *file,
CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
up(&pCifsFile->fh_sem);
- cFYI(1, ("cifs_open returned 0x%x ", rc));
- cFYI(1, ("oplock: %d ", oplock));
+ cFYI(1, ("cifs_open returned 0x%x", rc));
+ cFYI(1, ("oplock: %d", oplock));
} else {
pCifsFile->netfid = netfid;
pCifsFile->invalidHandle = FALSE;
@@ -472,7 +474,6 @@ int cifs_close(struct inode *inode, struct file *file)
pTcon = cifs_sb->tcon;
if (pSMBFile) {
pSMBFile->closePend = TRUE;
- write_lock(&file->f_owner.lock);
if (pTcon) {
/* no sense reconnecting to close a file that is
already closed */
@@ -487,23 +488,18 @@ int cifs_close(struct inode *inode, struct file *file)
the struct would be in each open file,
but this should give enough time to
clear the socket */
- write_unlock(&file->f_owner.lock);
cERROR(1,("close with pending writes"));
msleep(timeout);
- write_lock(&file->f_owner.lock);
timeout *= 4;
}
- write_unlock(&file->f_owner.lock);
rc = CIFSSMBClose(xid, pTcon,
pSMBFile->netfid);
- write_lock(&file->f_owner.lock);
}
}
write_lock(&GlobalSMBSeslock);
list_del(&pSMBFile->flist);
list_del(&pSMBFile->tlist);
write_unlock(&GlobalSMBSeslock);
- write_unlock(&file->f_owner.lock);
kfree(pSMBFile->search_resume_name);
kfree(file->private_data);
file->private_data = NULL;
@@ -531,7 +527,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
(struct cifsFileInfo *)file->private_data;
char *ptmp;
- cFYI(1, ("Closedir inode = 0x%p with ", inode));
+ cFYI(1, ("Closedir inode = 0x%p", inode));
xid = GetXid();
@@ -605,7 +601,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
}
if (pfLock->fl_flags & FL_ACCESS)
cFYI(1, ("Process suspended by mandatory locking - "
- "not implemented yet "));
+ "not implemented yet"));
if (pfLock->fl_flags & FL_LEASE)
cFYI(1, ("Lease on file - not implemented yet"));
if (pfLock->fl_flags &
@@ -1375,7 +1371,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
xid = GetXid();
- cFYI(1, ("Sync file - name: %s datasync: 0x%x ",
+ cFYI(1, ("Sync file - name: %s datasync: 0x%x",
dentry->d_name.name, datasync));
rc = filemap_fdatawrite(inode->i_mapping);
@@ -1404,7 +1400,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync)
/* fill in rpages then
result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */
-/* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index));
+/* cFYI(1, ("rpages is %d for sync page of Index %ld", rpages, index));
#if 0
if (rc < 0)
@@ -1836,7 +1832,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page,
if (rc < 0)
goto io_error;
else
- cFYI(1, ("Bytes read %d ",rc));
+ cFYI(1, ("Bytes read %d",rc));
file->f_dentry->d_inode->i_atime =
current_fs_time(file->f_dentry->d_inode->i_sb);
@@ -1957,3 +1953,19 @@ struct address_space_operations cifs_addr_ops = {
/* .sync_page = cifs_sync_page, */
/* .direct_IO = */
};
+
+/*
+ * cifs_readpages requires the server to support a buffer large enough to
+ * contain the header plus one complete page of data. Otherwise, we need
+ * to leave cifs_readpages out of the address space operations.
+ */
+struct address_space_operations cifs_addr_ops_smallbuf = {
+ .readpage = cifs_readpage,
+ .writepage = cifs_writepage,
+ .writepages = cifs_writepages,
+ .prepare_write = cifs_prepare_write,
+ .commit_write = cifs_commit_write,
+ .set_page_dirty = __set_page_dirty_nobuffers,
+ /* .sync_page = cifs_sync_page, */
+ /* .direct_IO = */
+};
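
The comment above cifs_addr_ops_smallbuf states the constraint: readpages is only safe when the negotiated server buffer holds a CIFS header plus one full page, so later hunks in inode.c and readdir.c pick the ops table by comparing maxBuf against PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE. A self-contained sketch of that selection, with placeholder constants and a dummy ops struct rather than the kernel types:

/* Sketch of selecting the address space operations table from the negotiated
 * server buffer size; constants and the ops struct are placeholders. */
#include <stdio.h>

#define PAGE_CACHE_SIZE		4096
#define MAX_CIFS_HDR_SIZE	0x58	/* assumed header size for the sketch */

struct a_ops { const char *name; int has_readpages; };

static const struct a_ops ops_full  = { "cifs_addr_ops",          1 };
static const struct a_ops ops_small = { "cifs_addr_ops_smallbuf", 0 };

static const struct a_ops *pick_a_ops(unsigned int server_max_buf)
{
	if (server_max_buf < PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
		return &ops_small;	/* cannot fit header + one page */
	return &ops_full;		/* readpages is safe to use */
}

int main(void)
{
	printf("%s\n", pick_a_ops(4096)->name);		/* -> smallbuf ops */
	printf("%s\n", pick_a_ops(16644)->name);	/* -> full ops */
	return 0;
}
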
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 4093764ef461e1..b88147c1dc27f0 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -41,7 +41,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
char *tmp_path;
pTcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s ", search_path));
+ cFYI(1, ("Getting info on %s", search_path));
/* could have done a find first instead but this returns more info */
rc = CIFSSMBUnixQPathInfo(xid, pTcon, search_path, &findData,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
@@ -97,9 +97,9 @@ int cifs_get_inode_info_unix(struct inode **pinode,
inode = *pinode;
cifsInfo = CIFS_I(inode);
- cFYI(1, ("Old time %ld ", cifsInfo->time));
+ cFYI(1, ("Old time %ld", cifsInfo->time));
cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld ", cifsInfo->time));
+ cFYI(1, ("New time %ld", cifsInfo->time));
/* this is ok to set on every inode revalidate */
atomic_set(&cifsInfo->inUse,1);
@@ -180,11 +180,12 @@ int cifs_get_inode_info_unix(struct inode **pinode,
else /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
- inode->i_data.a_ops = &cifs_addr_ops;
/* check if server can support readpages */
if(pTcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE)
- inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+ inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ inode->i_data.a_ops = &cifs_addr_ops;
} else if (S_ISDIR(inode->i_mode)) {
cFYI(1, ("Directory inode"));
inode->i_op = &cifs_dir_inode_ops;
@@ -421,23 +422,23 @@ int cifs_get_inode_info(struct inode **pinode,
inode = *pinode;
cifsInfo = CIFS_I(inode);
cifsInfo->cifsAttrs = attr;
- cFYI(1, ("Old time %ld ", cifsInfo->time));
+ cFYI(1, ("Old time %ld", cifsInfo->time));
cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld ", cifsInfo->time));
+ cFYI(1, ("New time %ld", cifsInfo->time));
/* blksize needs to be multiple of two. So safer to default to
blksize and blkbits set in superblock so 2**blkbits and blksize
will match rather than setting to:
(pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
- /* Linux can not store file creation time unfortunately so we ignore it */
+ /* Linux can not store file creation time so ignore it */
inode->i_atime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
inode->i_mtime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
inode->i_ctime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
- cFYI(0, ("Attributes came in as 0x%x ", attr));
+ cFYI(0, ("Attributes came in as 0x%x", attr));
/* set default mode. will override for dirs below */
if (atomic_read(&cifsInfo->inUse) == 0)
@@ -519,10 +520,11 @@ int cifs_get_inode_info(struct inode **pinode,
else /* not direct, send byte range locks */
inode->i_fop = &cifs_file_ops;
- inode->i_data.a_ops = &cifs_addr_ops;
if(pTcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE)
- inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE)
+ inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ inode->i_data.a_ops = &cifs_addr_ops;
} else if (S_ISDIR(inode->i_mode)) {
cFYI(1, ("Directory inode"));
inode->i_op = &cifs_dir_inode_ops;
@@ -731,7 +733,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode)
rc = CIFSSMBMkDir(xid, pTcon, full_path, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
- cFYI(1, ("cifs_mkdir returned 0x%x ", rc));
+ cFYI(1, ("cifs_mkdir returned 0x%x", rc));
d_drop(direntry);
} else {
inode->i_nlink++;
@@ -798,7 +800,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
char *full_path = NULL;
struct cifsInodeInfo *cifsInode;
- cFYI(1, ("cifs_rmdir, inode = 0x%p with ", inode));
+ cFYI(1, ("cifs_rmdir, inode = 0x%p", inode));
xid = GetXid();
@@ -1121,7 +1123,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
xid = GetXid();
- cFYI(1, ("In cifs_setattr, name = %s attrs->iavalid 0x%x ",
+ cFYI(1, ("setattr on file %s attrs->iavalid 0x%x",
direntry->d_name.name, attrs->ia_valid));
cifs_sb = CIFS_SB(direntry->d_inode->i_sb);
@@ -1157,6 +1159,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
when the local oplock break takes longer to flush
writebehind data than the SMB timeout for the SetPathInfo
request would allow */
+
open_file = find_writable_file(cifsInode);
if (open_file) {
__u16 nfid = open_file->netfid;
@@ -1289,7 +1292,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
it may be useful to Windows - but we do
not want to set ctime unless some other
timestamp is changing */
- cFYI(1, ("CIFS - CTIME changed "));
+ cFYI(1, ("CIFS - CTIME changed"));
time_buf.ChangeTime =
cpu_to_le64(cifs_UnixTimeToNT(attrs->ia_ctime));
} else
@@ -1356,7 +1359,7 @@ cifs_setattr_exit:
void cifs_delete_inode(struct inode *inode)
{
- cFYI(1, ("In cifs_delete_inode, inode = 0x%p ", inode));
+ cFYI(1, ("In cifs_delete_inode, inode = 0x%p", inode));
/* may have to add back in if and when safe distributed caching of
directories added e.g. via FindNotify */
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 2ec99f8331422a..a57f5d6e6213d6 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -167,7 +167,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
return -ENOMEM;
}
- cFYI(1, ("Full path: %s ", full_path));
+ cFYI(1, ("Full path: %s", full_path));
cFYI(1, ("symname is %s", symname));
/* BB what if DFS and this volume is on different share? BB */
@@ -186,8 +186,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
inode->i_sb,xid);
if (rc != 0) {
- cFYI(1,
- ("Create symlink worked but get_inode_info failed with rc = %d ",
+ cFYI(1, ("Create symlink ok, getinodeinfo fail rc = %d",
rc));
} else {
if (pTcon->nocase)
@@ -289,7 +288,7 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
else {
cFYI(1,("num referral: %d",num_referrals));
if(referrals) {
- cFYI(1,("referral string: %s ",referrals));
+ cFYI(1,("referral string: %s",referrals));
strncpy(tmpbuffer, referrals, len-1);
}
}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index fafd056426e4b1..22c937e5884f36 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -101,6 +101,7 @@ sesInfoFree(struct cifsSesInfo *buf_to_free)
kfree(buf_to_free->serverDomain);
kfree(buf_to_free->serverNOS);
kfree(buf_to_free->password);
+ kfree(buf_to_free->domainName);
kfree(buf_to_free);
}
@@ -499,11 +500,12 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
if(pSMBr->ByteCount > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
- pnotify = (struct file_notify_information *)((char *)&pSMBr->hdr.Protocol
- + data_offset);
- cFYI(1,("dnotify on %s with action: 0x%x",pnotify->FileName,
+ pnotify = (struct file_notify_information *)
+ ((char *)&pSMBr->hdr.Protocol + data_offset);
+ cFYI(1,("dnotify on %s Action: 0x%x",pnotify->FileName,
pnotify->Action)); /* BB removeme BB */
- /* cifs_dump_mem("Received notify Data is: ",buf,sizeof(struct smb_hdr)+60); */
+ /* cifs_dump_mem("Rcvd notify Data: ",buf,
+ sizeof(struct smb_hdr)+60); */
return TRUE;
}
if(pSMBr->hdr.Status.CifsError) {
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 5de74d216fdd75..b66eff5dc62445 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -84,11 +84,11 @@ static const struct smb_to_posix_error mapping_table_ERRDOS[] = {
static const struct smb_to_posix_error mapping_table_ERRSRV[] = {
{ERRerror, -EIO},
- {ERRbadpw, -EPERM},
+ {ERRbadpw, -EACCES}, /* was EPERM */
{ERRbadtype, -EREMOTE},
{ERRaccess, -EACCES},
{ERRinvtid, -ENXIO},
- {ERRinvnetname, -ENODEV},
+ {ERRinvnetname, -ENXIO},
{ERRinvdevice, -ENXIO},
{ERRqfull, -ENOSPC},
{ERRqtoobig, -ENOSPC},
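
The mapping-table tweak above changes which POSIX errno two SMB server errors translate to: a bad password now surfaces as -EACCES instead of -EPERM, and an invalid share name as -ENXIO instead of -ENODEV. A toy version of such a lookup, using a few entries from this hunk with invented SMB error values, shows how the table is consulted:

/* Toy SMB-error to errno mapping in the style of mapping_table_ERRSRV.
 * Only a few entries from the hunk are shown and the numeric SMB error
 * values are invented for the sketch. */
#include <errno.h>
#include <stdio.h>

struct smb_to_posix_error { int smb_err; int posix_err; };

enum { ERRerror = 1, ERRbadpw, ERRbadtype, ERRaccess, ERRinvtid,
       ERRinvnetname, ERRinvdevice };

static const struct smb_to_posix_error errsrv_map[] = {
	{ ERRerror,      -EIO },
	{ ERRbadpw,      -EACCES },	/* was -EPERM */
	{ ERRbadtype,    -EREMOTE },
	{ ERRaccess,     -EACCES },
	{ ERRinvtid,     -ENXIO },
	{ ERRinvnetname, -ENXIO },	/* was -ENODEV */
	{ ERRinvdevice,  -ENXIO },
};

static int map_smb_error(int smb_err)
{
	unsigned int i;

	for (i = 0; i < sizeof(errsrv_map) / sizeof(errsrv_map[0]); i++)
		if (errsrv_map[i].smb_err == smb_err)
			return errsrv_map[i].posix_err;
	return -EIO;			/* default when no entry matches */
}

int main(void)
{
	printf("ERRbadpw -> %d\n", map_smb_error(ERRbadpw));
	return 0;
}
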
diff --git a/fs/cifs/ntlmssp.c b/fs/cifs/ntlmssp.c
deleted file mode 100644
index 115359cc7a32ce..00000000000000
--- a/fs/cifs/ntlmssp.c
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * fs/cifs/ntlmssp.h
- *
- * Copyright (c) International Business Machines Corp., 2006
- * Author(s): Steve French (sfrench@us.ibm.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
- * the GNU Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "cifspdu.h"
-#include "cifsglob.h"
-#include "cifsproto.h"
-#include "cifs_unicode.h"
-#include "cifs_debug.h"
-#include "ntlmssp.h"
-#include "nterr.h"
-
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
-{
- __u32 capabilities = 0;
-
- /* init fields common to all four types of SessSetup */
- /* note that header is initialized to zero in header_assemble */
- pSMB->req.AndXCommand = 0xFF;
- pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
- pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
-
- /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
-
- /* BB verify whether signing required on neg or just on auth frame
- (and NTLM case) */
-
- capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
- CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
-
- if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
- pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
- if (ses->capabilities & CAP_UNICODE) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
- capabilities |= CAP_UNICODE;
- }
- if (ses->capabilities & CAP_STATUS32) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
- capabilities |= CAP_STATUS32;
- }
- if (ses->capabilities & CAP_DFS) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
- capabilities |= CAP_DFS;
- }
-
- /* BB check whether to init vcnum BB */
- return capabilities;
-}
-int
-CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
- int * pNTLMv2_flg, const struct nls_table *nls_cp)
-{
- int rc = 0;
- int wct;
- struct smb_hdr *smb_buffer;
- char *bcc_ptr;
- SESSION_SETUP_ANDX *pSMB;
- __u32 capabilities;
-
- if(ses == NULL)
- return -EINVAL;
-
- cFYI(1,("SStp type: %d",type));
- if(type < CIFS_NTLM) {
-#ifndef CONFIG_CIFS_WEAK_PW_HASH
- /* LANMAN and plaintext are less secure and off by default.
- So we make this explicitly be turned on in kconfig (in the
- build) and turned on at runtime (changed from the default)
- in proc/fs/cifs or via mount parm. Unfortunately this is
- needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
- return -EOPNOTSUPP;
-#endif
- wct = 10; /* lanman 2 style sessionsetup */
- } else if(type < CIFS_NTLMSSP_NEG)
- wct = 13; /* old style NTLM sessionsetup */
- else /* same size for negotiate or auth, NTLMSSP or extended security */
- wct = 12;
-
- rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
- (void **)&smb_buffer);
- if(rc)
- return rc;
-
- pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
-
- capabilities = cifs_ssetup_hdr(ses, pSMB);
- bcc_ptr = pByteArea(smb_buffer);
- if(type > CIFS_NTLM) {
- pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
- capabilities |= CAP_EXTENDED_SECURITY;
- pSMB->req.Capabilities = cpu_to_le32(capabilities);
- /* BB set password lengths */
- } else if(type < CIFS_NTLM) /* lanman */ {
- /* no capabilities flags in old lanman negotiation */
- /* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
- } else /* type CIFS_NTLM */ {
- pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
- pSMB->req_no_secext.CaseInsensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
- pSMB->req_no_secext.CaseSensitivePasswordLength =
- cpu_to_le16(CIFS_SESSION_KEY_SIZE);
- }
-
-
- /* copy session key */
-
- /* if Unicode, align strings to two byte boundary */
-
- /* copy user name */ /* BB Do we need to special case null user name? */
-
- /* copy domain name */
-
- /* copy Linux version */
-
- /* copy network operating system name */
-
- /* update bcc and smb buffer length */
-
-/* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
- /* SMB request buf freed in SendReceive2 */
-
- return rc;
-}
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index b689c5035124d1..03bbcb3779136b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -21,6 +21,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/smp_lock.h>
#include "cifspdu.h"
@@ -31,8 +32,8 @@
#include "cifs_fs_sb.h"
#include "cifsfs.h"
-/* BB fixme - add debug wrappers around this function to disable it fixme BB */
-/* static void dump_cifs_file_struct(struct file *file, char *label)
+#ifdef CONFIG_CIFS_DEBUG2
+static void dump_cifs_file_struct(struct file *file, char *label)
{
struct cifsFileInfo * cf;
@@ -53,7 +54,8 @@
}
}
-} */
+}
+#endif /* DEBUG2 */
/* Returns one if new inode created (which therefore needs to be hashed) */
/* Might check in the future if inode number changed so we can rehash inode */
@@ -107,32 +109,52 @@ static int construct_dentry(struct qstr *qstring, struct file *file,
return rc;
}
-static void fill_in_inode(struct inode *tmp_inode,
- FILE_DIRECTORY_INFO *pfindData, int *pobject_type, int isNewInode)
+static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
+ char * buf, int *pobject_type, int isNewInode)
{
loff_t local_size;
struct timespec local_mtime;
struct cifsInodeInfo *cifsInfo = CIFS_I(tmp_inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(tmp_inode->i_sb);
- __u32 attr = le32_to_cpu(pfindData->ExtFileAttributes);
- __u64 allocation_size = le64_to_cpu(pfindData->AllocationSize);
- __u64 end_of_file = le64_to_cpu(pfindData->EndOfFile);
-
- cifsInfo->cifsAttrs = attr;
- cifsInfo->time = jiffies;
+ __u32 attr;
+ __u64 allocation_size;
+ __u64 end_of_file;
/* save mtime and size */
local_mtime = tmp_inode->i_mtime;
local_size = tmp_inode->i_size;
+ if(new_buf_type) {
+ FILE_DIRECTORY_INFO *pfindData = (FILE_DIRECTORY_INFO *)buf;
+
+ attr = le32_to_cpu(pfindData->ExtFileAttributes);
+ allocation_size = le64_to_cpu(pfindData->AllocationSize);
+ end_of_file = le64_to_cpu(pfindData->EndOfFile);
+ tmp_inode->i_atime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
+ tmp_inode->i_mtime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
+ tmp_inode->i_ctime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+ } else { /* legacy, OS2 and DOS style */
+ FIND_FILE_STANDARD_INFO * pfindData =
+ (FIND_FILE_STANDARD_INFO *)buf;
+
+ attr = le16_to_cpu(pfindData->Attributes);
+ allocation_size = le32_to_cpu(pfindData->AllocationSize);
+ end_of_file = le32_to_cpu(pfindData->DataSize);
+ tmp_inode->i_atime = CURRENT_TIME;
+ /* tmp_inode->i_mtime = BB FIXME - add dos time handling
+ tmp_inode->i_ctime = 0; BB FIXME */
+
+ }
+
/* Linux can not store file creation time unfortunately so ignore it */
- tmp_inode->i_atime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastAccessTime));
- tmp_inode->i_mtime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
- tmp_inode->i_ctime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+
+ cifsInfo->cifsAttrs = attr;
+ cifsInfo->time = jiffies;
+
/* treat dos attribute of read-only as read-only mode bit e.g. 555? */
/* 2767 perms - indicate mandatory locking */
/* BB fill in uid and gid here? with help from winbind?
@@ -215,11 +237,13 @@ static void fill_in_inode(struct inode *tmp_inode,
else
tmp_inode->i_fop = &cifs_file_ops;
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
(cifs_sb->tcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
+ tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ tmp_inode->i_data.a_ops = &cifs_addr_ops;
+
if(isNewInode)
return; /* No sense invalidating pages for new inode
since have not started caching readahead file
@@ -338,11 +362,12 @@ static void unix_fill_in_inode(struct inode *tmp_inode,
else
tmp_inode->i_fop = &cifs_file_ops;
- tmp_inode->i_data.a_ops = &cifs_addr_ops;
if((cifs_sb->tcon) && (cifs_sb->tcon->ses) &&
(cifs_sb->tcon->ses->server->maxBuf <
- 4096 + MAX_CIFS_HDR_SIZE))
- tmp_inode->i_data.a_ops->readpages = NULL;
+ PAGE_CACHE_SIZE + MAX_CIFS_HDR_SIZE))
+ tmp_inode->i_data.a_ops = &cifs_addr_ops_smallbuf;
+ else
+ tmp_inode->i_data.a_ops = &cifs_addr_ops;
if(isNewInode)
return; /* No sense invalidating pages for new inode since we
@@ -415,7 +440,10 @@ static int initiate_cifs_search(const int xid, struct file *file)
ffirst_retry:
/* test for Unix extensions */
if (pTcon->ses->capabilities & CAP_UNIX) {
- cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_UNIX;
+ } else if ((pTcon->ses->capabilities &
+ (CAP_NT_SMBS | CAP_NT_FIND)) == 0) {
+ cifsFile->srch_inf.info_level = SMB_FIND_FILE_INFO_STANDARD;
} else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifsFile->srch_inf.info_level = SMB_FIND_FILE_ID_FULL_DIR_INFO;
} else /* not srvinos - BB fixme add check for backlevel? */ {
@@ -451,12 +479,19 @@ static int cifs_unicode_bytelen(char *str)
return len << 1;
}
-static char *nxt_dir_entry(char *old_entry, char *end_of_smb)
+static char *nxt_dir_entry(char *old_entry, char *end_of_smb, int level)
{
char * new_entry;
FILE_DIRECTORY_INFO * pDirInfo = (FILE_DIRECTORY_INFO *)old_entry;
- new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
+ if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pfData;
+ pfData = (FIND_FILE_STANDARD_INFO *)pDirInfo;
+
+ new_entry = old_entry + sizeof(FIND_FILE_STANDARD_INFO) +
+ pfData->FileNameLength;
+ } else
+ new_entry = old_entry + le32_to_cpu(pDirInfo->NextEntryOffset);
cFYI(1,("new entry %p old entry %p",new_entry,old_entry));
/* validate that new_entry is not past end of SMB */
if(new_entry >= end_of_smb) {
@@ -464,7 +499,10 @@ static char *nxt_dir_entry(char *old_entry, char *end_of_smb)
("search entry %p began after end of SMB %p old entry %p",
new_entry, end_of_smb, old_entry));
return NULL;
- } else if (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb) {
+ } else if(((level == SMB_FIND_FILE_INFO_STANDARD) &&
+ (new_entry + sizeof(FIND_FILE_STANDARD_INFO) > end_of_smb)) ||
+ ((level != SMB_FIND_FILE_INFO_STANDARD) &&
+ (new_entry + sizeof(FILE_DIRECTORY_INFO) > end_of_smb))) {
cERROR(1,("search entry %p extends after end of SMB %p",
new_entry, end_of_smb));
return NULL;
@@ -482,7 +520,7 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
char * filename = NULL;
int len = 0;
- if(cfile->srch_inf.info_level == 0x202) {
+ if(cfile->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
FILE_UNIX_INFO * pFindData = (FILE_UNIX_INFO *)current_entry;
filename = &pFindData->FileName[0];
if(cfile->srch_inf.unicode) {
@@ -491,26 +529,34 @@ static int cifs_entry_is_dot(char *current_entry, struct cifsFileInfo *cfile)
/* BB should we make this strnlen of PATH_MAX? */
len = strnlen(filename, 5);
}
- } else if(cfile->srch_inf.info_level == 0x101) {
+ } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_DIRECTORY_INFO) {
FILE_DIRECTORY_INFO * pFindData =
(FILE_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(cfile->srch_inf.info_level == 0x102) {
+ } else if(cfile->srch_inf.info_level ==
+ SMB_FIND_FILE_FULL_DIRECTORY_INFO) {
FILE_FULL_DIRECTORY_INFO * pFindData =
(FILE_FULL_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(cfile->srch_inf.info_level == 0x105) {
+ } else if(cfile->srch_inf.info_level ==
+ SMB_FIND_FILE_ID_FULL_DIR_INFO) {
SEARCH_ID_FULL_DIR_INFO * pFindData =
(SEARCH_ID_FULL_DIR_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
- } else if(cfile->srch_inf.info_level == 0x104) {
+ } else if(cfile->srch_inf.info_level ==
+ SMB_FIND_FILE_BOTH_DIRECTORY_INFO) {
FILE_BOTH_DIRECTORY_INFO * pFindData =
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
+ } else if(cfile->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pFindData =
+ (FIND_FILE_STANDARD_INFO *)current_entry;
+ filename = &pFindData->FileName[0];
+		len = pFindData->FileNameLength; /* one byte, no conversion */
} else {
cFYI(1,("Unknown findfirst level %d",cfile->srch_inf.info_level));
}
@@ -597,7 +643,9 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
. and .. for the root of a drive and for those we need
to start two entries earlier */
-/* dump_cifs_file_struct(file, "In fce ");*/
+#ifdef CONFIG_CIFS_DEBUG2
+ dump_cifs_file_struct(file, "In fce ");
+#endif
if(((index_to_find < cifsFile->srch_inf.index_of_last_entry) &&
is_dir_changed(file)) ||
(index_to_find < first_entry_in_buffer)) {
@@ -644,10 +692,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
first_entry_in_buffer = cifsFile->srch_inf.index_of_last_entry
- cifsFile->srch_inf.entries_in_buffer;
pos_in_buf = index_to_find - first_entry_in_buffer;
- cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
+ cFYI(1,("found entry - pos_in_buf %d",pos_in_buf));
+
for(i=0;(i<(pos_in_buf)) && (current_entry != NULL);i++) {
/* go entry by entry figuring out which is first */
- current_entry = nxt_dir_entry(current_entry,end_of_smb);
+ current_entry = nxt_dir_entry(current_entry,end_of_smb,
+ cifsFile->srch_inf.info_level);
}
if((current_entry == NULL) && (i < pos_in_buf)) {
/* BB fixme - check if we should flag this error */
@@ -674,7 +724,7 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
/* inode num, inode type and filename returned */
static int cifs_get_name_from_search_buf(struct qstr *pqst,
char *current_entry, __u16 level, unsigned int unicode,
- struct cifs_sb_info * cifs_sb, ino_t *pinum)
+ struct cifs_sb_info * cifs_sb, int max_len, ino_t *pinum)
{
int rc = 0;
unsigned int len = 0;
@@ -718,10 +768,22 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
(FILE_BOTH_DIRECTORY_INFO *)current_entry;
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
+ } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pFindData =
+ (FIND_FILE_STANDARD_INFO *)current_entry;
+ filename = &pFindData->FileName[0];
+ /* one byte length, no name conversion */
+ len = (unsigned int)pFindData->FileNameLength;
} else {
cFYI(1,("Unknown findfirst level %d",level));
return -EINVAL;
}
+
+ if(len > max_len) {
+ cERROR(1,("bad search response length %d past smb end", len));
+ return -EINVAL;
+ }
+
if(unicode) {
/* BB fixme - test with long names */
/* Note converted filename can be longer than in unicode */
@@ -741,7 +803,7 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
}
static int cifs_filldir(char *pfindEntry, struct file *file,
- filldir_t filldir, void *direntry, char *scratch_buf)
+ filldir_t filldir, void *direntry, char *scratch_buf, int max_len)
{
int rc = 0;
struct qstr qstring;
@@ -777,6 +839,7 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
rc = cifs_get_name_from_search_buf(&qstring,pfindEntry,
pCifsF->srch_inf.info_level,
pCifsF->srch_inf.unicode,cifs_sb,
+ max_len,
&inum /* returned */);
if(rc)
@@ -798,13 +861,16 @@ static int cifs_filldir(char *pfindEntry, struct file *file,
/* we pass in rc below, indicating whether it is a new inode,
so we can figure out whether to invalidate the inode cached
data if the file has changed */
- if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX) {
+ if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_UNIX)
unix_fill_in_inode(tmp_inode,
- (FILE_UNIX_INFO *)pfindEntry,&obj_type, rc);
- } else {
- fill_in_inode(tmp_inode,
- (FILE_DIRECTORY_INFO *)pfindEntry,&obj_type, rc);
- }
+ (FILE_UNIX_INFO *)pfindEntry,
+ &obj_type, rc);
+ else if(pCifsF->srch_inf.info_level == SMB_FIND_FILE_INFO_STANDARD)
+ fill_in_inode(tmp_inode, 0 /* old level 1 buffer type */,
+ pfindEntry, &obj_type, rc);
+ else
+ fill_in_inode(tmp_inode, 1 /* NT */, pfindEntry, &obj_type, rc);
+
rc = filldir(direntry,qstring.name,qstring.len,file->f_pos,
tmp_inode->i_ino,obj_type);
@@ -864,6 +930,12 @@ static int cifs_save_resume_key(const char *current_entry,
filename = &pFindData->FileName[0];
len = le32_to_cpu(pFindData->FileNameLength);
cifsFile->srch_inf.resume_key = pFindData->FileIndex;
+ } else if(level == SMB_FIND_FILE_INFO_STANDARD) {
+ FIND_FILE_STANDARD_INFO * pFindData =
+ (FIND_FILE_STANDARD_INFO *)current_entry;
+ filename = &pFindData->FileName[0];
+ /* one byte length, no name conversion */
+ len = (unsigned int)pFindData->FileNameLength;
} else {
cFYI(1,("Unknown findfirst level %d",level));
return -EINVAL;
@@ -884,6 +956,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
int num_to_fill = 0;
char * tmp_buf = NULL;
char * end_of_smb;
+ int max_len;
xid = GetXid();
@@ -909,7 +982,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
case 1:
if (filldir(direntry, "..", 2, file->f_pos,
file->f_dentry->d_parent->d_inode->i_ino, DT_DIR) < 0) {
- cERROR(1, ("Filldir for parent dir failed "));
+ cERROR(1, ("Filldir for parent dir failed"));
rc = -ENOMEM;
break;
}
@@ -959,10 +1032,11 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
goto rddir2_exit;
}
cFYI(1,("loop through %d times filling dir for net buf %p",
- num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
- end_of_smb = cifsFile->srch_inf.ntwrk_buf_start +
- smbCalcSize((struct smb_hdr *)
- cifsFile->srch_inf.ntwrk_buf_start);
+ num_to_fill,cifsFile->srch_inf.ntwrk_buf_start));
+ max_len = smbCalcSize((struct smb_hdr *)
+ cifsFile->srch_inf.ntwrk_buf_start);
+ end_of_smb = cifsFile->srch_inf.ntwrk_buf_start + max_len;
+
/* To be safe - for UCS to UTF-8 with strings loaded
with the rare long characters alloc more to account for
such multibyte target UTF-8 characters. cifs_unicode.c,
@@ -977,17 +1051,19 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
}
/* if buggy server returns . and .. late do
we want to check for that here? */
- rc = cifs_filldir(current_entry, file,
- filldir, direntry,tmp_buf);
+ rc = cifs_filldir(current_entry, file,
+ filldir, direntry, tmp_buf, max_len);
file->f_pos++;
- if(file->f_pos == cifsFile->srch_inf.index_of_last_entry) {
+ if(file->f_pos ==
+ cifsFile->srch_inf.index_of_last_entry) {
cFYI(1,("last entry in buf at pos %lld %s",
- file->f_pos,tmp_buf)); /* BB removeme BB */
+ file->f_pos,tmp_buf));
cifs_save_resume_key(current_entry,cifsFile);
break;
} else
- current_entry = nxt_dir_entry(current_entry,
- end_of_smb);
+ current_entry =
+ nxt_dir_entry(current_entry, end_of_smb,
+ cifsFile->srch_inf.info_level);
}
kfree(tmp_buf);
break;
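
The readdir.c changes above teach the search-buffer walkers about the legacy SMB_FIND_FILE_INFO_STANDARD level: those entries carry a one-byte FileNameLength and are stepped over by size, whereas the NT info levels provide a 32-bit NextEntryOffset. A simplified sketch of the two stepping rules, using placeholder struct layouts (the kernel additionally validates each step against the end of the SMB buffer):

/* Simplified sketch of the two entry-stepping rules used by nxt_dir_entry():
 * NT-style entries carry a 32-bit NextEntryOffset, legacy InfoStandard
 * entries are sized from their one-byte FileNameLength. Struct layouts are
 * placeholders, and the kernel also checks the result against end_of_smb. */
#include <stdint.h>

struct nt_dir_info {			/* stands in for FILE_DIRECTORY_INFO */
	uint32_t NextEntryOffset;	/* le32_to_cpu() in the kernel */
	/* ... remaining fields and FileName follow ... */
};

struct std_dir_info {			/* stands in for FIND_FILE_STANDARD_INFO */
	uint8_t FileNameLength;		/* one byte, no endian conversion */
	char	FileName[1];
};

static char *next_entry(char *cur, int legacy_level)
{
	if (legacy_level) {
		struct std_dir_info *e = (struct std_dir_info *)cur;

		return cur + sizeof(*e) + e->FileNameLength;
	} else {
		struct nt_dir_info *e = (struct nt_dir_info *)cur;

		return cur + e->NextEntryOffset;
	}
}
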
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
new file mode 100644
index 00000000000000..7202d534ef0bde
--- /dev/null
+++ b/fs/cifs/sess.c
@@ -0,0 +1,538 @@
+/*
+ * fs/cifs/sess.c
+ *
+ * SMB/CIFS session setup handling routines
+ *
+ * Copyright (c) International Business Machines Corp., 2006
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_unicode.h"
+#include "cifs_debug.h"
+#include "ntlmssp.h"
+#include "nterr.h"
+#include <linux/utsname.h>
+
+extern void SMBNTencrypt(unsigned char *passwd, unsigned char *c8,
+ unsigned char *p24);
+
+static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
+{
+ __u32 capabilities = 0;
+
+ /* init fields common to all four types of SessSetup */
+ /* note that header is initialized to zero in header_assemble */
+ pSMB->req.AndXCommand = 0xFF;
+ pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
+ pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
+
+ /* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
+
+ /* BB verify whether signing required on neg or just on auth frame
+ (and NTLM case) */
+
+ capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
+ CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
+
+ if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
+ pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+ if (ses->capabilities & CAP_UNICODE) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
+ capabilities |= CAP_UNICODE;
+ }
+ if (ses->capabilities & CAP_STATUS32) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
+ capabilities |= CAP_STATUS32;
+ }
+ if (ses->capabilities & CAP_DFS) {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
+ capabilities |= CAP_DFS;
+ }
+ if (ses->capabilities & CAP_UNIX) {
+ capabilities |= CAP_UNIX;
+ }
+
+ /* BB check whether to init vcnum BB */
+ return capabilities;
+}
+
+static void unicode_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ char * bcc_ptr = *pbcc_area;
+ int bytes_ret = 0;
+
+ /* BB FIXME add check that strings total less
+ than 335 or will need to send them as arrays */
+
+ /* unicode strings, must be word aligned before the call */
+/* if ((long) bcc_ptr % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ } */
+ /* copy user */
+ if(ses->userName == NULL) {
+ /* BB what about null user mounts - check that we do this BB */
+ } else { /* 300 should be long enough for any conceivable user name */
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
+ 300, nls_cp);
+ }
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* account for null termination */
+ /* copy domain */
+ if(ses->domainName == NULL)
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr,
+ "CIFS_LINUX_DOM", 32, nls_cp);
+ else
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
+ 256, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* account for null terminator */
+
+ /* Copy OS version */
+ bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32,
+ nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, system_utsname.release,
+ 32, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* trailing null */
+
+ bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
+ 32, nls_cp);
+ bcc_ptr += 2 * bytes_ret;
+ bcc_ptr += 2; /* trailing null */
+
+ *pbcc_area = bcc_ptr;
+}
+
+static void ascii_ssetup_strings(char ** pbcc_area, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ char * bcc_ptr = *pbcc_area;
+
+	/* copy user */
+ if(ses->userName == NULL) {
+ /* BB what about null user mounts - check that we do this BB */
+ } else { /* 300 should be long enough for any conceivable user name */
+ strncpy(bcc_ptr, ses->userName, 300);
+ }
+ /* BB improve check for overflow */
+ bcc_ptr += strnlen(ses->userName, 300);
+ *bcc_ptr = 0;
+ bcc_ptr++; /* account for null termination */
+
+ /* copy domain */
+
+ if(ses->domainName == NULL) {
+ strcpy(bcc_ptr, "CIFS_LINUX_DOM");
+ bcc_ptr += 14; /* strlen(CIFS_LINUX_DOM) */
+ } else {
+ strncpy(bcc_ptr, ses->domainName, 256);
+ bcc_ptr += strnlen(ses->domainName, 256);
+ }
+ *bcc_ptr = 0;
+ bcc_ptr++;
+
+ /* BB check for overflow here */
+
+ strcpy(bcc_ptr, "Linux version ");
+ bcc_ptr += strlen("Linux version ");
+ strcpy(bcc_ptr, system_utsname.release);
+ bcc_ptr += strlen(system_utsname.release) + 1;
+
+ strcpy(bcc_ptr, CIFS_NETWORK_OPSYS);
+ bcc_ptr += strlen(CIFS_NETWORK_OPSYS) + 1;
+
+ *pbcc_area = bcc_ptr;
+}
+
+static int decode_unicode_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ int rc = 0;
+ int words_left, len;
+ char * data = *pbcc_area;
+
+ cFYI(1,("bleft %d",bleft));
+
+ /* word align, if bytes remaining is not even */
+ if(bleft % 2) {
+ bleft--;
+ data++;
+ }
+ words_left = bleft / 2;
+
+ /* save off server operating system */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+/* We look for obvious messed up bcc or strings in response so we do not go off
+ the end since (at least) WIN2K and Windows XP have a major bug in not null
+ terminating last Unicode string in response */
+ if(len >= words_left)
+ return rc;
+
+ if(ses->serverOS)
+ kfree(ses->serverOS);
+	/* UTF-8 string will not grow more than four times as big as UCS-2 */
+ ses->serverOS = kzalloc(4 * len, GFP_KERNEL);
+ if(ses->serverOS != NULL) {
+ cifs_strfromUCS_le(ses->serverOS, (__le16 *)data, len,
+ nls_cp);
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ /* save off server network operating system */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+ if(len >= words_left)
+ return rc;
+
+ if(ses->serverNOS)
+ kfree(ses->serverNOS);
+ ses->serverNOS = kzalloc(4 * len, GFP_KERNEL); /* BB this is wrong length FIXME BB */
+ if(ses->serverNOS != NULL) {
+ cifs_strfromUCS_le(ses->serverNOS, (__le16 *)data, len,
+ nls_cp);
+ if(strncmp(ses->serverNOS, "NT LAN Manager 4",16) == 0) {
+ cFYI(1,("NT4 server"));
+ ses->flags |= CIFS_SES_NT4;
+ }
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ /* save off server domain */
+ len = UniStrnlen((wchar_t *) data, words_left);
+
+ if(len > words_left)
+ return rc;
+
+ if(ses->serverDomain)
+ kfree(ses->serverDomain);
+ ses->serverDomain = kzalloc(2 * (len + 1), GFP_KERNEL); /* BB FIXME wrong length */
+ if(ses->serverDomain != NULL) {
+ cifs_strfromUCS_le(ses->serverDomain, (__le16 *)data, len,
+ nls_cp);
+ ses->serverDomain[2*len] = 0;
+ ses->serverDomain[(2*len) + 1] = 0;
+ }
+ data += 2 * (len + 1);
+ words_left -= len + 1;
+
+ cFYI(1,("words left: %d",words_left));
+
+ return rc;
+}
+
+static int decode_ascii_ssetup(char ** pbcc_area, int bleft, struct cifsSesInfo *ses,
+ const struct nls_table * nls_cp)
+{
+ int rc = 0;
+ int len;
+ char * bcc_ptr = *pbcc_area;
+
+ cFYI(1,("decode sessetup ascii. bleft %d", bleft));
+
+ len = strnlen(bcc_ptr, bleft);
+ if(len >= bleft)
+ return rc;
+
+ if(ses->serverOS)
+ kfree(ses->serverOS);
+
+ ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
+ if(ses->serverOS)
+ strncpy(ses->serverOS, bcc_ptr, len);
+
+ bcc_ptr += len + 1;
+ bleft -= len + 1;
+
+ len = strnlen(bcc_ptr, bleft);
+ if(len >= bleft)
+ return rc;
+
+ if(ses->serverNOS)
+ kfree(ses->serverNOS);
+
+ ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
+ if(ses->serverNOS)
+ strncpy(ses->serverNOS, bcc_ptr, len);
+
+ bcc_ptr += len + 1;
+ bleft -= len + 1;
+
+ len = strnlen(bcc_ptr, bleft);
+ if(len > bleft)
+ return rc;
+
+ if(ses->serverDomain)
+ kfree(ses->serverDomain);
+
+ ses->serverDomain = kzalloc(len + 1, GFP_KERNEL);
+	if(ses->serverDomain)
+		strncpy(ses->serverDomain, bcc_ptr, len);
+
+ bcc_ptr += len + 1;
+ bleft -= len + 1;
+
+ cFYI(1,("ascii: bytes left %d",bleft));
+
+ return rc;
+}
+
+int
+CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, int first_time,
+ const struct nls_table *nls_cp)
+{
+ int rc = 0;
+ int wct;
+ struct smb_hdr *smb_buf;
+ char *bcc_ptr;
+ char *str_area;
+ SESSION_SETUP_ANDX *pSMB;
+ __u32 capabilities;
+ int count;
+ int resp_buf_type = 0;
+ struct kvec iov[2];
+ enum securityEnum type;
+ __u16 action;
+ int bytes_remaining;
+
+ if(ses == NULL)
+ return -EINVAL;
+
+ type = ses->server->secType;
+
+ cFYI(1,("sess setup type %d",type));
+ if(type == LANMAN) {
+#ifndef CONFIG_CIFS_WEAK_PW_HASH
+ /* LANMAN and plaintext are less secure and off by default.
+ So we make this explicitly be turned on in kconfig (in the
+ build) and turned on at runtime (changed from the default)
+ in proc/fs/cifs or via mount parm. Unfortunately this is
+ needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
+ return -EOPNOTSUPP;
+#endif
+ wct = 10; /* lanman 2 style sessionsetup */
+ } else if((type == NTLM) || (type == NTLMv2)) {
+ /* For NTLMv2 failures eventually may need to retry NTLM */
+ wct = 13; /* old style NTLM sessionsetup */
+ } else /* same size for negotiate or auth, NTLMSSP or extended security */
+ wct = 12;
+
+ rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
+ (void **)&smb_buf);
+ if(rc)
+ return rc;
+
+ pSMB = (SESSION_SETUP_ANDX *)smb_buf;
+
+ capabilities = cifs_ssetup_hdr(ses, pSMB);
+
+ /* we will send the SMB in two pieces,
+ a fixed length beginning part, and a
+ second part which will include the strings
+ and rest of bcc area, in order to avoid having
+ to do a large buffer 17K allocation */
+ iov[0].iov_base = (char *)pSMB;
+ iov[0].iov_len = smb_buf->smb_buf_length + 4;
+
+ /* 2000 big enough to fit max user, domain, NOS name etc. */
+	str_area = kmalloc(2000, GFP_KERNEL);
+	if (str_area == NULL) {
+		cifs_small_buf_release(smb_buf);
+		return -ENOMEM;
+	}
+	bcc_ptr = str_area;
+
+ if(type == LANMAN) {
+#ifdef CONFIG_CIFS_WEAK_PW_HASH
+ char lnm_session_key[CIFS_SESS_KEY_SIZE];
+
+ /* no capabilities flags in old lanman negotiation */
+
+ pSMB->old_req.PasswordLength = CIFS_SESS_KEY_SIZE;
+ /* BB calculate hash with password */
+ /* and copy into bcc */
+
+ calc_lanman_hash(ses, lnm_session_key);
+
+/* #ifdef CONFIG_CIFS_DEBUG2
+ cifs_dump_mem("cryptkey: ",ses->server->cryptKey,
+ CIFS_SESS_KEY_SIZE);
+#endif */
+ memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+
+ /* can not sign if LANMAN negotiated so no need
+ to calculate signing key? but what if server
+ changed to do higher than lanman dialect and
+ we reconnected would we ever calc signing_key? */
+
+ cFYI(1,("Negotiating LANMAN setting up strings"));
+ /* Unicode not allowed for LANMAN dialects */
+ ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+#endif
+ } else if (type == NTLM) {
+ char ntlm_session_key[CIFS_SESS_KEY_SIZE];
+
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+ pSMB->req_no_secext.CaseInsensitivePasswordLength =
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(CIFS_SESS_KEY_SIZE);
+
+ /* calculate session key */
+ SMBNTencrypt(ses->password, ses->server->cryptKey,
+ ntlm_session_key);
+
+ if(first_time) /* should this be moved into common code
+ with similar ntlmv2 path? */
+ cifs_calculate_mac_key(ses->server->mac_signing_key,
+ ntlm_session_key, ses->password);
+ /* copy session key */
+
+ memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ memcpy(bcc_ptr, (char *)ntlm_session_key,CIFS_SESS_KEY_SIZE);
+ bcc_ptr += CIFS_SESS_KEY_SIZE;
+ if(ses->capabilities & CAP_UNICODE) {
+ /* unicode strings must be word aligned */
+ if (iov[0].iov_len % 2) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ }
+ unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else
+ ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else if (type == NTLMv2) {
+ char * v2_sess_key =
+ kmalloc(sizeof(struct ntlmv2_resp), GFP_KERNEL);
+
+ /* BB FIXME change all users of v2_sess_key to
+ struct ntlmv2_resp */
+
+ if(v2_sess_key == NULL) {
+ cifs_small_buf_release(smb_buf);
+ return -ENOMEM;
+ }
+
+ pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
+
+ /* LM2 password would be here if we supported it */
+ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0;
+ /* cpu_to_le16(LM2_SESS_KEY_SIZE); */
+
+ pSMB->req_no_secext.CaseSensitivePasswordLength =
+ cpu_to_le16(sizeof(struct ntlmv2_resp));
+
+ /* calculate session key */
+ setup_ntlmv2_rsp(ses, v2_sess_key, nls_cp);
+		/* if(first_time)
+			cifs_calculate_ntlmv2_mac_key(ses->server->mac_signing_key,
+				response BB FIXME, v2_sess_key);
+		   BB should this be moved into common code with the ntlm path? */
+
+ /* copy session key */
+
+ /* memcpy(bcc_ptr, (char *)ntlm_session_key,LM2_SESS_KEY_SIZE);
+ bcc_ptr += LM2_SESS_KEY_SIZE; */
+ memcpy(bcc_ptr, (char *)v2_sess_key, sizeof(struct ntlmv2_resp));
+ bcc_ptr += sizeof(struct ntlmv2_resp);
+ kfree(v2_sess_key);
+ if(ses->capabilities & CAP_UNICODE) {
+ if(iov[0].iov_len % 2) {
+ *bcc_ptr = 0;
+				bcc_ptr++;
+			}
+ unicode_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else
+ ascii_ssetup_strings(&bcc_ptr, ses, nls_cp);
+ } else /* NTLMSSP or SPNEGO */ {
+ pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
+ capabilities |= CAP_EXTENDED_SECURITY;
+ pSMB->req.Capabilities = cpu_to_le32(capabilities);
+ /* BB set password lengths */
+ }
+
+ count = (long) bcc_ptr - (long) str_area;
+ smb_buf->smb_buf_length += count;
+
+ BCC_LE(smb_buf) = cpu_to_le16(count);
+
+ iov[1].iov_base = str_area;
+ iov[1].iov_len = count;
+ rc = SendReceive2(xid, ses, iov, 2 /* num_iovecs */, &resp_buf_type, 0);
+ /* SMB request buf freed in SendReceive2 */
+
+ cFYI(1,("ssetup rc from sendrecv2 is %d",rc));
+ if(rc)
+ goto ssetup_exit;
+
+ pSMB = (SESSION_SETUP_ANDX *)iov[0].iov_base;
+ smb_buf = (struct smb_hdr *)iov[0].iov_base;
+
+ if((smb_buf->WordCount != 3) && (smb_buf->WordCount != 4)) {
+ rc = -EIO;
+ cERROR(1,("bad word count %d", smb_buf->WordCount));
+ goto ssetup_exit;
+ }
+ action = le16_to_cpu(pSMB->resp.Action);
+ if (action & GUEST_LOGIN)
+ cFYI(1, ("Guest login")); /* BB mark SesInfo struct? */
+ ses->Suid = smb_buf->Uid; /* UID left in wire format (le) */
+	cFYI(1, ("UID = %d", ses->Suid));
+ /* response can have either 3 or 4 word count - Samba sends 3 */
+ /* and lanman response is 3 */
+ bytes_remaining = BCC(smb_buf);
+ bcc_ptr = pByteArea(smb_buf);
+
+ if(smb_buf->WordCount == 4) {
+ __u16 blob_len;
+ blob_len = le16_to_cpu(pSMB->resp.SecurityBlobLength);
+ bcc_ptr += blob_len;
+ if(blob_len > bytes_remaining) {
+ cERROR(1,("bad security blob length %d", blob_len));
+ rc = -EINVAL;
+ goto ssetup_exit;
+ }
+ bytes_remaining -= blob_len;
+ }
+
+ /* BB check if Unicode and decode strings */
+ if(smb_buf->Flags2 & SMBFLG2_UNICODE)
+ rc = decode_unicode_ssetup(&bcc_ptr, bytes_remaining,
+ ses, nls_cp);
+ else
+ rc = decode_ascii_ssetup(&bcc_ptr, bytes_remaining, ses,nls_cp);
+
+ssetup_exit:
+ kfree(str_area);
+ if(resp_buf_type == CIFS_SMALL_BUFFER) {
+ cFYI(1,("ssetup freeing small buf %p", iov[0].iov_base));
+ cifs_small_buf_release(iov[0].iov_base);
+ } else if(resp_buf_type == CIFS_LARGE_BUFFER)
+ cifs_buf_release(iov[0].iov_base);
+
+ return rc;
+}
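
CIFS_SessSetup() above builds the request as two pieces, a fixed-length header part and a separately allocated string/BCC area, and hands both to SendReceive2() as iovecs so no single ~17K buffer is needed. A userspace sketch of that scatter/gather shape using writev(); the buffer contents and sizes are dummies:

/* Userspace sketch of the two-piece send: a fixed header buffer plus a
 * separately built string area passed as two iovecs (SendReceive2() plays
 * this role in the kernel). Buffer contents and sizes are dummies. */
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	static const char strings[] = "user\0domain\0Linux\0CIFS";
	char header[64];			/* fixed-length request part */
	char *str_area = malloc(2000);		/* strings / BCC area */
	struct iovec iov[2];

	if (str_area == NULL)
		return 1;

	memset(header, 0, sizeof(header));
	memcpy(str_area, strings, sizeof(strings));

	iov[0].iov_base = header;
	iov[0].iov_len  = sizeof(header);
	iov[1].iov_base = str_area;
	iov[1].iov_len  = sizeof(strings);	/* bytes actually used */

	if (writev(STDOUT_FILENO, iov, 2) < 0) {	/* scatter/gather send */
		free(str_area);
		return 1;
	}
	free(str_area);
	return 0;
}
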
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 6103bcdfb16d9e..f518c5e45035c5 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -30,6 +30,7 @@
#include <linux/random.h>
#include "cifs_unicode.h"
#include "cifspdu.h"
+#include "cifsglob.h"
#include "md5.h"
#include "cifs_debug.h"
#include "cifsencrypt.h"
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 3da80409466cff..17ba329e2b3de5 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -654,8 +654,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
if (in_buf->smb_buf_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
up(&ses->server->tcpSem);
- cERROR(1,
- ("Illegal length, greater than maximum frame, %d ",
+ cERROR(1, ("Illegal length, greater than maximum frame, %d",
in_buf->smb_buf_length));
DeleteMidQEntry(midQ);
/* If not lock req, update # of requests on wire to server */
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 7f96b5cb678161..8c9b28dff1197d 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -34,6 +34,7 @@
#include <linux/suspend.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
+#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
@@ -1675,7 +1676,7 @@ static void journal_free_journal_head(struct journal_head *jh)
{
#ifdef CONFIG_JBD_DEBUG
atomic_dec(&nr_journal_heads);
- memset(jh, 0x5b, sizeof(*jh));
+ memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
kmem_cache_free(journal_head_cache, jh);
}
diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c
index 320dd48b834ee4..9c2077e7e081c7 100644
--- a/fs/jffs2/acl.c
+++ b/fs/jffs2/acl.c
@@ -267,6 +267,8 @@ static int jffs2_set_acl(struct inode *inode, int type, struct posix_acl *acl)
}
rc = do_jffs2_setxattr(inode, xprefix, "", value, size, 0);
+ if (!value && rc == -ENODATA)
+ rc = 0;
if (value)
kfree(value);
if (!rc) {
diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
index b8886f048eaad0..ad0121088dde38 100644
--- a/fs/jffs2/erase.c
+++ b/fs/jffs2/erase.c
@@ -225,7 +225,6 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
at the end of the linked list. Stash it and continue
from the beginning of the list */
ic = (struct jffs2_inode_cache *)(*prev);
- BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
prev = &ic->nodes;
continue;
}
@@ -249,7 +248,8 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
/* PARANOIA */
if (!ic) {
- printk(KERN_WARNING "inode_cache not found in remove_node_refs()!!\n");
+ JFFS2_WARNING("inode_cache/xattr_datum/xattr_ref"
+ " not found in remove_node_refs()!!\n");
return;
}
@@ -274,8 +274,19 @@ static inline void jffs2_remove_node_refs_from_ino_list(struct jffs2_sb_info *c,
printk("\n");
});
- if (ic->nodes == (void *)ic && ic->nlink == 0)
- jffs2_del_ino_cache(c, ic);
+ switch (ic->class) {
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case RAWNODE_CLASS_XATTR_DATUM:
+ jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ break;
+ case RAWNODE_CLASS_XATTR_REF:
+ jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ break;
+#endif
+ default:
+ if (ic->nodes == (void *)ic && ic->nlink == 0)
+ jffs2_del_ino_cache(c, ic);
+ }
}
void jffs2_free_jeb_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
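
Erase-time node-ref removal now dispatches on ic->class, so xattr datums and xattr refs get their own release path instead of tripping the old BUG_ON; the same switch appears again in nodemgmt.c further down. A standalone sketch of that tagged-dispatch pattern, where a common first member carries the class tag (invented names, not JFFS2 code):

	#include <stdio.h>

	enum obj_class { CLASS_INODE_CACHE, CLASS_XATTR_DATUM, CLASS_XATTR_REF };

	struct common { enum obj_class class; };	/* first member of every object */
	struct inode_cache { struct common c; int ino; };
	struct xattr_datum { struct common c; int xid; };

	static void release(struct common *obj)
	{
		switch (obj->class) {
		case CLASS_XATTR_DATUM:
			printf("release xattr datum %d\n",
			       ((struct xattr_datum *)obj)->xid);
			break;
		case CLASS_XATTR_REF:
			printf("release xattr ref\n");
			break;
		default:
			printf("delete inode cache %d\n",
			       ((struct inode_cache *)obj)->ino);
		}
	}

	int main(void)
	{
		struct inode_cache ic = { { CLASS_INODE_CACHE }, 42 };
		struct xattr_datum xd = { { CLASS_XATTR_DATUM }, 7 };

		release(&ic.c);
		release(&xd.c);
		return 0;
	}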
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index 2900ec3ec3afb7..97caa77d60cf23 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -227,8 +227,6 @@ void jffs2_clear_inode (struct inode *inode)
struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
D1(printk(KERN_DEBUG "jffs2_clear_inode(): ino #%lu mode %o\n", inode->i_ino, inode->i_mode));
-
- jffs2_xattr_delete_inode(c, f->inocache);
jffs2_do_clear_inode(c, f);
}
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 477c526d638b97..daff3341ff92b5 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -165,6 +165,7 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
D1(printk(KERN_DEBUG "Skipping check of ino #%d with nlink zero\n",
ic->ino));
spin_unlock(&c->inocache_lock);
+ jffs2_xattr_delete_inode(c, ic);
continue;
}
switch(ic->state) {
@@ -275,13 +276,12 @@ int jffs2_garbage_collect_pass(struct jffs2_sb_info *c)
 * We can decide whether this node is an inode or an xattr by ic->class. */
if (ic->class == RAWNODE_CLASS_XATTR_DATUM
|| ic->class == RAWNODE_CLASS_XATTR_REF) {
- BUG_ON(raw->next_in_ino != (void *)ic);
spin_unlock(&c->erase_completion_lock);
if (ic->class == RAWNODE_CLASS_XATTR_DATUM) {
- ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ ret = jffs2_garbage_collect_xattr_datum(c, (struct jffs2_xattr_datum *)ic, raw);
} else {
- ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ ret = jffs2_garbage_collect_xattr_ref(c, (struct jffs2_xattr_ref *)ic, raw);
}
goto release_sem;
}
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 935fec1b1201bd..b98594992eed53 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -119,8 +119,11 @@ struct jffs2_sb_info {
#ifdef CONFIG_JFFS2_FS_XATTR
#define XATTRINDEX_HASHSIZE (57)
uint32_t highest_xid;
+ uint32_t highest_xseqno;
struct list_head xattrindex[XATTRINDEX_HASHSIZE];
struct list_head xattr_unchecked;
+ struct list_head xattr_dead_list;
+ struct jffs2_xattr_ref *xref_dead_list;
struct jffs2_xattr_ref *xref_temp;
struct rw_semaphore xattr_sem;
uint32_t xdatum_mem_usage;
diff --git a/fs/jffs2/malloc.c b/fs/jffs2/malloc.c
index 4889d0700c0e6a..8310c95478e920 100644
--- a/fs/jffs2/malloc.c
+++ b/fs/jffs2/malloc.c
@@ -291,6 +291,7 @@ struct jffs2_xattr_datum *jffs2_alloc_xattr_datum(void)
memset(xd, 0, sizeof(struct jffs2_xattr_datum));
xd->class = RAWNODE_CLASS_XATTR_DATUM;
+ xd->node = (void *)xd;
INIT_LIST_HEAD(&xd->xindex);
return xd;
}
@@ -309,6 +310,7 @@ struct jffs2_xattr_ref *jffs2_alloc_xattr_ref(void)
memset(ref, 0, sizeof(struct jffs2_xattr_ref));
ref->class = RAWNODE_CLASS_XATTR_REF;
+ ref->node = (void *)ref;
return ref;
}
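
Freshly allocated xdatum/xref objects now point node at themselves, so the next_in_ino chain of physical nodes terminates at its owning object and "no nodes yet" is simply node == owner. A standalone model of that sentinel convention (invented names, not JFFS2 code):

	#include <assert.h>
	#include <stdio.h>

	struct raw_ref { struct raw_ref *next_in_ino; unsigned int offset; };

	struct owner {
		struct raw_ref *node;	/* chain head; points at the owner when empty */
	};

	static void owner_init(struct owner *o)
	{
		o->node = (struct raw_ref *)o;	/* self-pointer = empty chain */
	}

	static void owner_add(struct owner *o, struct raw_ref *r)
	{
		r->next_in_ino = o->node;
		o->node = r;
	}

	static int owner_empty(const struct owner *o)
	{
		return o->node == (const struct raw_ref *)o;
	}

	int main(void)
	{
		struct owner o;
		struct raw_ref r = { NULL, 0x1000 };

		owner_init(&o);
		assert(owner_empty(&o));
		owner_add(&o, &r);
		assert(!owner_empty(&o));
		/* the walk stops when it arrives back at the owning object */
		for (struct raw_ref *p = o.node; p != (struct raw_ref *)&o;
		     p = p->next_in_ino)
			printf("node at %#x\n", p->offset);
		return 0;
	}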
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c
index 927dfe42ba76ca..7675b33396c7d7 100644
--- a/fs/jffs2/nodelist.c
+++ b/fs/jffs2/nodelist.c
@@ -906,6 +906,9 @@ void jffs2_del_ino_cache(struct jffs2_sb_info *c, struct jffs2_inode_cache *old)
{
struct jffs2_inode_cache **prev;
+#ifdef CONFIG_JFFS2_FS_XATTR
+ BUG_ON(old->xref);
+#endif
dbg_inocache("del %p (ino #%u)\n", old, old->ino);
spin_lock(&c->inocache_lock);
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index ac0c350ed7d7bb..d88376992ed9b0 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -683,19 +683,26 @@ void jffs2_mark_node_obsolete(struct jffs2_sb_info *c, struct jffs2_raw_node_ref
spin_lock(&c->erase_completion_lock);
ic = jffs2_raw_ref_to_ic(ref);
- /* It seems we should never call jffs2_mark_node_obsolete() for
- XATTR nodes.... yet. Make sure we notice if/when we change
- that :) */
- BUG_ON(ic->class != RAWNODE_CLASS_INODE_CACHE);
for (p = &ic->nodes; (*p) != ref; p = &((*p)->next_in_ino))
;
*p = ref->next_in_ino;
ref->next_in_ino = NULL;
- if (ic->nodes == (void *)ic && ic->nlink == 0)
- jffs2_del_ino_cache(c, ic);
-
+ switch (ic->class) {
+#ifdef CONFIG_JFFS2_FS_XATTR
+ case RAWNODE_CLASS_XATTR_DATUM:
+ jffs2_release_xattr_datum(c, (struct jffs2_xattr_datum *)ic);
+ break;
+ case RAWNODE_CLASS_XATTR_REF:
+ jffs2_release_xattr_ref(c, (struct jffs2_xattr_ref *)ic);
+ break;
+#endif
+ default:
+ if (ic->nodes == (void *)ic && ic->nlink == 0)
+ jffs2_del_ino_cache(c, ic);
+ break;
+ }
spin_unlock(&c->erase_completion_lock);
}
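
The loop above removes the ref from the singly linked next_in_ino chain with a pointer-to-pointer walk (no back pointer needed) before dispatching on ic->class. A standalone sketch of that unlink idiom (invented names):

	#include <assert.h>
	#include <stdio.h>

	struct ref { struct ref *next; int id; };

	/* Unlink 'victim' without tracking a previous node: walk a pointer to
	 * the link that addresses the victim, then redirect that link. */
	static void unlink_ref(struct ref **head, struct ref *victim)
	{
		struct ref **p;

		for (p = head; *p != victim; p = &(*p)->next)
			;
		*p = victim->next;
		victim->next = NULL;
	}

	int main(void)
	{
		struct ref c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
		struct ref *head = &a;

		unlink_ref(&head, &b);
		for (struct ref *r = head; r; r = r->next)
			printf("ref %d\n", r->id);	/* prints 1 then 3 */
		assert(head == &a && a.next == &c);
		return 0;
	}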
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 5fec012b02ed59..cc1899268c439a 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -968,6 +968,7 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
struct jffs2_full_dirent *fd, *fds;
int deleted;
+ jffs2_xattr_delete_inode(c, f->inocache);
down(&f->sem);
deleted = f->inocache && !f->inocache->nlink;
diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c
index 61618080b86f0a..2bfdc33752d38c 100644
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -317,20 +317,23 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
struct jffs2_summary *s)
{
struct jffs2_xattr_datum *xd;
- uint32_t totlen, crc;
+ uint32_t xid, version, totlen, crc;
int err;
crc = crc32(0, rx, sizeof(struct jffs2_raw_xattr) - 4);
if (crc != je32_to_cpu(rx->node_crc)) {
- if (je32_to_cpu(rx->node_crc) != 0xffffffff)
- JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ofs, je32_to_cpu(rx->node_crc), crc);
+ JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ofs, je32_to_cpu(rx->node_crc), crc);
if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(rx->totlen))))
return err;
return 0;
}
- totlen = PAD(sizeof(*rx) + rx->name_len + 1 + je16_to_cpu(rx->value_len));
+ xid = je32_to_cpu(rx->xid);
+ version = je32_to_cpu(rx->version);
+
+ totlen = PAD(sizeof(struct jffs2_raw_xattr)
+ + rx->name_len + 1 + je16_to_cpu(rx->value_len));
if (totlen != je32_to_cpu(rx->totlen)) {
JFFS2_WARNING("node length mismatch at %#08x, read=%u, calc=%u\n",
ofs, je32_to_cpu(rx->totlen), totlen);
@@ -339,22 +342,24 @@ static int jffs2_scan_xattr_node(struct jffs2_sb_info *c, struct jffs2_erasebloc
return 0;
}
- xd = jffs2_setup_xattr_datum(c, je32_to_cpu(rx->xid), je32_to_cpu(rx->version));
- if (IS_ERR(xd)) {
- if (PTR_ERR(xd) == -EEXIST) {
- if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rx->totlen)))))
- return err;
- return 0;
- }
+ xd = jffs2_setup_xattr_datum(c, xid, version);
+ if (IS_ERR(xd))
return PTR_ERR(xd);
- }
- xd->xprefix = rx->xprefix;
- xd->name_len = rx->name_len;
- xd->value_len = je16_to_cpu(rx->value_len);
- xd->data_crc = je32_to_cpu(rx->data_crc);
- xd->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
- /* FIXME */ xd->node->next_in_ino = (void *)xd;
+ if (xd->version > version) {
+ struct jffs2_raw_node_ref *raw
+ = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, NULL);
+ raw->next_in_ino = xd->node->next_in_ino;
+ xd->node->next_in_ino = raw;
+ } else {
+ xd->version = version;
+ xd->xprefix = rx->xprefix;
+ xd->name_len = rx->name_len;
+ xd->value_len = je16_to_cpu(rx->value_len);
+ xd->data_crc = je32_to_cpu(rx->data_crc);
+
+ jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, totlen, (void *)xd);
+ }
if (jffs2_sum_active())
jffs2_sum_add_xattr_mem(s, rx, ofs - jeb->offset);
@@ -373,9 +378,8 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
crc = crc32(0, rr, sizeof(*rr) - 4);
if (crc != je32_to_cpu(rr->node_crc)) {
- if (je32_to_cpu(rr->node_crc) != 0xffffffff)
- JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ofs, je32_to_cpu(rr->node_crc), crc);
+ JFFS2_WARNING("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ ofs, je32_to_cpu(rr->node_crc), crc);
if ((err = jffs2_scan_dirty_space(c, jeb, PAD(je32_to_cpu(rr->totlen)))))
return err;
return 0;
@@ -395,6 +399,7 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
return -ENOMEM;
/* BEFORE jffs2_build_xattr_subsystem() called,
+ * and AFTER xattr_ref is marked as a dead xref,
* ref->xid is used to store 32bit xid, xd is not used
* ref->ino is used to store 32bit inode-number, ic is not used
 * Those variables are declared as a union, thus using those
@@ -404,11 +409,13 @@ static int jffs2_scan_xref_node(struct jffs2_sb_info *c, struct jffs2_eraseblock
*/
ref->ino = je32_to_cpu(rr->ino);
ref->xid = je32_to_cpu(rr->xid);
+ ref->xseqno = je32_to_cpu(rr->xseqno);
+ if (ref->xseqno > c->highest_xseqno)
+ c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
ref->next = c->xref_temp;
c->xref_temp = ref;
- ref->node = jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), NULL);
- /* FIXME */ ref->node->next_in_ino = (void *)ref;
+ jffs2_link_node_ref(c, jeb, ofs | REF_PRISTINE, PAD(je32_to_cpu(rr->totlen)), (void *)ref);
if (jffs2_sum_active())
jffs2_sum_add_xref_mem(s, rr, ofs - jeb->offset);
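
During scan, a node carrying an older version of an xdatum no longer produces -EEXIST churn; the newer copy keeps the cached fields and the older raw node is merely chained behind it for later reclaim. A simplified standalone model of that keep-the-newest-version rule (invented names; the real code also links the raw node refs):

	#include <stdint.h>
	#include <stdio.h>

	struct xdatum_sketch {
		uint32_t version;
		uint32_t value_len;
		int	 nodes;		/* on-flash copies seen so far */
	};

	/* Feed one scanned node into the cache: newer versions replace the
	 * cached fields, older ones are only accounted for. */
	static void scan_one(struct xdatum_sketch *xd, uint32_t version,
			     uint32_t value_len)
	{
		xd->nodes++;
		if (version <= xd->version)
			return;		/* stale copy: keep the newer fields */
		xd->version = version;
		xd->value_len = value_len;
	}

	int main(void)
	{
		struct xdatum_sketch xd = { 0, 0, 0 };

		scan_one(&xd, 3, 10);
		scan_one(&xd, 2, 99);	/* older: ignored for cached fields */
		scan_one(&xd, 5, 42);
		printf("version=%u value_len=%u nodes=%d\n",
		       xd.version, xd.value_len, xd.nodes);
		return 0;
	}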
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index be1acc3dad97a7..c19bd476e8ec78 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -5,7 +5,7 @@
* Zoltan Sogor <weth@inf.u-szeged.hu>,
* Patrik Kluba <pajko@halom.u-szeged.hu>,
* University of Szeged, Hungary
- * 2005 KaiGai Kohei <kaigai@ak.jp.nec.com>
+ * 2006 KaiGai Kohei <kaigai@ak.jp.nec.com>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
@@ -310,8 +310,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
#ifdef CONFIG_JFFS2_FS_XATTR
case JFFS2_NODETYPE_XATTR: {
struct jffs2_sum_xattr_mem *temp;
- if (je32_to_cpu(node->x.version) == 0xffffffff)
- return 0;
temp = kmalloc(sizeof(struct jffs2_sum_xattr_mem), GFP_KERNEL);
if (!temp)
goto no_mem;
@@ -327,10 +325,6 @@ int jffs2_sum_add_kvec(struct jffs2_sb_info *c, const struct kvec *invecs,
}
case JFFS2_NODETYPE_XREF: {
struct jffs2_sum_xref_mem *temp;
-
- if (je32_to_cpu(node->r.ino) == 0xffffffff
- && je32_to_cpu(node->r.xid) == 0xffffffff)
- return 0;
temp = kmalloc(sizeof(struct jffs2_sum_xref_mem), GFP_KERNEL);
if (!temp)
goto no_mem;
@@ -483,22 +477,20 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
xd = jffs2_setup_xattr_datum(c, je32_to_cpu(spx->xid),
je32_to_cpu(spx->version));
- if (IS_ERR(xd)) {
- if (PTR_ERR(xd) == -EEXIST) {
- /* a newer version of xd exists */
- if ((err = jffs2_scan_dirty_space(c, jeb, je32_to_cpu(spx->totlen))))
- return err;
- sp += JFFS2_SUMMARY_XATTR_SIZE;
- break;
- }
- JFFS2_NOTICE("allocation of xattr_datum failed\n");
+ if (IS_ERR(xd))
return PTR_ERR(xd);
+ if (xd->version > je32_to_cpu(spx->version)) {
+ /* node is not the newest one */
+ struct jffs2_raw_node_ref *raw
+ = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spx->totlen)), NULL);
+ raw->next_in_ino = xd->node->next_in_ino;
+ xd->node->next_in_ino = raw;
+ } else {
+ xd->version = je32_to_cpu(spx->version);
+ sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
+ PAD(je32_to_cpu(spx->totlen)), (void *)xd);
}
-
- xd->node = sum_link_node_ref(c, jeb, je32_to_cpu(spx->offset) | REF_UNCHECKED,
- PAD(je32_to_cpu(spx->totlen)), NULL);
- /* FIXME */ xd->node->next_in_ino = (void *)xd;
-
*pseudo_random += je32_to_cpu(spx->xid);
sp += JFFS2_SUMMARY_XATTR_SIZE;
@@ -519,14 +511,11 @@ static int jffs2_sum_process_sum_data(struct jffs2_sb_info *c, struct jffs2_eras
JFFS2_NOTICE("allocation of xattr_datum failed\n");
return -ENOMEM;
}
- ref->ino = 0xfffffffe;
- ref->xid = 0xfffffffd;
ref->next = c->xref_temp;
c->xref_temp = ref;
- ref->node = sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
- PAD(sizeof(struct jffs2_raw_xref)), NULL);
- /* FIXME */ ref->node->next_in_ino = (void *)ref;
+ sum_link_node_ref(c, jeb, je32_to_cpu(spr->offset) | REF_UNCHECKED,
+ PAD(sizeof(struct jffs2_raw_xref)), (void *)ref);
*pseudo_random += ref->node->flash_offset;
sp += JFFS2_SUMMARY_XREF_SIZE;
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index 2d82e250be34e2..18e66dbf23b497 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -23,18 +23,15 @@
* xattr_datum_hashkey(xprefix, xname, xvalue, xsize)
 * is used to calculate the xdatum hashkey. The remainder of the hashkey modulo XATTRINDEX_HASHSIZE is
* the index of the xattr name/value pair cache (c->xattrindex).
+ * is_xattr_datum_unchecked(c, xd)
+ *   returns 1 if the xdatum contains any unchecked raw nodes; otherwise it
+ *   returns 0.
* unload_xattr_datum(c, xd)
* is used to release xattr name/value pair and detach from c->xattrindex.
* reclaim_xattr_datum(c)
* is used to reclaim xattr name/value pairs on the xattr name/value pair cache when
 * memory usage by the cache is over c->xdatum_mem_threshold. Currently, this threshold
* is hard coded as 32KiB.
- * delete_xattr_datum_node(c, xd)
- * is used to delete a jffs2 node is dominated by xdatum. When EBS(Erase Block Summary) is
- * enabled, it overwrites the obsolete node by myself.
- * delete_xattr_datum(c, xd)
- * is used to delete jffs2_xattr_datum object. It must be called with 0-value of reference
- * counter. (It means how many jffs2_xattr_ref object refers this xdatum.)
* do_verify_xattr_datum(c, xd)
 * is used to load the xdatum information without the name/value pair from the medium.
 * It's necessary once, because that information is not collected during mounting
@@ -53,8 +50,10 @@
* is used to write xdatum to medium. xd->version will be incremented.
* create_xattr_datum(c, xprefix, xname, xvalue, xsize)
* is used to create new xdatum and write to medium.
+ * delete_xattr_datum(c, xd)
+ *   is used to delete an xdatum. It marks xd with JFFS2_XFLAGS_DEAD, and allows
+ *   GC to reclaim its physical nodes.
* -------------------------------------------------- */
-
static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *xvalue, int xsize)
{
int name_len = strlen(xname);
@@ -62,6 +61,22 @@ static uint32_t xattr_datum_hashkey(int xprefix, const char *xname, const char *
return crc32(xprefix, xname, name_len) ^ crc32(xprefix, xvalue, xsize);
}
+static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ struct jffs2_raw_node_ref *raw;
+ int rc = 0;
+
+ spin_lock(&c->erase_completion_lock);
+ for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
+ if (ref_flags(raw) == REF_UNCHECKED) {
+ rc = 1;
+ break;
+ }
+ }
+ spin_unlock(&c->erase_completion_lock);
+ return rc;
+}
+
static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
/* must be called under down_write(xattr_sem) */
@@ -107,77 +122,33 @@ static void reclaim_xattr_datum(struct jffs2_sb_info *c)
before, c->xdatum_mem_usage, before - c->xdatum_mem_usage);
}
-static void delete_xattr_datum_node(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
-{
- /* must be called under down_write(xattr_sem) */
- struct jffs2_raw_xattr rx;
- size_t length;
- int rc;
-
- if (!xd->node) {
- JFFS2_WARNING("xdatum (xid=%u) is removed twice.\n", xd->xid);
- return;
- }
- if (jffs2_sum_active()) {
- memset(&rx, 0xff, sizeof(struct jffs2_raw_xattr));
- rc = jffs2_flash_read(c, ref_offset(xd->node),
- sizeof(struct jffs2_unknown_node),
- &length, (char *)&rx);
- if (rc || length != sizeof(struct jffs2_unknown_node)) {
- JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
- rc, sizeof(struct jffs2_unknown_node),
- length, ref_offset(xd->node));
- }
- rc = jffs2_flash_write(c, ref_offset(xd->node), sizeof(rx),
- &length, (char *)&rx);
- if (rc || length != sizeof(struct jffs2_raw_xattr)) {
- JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu ar %#08x\n",
- rc, sizeof(rx), length, ref_offset(xd->node));
- }
- }
- spin_lock(&c->erase_completion_lock);
- xd->node->next_in_ino = NULL;
- spin_unlock(&c->erase_completion_lock);
- jffs2_mark_node_obsolete(c, xd->node);
- xd->node = NULL;
-}
-
-static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
-{
- /* must be called under down_write(xattr_sem) */
- BUG_ON(xd->refcnt);
-
- unload_xattr_datum(c, xd);
- if (xd->node) {
- delete_xattr_datum_node(c, xd);
- xd->node = NULL;
- }
- jffs2_free_xattr_datum(xd);
-}
-
static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
/* must be called under down_write(xattr_sem) */
struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xattr rx;
size_t readlen;
- uint32_t crc, totlen;
+ uint32_t crc, offset, totlen;
int rc;
- BUG_ON(!xd->node);
- BUG_ON(ref_flags(xd->node) != REF_UNCHECKED);
+ spin_lock(&c->erase_completion_lock);
+ offset = ref_offset(xd->node);
+ if (ref_flags(xd->node) == REF_PRISTINE)
+ goto complete;
+ spin_unlock(&c->erase_completion_lock);
- rc = jffs2_flash_read(c, ref_offset(xd->node), sizeof(rx), &readlen, (char *)&rx);
+ rc = jffs2_flash_read(c, offset, sizeof(rx), &readlen, (char *)&rx);
if (rc || readlen != sizeof(rx)) {
JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
- rc, sizeof(rx), readlen, ref_offset(xd->node));
+ rc, sizeof(rx), readlen, offset);
return rc ? rc : -EIO;
}
crc = crc32(0, &rx, sizeof(rx) - 4);
if (crc != je32_to_cpu(rx.node_crc)) {
- if (je32_to_cpu(rx.node_crc) != 0xffffffff)
- JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ref_offset(xd->node), je32_to_cpu(rx.hdr_crc), crc);
+ JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ offset, je32_to_cpu(rx.hdr_crc), crc);
+ xd->flags |= JFFS2_XFLAGS_INVALID;
return EIO;
}
totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
@@ -188,11 +159,12 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
|| je32_to_cpu(rx.version) != xd->version) {
JFFS2_ERROR("inconsistent xdatum at %#08x, magic=%#04x/%#04x, "
"nodetype=%#04x/%#04x, totlen=%u/%u, xid=%u/%u, version=%u/%u\n",
- ref_offset(xd->node), je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
+ offset, je16_to_cpu(rx.magic), JFFS2_MAGIC_BITMASK,
je16_to_cpu(rx.nodetype), JFFS2_NODETYPE_XATTR,
je32_to_cpu(rx.totlen), totlen,
je32_to_cpu(rx.xid), xd->xid,
je32_to_cpu(rx.version), xd->version);
+ xd->flags |= JFFS2_XFLAGS_INVALID;
return EIO;
}
xd->xprefix = rx.xprefix;
@@ -200,14 +172,17 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
xd->value_len = je16_to_cpu(rx.value_len);
xd->data_crc = je32_to_cpu(rx.data_crc);
- /* This JFFS2_NODETYPE_XATTR node is checked */
- jeb = &c->blocks[ref_offset(xd->node) / c->sector_size];
- totlen = PAD(je32_to_cpu(rx.totlen));
-
spin_lock(&c->erase_completion_lock);
- c->unchecked_size -= totlen; c->used_size += totlen;
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
- xd->node->flash_offset = ref_offset(xd->node) | REF_PRISTINE;
+ complete:
+ for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
+ jeb = &c->blocks[ref_offset(raw) / c->sector_size];
+ totlen = PAD(ref_totlen(c, jeb, raw));
+ if (ref_flags(raw) == REF_UNCHECKED) {
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ }
+ raw->flash_offset = ref_offset(raw) | ((xd->node==raw) ? REF_PRISTINE : REF_NORMAL);
+ }
spin_unlock(&c->erase_completion_lock);
/* unchecked xdatum is chained with c->xattr_unchecked */
@@ -227,7 +202,6 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
uint32_t crc, length;
int i, ret, retry = 0;
- BUG_ON(!xd->node);
BUG_ON(ref_flags(xd->node) != REF_PRISTINE);
BUG_ON(!list_empty(&xd->xindex));
retry:
@@ -253,6 +227,7 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
" at %#08x, read: 0x%08x calculated: 0x%08x\n",
ref_offset(xd->node), xd->data_crc, crc);
kfree(data);
+ xd->flags |= JFFS2_XFLAGS_INVALID;
return EIO;
}
@@ -286,16 +261,14 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
* rc > 0 : Unrecoverable error, this node should be deleted.
*/
int rc = 0;
- BUG_ON(xd->xname);
- if (!xd->node)
+
+ BUG_ON(xd->flags & JFFS2_XFLAGS_DEAD);
+ if (xd->xname)
+ return 0;
+ if (xd->flags & JFFS2_XFLAGS_INVALID)
return EIO;
- if (unlikely(ref_flags(xd->node) != REF_PRISTINE)) {
+ if (unlikely(is_xattr_datum_unchecked(c, xd)))
rc = do_verify_xattr_datum(c, xd);
- if (rc > 0) {
- list_del_init(&xd->xindex);
- delete_xattr_datum_node(c, xd);
- }
- }
if (!rc)
rc = do_load_xattr_datum(c, xd);
return rc;
@@ -304,7 +277,6 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
{
/* must be called under down_write(xattr_sem) */
- struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xattr rx;
struct kvec vecs[2];
size_t length;
@@ -312,14 +284,16 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
uint32_t phys_ofs = write_ofs(c);
BUG_ON(!xd->xname);
+ BUG_ON(xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID));
vecs[0].iov_base = &rx;
- vecs[0].iov_len = PAD(sizeof(rx));
+ vecs[0].iov_len = sizeof(rx);
vecs[1].iov_base = xd->xname;
vecs[1].iov_len = xd->name_len + 1 + xd->value_len;
totlen = vecs[0].iov_len + vecs[1].iov_len;
/* Setup raw-xattr */
+ memset(&rx, 0, sizeof(rx));
rx.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
rx.nodetype = cpu_to_je16(JFFS2_NODETYPE_XATTR);
rx.totlen = cpu_to_je32(PAD(totlen));
@@ -343,14 +317,8 @@ static int save_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
return rc;
}
-
/* success */
- raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), NULL);
- /* FIXME */ raw->next_in_ino = (void *)xd;
-
- if (xd->node)
- delete_xattr_datum_node(c, xd);
- xd->node = raw;
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(totlen), (void *)xd);
dbg_xattr("success on saving xdatum (xid=%u, version=%u, xprefix=%u, xname='%s')\n",
xd->xid, xd->version, xd->xprefix, xd->xname);
@@ -377,7 +345,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
&& xd->value_len==xsize
&& !strcmp(xd->xname, xname)
&& !memcmp(xd->xvalue, xvalue, xsize)) {
- xd->refcnt++;
+ atomic_inc(&xd->refcnt);
return xd;
}
}
@@ -397,7 +365,7 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
strcpy(data, xname);
memcpy(data + name_len + 1, xvalue, xsize);
- xd->refcnt = 1;
+ atomic_set(&xd->refcnt, 1);
xd->xid = ++c->highest_xid;
xd->flags |= JFFS2_XFLAGS_HOT;
xd->xprefix = xprefix;
@@ -426,20 +394,36 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
return xd;
}
+static void delete_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under down_write(xattr_sem) */
+ BUG_ON(atomic_read(&xd->refcnt));
+
+ unload_xattr_datum(c, xd);
+ xd->flags |= JFFS2_XFLAGS_DEAD;
+ spin_lock(&c->erase_completion_lock);
+ if (xd->node == (void *)xd) {
+ BUG_ON(!(xd->flags & JFFS2_XFLAGS_INVALID));
+ jffs2_free_xattr_datum(xd);
+ } else {
+ list_add(&xd->xindex, &c->xattr_dead_list);
+ }
+ spin_unlock(&c->erase_completion_lock);
+ dbg_xattr("xdatum(xid=%u, version=%u) was removed.\n", xd->xid, xd->version);
+}
+
/* -------- xref related functions ------------------
* verify_xattr_ref(c, ref)
* is used to load xref information from medium. Because summary data does not
 * contain xid/ino, it's necessary to verify it once during the mounting process.
- * delete_xattr_ref_node(c, ref)
- * is used to delete a jffs2 node is dominated by xref. When EBS is enabled,
- * it overwrites the obsolete node by myself.
- * delete_xattr_ref(c, ref)
- * is used to delete jffs2_xattr_ref object. If the reference counter of xdatum
- * is refered by this xref become 0, delete_xattr_datum() is called later.
* save_xattr_ref(c, ref)
- * is used to write xref to medium.
+ *   is used to write an xref to the medium. If the delete marker is set, it
+ *   writes a deletion record for the xref to the medium.
* create_xattr_ref(c, ic, xd)
* is used to create a new xref and write to medium.
+ * delete_xattr_ref(c, ref)
+ *   is used to delete a jffs2_xattr_ref. It marks the xref with XREF_DELETE_MARKER,
+ *   and allows GC to reclaim its physical nodes.
* jffs2_xattr_delete_inode(c, ic)
* is called to remove xrefs related to obsolete inode when inode is unlinked.
* jffs2_xattr_free_inode(c, ic)
@@ -450,25 +434,29 @@ static struct jffs2_xattr_datum *create_xattr_datum(struct jffs2_sb_info *c,
static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xref rr;
size_t readlen;
- uint32_t crc, totlen;
+ uint32_t crc, offset, totlen;
int rc;
- BUG_ON(ref_flags(ref->node) != REF_UNCHECKED);
+ spin_lock(&c->erase_completion_lock);
+ if (ref_flags(ref->node) != REF_UNCHECKED)
+ goto complete;
+ offset = ref_offset(ref->node);
+ spin_unlock(&c->erase_completion_lock);
- rc = jffs2_flash_read(c, ref_offset(ref->node), sizeof(rr), &readlen, (char *)&rr);
+ rc = jffs2_flash_read(c, offset, sizeof(rr), &readlen, (char *)&rr);
if (rc || sizeof(rr) != readlen) {
JFFS2_WARNING("jffs2_flash_read()=%d, req=%zu, read=%zu, at %#08x\n",
- rc, sizeof(rr), readlen, ref_offset(ref->node));
+ rc, sizeof(rr), readlen, offset);
return rc ? rc : -EIO;
}
/* obsolete node */
crc = crc32(0, &rr, sizeof(rr) - 4);
if (crc != je32_to_cpu(rr.node_crc)) {
- if (je32_to_cpu(rr.node_crc) != 0xffffffff)
- JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
- ref_offset(ref->node), je32_to_cpu(rr.node_crc), crc);
+ JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
+ offset, je32_to_cpu(rr.node_crc), crc);
return EIO;
}
if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
@@ -476,22 +464,28 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
|| je32_to_cpu(rr.totlen) != PAD(sizeof(rr))) {
JFFS2_ERROR("inconsistent xref at %#08x, magic=%#04x/%#04x, "
"nodetype=%#04x/%#04x, totlen=%u/%zu\n",
- ref_offset(ref->node), je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
+ offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
return EIO;
}
ref->ino = je32_to_cpu(rr.ino);
ref->xid = je32_to_cpu(rr.xid);
-
- /* fixup superblock/eraseblock info */
- jeb = &c->blocks[ref_offset(ref->node) / c->sector_size];
- totlen = PAD(sizeof(rr));
+ ref->xseqno = je32_to_cpu(rr.xseqno);
+ if (ref->xseqno > c->highest_xseqno)
+ c->highest_xseqno = (ref->xseqno & ~XREF_DELETE_MARKER);
spin_lock(&c->erase_completion_lock);
- c->unchecked_size -= totlen; c->used_size += totlen;
- jeb->unchecked_size -= totlen; jeb->used_size += totlen;
- ref->node->flash_offset = ref_offset(ref->node) | REF_PRISTINE;
+ complete:
+ for (raw=ref->node; raw != (void *)ref; raw=raw->next_in_ino) {
+ jeb = &c->blocks[ref_offset(raw) / c->sector_size];
+ totlen = PAD(ref_totlen(c, jeb, raw));
+ if (ref_flags(raw) == REF_UNCHECKED) {
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ }
+ raw->flash_offset = ref_offset(raw) | ((ref->node==raw) ? REF_PRISTINE : REF_NORMAL);
+ }
spin_unlock(&c->erase_completion_lock);
dbg_xattr("success on verifying xref (ino=%u, xid=%u) at %#08x\n",
@@ -499,58 +493,12 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
return 0;
}
-static void delete_xattr_ref_node(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
-{
- struct jffs2_raw_xref rr;
- size_t length;
- int rc;
-
- if (jffs2_sum_active()) {
- memset(&rr, 0xff, sizeof(rr));
- rc = jffs2_flash_read(c, ref_offset(ref->node),
- sizeof(struct jffs2_unknown_node),
- &length, (char *)&rr);
- if (rc || length != sizeof(struct jffs2_unknown_node)) {
- JFFS2_ERROR("jffs2_flash_read()=%d, req=%zu, read=%zu at %#08x\n",
- rc, sizeof(struct jffs2_unknown_node),
- length, ref_offset(ref->node));
- }
- rc = jffs2_flash_write(c, ref_offset(ref->node), sizeof(rr),
- &length, (char *)&rr);
- if (rc || length != sizeof(struct jffs2_raw_xref)) {
- JFFS2_ERROR("jffs2_flash_write()=%d, req=%zu, wrote=%zu at %#08x\n",
- rc, sizeof(rr), length, ref_offset(ref->node));
- }
- }
- spin_lock(&c->erase_completion_lock);
- ref->node->next_in_ino = NULL;
- spin_unlock(&c->erase_completion_lock);
- jffs2_mark_node_obsolete(c, ref->node);
- ref->node = NULL;
-}
-
-static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
-{
- /* must be called under down_write(xattr_sem) */
- struct jffs2_xattr_datum *xd;
-
- BUG_ON(!ref->node);
- delete_xattr_ref_node(c, ref);
-
- xd = ref->xd;
- xd->refcnt--;
- if (!xd->refcnt)
- delete_xattr_datum(c, xd);
- jffs2_free_xattr_ref(ref);
-}
-
static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
{
/* must be called under down_write(xattr_sem) */
- struct jffs2_raw_node_ref *raw;
struct jffs2_raw_xref rr;
size_t length;
- uint32_t phys_ofs = write_ofs(c);
+ uint32_t xseqno, phys_ofs = write_ofs(c);
int ret;
rr.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -558,8 +506,16 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
rr.totlen = cpu_to_je32(PAD(sizeof(rr)));
rr.hdr_crc = cpu_to_je32(crc32(0, &rr, sizeof(struct jffs2_unknown_node) - 4));
- rr.ino = cpu_to_je32(ref->ic->ino);
- rr.xid = cpu_to_je32(ref->xd->xid);
+ xseqno = (c->highest_xseqno += 2);
+ if (is_xattr_ref_dead(ref)) {
+ xseqno |= XREF_DELETE_MARKER;
+ rr.ino = cpu_to_je32(ref->ino);
+ rr.xid = cpu_to_je32(ref->xid);
+ } else {
+ rr.ino = cpu_to_je32(ref->ic->ino);
+ rr.xid = cpu_to_je32(ref->xd->xid);
+ }
+ rr.xseqno = cpu_to_je32(xseqno);
rr.node_crc = cpu_to_je32(crc32(0, &rr, sizeof(rr) - 4));
ret = jffs2_flash_write(c, phys_ofs, sizeof(rr), &length, (char *)&rr);
@@ -572,12 +528,9 @@ static int save_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
return ret;
}
-
- raw = jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), NULL);
- /* FIXME */ raw->next_in_ino = (void *)ref;
- if (ref->node)
- delete_xattr_ref_node(c, ref);
- ref->node = raw;
+ /* success */
+ ref->xseqno = xseqno;
+ jffs2_add_physical_node_ref(c, phys_ofs | REF_PRISTINE, PAD(sizeof(rr)), (void *)ref);
dbg_xattr("success on saving xref (ino=%u, xid=%u)\n", ref->ic->ino, ref->xd->xid);
@@ -610,6 +563,27 @@ static struct jffs2_xattr_ref *create_xattr_ref(struct jffs2_sb_info *c, struct
return ref; /* success */
}
+static void delete_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ /* must be called under down_write(xattr_sem) */
+ struct jffs2_xattr_datum *xd;
+
+ xd = ref->xd;
+ ref->xseqno |= XREF_DELETE_MARKER;
+ ref->ino = ref->ic->ino;
+ ref->xid = ref->xd->xid;
+ spin_lock(&c->erase_completion_lock);
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ spin_unlock(&c->erase_completion_lock);
+
+ dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) was removed.\n",
+ ref->ino, ref->xid, ref->xseqno);
+
+ if (atomic_dec_and_test(&xd->refcnt))
+ delete_xattr_datum(c, xd);
+}
+
void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
{
/* It's called from jffs2_clear_inode() on inode removing.
@@ -638,8 +612,7 @@ void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
for (ref = ic->xref; ref; ref = _ref) {
_ref = ref->next;
xd = ref->xd;
- xd->refcnt--;
- if (!xd->refcnt) {
+ if (atomic_dec_and_test(&xd->refcnt)) {
unload_xattr_datum(c, xd);
jffs2_free_xattr_datum(xd);
}
@@ -655,7 +628,7 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
* duplicate name/value pairs. If duplicate name/value pair would be found,
* one will be removed.
*/
- struct jffs2_xattr_ref *ref, *cmp, **pref;
+ struct jffs2_xattr_ref *ref, *cmp, **pref, **pcmp;
int rc = 0;
if (likely(ic->flags & INO_FLAGS_XATTR_CHECKED))
@@ -673,13 +646,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
} else if (unlikely(rc < 0))
goto out;
}
- for (cmp=ref->next, pref=&ref->next; cmp; pref=&cmp->next, cmp=cmp->next) {
+ for (cmp=ref->next, pcmp=&ref->next; cmp; pcmp=&cmp->next, cmp=cmp->next) {
if (!cmp->xd->xname) {
ref->xd->flags |= JFFS2_XFLAGS_BIND;
rc = load_xattr_datum(c, cmp->xd);
ref->xd->flags &= ~JFFS2_XFLAGS_BIND;
if (unlikely(rc > 0)) {
- *pref = cmp->next;
+ *pcmp = cmp->next;
delete_xattr_ref(c, cmp);
goto retry;
} else if (unlikely(rc < 0))
@@ -687,8 +660,13 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
}
if (ref->xd->xprefix == cmp->xd->xprefix
&& !strcmp(ref->xd->xname, cmp->xd->xname)) {
- *pref = cmp->next;
- delete_xattr_ref(c, cmp);
+ if (ref->xseqno > cmp->xseqno) {
+ *pcmp = cmp->next;
+ delete_xattr_ref(c, cmp);
+ } else {
+ *pref = ref->next;
+ delete_xattr_ref(c, ref);
+ }
goto retry;
}
}
@@ -719,9 +697,13 @@ void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c)
for (i=0; i < XATTRINDEX_HASHSIZE; i++)
INIT_LIST_HEAD(&c->xattrindex[i]);
INIT_LIST_HEAD(&c->xattr_unchecked);
+ INIT_LIST_HEAD(&c->xattr_dead_list);
+ c->xref_dead_list = NULL;
c->xref_temp = NULL;
init_rwsem(&c->xattr_sem);
+ c->highest_xid = 0;
+ c->highest_xseqno = 0;
c->xdatum_mem_usage = 0;
c->xdatum_mem_threshold = 32 * 1024; /* Default 32KB */
}
@@ -751,7 +733,11 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
_ref = ref->next;
jffs2_free_xattr_ref(ref);
}
- c->xref_temp = NULL;
+
+ for (ref=c->xref_dead_list; ref; ref = _ref) {
+ _ref = ref->next;
+ jffs2_free_xattr_ref(ref);
+ }
for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
@@ -761,100 +747,143 @@ void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c)
jffs2_free_xattr_datum(xd);
}
}
+
+ list_for_each_entry_safe(xd, _xd, &c->xattr_dead_list, xindex) {
+ list_del(&xd->xindex);
+ jffs2_free_xattr_datum(xd);
+ }
}
+#define XREF_TMPHASH_SIZE (128)
void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
{
struct jffs2_xattr_ref *ref, *_ref;
+ struct jffs2_xattr_ref *xref_tmphash[XREF_TMPHASH_SIZE];
struct jffs2_xattr_datum *xd, *_xd;
struct jffs2_inode_cache *ic;
- int i, xdatum_count =0, xdatum_unchecked_count = 0, xref_count = 0;
+ struct jffs2_raw_node_ref *raw;
+ int i, xdatum_count = 0, xdatum_unchecked_count = 0, xref_count = 0;
+ int xdatum_orphan_count = 0, xref_orphan_count = 0, xref_dead_count = 0;
BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
- /* Phase.1 */
+ /* Phase.1 : Merge same xref */
+ for (i=0; i < XREF_TMPHASH_SIZE; i++)
+ xref_tmphash[i] = NULL;
for (ref=c->xref_temp; ref; ref=_ref) {
+ struct jffs2_xattr_ref *tmp;
+
_ref = ref->next;
- /* checking REF_UNCHECKED nodes */
if (ref_flags(ref->node) != REF_PRISTINE) {
if (verify_xattr_ref(c, ref)) {
- delete_xattr_ref_node(c, ref);
+ BUG_ON(ref->node->next_in_ino != (void *)ref);
+ ref->node->next_in_ino = NULL;
+ jffs2_mark_node_obsolete(c, ref->node);
jffs2_free_xattr_ref(ref);
continue;
}
}
- /* At this point, ref->xid and ref->ino contain XID and inode number.
- ref->xd and ref->ic are not valid yet. */
- xd = jffs2_find_xattr_datum(c, ref->xid);
- ic = jffs2_get_ino_cache(c, ref->ino);
- if (!xd || !ic) {
- if (ref_flags(ref->node) != REF_UNCHECKED)
- JFFS2_WARNING("xref(ino=%u, xid=%u) is orphan. \n",
- ref->ino, ref->xid);
- delete_xattr_ref_node(c, ref);
+
+ i = (ref->ino ^ ref->xid) % XREF_TMPHASH_SIZE;
+ for (tmp=xref_tmphash[i]; tmp; tmp=tmp->next) {
+ if (tmp->ino == ref->ino && tmp->xid == ref->xid)
+ break;
+ }
+ if (tmp) {
+ raw = ref->node;
+ if (ref->xseqno > tmp->xseqno) {
+ tmp->xseqno = ref->xseqno;
+ raw->next_in_ino = tmp->node;
+ tmp->node = raw;
+ } else {
+ raw->next_in_ino = tmp->node->next_in_ino;
+ tmp->node->next_in_ino = raw;
+ }
jffs2_free_xattr_ref(ref);
continue;
+ } else {
+ ref->next = xref_tmphash[i];
+ xref_tmphash[i] = ref;
}
- ref->xd = xd;
- ref->ic = ic;
- xd->refcnt++;
- ref->next = ic->xref;
- ic->xref = ref;
- xref_count++;
}
c->xref_temp = NULL;
- /* After this, ref->xid/ino are NEVER used. */
- /* Phase.2 */
+ /* Phase.2 : Bind xref with inode_cache and xattr_datum */
+ for (i=0; i < XREF_TMPHASH_SIZE; i++) {
+ for (ref=xref_tmphash[i]; ref; ref=_ref) {
+ xref_count++;
+ _ref = ref->next;
+ if (is_xattr_ref_dead(ref)) {
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ xref_dead_count++;
+ continue;
+ }
+ /* At this point, ref->xid and ref->ino contain XID and inode number.
+ ref->xd and ref->ic are not valid yet. */
+ xd = jffs2_find_xattr_datum(c, ref->xid);
+ ic = jffs2_get_ino_cache(c, ref->ino);
+ if (!xd || !ic) {
+ dbg_xattr("xref(ino=%u, xid=%u, xseqno=%u) is orphan.\n",
+ ref->ino, ref->xid, ref->xseqno);
+ ref->xseqno |= XREF_DELETE_MARKER;
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ xref_orphan_count++;
+ continue;
+ }
+ ref->xd = xd;
+ ref->ic = ic;
+ atomic_inc(&xd->refcnt);
+ ref->next = ic->xref;
+ ic->xref = ref;
+ }
+ }
+
+ /* Phase.3 : Link unchecked xdatum to xattr_unchecked list */
for (i=0; i < XATTRINDEX_HASHSIZE; i++) {
list_for_each_entry_safe(xd, _xd, &c->xattrindex[i], xindex) {
+ xdatum_count++;
list_del_init(&xd->xindex);
- if (!xd->refcnt) {
- if (ref_flags(xd->node) != REF_UNCHECKED)
- JFFS2_WARNING("orphan xdatum(xid=%u, version=%u) at %#08x\n",
- xd->xid, xd->version, ref_offset(xd->node));
- delete_xattr_datum(c, xd);
+ if (!atomic_read(&xd->refcnt)) {
+ dbg_xattr("xdatum(xid=%u, version=%u) is orphan.\n",
+ xd->xid, xd->version);
+ xd->flags |= JFFS2_XFLAGS_DEAD;
+ list_add(&xd->xindex, &c->xattr_unchecked);
+ xdatum_orphan_count++;
continue;
}
- if (ref_flags(xd->node) != REF_PRISTINE) {
- dbg_xattr("unchecked xdatum(xid=%u) at %#08x\n",
- xd->xid, ref_offset(xd->node));
+ if (is_xattr_datum_unchecked(c, xd)) {
+ dbg_xattr("unchecked xdatum(xid=%u, version=%u)\n",
+ xd->xid, xd->version);
list_add(&xd->xindex, &c->xattr_unchecked);
xdatum_unchecked_count++;
}
- xdatum_count++;
}
}
/* build complete */
- JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum (%u unchecked) and "
- "%u of xref found.\n", xdatum_count, xdatum_unchecked_count, xref_count);
+ JFFS2_NOTICE("complete building xattr subsystem, %u of xdatum"
+ " (%u unchecked, %u orphan) and "
+ "%u of xref (%u dead, %u orphan) found.\n",
+ xdatum_count, xdatum_unchecked_count, xdatum_orphan_count,
+ xref_count, xref_dead_count, xref_orphan_count);
}
struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
uint32_t xid, uint32_t version)
{
- struct jffs2_xattr_datum *xd, *_xd;
+ struct jffs2_xattr_datum *xd;
- _xd = jffs2_find_xattr_datum(c, xid);
- if (_xd) {
- dbg_xattr("duplicate xdatum (xid=%u, version=%u/%u) at %#08x\n",
- xid, version, _xd->version, ref_offset(_xd->node));
- if (version < _xd->version)
- return ERR_PTR(-EEXIST);
- }
- xd = jffs2_alloc_xattr_datum();
- if (!xd)
- return ERR_PTR(-ENOMEM);
- xd->xid = xid;
- xd->version = version;
- if (xd->xid > c->highest_xid)
- c->highest_xid = xd->xid;
- list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
-
- if (_xd) {
- list_del_init(&_xd->xindex);
- delete_xattr_datum_node(c, _xd);
- jffs2_free_xattr_datum(_xd);
+ xd = jffs2_find_xattr_datum(c, xid);
+ if (!xd) {
+ xd = jffs2_alloc_xattr_datum();
+ if (!xd)
+ return ERR_PTR(-ENOMEM);
+ xd->xid = xid;
+ xd->version = version;
+ if (xd->xid > c->highest_xid)
+ c->highest_xid = xd->xid;
+ list_add_tail(&xd->xindex, &c->xattrindex[xid % XATTRINDEX_HASHSIZE]);
}
return xd;
}
@@ -1080,9 +1109,23 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
goto out;
}
if (!buffer) {
- *pref = ref->next;
- delete_xattr_ref(c, ref);
- rc = 0;
+ ref->ino = ic->ino;
+ ref->xid = xd->xid;
+ ref->xseqno |= XREF_DELETE_MARKER;
+ rc = save_xattr_ref(c, ref);
+ if (!rc) {
+ *pref = ref->next;
+ spin_lock(&c->erase_completion_lock);
+ ref->next = c->xref_dead_list;
+ c->xref_dead_list = ref;
+ spin_unlock(&c->erase_completion_lock);
+ if (atomic_dec_and_test(&xd->refcnt))
+ delete_xattr_datum(c, xd);
+ } else {
+ ref->ic = ic;
+ ref->xd = xd;
+ ref->xseqno &= ~XREF_DELETE_MARKER;
+ }
goto out;
}
goto found;
@@ -1094,7 +1137,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
goto out;
}
if (!buffer) {
- rc = -EINVAL;
+ rc = -ENODATA;
goto out;
}
found:
@@ -1110,16 +1153,14 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
request = PAD(sizeof(struct jffs2_raw_xref));
rc = jffs2_reserve_space(c, request, &length,
ALLOC_NORMAL, JFFS2_SUMMARY_XREF_SIZE);
+ down_write(&c->xattr_sem);
if (rc) {
JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, request);
- down_write(&c->xattr_sem);
- xd->refcnt--;
- if (!xd->refcnt)
+ if (atomic_dec_and_test(&xd->refcnt))
delete_xattr_datum(c, xd);
up_write(&c->xattr_sem);
return rc;
}
- down_write(&c->xattr_sem);
if (ref)
*pref = ref->next;
newref = create_xattr_ref(c, ic, xd);
@@ -1129,8 +1170,7 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
ic->xref = ref;
}
rc = PTR_ERR(newref);
- xd->refcnt--;
- if (!xd->refcnt)
+ if (atomic_dec_and_test(&xd->refcnt))
delete_xattr_datum(c, xd);
} else if (ref) {
delete_xattr_ref(c, ref);
@@ -1142,38 +1182,40 @@ int do_jffs2_setxattr(struct inode *inode, int xprefix, const char *xname,
}
/* -------- garbage collector functions -------------
- * jffs2_garbage_collect_xattr_datum(c, xd)
+ * jffs2_garbage_collect_xattr_datum(c, xd, raw)
* is used to move xdatum into new node.
- * jffs2_garbage_collect_xattr_ref(c, ref)
+ * jffs2_garbage_collect_xattr_ref(c, ref, raw)
* is used to move xref into new node.
* jffs2_verify_xattr(c)
* is used to call do_verify_xattr_datum() before garbage collecting.
+ * jffs2_release_xattr_datum(c, xd)
+ * is used to release an in-memory object of xdatum.
+ * jffs2_release_xattr_ref(c, ref)
+ * is used to release an in-memory object of xref.
* -------------------------------------------------- */
-int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
+ struct jffs2_raw_node_ref *raw)
{
uint32_t totlen, length, old_ofs;
- int rc = -EINVAL;
+ int rc = 0;
down_write(&c->xattr_sem);
- BUG_ON(!xd->node);
-
- old_ofs = ref_offset(xd->node);
- totlen = ref_totlen(c, c->gcblock, xd->node);
- if (totlen < sizeof(struct jffs2_raw_xattr))
+ if (xd->node != raw)
+ goto out;
+ if (xd->flags & (JFFS2_XFLAGS_DEAD|JFFS2_XFLAGS_INVALID))
goto out;
- if (!xd->xname) {
- rc = load_xattr_datum(c, xd);
- if (unlikely(rc > 0)) {
- delete_xattr_datum_node(c, xd);
- rc = 0;
- goto out;
- } else if (unlikely(rc < 0))
- goto out;
+ rc = load_xattr_datum(c, xd);
+ if (unlikely(rc)) {
+ rc = (rc > 0) ? 0 : rc;
+ goto out;
}
+ old_ofs = ref_offset(xd->node);
+ totlen = PAD(sizeof(struct jffs2_raw_xattr)
+ + xd->name_len + 1 + xd->value_len);
rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XATTR_SIZE);
- if (rc || length < totlen) {
- JFFS2_WARNING("jffs2_reserve_space()=%d, request=%u\n", rc, totlen);
+ if (rc) {
+ JFFS2_WARNING("jffs2_reserve_space_gc()=%d, request=%u\n", rc, totlen);
rc = rc ? rc : -EBADFD;
goto out;
}
@@ -1182,27 +1224,32 @@ int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xatt
dbg_xattr("xdatum (xid=%u, version=%u) GC'ed from %#08x to %08x\n",
xd->xid, xd->version, old_ofs, ref_offset(xd->node));
out:
+ if (!rc)
+ jffs2_mark_node_obsolete(c, raw);
up_write(&c->xattr_sem);
return rc;
}
-
-int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
+ struct jffs2_raw_node_ref *raw)
{
uint32_t totlen, length, old_ofs;
- int rc = -EINVAL;
+ int rc = 0;
down_write(&c->xattr_sem);
BUG_ON(!ref->node);
+ if (ref->node != raw)
+ goto out;
+ if (is_xattr_ref_dead(ref) && (raw->next_in_ino == (void *)ref))
+ goto out;
+
old_ofs = ref_offset(ref->node);
totlen = ref_totlen(c, c->gcblock, ref->node);
- if (totlen != sizeof(struct jffs2_raw_xref))
- goto out;
rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
- if (rc || length < totlen) {
- JFFS2_WARNING("%s: jffs2_reserve_space() = %d, request = %u\n",
+ if (rc) {
+ JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n",
__FUNCTION__, rc, totlen);
rc = rc ? rc : -EBADFD;
goto out;
@@ -1212,6 +1259,8 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
dbg_xattr("xref (ino=%u, xid=%u) GC'ed from %#08x to %08x\n",
ref->ic->ino, ref->xd->xid, old_ofs, ref_offset(ref->node));
out:
+ if (!rc)
+ jffs2_mark_node_obsolete(c, raw);
up_write(&c->xattr_sem);
return rc;
}
@@ -1219,20 +1268,59 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
int jffs2_verify_xattr(struct jffs2_sb_info *c)
{
struct jffs2_xattr_datum *xd, *_xd;
+ struct jffs2_eraseblock *jeb;
+ struct jffs2_raw_node_ref *raw;
+ uint32_t totlen;
int rc;
down_write(&c->xattr_sem);
list_for_each_entry_safe(xd, _xd, &c->xattr_unchecked, xindex) {
rc = do_verify_xattr_datum(c, xd);
- if (rc == 0) {
- list_del_init(&xd->xindex);
- break;
- } else if (rc > 0) {
- list_del_init(&xd->xindex);
- delete_xattr_datum_node(c, xd);
+ if (rc < 0)
+ continue;
+ list_del_init(&xd->xindex);
+ spin_lock(&c->erase_completion_lock);
+ for (raw=xd->node; raw != (void *)xd; raw=raw->next_in_ino) {
+ if (ref_flags(raw) != REF_UNCHECKED)
+ continue;
+ jeb = &c->blocks[ref_offset(raw) / c->sector_size];
+ totlen = PAD(ref_totlen(c, jeb, raw));
+ c->unchecked_size -= totlen; c->used_size += totlen;
+ jeb->unchecked_size -= totlen; jeb->used_size += totlen;
+ raw->flash_offset = ref_offset(raw)
+ | ((xd->node == (void *)raw) ? REF_PRISTINE : REF_NORMAL);
}
+ if (xd->flags & JFFS2_XFLAGS_DEAD)
+ list_add(&xd->xindex, &c->xattr_dead_list);
+ spin_unlock(&c->erase_completion_lock);
}
up_write(&c->xattr_sem);
-
return list_empty(&c->xattr_unchecked) ? 1 : 0;
}
+
+void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
+{
+ /* must be called under spin_lock(&c->erase_completion_lock) */
+ if (atomic_read(&xd->refcnt) || xd->node != (void *)xd)
+ return;
+
+ list_del(&xd->xindex);
+ jffs2_free_xattr_datum(xd);
+}
+
+void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
+{
+ /* must be called under spin_lock(&c->erase_completion_lock) */
+ struct jffs2_xattr_ref *tmp, **ptmp;
+
+ if (ref->node != (void *)ref)
+ return;
+
+ for (tmp=c->xref_dead_list, ptmp=&c->xref_dead_list; tmp; ptmp=&tmp->next, tmp=tmp->next) {
+ if (ref == tmp) {
+ *ptmp = tmp->next;
+ break;
+ }
+ }
+ jffs2_free_xattr_ref(ref);
+}
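
Much of this file's rework hinges on xd->refcnt becoming an atomic_t, so whichever path drops the last reference (a deleted xref, an unlinked inode, or an error path) is the single one that marks the xdatum dead. A standalone C11 model of that atomic_dec_and_test()-style hand-off, using userspace atomics and invented names:

	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct xdatum_rc {
		atomic_int refcnt;
		int xid;
	};

	static struct xdatum_rc *xdatum_get_new(int xid)
	{
		struct xdatum_rc *xd = malloc(sizeof(*xd));
		assert(xd);
		atomic_init(&xd->refcnt, 1);
		xd->xid = xid;
		return xd;
	}

	/* Mirrors atomic_dec_and_test(): only the caller that drops the last
	 * reference sees the old value 1, so exactly one path deletes. */
	static void xdatum_put(struct xdatum_rc *xd)
	{
		if (atomic_fetch_sub(&xd->refcnt, 1) == 1) {
			printf("last ref gone, deleting xdatum %d\n", xd->xid);
			free(xd);
		}
	}

	int main(void)
	{
		struct xdatum_rc *xd = xdatum_get_new(7);

		atomic_fetch_add(&xd->refcnt, 1);	/* second xref takes a ref */
		xdatum_put(xd);				/* first xref dropped */
		xdatum_put(xd);				/* second dropped: frees */
		return 0;
	}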
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 2c199856c58256..06a5c69dcf8b10 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -16,6 +16,8 @@
#define JFFS2_XFLAGS_HOT (0x01) /* This datum is HOT */
#define JFFS2_XFLAGS_BIND (0x02) /* This datum is not reclaimed */
+#define JFFS2_XFLAGS_DEAD (0x40) /* This datum is already dead */
+#define JFFS2_XFLAGS_INVALID (0x80) /* This datum contains crc error */
struct jffs2_xattr_datum
{
@@ -23,10 +25,10 @@ struct jffs2_xattr_datum
struct jffs2_raw_node_ref *node;
uint8_t class;
uint8_t flags;
- uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
+ uint16_t xprefix; /* see JFFS2_XATTR_PREFIX_* */
struct list_head xindex; /* chained from c->xattrindex[n] */
- uint32_t refcnt; /* # of xattr_ref refers this */
+ atomic_t refcnt; /* # of xattr_ref refers this */
uint32_t xid;
uint32_t version;
@@ -47,6 +49,7 @@ struct jffs2_xattr_ref
uint8_t flags; /* Currently unused */
u16 unused;
+ uint32_t xseqno;
union {
struct jffs2_inode_cache *ic; /* reference to jffs2_inode_cache */
uint32_t ino; /* only used in scanning/building */
@@ -58,6 +61,12 @@ struct jffs2_xattr_ref
struct jffs2_xattr_ref *next; /* chained from ic->xref_list */
};
+#define XREF_DELETE_MARKER (0x00000001)
+static inline int is_xattr_ref_dead(struct jffs2_xattr_ref *ref)
+{
+ return ((ref->xseqno & XREF_DELETE_MARKER) != 0);
+}
+
#ifdef CONFIG_JFFS2_FS_XATTR
extern void jffs2_init_xattr_subsystem(struct jffs2_sb_info *c);
@@ -70,9 +79,13 @@ extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c
extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
-extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
-extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
+extern int jffs2_garbage_collect_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd,
+ struct jffs2_raw_node_ref *raw);
+extern int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref,
+ struct jffs2_raw_node_ref *raw);
extern int jffs2_verify_xattr(struct jffs2_sb_info *c);
+extern void jffs2_release_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd);
+extern void jffs2_release_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref);
extern int do_jffs2_getxattr(struct inode *inode, int xprefix, const char *xname,
char *buffer, size_t size);
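
XREF_DELETE_MARKER lives in bit 0 of xseqno, and save_xattr_ref() above bumps c->highest_xseqno in steps of two so live sequence numbers never collide with the marker; the scan path strips the marker before updating the high-water mark. A standalone sketch of that encode/decode scheme (invented names, not JFFS2 code):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define DELETE_MARKER 0x00000001u	/* low bit doubles as the tombstone flag */

	static uint32_t highest_xseqno;

	static uint32_t new_xseqno(int dead)
	{
		uint32_t x = (highest_xseqno += 2);	/* even steps keep bit 0 free */
		return dead ? (x | DELETE_MARKER) : x;
	}

	static int is_dead(uint32_t xseqno)
	{
		return (xseqno & DELETE_MARKER) != 0;
	}

	int main(void)
	{
		uint32_t live = new_xseqno(0);
		uint32_t dead = new_xseqno(1);

		assert(!is_dead(live));
		assert(is_dead(dead));
		/* only the marker-free part feeds the high-water mark on scan */
		printf("live=%u dead=%u highest=%u\n",
		       live, dead, dead & ~DELETE_MARKER);
		return 0;
	}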
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 402005c35ab3fe..8ca9707be6c9d0 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -909,7 +909,7 @@ int __init nfs_init_directcache(void)
* nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
*
*/
-void __exit nfs_destroy_directcache(void)
+void nfs_destroy_directcache(void)
{
if (kmem_cache_destroy(nfs_direct_cachep))
printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 51bc88b662feab..c5b916605fb012 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1132,7 +1132,7 @@ static int __init nfs_init_inodecache(void)
return 0;
}
-static void __exit nfs_destroy_inodecache(void)
+static void nfs_destroy_inodecache(void)
{
if (kmem_cache_destroy(nfs_inode_cachep))
printk(KERN_INFO "nfs_inode_cache: not all structures were freed\n");
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index bd2815e2dec1b9..4fe51c1292bb76 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,15 +31,15 @@ extern struct svc_version nfs4_callback_version1;
/* pagelist.c */
extern int __init nfs_init_nfspagecache(void);
-extern void __exit nfs_destroy_nfspagecache(void);
+extern void nfs_destroy_nfspagecache(void);
extern int __init nfs_init_readpagecache(void);
-extern void __exit nfs_destroy_readpagecache(void);
+extern void nfs_destroy_readpagecache(void);
extern int __init nfs_init_writepagecache(void);
-extern void __exit nfs_destroy_writepagecache(void);
+extern void nfs_destroy_writepagecache(void);
#ifdef CONFIG_NFS_DIRECTIO
extern int __init nfs_init_directcache(void);
-extern void __exit nfs_destroy_directcache(void);
+extern void nfs_destroy_directcache(void);
#else
#define nfs_init_directcache() (0)
#define nfs_destroy_directcache() do {} while(0)
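
The __exit annotation is dropped from these destroy helpers because they are also reachable from __init error paths, and __exit code can be discarded when the code is built in. A standalone model of the rollback pattern that requires the teardown helpers to stay callable from the setup path (invented names, plain userspace C):

	#include <stdio.h>
	#include <stdlib.h>

	static void *cache_a, *cache_b;

	static void destroy_a(void) { free(cache_a); cache_a = NULL; }
	static void destroy_b(void) { free(cache_b); cache_b = NULL; }

	/* A later allocation failure rolls back the earlier ones, so the
	 * destroy helpers must remain available to the setup path. */
	static int setup(void)
	{
		cache_a = malloc(64);
		if (!cache_a)
			return -1;
		cache_b = malloc(64);
		if (!cache_b) {
			destroy_a();	/* error path reuses the destroy helper */
			return -1;
		}
		return 0;
	}

	int main(void)
	{
		if (setup())
			return 1;
		destroy_b();
		destroy_a();
		puts("set up and torn down");
		return 0;
	}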
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ef9429643ebcc8..d89f6fb3b3a351 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -390,7 +390,7 @@ int __init nfs_init_nfspagecache(void)
return 0;
}
-void __exit nfs_destroy_nfspagecache(void)
+void nfs_destroy_nfspagecache(void)
{
if (kmem_cache_destroy(nfs_page_cachep))
printk(KERN_INFO "nfs_page: not all structures were freed\n");
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 41c2ffee24f566..32cf3773af0cdc 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -711,7 +711,7 @@ int __init nfs_init_readpagecache(void)
return 0;
}
-void __exit nfs_destroy_readpagecache(void)
+void nfs_destroy_readpagecache(void)
{
mempool_destroy(nfs_rdata_mempool);
if (kmem_cache_destroy(nfs_rdata_cachep))
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index b383fdd3a15c1a..8fccb9cb173ba9 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1551,7 +1551,7 @@ int __init nfs_init_writepagecache(void)
return 0;
}
-void __exit nfs_destroy_writepagecache(void)
+void nfs_destroy_writepagecache(void)
{
mempool_destroy(nfs_commit_mempool);
mempool_destroy(nfs_wdata_mempool);
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 1630b5670dc265..7c7d01672d35a4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -123,7 +123,7 @@ static void release_stateid(struct nfs4_stateid *stp, int flags);
*/
/* recall_lock protects the del_recall_lru */
-static spinlock_t recall_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(recall_lock);
static struct list_head del_recall_lru;
static void
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 21f38accd0399d..1d26cfcd9f8406 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -54,7 +54,7 @@ static DECLARE_RWSEM(o2hb_callback_sem);
* multiple hb threads are watching multiple regions. A node is live
* whenever any of the threads sees activity from the node in its region.
*/
-static spinlock_t o2hb_live_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(o2hb_live_lock);
static struct list_head o2hb_live_slots[O2NM_MAX_NODES];
static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 0f60cc0d3985d9..1591eb37a72366 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -108,7 +108,7 @@
##args); \
} while (0)
-static rwlock_t o2net_handler_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(o2net_handler_lock);
static struct rb_root o2net_handler_tree = RB_ROOT;
static struct o2net_node o2net_nodes[O2NM_MAX_NODES];
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index ba27c5c5e95939..b8c23f7ba67e1c 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -88,7 +88,7 @@ out_free:
*
*/
-spinlock_t dlm_domain_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index d6f89577e25f1b..5ca57ec650c776 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -53,7 +53,7 @@
#define MLOG_MASK_PREFIX ML_DLM
#include "cluster/masklog.h"
-static spinlock_t dlm_cookie_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dlm_cookie_lock);
static u64 dlm_next_cookie = 1;
static enum dlm_status dlm_send_remote_lock_request(struct dlm_ctxt *dlm,
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index da399013516ffe..29b2845f370d85 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -98,8 +98,8 @@ static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data);
static u64 dlm_get_next_mig_cookie(void);
-static spinlock_t dlm_reco_state_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t dlm_mig_cookie_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(dlm_reco_state_lock);
+static DEFINE_SPINLOCK(dlm_mig_cookie_lock);
static u64 dlm_mig_cookie = 1;
static u64 dlm_get_next_mig_cookie(void)
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 64cd52860c8769..4acd37286bdd7c 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -242,7 +242,7 @@ static void ocfs2_build_lock_name(enum ocfs2_lock_type type,
mlog_exit_void();
}
-static spinlock_t ocfs2_dlm_tracking_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(ocfs2_dlm_tracking_lock);
static void ocfs2_add_lockres_tracking(struct ocfs2_lock_res *res,
struct ocfs2_dlm_debug *dlm_debug)
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 3fe8781c22cb68..910a601b2e9820 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -49,7 +49,7 @@
#include "buffer_head_io.h"
-spinlock_t trans_inc_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(trans_inc_lock);
static int ocfs2_force_read_journal(struct inode *inode);
static int ocfs2_recover_node(struct ocfs2_super *osb,
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c
index ee42765a8553e9..cf70fe2075b8f5 100644
--- a/fs/ocfs2/vote.c
+++ b/fs/ocfs2/vote.c
@@ -988,9 +988,7 @@ int ocfs2_request_mount_vote(struct ocfs2_super *osb)
}
bail:
- if (request)
- kfree(request);
-
+ kfree(request);
return status;
}
@@ -1021,9 +1019,7 @@ int ocfs2_request_umount_vote(struct ocfs2_super *osb)
}
bail:
- if (request)
- kfree(request);
-
+ kfree(request);
return status;
}
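
Editor's note: the two ocfs2 vote cleanups above rely on kfree(NULL) being a no-op, so the "if (request)" guard adds nothing. A small hypothetical sketch of the same pattern:

#include <linux/slab.h>

static int do_request(void)
{
        void *request = kmalloc(128, GFP_KERNEL);
        int status = request ? 0 : -ENOMEM;

        /* ... use request only when the allocation succeeded ... */

        kfree(request);         /* safe even if request is NULL */
        return status;
}
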
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 0137ec4c136888..0a163a4f776405 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -122,6 +122,11 @@ struct mem_size_stats
unsigned long private_dirty;
};
+__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats *mss)
{
struct proc_maps_private *priv = m->private;
@@ -158,22 +163,23 @@ static int show_map_internal(struct seq_file *m, void *v, struct mem_size_stats
pad_len_spaces(m, len);
seq_path(m, file->f_vfsmnt, file->f_dentry, "\n");
} else {
- if (mm) {
- if (vma->vm_start <= mm->start_brk &&
+ const char *name = arch_vma_name(vma);
+ if (!name) {
+ if (mm) {
+ if (vma->vm_start <= mm->start_brk &&
vma->vm_end >= mm->brk) {
- pad_len_spaces(m, len);
- seq_puts(m, "[heap]");
- } else {
- if (vma->vm_start <= mm->start_stack &&
- vma->vm_end >= mm->start_stack) {
-
- pad_len_spaces(m, len);
- seq_puts(m, "[stack]");
+ name = "[heap]";
+ } else if (vma->vm_start <= mm->start_stack &&
+ vma->vm_end >= mm->start_stack) {
+ name = "[stack]";
}
+ } else {
+ name = "[vdso]";
}
- } else {
+ }
+ if (name) {
pad_len_spaces(m, len);
- seq_puts(m, "[vdso]");
+ seq_puts(m, name);
}
}
seq_putc(m, '\n');
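
Editor's note: the weak arch_vma_name() above gives show_map_internal() a default that returns NULL; an architecture that wants to name a special mapping provides a strong definition and the linker prefers it. A hedged sketch of the override side, assuming the i386 mm_context_t.vdso field added elsewhere in this series:

#include <linux/mm.h>

/*
 * Hypothetical architecture-side override: a strong definition here wins
 * over the weak default in fs/proc/task_mmu.c, so the vDSO mapping gets a
 * readable name in /proc/<pid>/maps.
 */
const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (unsigned long)vma->vm_mm->context.vdso)
                return "[vdso]";
        return NULL;
}
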
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index f2dbdf5a876976..259bd196099d5c 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -605,39 +605,12 @@ static void ufs_set_inode_ops(struct inode *inode)
ufs_get_inode_dev(inode->i_sb, UFS_I(inode)));
}
-void ufs_read_inode (struct inode * inode)
+static void ufs1_read_inode(struct inode *inode, struct ufs_inode *ufs_inode)
{
struct ufs_inode_info *ufsi = UFS_I(inode);
- struct super_block * sb;
- struct ufs_sb_private_info * uspi;
- struct ufs_inode * ufs_inode;
- struct ufs2_inode *ufs2_inode;
- struct buffer_head * bh;
+ struct super_block *sb = inode->i_sb;
mode_t mode;
unsigned i;
- unsigned flags;
-
- UFSD("ENTER, ino %lu\n", inode->i_ino);
-
- sb = inode->i_sb;
- uspi = UFS_SB(sb)->s_uspi;
- flags = UFS_SB(sb)->s_flags;
-
- if (inode->i_ino < UFS_ROOTINO ||
- inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
- ufs_warning (sb, "ufs_read_inode", "bad inode number (%lu)\n", inode->i_ino);
- goto bad_inode;
- }
-
- bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
- if (!bh) {
- ufs_warning (sb, "ufs_read_inode", "unable to read inode %lu\n", inode->i_ino);
- goto bad_inode;
- }
- if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2)
- goto ufs2_inode;
-
- ufs_inode = (struct ufs_inode *) (bh->b_data + sizeof(struct ufs_inode) * ufs_inotofsbo(inode->i_ino));
/*
* Copy data to the in-core inode.
@@ -661,14 +634,11 @@ void ufs_read_inode (struct inode * inode)
inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
inode->i_blocks = fs32_to_cpu(sb, ufs_inode->ui_blocks);
- inode->i_blksize = PAGE_SIZE; /* This is the optimal IO size (for stat) */
- inode->i_version++;
ufsi->i_flags = fs32_to_cpu(sb, ufs_inode->ui_flags);
ufsi->i_gen = fs32_to_cpu(sb, ufs_inode->ui_gen);
ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
- ufsi->i_lastfrag = (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
- ufsi->i_dir_start_lookup = 0;
+
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
@@ -677,24 +647,16 @@ void ufs_read_inode (struct inode * inode)
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
ufsi->i_u1.i_symlink[i] = ufs_inode->ui_u2.ui_symlink[i];
}
- ufsi->i_osync = 0;
-
- ufs_set_inode_ops(inode);
-
- brelse (bh);
-
- UFSD("EXIT\n");
- return;
+}
-bad_inode:
- make_bad_inode(inode);
- return;
+static void ufs2_read_inode(struct inode *inode, struct ufs2_inode *ufs2_inode)
+{
+ struct ufs_inode_info *ufsi = UFS_I(inode);
+ struct super_block *sb = inode->i_sb;
+ mode_t mode;
+ unsigned i;
-ufs2_inode :
UFSD("Reading ufs2 inode, ino %lu\n", inode->i_ino);
-
- ufs2_inode = (struct ufs2_inode *)(bh->b_data + sizeof(struct ufs2_inode) * ufs_inotofsbo(inode->i_ino));
-
/*
* Copy data to the in-core inode.
*/
@@ -717,26 +679,64 @@ ufs2_inode :
inode->i_atime.tv_nsec = 0;
inode->i_ctime.tv_nsec = 0;
inode->i_blocks = fs64_to_cpu(sb, ufs2_inode->ui_blocks);
- inode->i_blksize = PAGE_SIZE; /*This is the optimal IO size(for stat)*/
-
- inode->i_version++;
ufsi->i_flags = fs32_to_cpu(sb, ufs2_inode->ui_flags);
ufsi->i_gen = fs32_to_cpu(sb, ufs2_inode->ui_gen);
/*
ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow);
ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag);
*/
- ufsi->i_lastfrag= (inode->i_size + uspi->s_fsize- 1) >> uspi->s_fshift;
if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR); i++)
ufsi->i_u1.u2_i_data[i] =
ufs2_inode->ui_u2.ui_addr.ui_db[i];
- }
- else {
+ } else {
for (i = 0; i < (UFS_NDADDR + UFS_NINDIR) * 4; i++)
ufsi->i_u1.i_symlink[i] = ufs2_inode->ui_u2.ui_symlink[i];
}
+}
+
+void ufs_read_inode(struct inode * inode)
+{
+ struct ufs_inode_info *ufsi = UFS_I(inode);
+ struct super_block * sb;
+ struct ufs_sb_private_info * uspi;
+ struct buffer_head * bh;
+
+ UFSD("ENTER, ino %lu\n", inode->i_ino);
+
+ sb = inode->i_sb;
+ uspi = UFS_SB(sb)->s_uspi;
+
+ if (inode->i_ino < UFS_ROOTINO ||
+ inode->i_ino > (uspi->s_ncg * uspi->s_ipg)) {
+ ufs_warning(sb, "ufs_read_inode", "bad inode number (%lu)\n",
+ inode->i_ino);
+ goto bad_inode;
+ }
+
+ bh = sb_bread(sb, uspi->s_sbbase + ufs_inotofsba(inode->i_ino));
+ if (!bh) {
+ ufs_warning(sb, "ufs_read_inode", "unable to read inode %lu\n",
+ inode->i_ino);
+ goto bad_inode;
+ }
+ if ((UFS_SB(sb)->s_flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+ struct ufs2_inode *ufs2_inode = (struct ufs2_inode *)bh->b_data;
+
+ ufs2_read_inode(inode,
+ ufs2_inode + ufs_inotofsbo(inode->i_ino));
+ } else {
+ struct ufs_inode *ufs_inode = (struct ufs_inode *)bh->b_data;
+
+ ufs1_read_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino));
+ }
+
+ inode->i_blksize = PAGE_SIZE;/*This is the optimal IO size (for stat)*/
+ inode->i_version++;
+ ufsi->i_lastfrag =
+ (inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift;
+ ufsi->i_dir_start_lookup = 0;
ufsi->i_osync = 0;
ufs_set_inode_ops(inode);
@@ -745,6 +745,9 @@ ufs2_inode :
UFSD("EXIT\n");
return;
+
+bad_inode:
+ make_bad_inode(inode);
}
static int ufs_update_inode(struct inode * inode, int do_sync)
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 12810baeb5d4ad..d9180020de6328 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -419,16 +419,15 @@ xfs_vn_link(
int error;
ip = old_dentry->d_inode; /* inode being linked to */
- if (S_ISDIR(ip->i_mode))
- return -EPERM;
-
tdvp = vn_from_inode(dir);
vp = vn_from_inode(ip);
+ VN_HOLD(vp);
error = bhv_vop_link(tdvp, vp, dentry, NULL);
- if (likely(!error)) {
+ if (unlikely(error)) {
+ VN_RELE(vp);
+ } else {
VMODIFY(tdvp);
- VN_HOLD(vp);
xfs_validate_fields(ip, &vattr);
d_instantiate(dentry, ip);
}
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index aa26ab906c88cd..028eb17ec2edd8 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -140,9 +140,7 @@ BUFFER_FNS(PrivateStart, unwritten);
#define current_pid() (current->pid)
#define current_fsuid(cred) (current->fsuid)
#define current_fsgid(cred) (current->fsgid)
-#define current_set_flags(f) (current->flags |= (f))
#define current_test_flags(f) (current->flags & (f))
-#define current_clear_flags(f) (current->flags & ~(f))
#define current_set_flags_nested(sp, f) \
(*(sp) = current->flags, current->flags |= (f))
#define current_clear_flags_nested(sp, f) \
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 35c6a01963a77c..c42b3221b20cb9 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -93,7 +93,7 @@ typedef enum {
*/
static inline struct bhv_vnode *vn_from_inode(struct inode *inode)
{
- return (bhv_vnode_t *)list_entry(inode, bhv_vnode_t, v_inode);
+ return container_of(inode, bhv_vnode_t, v_inode);
}
static inline struct inode *vn_to_inode(struct bhv_vnode *vnode)
{
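
Editor's note: container_of() states the intent of vn_from_inode() directly -- given a pointer to the embedded v_inode member, recover the enclosing bhv_vnode. The same pattern works for any embedded structure; a generic sketch with hypothetical types:

#include <linux/kernel.h>       /* container_of() */
#include <linux/fs.h>           /* struct inode */

/* hypothetical wrapper with an embedded inode, mirroring bhv_vnode */
struct wrapper {
        int cookie;
        struct inode w_inode;
};

static struct wrapper *wrapper_from_inode(struct inode *inode)
{
        /* subtracts offsetof(struct wrapper, w_inode) from the member pointer */
        return container_of(inode, struct wrapper, w_inode);
}
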
diff --git a/fs/xfs/xfs_behavior.h b/fs/xfs/xfs_behavior.h
index 1d8ff103201c3b..6e6e56fb352d2f 100644
--- a/fs/xfs/xfs_behavior.h
+++ b/fs/xfs/xfs_behavior.h
@@ -78,15 +78,12 @@
*
*/
-struct bhv_head_lock;
-
/*
* Behavior head. Head of the chain of behaviors.
* Contained within each virtualized object data structure.
*/
typedef struct bhv_head {
struct bhv_desc *bh_first; /* first behavior in chain */
- struct bhv_head_lock *bh_lockp; /* pointer to lock info struct */
} bhv_head_t;
/*
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5fa0adb7e1737a..86c1bf0bba9ea1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1961,9 +1961,9 @@ xfs_iunlink_remove(
xfs_agino_t agino;
xfs_agino_t next_agino;
xfs_buf_t *last_ibp;
- xfs_dinode_t *last_dip;
+ xfs_dinode_t *last_dip = NULL;
short bucket_index;
- int offset, last_offset;
+ int offset, last_offset = 0;
int error;
int agi_ok;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index d8f5d4cbe8b7b7..e730328636c312 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1740,10 +1740,10 @@ xlog_write(xfs_mount_t * mp,
xlog_in_core_t **commit_iclog,
uint flags)
{
- xlog_t *log = mp->m_log;
+ xlog_t *log = mp->m_log;
xlog_ticket_t *ticket = (xlog_ticket_t *)tic;
+ xlog_in_core_t *iclog = NULL; /* ptr to current in-core log */
xlog_op_header_t *logop_head; /* ptr to log operation header */
- xlog_in_core_t *iclog; /* ptr to current in-core log */
__psint_t ptr; /* copy address into data region */
int len; /* # xlog_write() bytes 2 still copy */
int index; /* region index currently copying */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 55b4237c215397..3cb678e3a13299 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -990,6 +990,8 @@ xlog_find_zeroed(
xfs_daddr_t num_scan_bblks;
int error, log_bbnum = log->l_logBBsize;
+ *blk_no = 0;
+
/* check totally zeroed log */
bp = xlog_get_bp(log, 1);
if (!bp)
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 10dbf203c62f6f..4be5c0b2d296b2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -1721,15 +1721,14 @@ xfs_mount_log_sbunit(
* is present to prevent thrashing).
*/
+#ifdef CONFIG_HOTPLUG_CPU
/*
* hot-plug CPU notifier support.
*
- * We cannot use the hotcpu_register() function because it does
- * not allow notifier instances. We need a notifier per filesystem
- * as we need to be able to identify the filesystem to balance
- * the counters out. This is achieved by having a notifier block
- * embedded in the xfs_mount_t and doing pointer magic to get the
- * mount pointer from the notifier block address.
+ * We need a notifier per filesystem as we need to be able to identify
+ * the filesystem to balance the counters out. This is achieved by
+ * having a notifier block embedded in the xfs_mount_t and doing pointer
+ * magic to get the mount pointer from the notifier block address.
*/
STATIC int
xfs_icsb_cpu_notify(
@@ -1779,6 +1778,7 @@ xfs_icsb_cpu_notify(
return NOTIFY_OK;
}
+#endif /* CONFIG_HOTPLUG_CPU */
int
xfs_icsb_init_counters(
@@ -1791,9 +1791,11 @@ xfs_icsb_init_counters(
if (mp->m_sb_cnts == NULL)
return -ENOMEM;
+#ifdef CONFIG_HOTPLUG_CPU
mp->m_icsb_notifier.notifier_call = xfs_icsb_cpu_notify;
mp->m_icsb_notifier.priority = 0;
- register_cpu_notifier(&mp->m_icsb_notifier);
+ register_hotcpu_notifier(&mp->m_icsb_notifier);
+#endif /* CONFIG_HOTPLUG_CPU */
for_each_online_cpu(i) {
cntp = (xfs_icsb_cnts_t *)per_cpu_ptr(mp->m_sb_cnts, i);
@@ -1812,7 +1814,7 @@ xfs_icsb_destroy_counters(
xfs_mount_t *mp)
{
if (mp->m_sb_cnts) {
- unregister_cpu_notifier(&mp->m_icsb_notifier);
+ unregister_hotcpu_notifier(&mp->m_icsb_notifier);
free_percpu(mp->m_sb_cnts);
}
}
@@ -2026,7 +2028,7 @@ xfs_icsb_balance_counter(
xfs_sb_field_t field,
int flags)
{
- uint64_t count, resid = 0;
+ uint64_t count, resid;
int weight = num_online_cpus();
int s;
@@ -2058,6 +2060,7 @@ xfs_icsb_balance_counter(
break;
default:
BUG();
+ count = resid = 0; /* quiet, gcc */
break;
}
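
Editor's note: as the reworded comment above says, the notifier block is embedded in the xfs_mount_t so the callback can recover the mount by pointer arithmetic, and the register_hotcpu_notifier()/unregister_hotcpu_notifier() helpers added to <linux/cpu.h> in this series compile away when they are not needed. A hedged sketch of the pattern with hypothetical names:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>

struct my_fs {
        struct notifier_block cpu_nb;
        /* per-cpu counters, lock, ... */
};

static void my_fs_rebalance(struct my_fs *fs, unsigned long cpu)
{
        /* fold or redistribute the filesystem's per-cpu counters for @cpu */
}

static int my_fs_cpu_notify(struct notifier_block *nb,
                            unsigned long action, void *hcpu)
{
        /* recover the filesystem from the embedded notifier block */
        struct my_fs *fs = container_of(nb, struct my_fs, cpu_nb);

        if (action == CPU_ONLINE || action == CPU_DEAD)
                my_fs_rebalance(fs, (unsigned long)hcpu);
        return NOTIFY_OK;
}

static void my_fs_start_notifier(struct my_fs *fs)
{
        fs->cpu_nb.notifier_call = my_fs_cpu_notify;
        fs->cpu_nb.priority = 0;
        register_hotcpu_notifier(&fs->cpu_nb); /* expands to nothing on !SMP */
}
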
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 0c1e42b037efe6..5a0b678956e0e3 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1929,7 +1929,7 @@ xfs_growfs_rt(
/*
* Initial error checking.
*/
- if (mp->m_rtdev_targp || mp->m_rbmip == NULL ||
+ if (mp->m_rtdev_targp == NULL || mp->m_rbmip == NULL ||
(nrblocks = in->newblocks) <= sbp->sb_rblocks ||
(sbp->sb_rblocks && (in->extsize != sbp->sb_rextsize)))
return XFS_ERROR(EINVAL);
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index cb65c3a603f56a..9dc88b380608e6 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -338,8 +338,6 @@ typedef void (*xfs_trans_callback_t)(struct xfs_trans *, void *);
typedef struct xfs_trans {
unsigned int t_magic; /* magic number */
xfs_log_callback_t t_logcb; /* log callback struct */
- struct xfs_trans *t_forw; /* async list pointers */
- struct xfs_trans *t_back; /* async list pointers */
unsigned int t_type; /* transaction type */
unsigned int t_log_res; /* amt of log space resvd */
unsigned int t_log_count; /* count for perm log res */
@@ -364,9 +362,11 @@ typedef struct xfs_trans {
long t_res_fdblocks_delta; /* on-disk only chg */
long t_frextents_delta;/* superblock freextents chg*/
long t_res_frextents_delta; /* on-disk only chg */
+#ifdef DEBUG
long t_ag_freeblks_delta; /* debugging counter */
long t_ag_flist_delta; /* debugging counter */
long t_ag_btree_delta; /* debugging counter */
+#endif
long t_dblocks_delta;/* superblock dblocks change */
long t_agcount_delta;/* superblock agcount change */
long t_imaxpct_delta;/* superblock imaxpct change */
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 00a6b7dc24a0a6..23cfa58377283b 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -2603,8 +2603,7 @@ xfs_link(
vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
target_namelen = VNAMELEN(dentry);
- if (VN_ISDIR(src_vp))
- return XFS_ERROR(EPERM);
+ ASSERT(!VN_ISDIR(src_vp));
sip = xfs_vtoi(src_vp);
tdp = XFS_BHVTOI(target_dir_bdp);
@@ -2699,9 +2698,8 @@ xfs_link(
xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
error = xfs_bumplink(tp, sip);
- if (error) {
+ if (error)
goto abort_return;
- }
/*
* If this is a synchronous mount, make sure that the
@@ -2719,9 +2717,8 @@ xfs_link(
}
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
- if (error) {
+ if (error)
goto std_return;
- }
/* Fall through to std_return with error = 0. */
std_return:
@@ -2742,6 +2739,8 @@ std_return:
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
}
+
+
/*
* xfs_mkdir
*
diff --git a/include/asm-alpha/core_t2.h b/include/asm-alpha/core_t2.h
index dba70c62a16c0d..457c34b6eb0933 100644
--- a/include/asm-alpha/core_t2.h
+++ b/include/asm-alpha/core_t2.h
@@ -435,7 +435,7 @@ static inline void t2_outl(u32 b, unsigned long addr)
set_hae(msb); \
}
-static spinlock_t t2_hae_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(t2_hae_lock);
__EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr)
{
diff --git a/include/asm-arm/arch-s3c2410/regs-nand.h b/include/asm-arm/arch-s3c2410/regs-nand.h
index 7cff235e667aee..c1470c695c3355 100644
--- a/include/asm-arm/arch-s3c2410/regs-nand.h
+++ b/include/asm-arm/arch-s3c2410/regs-nand.h
@@ -39,10 +39,19 @@
#define S3C2440_NFESTAT1 S3C2410_NFREG(0x28)
#define S3C2440_NFMECC0 S3C2410_NFREG(0x2C)
#define S3C2440_NFMECC1 S3C2410_NFREG(0x30)
-#define S3C2440_NFSECC S3C2410_NFREG(0x34)
+#define S3C2440_NFSECC S3C2410_NFREG(0x34)
#define S3C2440_NFSBLK S3C2410_NFREG(0x38)
#define S3C2440_NFEBLK S3C2410_NFREG(0x3C)
+#define S3C2412_NFSBLK S3C2410_NFREG(0x20)
+#define S3C2412_NFEBLK S3C2410_NFREG(0x24)
+#define S3C2412_NFSTAT S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC_ERR0 S3C2410_NFREG(0x2C)
+#define S3C2412_NFMECC_ERR1 S3C2410_NFREG(0x30)
+#define S3C2412_NFMECC0 S3C2410_NFREG(0x34)
+#define S3C2412_NFMECC1 S3C2410_NFREG(0x38)
+#define S3C2412_NFSECC S3C2410_NFREG(0x3C)
+
#define S3C2410_NFCONF_EN (1<<15)
#define S3C2410_NFCONF_512BYTE (1<<14)
#define S3C2410_NFCONF_4STEP (1<<13)
@@ -77,5 +86,42 @@
#define S3C2440_NFSTAT_RnB_CHANGE (1<<2)
#define S3C2440_NFSTAT_ILLEGAL_ACCESS (1<<3)
+#define S3C2412_NFCONF_NANDBOOT (1<<31)
+#define S3C2412_NFCONF_ECCCLKCON (1<<30)
+#define S3C2412_NFCONF_ECC_MLC (1<<24)
+#define S3C2412_NFCONF_TACLS_MASK (7<<12) /* 1 extra bit of Tacls */
+
+#define S3C2412_NFCONT_ECC4_DIRWR (1<<18)
+#define S3C2412_NFCONT_LOCKTIGHT (1<<17)
+#define S3C2412_NFCONT_SOFTLOCK (1<<16)
+#define S3C2412_NFCONT_ECC4_ENCINT (1<<13)
+#define S3C2412_NFCONT_ECC4_DECINT (1<<12)
+#define S3C2412_NFCONT_MAIN_ECC_LOCK (1<<7)
+#define S3C2412_NFCONT_INIT_MAIN_ECC (1<<5)
+#define S3C2412_NFCONT_nFCE1 (1<<2)
+#define S3C2412_NFCONT_nFCE0 (1<<1)
+
+#define S3C2412_NFSTAT_ECC_ENCDONE (1<<7)
+#define S3C2412_NFSTAT_ECC_DECDONE (1<<6)
+#define S3C2412_NFSTAT_ILLEGAL_ACCESS (1<<5)
+#define S3C2412_NFSTAT_RnB_CHANGE (1<<4)
+#define S3C2412_NFSTAT_nFCE1 (1<<3)
+#define S3C2412_NFSTAT_nFCE0 (1<<2)
+#define S3C2412_NFSTAT_Res1 (1<<1)
+#define S3C2412_NFSTAT_READY (1<<0)
+
+#define S3C2412_NFECCERR_SERRDATA(x) (((x) >> 21) & 0xf)
+#define S3C2412_NFECCERR_SERRBIT(x) (((x) >> 18) & 0x7)
+#define S3C2412_NFECCERR_MERRDATA(x) (((x) >> 7) & 0x3ff)
+#define S3C2412_NFECCERR_MERRBIT(x) (((x) >> 4) & 0x7)
+#define S3C2412_NFECCERR_SPARE_ERR(x) (((x) >> 2) & 0x3)
+#define S3C2412_NFECCERR_MAIN_ERR(x) (((x) >> 2) & 0x3)
+#define S3C2412_NFECCERR_NONE (0)
+#define S3C2412_NFECCERR_1BIT (1)
+#define S3C2412_NFECCERR_MULTIBIT (2)
+#define S3C2412_NFECCERR_ECCAREA (3)
+
+
+
#endif /* __ASM_ARM_REGS_NAND */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 845cb67ad8ea5d..8ceab7bcd8b4f2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -51,4 +51,10 @@
__ret; \
})
+#ifdef CONFIG_SMP
+# define WARN_ON_SMP(x) WARN_ON(x)
+#else
+# define WARN_ON_SMP(x) do { } while (0)
+#endif
+
#endif
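
Editor's note: WARN_ON_SMP() lets a caller assert an SMP-only condition without an #ifdef CONFIG_SMP at every call site; on UP builds the check disappears entirely. A small hypothetical use:

#include <linux/kernel.h>
#include <linux/spinlock.h>

/* hypothetical internal helper that must only be called with @lock held */
static void assert_lock_held(spinlock_t *lock)
{
        /* spin_is_locked() is only meaningful on SMP; on UP this compiles away */
        WARN_ON_SMP(!spin_is_locked(lock));
}
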
diff --git a/include/asm-i386/cpu.h b/include/asm-i386/cpu.h
index e7252c216ca81c..b1bc7b1b64b0e3 100644
--- a/include/asm-i386/cpu.h
+++ b/include/asm-i386/cpu.h
@@ -7,8 +7,6 @@
#include <linux/nodemask.h>
#include <linux/percpu.h>
-#include <asm/node.h>
-
struct i386_cpu {
struct cpu cpu;
};
diff --git a/include/asm-i386/elf.h b/include/asm-i386/elf.h
index 4153d80e4d2b86..1eac92cb5b1617 100644
--- a/include/asm-i386/elf.h
+++ b/include/asm-i386/elf.h
@@ -10,6 +10,7 @@
#include <asm/processor.h>
#include <asm/system.h> /* for savesegment */
#include <asm/auxvec.h>
+#include <asm/desc.h>
#include <linux/utsname.h>
@@ -129,15 +130,41 @@ extern int dump_task_extended_fpu (struct task_struct *, struct user_fxsr_struct
#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
#define ELF_CORE_COPY_XFPREGS(tsk, elf_xfpregs) dump_task_extended_fpu(tsk, elf_xfpregs)
-#define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL))
-#define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE)
-#define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall)
+#define VDSO_HIGH_BASE (__fix_to_virt(FIX_VDSO))
+#define VDSO_BASE ((unsigned long)current->mm->context.vdso)
+
+#ifdef CONFIG_COMPAT_VDSO
+# define VDSO_COMPAT_BASE VDSO_HIGH_BASE
+# define VDSO_PRELINK VDSO_HIGH_BASE
+#else
+# define VDSO_COMPAT_BASE VDSO_BASE
+# define VDSO_PRELINK 0
+#endif
+
+#define VDSO_COMPAT_SYM(x) \
+ (VDSO_COMPAT_BASE + (unsigned long)(x) - VDSO_PRELINK)
+
+#define VDSO_SYM(x) \
+ (VDSO_BASE + (unsigned long)(x) - VDSO_PRELINK)
+
+#define VDSO_HIGH_EHDR ((const struct elfhdr *) VDSO_HIGH_BASE)
+#define VDSO_EHDR ((const struct elfhdr *) VDSO_COMPAT_BASE)
+
extern void __kernel_vsyscall;
+#define VDSO_ENTRY VDSO_SYM(&__kernel_vsyscall)
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int executable_stack);
+
+extern unsigned int vdso_enabled;
+
#define ARCH_DLINFO \
-do { \
- NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \
- NEW_AUX_ENT(AT_SYSINFO_EHDR, VSYSCALL_BASE); \
+do if (vdso_enabled) { \
+ NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_COMPAT_BASE); \
} while (0)
/*
@@ -148,15 +175,15 @@ do { \
* Dumping its extra ELF program headers includes all the other information
* a debugger needs to easily find how the vsyscall DSO was being used.
*/
-#define ELF_CORE_EXTRA_PHDRS (VSYSCALL_EHDR->e_phnum)
+#define ELF_CORE_EXTRA_PHDRS (VDSO_HIGH_EHDR->e_phnum)
#define ELF_CORE_WRITE_EXTRA_PHDRS \
do { \
const struct elf_phdr *const vsyscall_phdrs = \
- (const struct elf_phdr *) (VSYSCALL_BASE \
- + VSYSCALL_EHDR->e_phoff); \
+ (const struct elf_phdr *) (VDSO_HIGH_BASE \
+ + VDSO_HIGH_EHDR->e_phoff); \
int i; \
Elf32_Off ofs = 0; \
- for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \
+ for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
struct elf_phdr phdr = vsyscall_phdrs[i]; \
if (phdr.p_type == PT_LOAD) { \
BUG_ON(ofs != 0); \
@@ -174,10 +201,10 @@ do { \
#define ELF_CORE_WRITE_EXTRA_DATA \
do { \
const struct elf_phdr *const vsyscall_phdrs = \
- (const struct elf_phdr *) (VSYSCALL_BASE \
- + VSYSCALL_EHDR->e_phoff); \
+ (const struct elf_phdr *) (VDSO_HIGH_BASE \
+ + VDSO_HIGH_EHDR->e_phoff); \
int i; \
- for (i = 0; i < VSYSCALL_EHDR->e_phnum; ++i) { \
+ for (i = 0; i < VDSO_HIGH_EHDR->e_phnum; ++i) { \
if (vsyscall_phdrs[i].p_type == PT_LOAD) \
DUMP_WRITE((void *) vsyscall_phdrs[i].p_vaddr, \
PAGE_ALIGN(vsyscall_phdrs[i].p_memsz)); \
diff --git a/include/asm-i386/fixmap.h b/include/asm-i386/fixmap.h
index f7e068f4d2f98d..a48cc3f7ccc688 100644
--- a/include/asm-i386/fixmap.h
+++ b/include/asm-i386/fixmap.h
@@ -51,7 +51,7 @@
*/
enum fixed_addresses {
FIX_HOLE,
- FIX_VSYSCALL,
+ FIX_VDSO,
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
@@ -115,14 +115,6 @@ extern void __set_fixmap (enum fixed_addresses idx,
#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-/*
- * This is the range that is readable by user mode, and things
- * acting like user mode such as get_user_pages.
- */
-#define FIXADDR_USER_START (__fix_to_virt(FIX_VSYSCALL))
-#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
-
-
extern void __this_fixmap_does_not_exist(void);
/*
diff --git a/include/asm-i386/mmu.h b/include/asm-i386/mmu.h
index f431a0b86d4c46..8358dd3df7aa22 100644
--- a/include/asm-i386/mmu.h
+++ b/include/asm-i386/mmu.h
@@ -12,6 +12,7 @@ typedef struct {
int size;
struct semaphore sem;
void *ldt;
+ void *vdso;
} mm_context_t;
#endif
diff --git a/include/asm-i386/node.h b/include/asm-i386/node.h
deleted file mode 100644
index e13c6ffa72aec8..00000000000000
--- a/include/asm-i386/node.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef _ASM_I386_NODE_H_
-#define _ASM_I386_NODE_H_
-
-#include <linux/device.h>
-#include <linux/mmzone.h>
-#include <linux/node.h>
-#include <linux/topology.h>
-#include <linux/nodemask.h>
-
-struct i386_node {
- struct node node;
-};
-extern struct i386_node node_devices[MAX_NUMNODES];
-
-static inline int arch_register_node(int num){
- int p_node;
- struct node *parent = NULL;
-
- if (!node_online(num))
- return 0;
- p_node = parent_node(num);
-
- if (p_node != num)
- parent = &node_devices[p_node].node;
-
- return register_node(&node_devices[num].node, num, parent);
-}
-
-#endif /* _ASM_I386_NODE_H_ */
diff --git a/include/asm-i386/page.h b/include/asm-i386/page.h
index e3a552fa553874..f5bf544c729a37 100644
--- a/include/asm-i386/page.h
+++ b/include/asm-i386/page.h
@@ -96,6 +96,8 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#ifndef __ASSEMBLY__
+struct vm_area_struct;
+
/*
* This much address space is reserved for vmalloc() and iomap()
* as well as fixmap mappings.
@@ -139,6 +141,7 @@ extern int page_is_ram(unsigned long pagenr);
#include <asm-generic/memory_model.h>
#include <asm-generic/page.h>
+#define __HAVE_ARCH_GATE_AREA 1
#endif /* __KERNEL__ */
#endif /* _I386_PAGE_H */
diff --git a/include/asm-i386/processor.h b/include/asm-i386/processor.h
index 55ea992da32954..b32346d62e1039 100644
--- a/include/asm-i386/processor.h
+++ b/include/asm-i386/processor.h
@@ -71,8 +71,12 @@ struct cpuinfo_x86 {
cpumask_t llc_shared_map; /* cpus sharing the last level cache */
#endif
unsigned char x86_max_cores; /* cpuid returned max cores value */
- unsigned char booted_cores; /* number of cores as seen by OS */
unsigned char apicid;
+#ifdef CONFIG_SMP
+ unsigned char booted_cores; /* number of cores as seen by OS */
+ __u8 phys_proc_id; /* Physical processor id. */
+ __u8 cpu_core_id; /* Core id */
+#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
@@ -104,8 +108,6 @@ extern struct cpuinfo_x86 cpu_data[];
#define current_cpu_data boot_cpu_data
#endif
-extern int phys_proc_id[NR_CPUS];
-extern int cpu_core_id[NR_CPUS];
extern int cpu_llc_id[NR_CPUS];
extern char ignore_fpu_irq;
diff --git a/include/asm-i386/thread_info.h b/include/asm-i386/thread_info.h
index fdbc7f422ea550..2833fa2c0dd0e6 100644
--- a/include/asm-i386/thread_info.h
+++ b/include/asm-i386/thread_info.h
@@ -37,6 +37,7 @@ struct thread_info {
0-0xBFFFFFFF for user-thread

0-0xFFFFFFFF for kernel-thread
*/
+ void *sysenter_return;
struct restart_block restart_block;
unsigned long previous_esp; /* ESP of the previous stack in case
@@ -83,17 +84,15 @@ struct thread_info {
#define init_stack (init_thread_union.stack)
+/* how to get the current stack pointer from C */
+register unsigned long current_stack_pointer asm("esp") __attribute_used__;
+
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
- struct thread_info *ti;
- __asm__("andl %%esp,%0; ":"=r" (ti) : "0" (~(THREAD_SIZE - 1)));
- return ti;
+ return (struct thread_info *)(current_stack_pointer & ~(THREAD_SIZE - 1));
}
-/* how to get the current stack pointer from C */
-register unsigned long current_stack_pointer asm("esp") __attribute_used__;
-
/* thread information allocation */
#ifdef CONFIG_DEBUG_STACK_USAGE
#define alloc_thread_info(tsk) \
diff --git a/include/asm-i386/topology.h b/include/asm-i386/topology.h
index b94e5eeef917ea..6adbd9b1ae8811 100644
--- a/include/asm-i386/topology.h
+++ b/include/asm-i386/topology.h
@@ -28,10 +28,8 @@
#define _ASM_I386_TOPOLOGY_H
#ifdef CONFIG_X86_HT
-#define topology_physical_package_id(cpu) \
- (phys_proc_id[cpu] == BAD_APICID ? -1 : phys_proc_id[cpu])
-#define topology_core_id(cpu) \
- (cpu_core_id[cpu] == BAD_APICID ? 0 : cpu_core_id[cpu])
+#define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
+#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
#endif
@@ -114,4 +112,9 @@ extern unsigned long node_remap_size[];
extern cpumask_t cpu_coregroup_map(int cpu);
+#ifdef CONFIG_SMP
+#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
+#define smt_capable() (smp_num_siblings > 1)
+#endif
+
#endif /* _ASM_I386_TOPOLOGY_H */
diff --git a/include/asm-i386/unwind.h b/include/asm-i386/unwind.h
index d480f2e38215d6..69f0f1df67220e 100644
--- a/include/asm-i386/unwind.h
+++ b/include/asm-i386/unwind.h
@@ -78,8 +78,8 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
return user_mode_vm(&info->regs);
#else
return info->regs.eip < PAGE_OFFSET
- || (info->regs.eip >= __fix_to_virt(FIX_VSYSCALL)
- && info->regs.eip < __fix_to_virt(FIX_VSYSCALL) + PAGE_SIZE)
+ || (info->regs.eip >= __fix_to_virt(FIX_VDSO)
+ && info->regs.eip < __fix_to_virt(FIX_VDSO) + PAGE_SIZE)
|| info->regs.esp < PAGE_OFFSET;
#endif
}
diff --git a/include/asm-ia64/nodedata.h b/include/asm-ia64/nodedata.h
index a140310bf84dcb..2fb337b0e9b786 100644
--- a/include/asm-ia64/nodedata.h
+++ b/include/asm-ia64/nodedata.h
@@ -46,6 +46,18 @@ struct ia64_node_data {
*/
#define NODE_DATA(nid) (local_node_data->pg_data_ptrs[nid])
+/*
+ * LOCAL_DATA_ADDR - Calculates the address of another node's
+ * "local_node_data" during the hot-plug phase. The local_node_data
+ * is pointed to by per_cpu_page. The kernel normally uses it only
+ * for the currently executing cpu; however, when a new node is
+ * hot-added, the addresses of the local data of the other nodes are
+ * needed so that all of them can be updated.
+ */
+#define LOCAL_DATA_ADDR(pgdat) \
+ ((struct ia64_node_data *)((u64)(pgdat) + \
+ L1_CACHE_ALIGN(sizeof(struct pglist_data))))
+
#endif /* CONFIG_NUMA */
#endif /* _ASM_IA64_NODEDATA_H */
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 616b5ed2aa7277..937c212575239f 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -112,6 +112,7 @@ void build_cpu_to_node_map(void);
#define topology_core_id(cpu) (cpu_data(cpu)->core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define smt_capable() (smp_num_siblings > 1)
#endif
#include <asm-generic/topology.h>
diff --git a/include/asm-m68knommu/ptrace.h b/include/asm-m68knommu/ptrace.h
index 1e19c457de7de9..47258e86e8c4a6 100644
--- a/include/asm-m68knommu/ptrace.h
+++ b/include/asm-m68knommu/ptrace.h
@@ -46,11 +46,9 @@ struct pt_regs {
#else
unsigned short sr;
unsigned long pc;
-#ifndef NO_FORMAT_VEC
unsigned format : 4; /* frame format specifier */
unsigned vector : 12; /* vector offset */
#endif
-#endif
};
/*
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 92f3e5507d224c..bbc3844b086fca 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -93,5 +93,10 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
#endif /* CONFIG_NUMA */
+#ifdef CONFIG_SMP
+#include <asm/cputable.h>
+#define smt_capable() (cpu_has_feature(CPU_FTR_SMT))
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_TOPOLOGY_H */
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 0e234e201bd6b5..98a6c613589dcb 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -1,6 +1,9 @@
#ifndef _ASM_SPARC64_TOPOLOGY_H
#define _ASM_SPARC64_TOPOLOGY_H
+#include <asm/spitfire.h>
+#define smt_capable() (tlb_type == hypervisor)
+
#include <asm-generic/topology.h>
#endif /* _ASM_SPARC64_TOPOLOGY_H */
diff --git a/include/asm-x86_64/hw_irq.h b/include/asm-x86_64/hw_irq.h
index 1b2ac55d3204e9..93187746278847 100644
--- a/include/asm-x86_64/hw_irq.h
+++ b/include/asm-x86_64/hw_irq.h
@@ -124,7 +124,7 @@ asmlinkage void IRQ_NAME(nr); \
__asm__( \
"\n.p2align\n" \
"IRQ" #nr "_interrupt:\n\t" \
- "push $" #nr "-256 ; " \
+ "push $~(" #nr ") ; " \
"jmp common_interrupt");
#if defined(CONFIG_X86_IO_APIC)
diff --git a/include/asm-x86_64/topology.h b/include/asm-x86_64/topology.h
index c4e46e7fa7ba78..6e7a2e976b04fc 100644
--- a/include/asm-x86_64/topology.h
+++ b/include/asm-x86_64/topology.h
@@ -59,6 +59,8 @@ extern int __node_distance(int, int);
#define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
#define topology_core_siblings(cpu) (cpu_core_map[cpu])
#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define mc_capable() (boot_cpu_data.x86_max_cores > 1)
+#define smt_capable() (smp_num_siblings > 1)
#endif
#include <asm-generic/topology.h>
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 90d6df1551ed06..88b5dfd8ee125b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -528,12 +528,18 @@ static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; }
#ifdef CONFIG_ACPI_NUMA
int acpi_get_pxm(acpi_handle handle);
+int acpi_get_node(acpi_handle *handle);
#else
static inline int acpi_get_pxm(acpi_handle handle)
{
return 0;
}
+static inline int acpi_get_node(acpi_handle *handle)
+{
+ return 0;
+}
#endif
+extern int acpi_paddr_to_node(u64 start_addr, u64 size);
extern int pnpacpi_disabled;
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index fb7e9b7ccbe312..737e407d0cd11e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -149,7 +149,6 @@ void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
-void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
@@ -214,6 +213,7 @@ int nobh_truncate_page(struct address_space *, loff_t);
int nobh_writepage(struct page *page, get_block_t *get_block,
struct writeback_control *wbc);
+void buffer_init(void);
/*
* inline definitions
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 08d50c53aab401..a3caf6866bae58 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -31,17 +31,23 @@ struct cpu {
struct sys_device sysdev;
};
-extern int register_cpu(struct cpu *, int, struct node *);
+extern int register_cpu(struct cpu *cpu, int num);
extern struct sys_device *get_cpu_sysdev(unsigned cpu);
#ifdef CONFIG_HOTPLUG_CPU
-extern void unregister_cpu(struct cpu *, struct node *);
+extern void unregister_cpu(struct cpu *cpu);
#endif
struct notifier_block;
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
extern int register_cpu_notifier(struct notifier_block *nb);
+#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu_notifier(struct notifier_block *nb);
+#else
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+#endif
extern int current_in_cpu_hotplug(void);
int cpu_up(unsigned int cpu);
@@ -73,6 +79,8 @@ extern int lock_cpu_hotplug_interruptible(void);
{ .notifier_call = fn, .priority = pri }; \
register_cpu_notifier(&fn##_nb); \
}
+#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
int cpu_down(unsigned int cpu);
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#else
@@ -80,6 +88,8 @@ int cpu_down(unsigned int cpu);
#define unlock_cpu_hotplug() do { } while (0)
#define lock_cpu_hotplug_interruptible() 0
#define hotcpu_notifier(fn, pri)
+#define register_hotcpu_notifier(nb)
+#define unregister_hotcpu_notifier(nb)
/* CPUs don't go offline once they're online w/o CONFIG_HOTPLUG_CPU */
static inline int cpu_is_offline(int cpu) { return 0; }
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 78b236ca04f801..272010a6078a91 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -20,7 +20,7 @@
*/
#ifndef DMAENGINE_H
#define DMAENGINE_H
-#include <linux/config.h>
+
#ifdef CONFIG_DMA_ENGINE
#include <linux/device.h>
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 966a5b3da439b4..34c3a215f2cd9a 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -12,6 +12,9 @@
#define FUTEX_REQUEUE 3
#define FUTEX_CMP_REQUEUE 4
#define FUTEX_WAKE_OP 5
+#define FUTEX_LOCK_PI 6
+#define FUTEX_UNLOCK_PI 7
+#define FUTEX_TRYLOCK_PI 8
/*
* Support for robust futexes: the kernel cleans up held futexes at
@@ -90,18 +93,21 @@ struct robust_list_head {
*/
#define ROBUST_LIST_LIMIT 2048
-long do_futex(unsigned long uaddr, int op, int val,
- unsigned long timeout, unsigned long uaddr2, int val2,
- int val3);
+long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
extern int handle_futex_death(u32 __user *uaddr, struct task_struct *curr);
#ifdef CONFIG_FUTEX
extern void exit_robust_list(struct task_struct *curr);
+extern void exit_pi_state_list(struct task_struct *curr);
#else
static inline void exit_robust_list(struct task_struct *curr)
{
}
+static inline void exit_pi_state_list(struct task_struct *curr)
+{
+}
#endif
#define FUTEX_OP_SET 0 /* *(int *)UADDR2 = OPARG; */
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e127ef7e8da834..3a256957fb56a7 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -87,6 +87,7 @@ extern struct group_info init_groups;
.lock_depth = -1, \
.prio = MAX_PRIO-20, \
.static_prio = MAX_PRIO-20, \
+ .normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
.cpus_allowed = CPU_MASK_ALL, \
.mm = NULL, \
@@ -122,6 +123,8 @@ extern struct group_info init_groups;
.journal_info = NULL, \
.cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
.fs_excl = ATOMIC_INIT(0), \
+ .pi_lock = SPIN_LOCK_UNLOCKED, \
+ INIT_RT_MUTEXES(tsk) \
}
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index cd6bd001ba4edb..edfc733b1575c4 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -105,6 +105,9 @@ extern int allocate_resource(struct resource *root, struct resource *new,
int adjust_resource(struct resource *res, unsigned long start,
unsigned long size);
+/* get registered SYSTEM_RAM resources in specified area */
+extern int find_next_system_ram(struct resource *res);
+
/* Convenience shorthand with allocation */
#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name))
#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name))
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 5653b2f23b6a6f..d09fbeabf1dc97 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -210,11 +210,7 @@ struct kernel_ipmi_msg
#include <linux/list.h>
#include <linux/module.h>
#include <linux/device.h>
-
-#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
-extern struct proc_dir_entry *proc_ipmi_root;
-#endif /* CONFIG_PROC_FS */
/* Opaque type for a IPMI message user. One of these is needed to
send and receive messages. */
diff --git a/include/linux/jffs2.h b/include/linux/jffs2.h
index c6f70660b3716e..c9c760700bc341 100644
--- a/include/linux/jffs2.h
+++ b/include/linux/jffs2.h
@@ -186,6 +186,7 @@ struct jffs2_raw_xref
jint32_t hdr_crc;
jint32_t ino; /* inode number */
jint32_t xid; /* XATTR identifier number */
+ jint32_t xseqno; /* xref sequential number */
jint32_t node_crc;
} __attribute__((packed));
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 20b1cf527c609a..f4284bf8975857 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -30,6 +30,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
+#include <asm/scatterlist.h>
#include <asm/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>
@@ -887,6 +888,9 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
return tag == ATA_MAX_QUEUE - 1;
}
+/*
+ * device helpers
+ */
static inline unsigned int ata_class_enabled(unsigned int class)
{
return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
@@ -917,6 +921,17 @@ static inline unsigned int ata_dev_absent(const struct ata_device *dev)
return ata_class_absent(dev->class);
}
+/*
+ * port helpers
+ */
+static inline int ata_port_max_devices(const struct ata_port *ap)
+{
+ if (ap->flags & ATA_FLAG_SLAVE_POSS)
+ return 2;
+ return 1;
+}
+
+
static inline u8 ata_chk_status(struct ata_port *ap)
{
return ap->ops->check_status(ap);
diff --git a/include/linux/list.h b/include/linux/list.h
index 37ca31b21bb7a2..6b74adf5297f64 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -4,18 +4,11 @@
#ifdef __KERNEL__
#include <linux/stddef.h>
+#include <linux/poison.h>
#include <linux/prefetch.h>
#include <asm/system.h>
/*
- * These are non-NULL pointers that will result in page faults
- * under normal circumstances, used to verify that nobody uses
- * non-initialized list entries.
- */
-#define LIST_POISON1 ((void *) 0x00100100)
-#define LIST_POISON2 ((void *) 0x00200200)
-
-/*
* Simple doubly linked list implementation.
*
* Some of the internal functions ("__xxx") are useful when
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 911206386171cc..218501cfaeb925 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -63,6 +63,76 @@ extern int online_pages(unsigned long, unsigned long);
/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages);
+
+#ifdef CONFIG_NUMA
+extern int memory_add_physaddr_to_nid(u64 start);
+#else
+static inline int memory_add_physaddr_to_nid(u64 start)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
+/*
+ * For supporting node-hotadd, we have to allocate a new pgdat.
+ *
+ * If an arch has generic style NODE_DATA(),
+ * node_data[nid] = kzalloc() works well. But it depends on the architecture.
+ *
+ * In general, generic_alloc_nodedata() is used.
+ * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
+ *
+ */
+extern pg_data_t *arch_alloc_nodedata(int nid);
+extern void arch_free_nodedata(pg_data_t *pgdat);
+extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);
+
+#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
+#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid)
+#define arch_free_nodedata(pgdat) generic_free_nodedata(pgdat)
+
+#ifdef CONFIG_NUMA
+/*
+ * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
+ * XXX: kmalloc_node() cannot be used here to get memory from the new node,
+ * because the pgdat for the new node is not allocated/initialized yet.
+ * Using the new node's own memory will require further work.
+ */
+#define generic_alloc_nodedata(nid) \
+({ \
+ kzalloc(sizeof(pg_data_t), GFP_KERNEL); \
+})
+/*
+ * This definition is just for error path in node hotadd.
+ * For node hotremove, we have to replace this.
+ */
+#define generic_free_nodedata(pgdat) kfree(pgdat)
+
+extern pg_data_t *node_data[];
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+ node_data[nid] = pgdat;
+}
+
+#else /* !CONFIG_NUMA */
+
+/* never called */
+static inline pg_data_t *generic_alloc_nodedata(int nid)
+{
+ BUG();
+ return NULL;
+}
+static inline void generic_free_nodedata(pg_data_t *pgdat)
+{
+}
+static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
+{
+}
+#endif /* CONFIG_NUMA */
+#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
+
#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
* Stub functions for when hotplug is off
@@ -99,7 +169,8 @@ static inline int __remove_pages(struct zone *zone, unsigned long start_pfn,
return -ENOSYS;
}
-extern int add_memory(u64 start, u64 size);
+extern int add_memory(int nid, u64 start, u64 size);
+extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
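
Editor's note: taken together, the helpers above let node hot-add allocate and publish a new pgdat without arch-specific code unless the architecture opts in via CONFIG_HAVE_ARCH_NODEDATA_EXTENSION. A hedged sketch of the pgdat setup step (the function name is illustrative; add_memory() is the real caller):

#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

/* hypothetical sketch of setting up a pgdat for a newly hot-added node */
static pg_data_t *example_hotadd_pgdat(int nid)
{
        pg_data_t *pgdat = arch_alloc_nodedata(nid); /* kzalloc() unless the arch overrides it */

        if (!pgdat)
                return NULL;
        arch_refresh_nodedata(nid, pgdat);      /* publish node_data[nid] on generic NUMA */
        return pgdat;
}
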
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a929ea197e4844..c41a1299b8cf35 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1030,13 +1030,20 @@ static inline void vm_stat_account(struct mm_struct *mm,
}
#endif /* CONFIG_PROC_FS */
+static inline void
+debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+ mutex_debug_check_no_locks_freed(from, len);
+ rt_mutex_debug_check_no_locks_freed(from, len);
+}
+
#ifndef CONFIG_DEBUG_PAGEALLOC
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!PageHighMem(page) && !enable)
- mutex_debug_check_no_locks_freed(page_address(page),
- numpages * PAGE_SIZE);
+ debug_check_no_locks_freed(page_address(page),
+ numpages * PAGE_SIZE);
}
#endif
@@ -1065,5 +1072,7 @@ void drop_slab(void);
extern int randomize_va_space;
#endif
+const char *arch_vma_name(struct vm_area_struct *vma);
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/node.h b/include/linux/node.h
index 254dc3de650b81..81dcec84cd8f00 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -26,8 +26,25 @@ struct node {
struct sys_device sysdev;
};
+extern struct node node_devices[];
+
extern int register_node(struct node *, int, struct node *);
extern void unregister_node(struct node *node);
+extern int register_one_node(int nid);
+extern void unregister_one_node(int nid);
+#ifdef CONFIG_NUMA
+extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
+extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
+#else
+static inline int register_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ return 0;
+}
+static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
+{
+ return 0;
+}
+#endif
#define to_node(sys_device) container_of(sys_device, struct node, sysdev)
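
Editor's note: register_one_node() wraps register_node() plus the CPU-to-node linking declared above; a typical (hypothetical) initialization loop registers every online node:

#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>

/* hypothetical init sketch: create the sysfs entries for every online node */
static int __init example_register_nodes(void)
{
        int nid;

        for_each_online_node(nid)
                register_one_node(nid);
        return 0;
}
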
diff --git a/include/linux/nsc_gpio.h b/include/linux/nsc_gpio.h
new file mode 100644
index 00000000000000..135742cfada54c
--- /dev/null
+++ b/include/linux/nsc_gpio.h
@@ -0,0 +1,42 @@
+/**
+ nsc_gpio.c
+
+ National Semiconductor GPIO common access methods.
+
+ struct nsc_gpio_ops abstracts the low-level access
+ operations for the GPIO units on 2 NSC chip families; the GEODE
+ integrated CPU, and the PC-8736[03456] integrated PC-peripheral
+ chips.
+
+ The GPIO units on these chips have the same pin architecture, but
+ the access methods differ. Thus, scx200_gpio and pc8736x_gpio
+ implement their own versions of these routines and use the common
+ file-operations routines implemented in the nsc_gpio module.
+
+ Copyright (c) 2005 Jim Cromie <jim.cromie@gmail.com>
+
+ NB: this work was tested on the Geode SC-1100 and PC-87366 chips.
+ NSC sold the GEODE line to AMD, and the PC-8736x line to Winbond.
+*/
+
+struct nsc_gpio_ops {
+ struct module* owner;
+ u32 (*gpio_config) (unsigned iminor, u32 mask, u32 bits);
+ void (*gpio_dump) (struct nsc_gpio_ops *amp, unsigned iminor);
+ int (*gpio_get) (unsigned iminor);
+ void (*gpio_set) (unsigned iminor, int state);
+ void (*gpio_set_high)(unsigned iminor);
+ void (*gpio_set_low) (unsigned iminor);
+ void (*gpio_change) (unsigned iminor);
+ int (*gpio_current) (unsigned iminor);
+ struct device* dev; /* for dev_dbg() support, set in init */
+};
+
+extern ssize_t nsc_gpio_write(struct file *file, const char __user *data,
+ size_t len, loff_t *ppos);
+
+extern ssize_t nsc_gpio_read(struct file *file, char __user *buf,
+ size_t len, loff_t *ppos);
+
+extern void nsc_gpio_dump(struct nsc_gpio_ops *amp, unsigned index);
+
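
Editor's note: the header comment explains that scx200_gpio and pc8736x_gpio supply their own low-level accessors through this ops table while sharing the file operations in the nsc_gpio module. A hedged sketch of what a driver-side instance might look like (bodies elided, names hypothetical):

#include <linux/module.h>
#include <linux/nsc_gpio.h>

static u32 my_gpio_config(unsigned iminor, u32 mask, u32 bits)
{
        /* read-modify-write the per-pin configuration register ... */
        return 0;
}

static int my_gpio_get(unsigned iminor)
{
        /* read the pin's input state ... */
        return 0;
}

static void my_gpio_set(unsigned iminor, int state)
{
        /* drive the pin high or low ... */
}

static struct nsc_gpio_ops my_gpio_ops = {
        .owner       = THIS_MODULE,
        .gpio_config = my_gpio_config,
        .gpio_get    = my_gpio_get,
        .gpio_set    = my_gpio_set,
        /* .gpio_dump, .gpio_set_high, ... would be filled in the same way */
};
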
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c2fd2d19938b94..9ae6b1a753668e 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1202,6 +1202,7 @@
#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
@@ -2170,7 +2171,6 @@
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
-#define PCI_DEVICE_ID_INTEL_GD31244 0x3200
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
diff --git a/include/linux/plist.h b/include/linux/plist.h
new file mode 100644
index 00000000000000..3404faef542c67
--- /dev/null
+++ b/include/linux/plist.h
@@ -0,0 +1,247 @@
+/*
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This is a priority-sorted list of nodes; each node has a
+ * priority from INT_MIN (highest) to INT_MAX (lowest).
+ *
+ * Addition is O(K), removal is O(1), change of priority of a node is
+ * O(K) and K is the number of RT priority levels used in the system.
+ * (1 <= K <= 99)
+ *
+ * This list is really a list of lists:
+ *
+ * - The tier 1 list is the prio_list, different priority nodes.
+ *
+ * - The tier 2 list is the node_list, serialized nodes.
+ *
+ * Simple ASCII art explanation:
+ *
+ * |HEAD |
+ * | |
+ * |prio_list.prev|<------------------------------------|
+ * |prio_list.next|<->|pl|<->|pl|<--------------->|pl|<-|
+ * |10 | |10| |21| |21| |21| |40| (prio)
+ * | | | | | | | | | | | |
+ * | | | | | | | | | | | |
+ * |node_list.next|<->|nl|<->|nl|<->|nl|<->|nl|<->|nl|<-|
+ * |node_list.prev|<------------------------------------|
+ *
+ * The nodes on the prio_list list are sorted by priority to simplify
+ * the insertion of new nodes. There are no nodes with duplicate
+ * priorities on the list.
+ *
+ * The nodes on the node_list are ordered by priority and can contain
+ * entries which have the same priority. Those entries are ordered
+ * FIFO.
+ *
+ * Addition means: look for the prio_list node in the prio_list
+ * for the priority of the node and insert it before the node_list
+ * entry of the next prio_list node. If it is the first node of
+ * that priority, add it to the prio_list in the right position and
+ * insert it into the serialized node_list list
+ *
+ * Removal means remove it from the node_list and remove it from
+ * the prio_list if the node_list list_head is non empty. In case
+ * of removal from the prio_list it must be checked whether other
+ * entries of the same priority are on the list or not. If there
+ * is another entry of the same priority then this entry has to
+ * replace the removed entry on the prio_list. If the entry which
+ * is removed is the only entry of this priority then a simple
+ * remove from both list is sufficient.
+ *
+ * INT_MIN is the highest priority, 0 is the medium highest, INT_MAX
+ * is lowest priority.
+ *
+ * No locking is done, up to the caller.
+ *
+ */
+#ifndef _LINUX_PLIST_H_
+#define _LINUX_PLIST_H_
+
+#include <linux/list.h>
+#include <linux/spinlock_types.h>
+
+struct plist_head {
+ struct list_head prio_list;
+ struct list_head node_list;
+#ifdef CONFIG_DEBUG_PI_LIST
+ spinlock_t *lock;
+#endif
+};
+
+struct plist_node {
+ int prio;
+ struct plist_head plist;
+};
+
+#ifdef CONFIG_DEBUG_PI_LIST
+# define PLIST_HEAD_LOCK_INIT(_lock) .lock = _lock
+#else
+# define PLIST_HEAD_LOCK_INIT(_lock)
+#endif
+
+/**
+ * #PLIST_HEAD_INIT - static struct plist_head initializer
+ *
+ * @head: struct plist_head variable name
+ */
+#define PLIST_HEAD_INIT(head, _lock) \
+{ \
+ .prio_list = LIST_HEAD_INIT((head).prio_list), \
+ .node_list = LIST_HEAD_INIT((head).node_list), \
+ PLIST_HEAD_LOCK_INIT(&(_lock)) \
+}
+
+/**
+ * #PLIST_NODE_INIT - static struct plist_node initializer
+ *
+ * @node: struct plist_node variable name
+ * @__prio: initial node priority
+ */
+#define PLIST_NODE_INIT(node, __prio) \
+{ \
+ .prio = (__prio), \
+ .plist = PLIST_HEAD_INIT((node).plist, NULL), \
+}
+
+/**
+ * plist_head_init - dynamic struct plist_head initializer
+ *
+ * @head: &struct plist_head pointer
+ */
+static inline void
+plist_head_init(struct plist_head *head, spinlock_t *lock)
+{
+ INIT_LIST_HEAD(&head->prio_list);
+ INIT_LIST_HEAD(&head->node_list);
+#ifdef CONFIG_DEBUG_PI_LIST
+ head->lock = lock;
+#endif
+}
+
+/**
+ * plist_node_init - Dynamic struct plist_node initializer
+ *
+ * @node: &struct plist_node pointer
+ * @prio: initial node priority
+ */
+static inline void plist_node_init(struct plist_node *node, int prio)
+{
+ node->prio = prio;
+ plist_head_init(&node->plist, NULL);
+}
+
+extern void plist_add(struct plist_node *node, struct plist_head *head);
+extern void plist_del(struct plist_node *node, struct plist_head *head);
+
+/**
+ * plist_for_each - iterate over the plist
+ *
+ * @pos1: the type * to use as a loop counter.
+ * @head: the head for your list.
+ */
+#define plist_for_each(pos, head) \
+ list_for_each_entry(pos, &(head)->node_list, plist.node_list)
+
+/**
+ * plist_for_each_entry_safe - iterate over a plist of given type safe
+ * against removal of list entry
+ *
+ * @pos1: the type * to use as a loop counter.
+ * @n1: another type * to use as temporary storage
+ * @head: the head for your list.
+ */
+#define plist_for_each_safe(pos, n, head) \
+ list_for_each_entry_safe(pos, n, &(head)->node_list, plist.node_list)
+
+/**
+ * plist_for_each_entry - iterate over list of given type
+ *
+ * @pos: the type * to use as a loop counter.
+ * @head: the head for your list.
+ * @member: the name of the list_struct within the struct.
+ */
+#define plist_for_each_entry(pos, head, mem) \
+ list_for_each_entry(pos, &(head)->node_list, mem.plist.node_list)
+
+/**
+ * plist_for_each_entry_safe - iterate over list of given type safe against
+ * removal of list entry
+ *
+ * @pos: the type * to use as a loop counter.
+ * @n: another type * to use as temporary storage
+ * @head: the head for your list.
+ * @m: the name of the list_struct within the struct.
+ */
+#define plist_for_each_entry_safe(pos, n, head, m) \
+ list_for_each_entry_safe(pos, n, &(head)->node_list, m.plist.node_list)
+
+/**
+ * plist_head_empty - return !0 if a plist_head is empty
+ *
+ * @head: &struct plist_head pointer
+ */
+static inline int plist_head_empty(const struct plist_head *head)
+{
+ return list_empty(&head->node_list);
+}
+
+/**
+ * plist_node_empty - return !0 if plist_node is not on a list
+ *
+ * @node: &struct plist_node pointer
+ */
+static inline int plist_node_empty(const struct plist_node *node)
+{
+ return plist_head_empty(&node->plist);
+}
+
+/* All functions below assume the plist_head is not empty. */
+
+/**
+ * plist_first_entry - get the struct for the first entry
+ *
+ * @ptr: the &struct plist_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ */
+#ifdef CONFIG_DEBUG_PI_LIST
+# define plist_first_entry(head, type, member) \
+({ \
+ WARN_ON(plist_head_empty(head)); \
+ container_of(plist_first(head), type, member); \
+})
+#else
+# define plist_first_entry(head, type, member) \
+ container_of(plist_first(head), type, member)
+#endif
+
+/**
+ * plist_first - return the first node (and thus, highest priority)
+ *
+ * @head: the &struct plist_head pointer
+ *
+ * Assumes the plist is _not_ empty.
+ */
+static inline struct plist_node* plist_first(const struct plist_head *head)
+{
+ return list_entry(head->node_list.next,
+ struct plist_node, plist.node_list);
+}
+
+#endif
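
Editor's note: since the header above is the whole public interface, a short hypothetical waiter queue shows how the pieces fit together; lower prio values sort first, so plist_first_entry() returns the highest-priority entry:

#include <linux/plist.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(waiter_lock);
static struct plist_head waiter_list = PLIST_HEAD_INIT(waiter_list, waiter_lock);

struct waiter {
        struct plist_node node;
        int id;
};

static void enqueue_waiter(struct waiter *w, int prio)
{
        plist_node_init(&w->node, prio);        /* lower value == higher priority */
        spin_lock(&waiter_lock);
        plist_add(&w->node, &waiter_list);
        spin_unlock(&waiter_lock);
}

static struct waiter *peek_top_waiter(void)
{
        if (plist_head_empty(&waiter_list))
                return NULL;
        return plist_first_entry(&waiter_list, struct waiter, node);
}
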
diff --git a/include/linux/poison.h b/include/linux/poison.h
new file mode 100644
index 00000000000000..a5347c02432e99
--- /dev/null
+++ b/include/linux/poison.h
@@ -0,0 +1,58 @@
+#ifndef _LINUX_POISON_H
+#define _LINUX_POISON_H
+
+/********** include/linux/list.h **********/
+/*
+ * These are non-NULL pointers that will result in page faults
+ * under normal circumstances, used to verify that nobody uses
+ * non-initialized list entries.
+ */
+#define LIST_POISON1 ((void *) 0x00100100)
+#define LIST_POISON2 ((void *) 0x00200200)
+
+/********** mm/slab.c **********/
+/*
+ * Magic nums for obj red zoning.
+ * Placed in the first word before and the first word after an obj.
+ */
+#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
+#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
+
+/* ...and for poisoning */
+#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
+#define POISON_FREE 0x6b /* for use-after-free poisoning */
+#define POISON_END 0xa5 /* end-byte of poisoning */
+
+/********** arch/$ARCH/mm/init.c **********/
+#define POISON_FREE_INITMEM 0xcc
+
+/********** arch/x86_64/mm/init.c **********/
+#define POISON_FREE_INITDATA 0xba
+
+/********** arch/ia64/hp/common/sba_iommu.c **********/
+/*
+ * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
+ * value of "SBAIOMMU POISON\0" for spill-over poisoning.
+ */
+
+/********** fs/jbd/journal.c **********/
+#define JBD_POISON_FREE 0x5b
+
+/********** drivers/base/dmapool.c **********/
+#define POOL_POISON_FREED 0xa7 /* !inuse */
+#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
+
+/********** drivers/atm/ **********/
+#define ATM_POISON_FREE 0x12
+
+/********** kernel/mutexes **********/
+#define MUTEX_DEBUG_INIT 0x11
+#define MUTEX_DEBUG_FREE 0x22
+
+/********** security/ **********/
+#define KEY_DESTROY 0xbd
+
+/********** sound/oss/ **********/
+#define OSS_POISON_FREE 0xAB
+
+#endif
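
A minimal user-space sketch of how the two list poison values are meant to be used: a debugging flavour of list_del() writes them into the unlinked entry's pointers so a later bogus dereference faults, and a cheap check can spot an entry that was already removed. The helper names here are illustrative, not kernel API.

#include <stdio.h>

#define LIST_POISON1 ((void *) 0x00100100)
#define LIST_POISON2 ((void *) 0x00200200)

struct list_head { struct list_head *next, *prev; };

/* Debug-style deletion: unlink the entry and poison its stale pointers. */
static void list_del_poison(struct list_head *entry)
{
	entry->next->prev = entry->prev;
	entry->prev->next = entry->next;
	entry->next = LIST_POISON1;
	entry->prev = LIST_POISON2;
}

static int list_entry_is_poisoned(const struct list_head *entry)
{
	return entry->next == LIST_POISON1 && entry->prev == LIST_POISON2;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head node;

	/* insert node after head */
	node.next = head.next;
	node.prev = &head;
	head.next->prev = &node;
	head.next = &node;

	list_del_poison(&node);
	printf("deleted entry poisoned: %d\n", list_entry_is_poisoned(&node));
	return 0;
}
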
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 6312758393b61a..48dfe00070c70b 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -258,6 +258,7 @@ extern void rcu_init(void);
extern void rcu_check_callbacks(int cpu, int user);
extern void rcu_restart_cpu(int cpu);
extern long rcu_batches_completed(void);
+extern long rcu_batches_completed_bh(void);
/* Exported interfaces */
extern void FASTCALL(call_rcu(struct rcu_head *head,
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
new file mode 100644
index 00000000000000..fa4a3b82ba704a
--- /dev/null
+++ b/include/linux/rtmutex.h
@@ -0,0 +1,117 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains the public data structure and API definitions.
+ */
+
+#ifndef __LINUX_RT_MUTEX_H
+#define __LINUX_RT_MUTEX_H
+
+#include <linux/linkage.h>
+#include <linux/plist.h>
+#include <linux/spinlock_types.h>
+
+/*
+ * The rt_mutex structure
+ *
+ * @wait_lock: spinlock to protect the structure
+ * @wait_list:	plist head to enqueue waiters in priority order
+ * @owner: the mutex owner
+ */
+struct rt_mutex {
+ spinlock_t wait_lock;
+ struct plist_head wait_list;
+ struct task_struct *owner;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ int save_state;
+ struct list_head held_list_entry;
+ unsigned long acquire_ip;
+ const char *name, *file;
+ int line;
+ void *magic;
+#endif
+};
+
+struct rt_mutex_waiter;
+struct hrtimer_sleeper;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ extern int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len);
+ extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+#else
+ static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+ {
+ return 0;
+ }
+# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ , .name = #mutexname, .file = __FILE__, .line = __LINE__
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__)
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+#else
+# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
+# define rt_mutex_debug_task_free(t) do { } while (0)
+#endif
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+ { .wait_lock = SPIN_LOCK_UNLOCKED \
+ , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list, mutexname.wait_lock) \
+ , .owner = NULL \
+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+
+#define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+
+/**
+ * rt_mutex_is_locked - is the mutex locked
+ * @lock: the mutex to be queried
+ *
+ * Returns 1 if the mutex is locked, 0 if unlocked.
+ */
+static inline int rt_mutex_is_locked(struct rt_mutex *lock)
+{
+ return lock->owner != NULL;
+}
+
+extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+extern void rt_mutex_lock(struct rt_mutex *lock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock);
+extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock);
+
+extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+extern void rt_mutex_unlock(struct rt_mutex *lock);
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define INIT_RT_MUTEX_DEBUG(tsk) \
+ .held_list_head = LIST_HEAD_INIT(tsk.held_list_head), \
+ .held_list_lock = SPIN_LOCK_UNLOCKED
+#else
+# define INIT_RT_MUTEX_DEBUG(tsk)
+#endif
+
+#ifdef CONFIG_RT_MUTEXES
+# define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters, tsk.pi_lock), \
+ INIT_RT_MUTEX_DEBUG(tsk)
+#else
+# define INIT_RT_MUTEXES(tsk)
+#endif
+
+#endif
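
A short kernel-context sketch (not a standalone program) of the API declared above: a statically initialized rt_mutex, the blocking lock/unlock pair, and the non-blocking trylock. The lock and function names are illustrative.

static DEFINE_RT_MUTEX(demo_mutex);

static int demo_critical_section(void)
{
	/* Blocks, boosting the current owner's priority if needed. */
	rt_mutex_lock(&demo_mutex);
	WARN_ON(!rt_mutex_is_locked(&demo_mutex));
	rt_mutex_unlock(&demo_mutex);

	/* Non-blocking attempt: returns nonzero on success. */
	if (!rt_mutex_trylock(&demo_mutex))
		return -EBUSY;
	rt_mutex_unlock(&demo_mutex);
	return 0;
}
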
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 122a25c1b997e4..821f0481ebe190 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -73,6 +73,7 @@ struct sched_param {
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/futex.h>
+#include <linux/rtmutex.h>
#include <linux/time.h>
#include <linux/param.h>
@@ -83,6 +84,7 @@ struct sched_param {
#include <asm/processor.h>
struct exec_domain;
+struct futex_pi_state;
/*
* List of flags we want to share for kernel threads,
@@ -123,6 +125,7 @@ extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
+extern unsigned long weighted_cpuload(const int cpu);
/*
@@ -494,8 +497,11 @@ struct signal_struct {
#define MAX_PRIO (MAX_RT_PRIO + 40)
-#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
+#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO)
+#define rt_task(p) rt_prio((p)->prio)
#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH))
+#define has_rt_policy(p) \
+ unlikely((p)->policy != SCHED_NORMAL && (p)->policy != SCHED_BATCH)
/*
* Some day this will be a full-fledged user tracking system..
@@ -558,9 +564,9 @@ enum idle_type
/*
* sched-domains (multiprocessor balancing) declarations:
*/
-#ifdef CONFIG_SMP
#define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
+#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 4 /* Balance on exec */
@@ -569,6 +575,11 @@ enum idle_type
#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
+
+#define BALANCE_FOR_POWER ((sched_mc_power_savings || sched_smt_power_savings) \
+ ? SD_POWERSAVINGS_BALANCE : 0)
+
struct sched_group {
struct sched_group *next; /* Must be a circular list */
@@ -638,7 +649,7 @@ struct sched_domain {
#endif
};
-extern void partition_sched_domains(cpumask_t *partition1,
+extern int partition_sched_domains(cpumask_t *partition1,
cpumask_t *partition2);
/*
@@ -713,10 +724,13 @@ struct task_struct {
int lock_depth; /* BKL lock depth */
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
int oncpu;
#endif
- int prio, static_prio;
+#endif
+ int load_weight; /* for niceness load balancing purposes */
+ int prio, static_prio, normal_prio;
struct list_head run_list;
prio_array_t *array;
@@ -843,6 +857,20 @@ struct task_struct {
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
spinlock_t alloc_lock;
+ /* Protection of the PI data structures: */
+ spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task */
+ struct plist_head pi_waiters;
+ /* Deadlock detection and priority inheritance handling */
+ struct rt_mutex_waiter *pi_blocked_on;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+ spinlock_t held_list_lock;
+ struct list_head held_list_head;
+# endif
+#endif
+
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
@@ -888,6 +916,8 @@ struct task_struct {
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
atomic_t fs_excl; /* holding fs exclusive resources */
struct rcu_head rcu;
@@ -955,6 +985,7 @@ static inline void put_task_struct(struct task_struct *t)
#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1009,6 +1040,19 @@ static inline void idle_task_exit(void) {}
#endif
extern void sched_idle_next(void);
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(task_t *p);
+extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
+#else
+static inline int rt_mutex_getprio(task_t *p)
+{
+ return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p) do { } while (0)
+#endif
+
extern void set_user_nice(task_t *p, long nice);
extern int task_prio(const task_t *p);
extern int task_nice(const task_t *p);
@@ -1408,6 +1452,11 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
extern long sched_setaffinity(pid_t pid, cpumask_t new_mask);
extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+#include <linux/sysdev.h>
+extern int sched_mc_power_savings, sched_smt_power_savings;
+extern struct sysdev_attribute attr_sched_mc_power_savings, attr_sched_smt_power_savings;
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
extern void normalize_rt_tasks(void);
#ifdef CONFIG_PM
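
The rt_prio()/rt_task() split above is easier to see with concrete numbers: with the conventional MAX_RT_PRIO of 100, priorities 0..99 are realtime and 100..139 (MAX_PRIO - 1) are the nice levels. A tiny user-space sketch of that arithmetic, with the unlikely() annotation dropped:

#include <stdio.h>

#define MAX_RT_PRIO	100			/* assumed conventional value */
#define MAX_PRIO	(MAX_RT_PRIO + 40)	/* as in the hunk above */

#define rt_prio(prio)	((prio) < MAX_RT_PRIO)

int main(void)
{
	int prio;

	for (prio = 0; prio < MAX_PRIO; prio += 20)
		printf("prio %3d -> %s\n", prio,
		       rt_prio(prio) ? "realtime" : "timeshare");
	return 0;
}
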
diff --git a/include/linux/scx200.h b/include/linux/scx200.h
index a22f9e173ad2d3..693c0557e70bdf 100644
--- a/include/linux/scx200.h
+++ b/include/linux/scx200.h
@@ -49,10 +49,3 @@ extern unsigned scx200_cb_base;
#define SCx200_REV 0x3d /* Revision Register */
#define SCx200_CBA 0x3e /* Configuration Base Address Register */
#define SCx200_CBA_SCRATCH 0x64 /* Configuration Base Address Scratchpad */
-
-/*
- Local variables:
- compile-command: "make -C ../.. bzImage modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/include/linux/scx200_gpio.h b/include/linux/scx200_gpio.h
index 30cdd648ba7935..90dd069cc145c4 100644
--- a/include/linux/scx200_gpio.h
+++ b/include/linux/scx200_gpio.h
@@ -1,6 +1,6 @@
#include <linux/spinlock.h>
-u32 scx200_gpio_configure(int index, u32 set, u32 clear);
+u32 scx200_gpio_configure(unsigned index, u32 set, u32 clear);
extern unsigned scx200_gpio_base;
extern long scx200_gpio_shadow[2];
@@ -17,7 +17,7 @@ extern long scx200_gpio_shadow[2];
/* returns the value of the GPIO pin */
-static inline int scx200_gpio_get(int index) {
+static inline int scx200_gpio_get(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR + 0x04;
__SCx200_GPIO_INDEX;
@@ -29,7 +29,7 @@ static inline int scx200_gpio_get(int index) {
driven if the GPIO is configured as an output, it might not be the
state of the GPIO right now if the GPIO is configured as an input) */
-static inline int scx200_gpio_current(int index) {
+static inline int scx200_gpio_current(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_INDEX;
@@ -38,7 +38,7 @@ static inline int scx200_gpio_current(int index) {
/* drive the GPIO signal high */
-static inline void scx200_gpio_set_high(int index) {
+static inline void scx200_gpio_set_high(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -49,7 +49,7 @@ static inline void scx200_gpio_set_high(int index) {
/* drive the GPIO signal low */
-static inline void scx200_gpio_set_low(int index) {
+static inline void scx200_gpio_set_low(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -60,7 +60,7 @@ static inline void scx200_gpio_set_low(int index) {
/* drive the GPIO signal to state */
-static inline void scx200_gpio_set(int index, int state) {
+static inline void scx200_gpio_set(unsigned index, int state) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -73,7 +73,7 @@ static inline void scx200_gpio_set(int index, int state) {
}
/* toggle the GPIO signal */
-static inline void scx200_gpio_change(int index) {
+static inline void scx200_gpio_change(unsigned index) {
__SCx200_GPIO_BANK;
__SCx200_GPIO_IOADDR;
__SCx200_GPIO_SHADOW;
@@ -87,10 +87,3 @@ static inline void scx200_gpio_change(int index) {
#undef __SCx200_GPIO_SHADOW
#undef __SCx200_GPIO_INDEX
#undef __SCx200_GPIO_OUT
-
-/*
- Local variables:
- compile-command: "make -C ../.. bzImage modules"
- c-basic-offset: 8
- End:
-*/
diff --git a/include/linux/swap.h b/include/linux/swap.h
index dc3f3aa0c83e89..c41e2d6d1acc3d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -199,6 +199,8 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
}
#endif
+extern int kswapd_run(int nid);
+
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
extern int shmem_unuse(swp_entry_t entry, struct page *page);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 33785b79d548a8..008f04c5673715 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -174,9 +174,9 @@ asmlinkage long sys_waitid(int which, pid_t pid,
int options, struct rusage __user *ru);
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options);
asmlinkage long sys_set_tid_address(int __user *tidptr);
-asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
struct timespec __user *utime, u32 __user *uaddr2,
- int val3);
+ u32 val3);
asmlinkage long sys_init_module(void __user *umod, unsigned long len,
const char __user *uargs);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 349ef908a2222d..46e4d8f2771f9f 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -149,6 +149,7 @@ enum
KERN_ACPI_VIDEO_FLAGS=71, /* int: flags for setting up video after ACPI sleep */
KERN_IA64_UNALIGNED=72, /* int: ia64 unaligned userland trap enable */
KERN_COMPAT_LOG=73, /* int: print compat layer messages */
+ KERN_MAX_LOCK_DEPTH=74,
};
@@ -189,6 +190,7 @@ enum
VM_ZONE_RECLAIM_MODE=31, /* reclaim local zone memory before going off node */
VM_ZONE_RECLAIM_INTERVAL=32, /* time period to wait after reclaim failure */
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
+ VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
};
diff --git a/include/linux/topology.h b/include/linux/topology.h
index a305ae2e44b6dd..ec1eca85290ade 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -134,7 +134,8 @@
.flags = SD_LOAD_BALANCE \
| SD_BALANCE_NEWIDLE \
| SD_BALANCE_EXEC \
- | SD_WAKE_AFFINE, \
+ | SD_WAKE_AFFINE \
+ | BALANCE_FOR_POWER, \
.last_balance = jiffies, \
.balance_interval = 1, \
.nr_balance_failed = 0, \
diff --git a/include/media/cx2341x.h b/include/media/cx2341x.h
index 074c4008ad5296..d91d88f93c8b34 100644
--- a/include/media/cx2341x.h
+++ b/include/media/cx2341x.h
@@ -89,9 +89,9 @@ int cx2341x_ctrl_query(struct cx2341x_mpeg_params *params,
struct v4l2_queryctrl *qctrl);
const char **cx2341x_ctrl_get_menu(u32 id);
int cx2341x_ext_ctrls(struct cx2341x_mpeg_params *params,
- struct v4l2_ext_controls *ctrls, int cmd);
+ struct v4l2_ext_controls *ctrls, unsigned int cmd);
void cx2341x_fill_defaults(struct cx2341x_mpeg_params *p);
-void cx2341x_log_status(struct cx2341x_mpeg_params *p, int cardid);
+void cx2341x_log_status(struct cx2341x_mpeg_params *p, const char *prefix);
/* Firmware names */
#define CX2341X_FIRM_ENC_FILENAME "v4l-cx2341x-enc.fw"
diff --git a/init/Kconfig b/init/Kconfig
index df55b36656010b..f70f2fd273c215 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -339,9 +339,14 @@ config BASE_FULL
kernel data structures. This saves memory on small machines,
but may reduce performance.
+config RT_MUTEXES
+ boolean
+ select PLIST
+
config FUTEX
bool "Enable futex support" if EMBEDDED
default y
+ select RT_MUTEXES
help
Disabling this option will cause the kernel to be built without
support for "fast userspace mutexes". The resulting kernel may not
diff --git a/init/main.c b/init/main.c
index 80af1a52485fcf..0d57f6ccb63a1a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -48,6 +48,7 @@
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/unwind.h>
+#include <linux/buffer_head.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -80,7 +81,6 @@ extern void mca_init(void);
extern void sbus_init(void);
extern void sysctl_init(void);
extern void signals_init(void);
-extern void buffer_init(void);
extern void pidhash_init(void);
extern void pidmap_init(void);
extern void prio_tree_init(void);
diff --git a/kernel/Makefile b/kernel/Makefile
index 752bd7d383af34..82fb182f6f6184 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -16,6 +16,9 @@ obj-$(CONFIG_FUTEX) += futex.o
ifeq ($(CONFIG_COMPAT),y)
obj-$(CONFIG_FUTEX) += futex_compat.o
endif
+obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
+obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
obj-$(CONFIG_SMP) += cpu.o spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
diff --git a/kernel/acct.c b/kernel/acct.c
index 368c4f03fe0e9f..126ca43d5d2ba9 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -521,6 +521,7 @@ static void do_acct_process(struct file *file)
/**
* acct_init_pacct - initialize a new pacct_struct
+ * @pacct: per-process accounting info struct to initialize
*/
void acct_init_pacct(struct pacct_struct *pacct)
{
@@ -576,7 +577,7 @@ void acct_collect(long exitcode, int group_dead)
*
* handles process accounting for an exiting task
*/
-void acct_process()
+void acct_process(void)
{
struct file *file = NULL;
diff --git a/kernel/audit.c b/kernel/audit.c
index 7dfac7031bd734..82443fb433efcb 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -818,7 +818,7 @@ err:
*/
unsigned int audit_serial(void)
{
- static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(serial_lock);
static unsigned int serial = 0;
unsigned long flags;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 9ebd96fda29588..dc5e3f01efe747 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -658,8 +658,7 @@ static void audit_log_task_context(struct audit_buffer *ab)
return;
error_path:
- if (ctx)
- kfree(ctx);
+ kfree(ctx);
audit_panic("error in audit_log_task_context");
return;
}
@@ -1367,7 +1366,7 @@ int __audit_mq_open(int oflag, mode_t mode, struct mq_attr __user *u_attr)
* @mqdes: MQ descriptor
* @msg_len: Message length
* @msg_prio: Message priority
- * @abs_timeout: Message timeout in absolute time
+ * @u_abs_timeout: Message timeout in absolute time
*
* Returns 0 for success or NULL context or < 0 on error.
*/
@@ -1409,8 +1408,8 @@ int __audit_mq_timedsend(mqd_t mqdes, size_t msg_len, unsigned int msg_prio,
* __audit_mq_timedreceive - record audit data for a POSIX MQ timed receive
* @mqdes: MQ descriptor
* @msg_len: Message length
- * @msg_prio: Message priority
- * @abs_timeout: Message timeout in absolute time
+ * @u_msg_prio: Message priority
+ * @u_abs_timeout: Message timeout in absolute time
*
* Returns 0 for success or NULL context or < 0 on error.
*/
@@ -1558,7 +1557,6 @@ int __audit_ipc_obj(struct kern_ipc_perm *ipcp)
* @uid: msgq user id
* @gid: msgq group id
* @mode: msgq mode (permissions)
- * @ipcp: in-kernel IPC permissions
*
* Returns 0 for success or NULL context or < 0 on error.
*/
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 03dcd981846a6b..70fbf2e83766ab 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -18,7 +18,7 @@
/* This protects CPUs going up and down... */
static DEFINE_MUTEX(cpucontrol);
-static BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
#ifdef CONFIG_HOTPLUG_CPU
static struct task_struct *lock_cpu_hotplug_owner;
@@ -69,10 +69,13 @@ EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
-int register_cpu_notifier(struct notifier_block *nb)
+int __cpuinit register_cpu_notifier(struct notifier_block *nb)
{
return blocking_notifier_chain_register(&cpu_chain, nb);
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
@@ -81,7 +84,6 @@ void unregister_cpu_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_cpu_notifier);
-#ifdef CONFIG_HOTPLUG_CPU
static inline void check_for_tasks(int cpu)
{
struct task_struct *p;
diff --git a/kernel/exit.c b/kernel/exit.c
index 304ef637be6c70..ab06b9f88f6467 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -926,9 +926,18 @@ fastcall NORET_TYPE void do_exit(long code)
tsk->mempolicy = NULL;
#endif
/*
+ * This must happen late, after the PID is not
+ * hashed anymore:
+ */
+ if (unlikely(!list_empty(&tsk->pi_state_list)))
+ exit_pi_state_list(tsk);
+ if (unlikely(current->pi_state_cache))
+ kfree(current->pi_state_cache);
+ /*
* If DEBUG_MUTEXES is on, make sure we are holding no locks:
*/
mutex_debug_check_no_locks_held(tsk);
+ rt_mutex_debug_check_no_locks_held(tsk);
if (tsk->io_context)
exit_io_context();
diff --git a/kernel/fork.c b/kernel/fork.c
index 9b4e54ef0225e2..628198a4f28a72 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -104,6 +104,7 @@ static kmem_cache_t *mm_cachep;
void free_task(struct task_struct *tsk)
{
free_thread_info(tsk->thread_info);
+ rt_mutex_debug_task_free(tsk);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@@ -913,6 +914,19 @@ asmlinkage long sys_set_tid_address(int __user *tidptr)
return current->pid;
}
+static inline void rt_mutex_init_task(struct task_struct *p)
+{
+#ifdef CONFIG_RT_MUTEXES
+ spin_lock_init(&p->pi_lock);
+ plist_head_init(&p->pi_waiters, &p->pi_lock);
+ p->pi_blocked_on = NULL;
+# ifdef CONFIG_DEBUG_RT_MUTEXES
+ spin_lock_init(&p->held_list_lock);
+ INIT_LIST_HEAD(&p->held_list_head);
+# endif
+#endif
+}
+
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
@@ -1034,6 +1048,8 @@ static task_t *copy_process(unsigned long clone_flags,
mpol_fix_fork_child_flag(p);
#endif
+ rt_mutex_init_task(p);
+
#ifdef CONFIG_DEBUG_MUTEXES
p->blocked_on = NULL; /* not blocked yet */
#endif
@@ -1076,6 +1092,9 @@ static task_t *copy_process(unsigned long clone_flags,
#ifdef CONFIG_COMPAT
p->compat_robust_list = NULL;
#endif
+ INIT_LIST_HEAD(&p->pi_state_list);
+ p->pi_state_cache = NULL;
+
/*
* sigaltstack should be cleared when sharing the same VM
*/
diff --git a/kernel/futex.c b/kernel/futex.c
index e1a380c77a5a2a..6c91f938005db0 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -12,6 +12,10 @@
* (C) Copyright 2006 Red Hat Inc, All Rights Reserved
* Thanks to Thomas Gleixner for suggestions, analysis and fixes.
*
+ * PI-futex support started by Ingo Molnar and Thomas Gleixner
+ * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
* Thanks to Ben LaHaise for yelling "hashed waitqueues" loudly
* enough at me, Linus for the original (flawed) idea, Matthew
* Kirkwood for proof-of-concept implementation.
@@ -46,6 +50,8 @@
#include <linux/signal.h>
#include <asm/futex.h>
+#include "rtmutex_common.h"
+
#define FUTEX_HASHBITS (CONFIG_BASE_SMALL ? 4 : 8)
/*
@@ -63,7 +69,7 @@ union futex_key {
int offset;
} shared;
struct {
- unsigned long uaddr;
+ unsigned long address;
struct mm_struct *mm;
int offset;
} private;
@@ -75,6 +81,27 @@ union futex_key {
};
/*
+ * Priority Inheritance state:
+ */
+struct futex_pi_state {
+ /*
+ * list of 'owned' pi_state instances - these have to be
+ * cleaned up in do_exit() if the task exits prematurely:
+ */
+ struct list_head list;
+
+ /*
+ * The PI object:
+ */
+ struct rt_mutex pi_mutex;
+
+ struct task_struct *owner;
+ atomic_t refcount;
+
+ union futex_key key;
+};
+
+/*
* We use this hashed waitqueue instead of a normal wait_queue_t, so
* we can wake only the relevant ones (hashed queues may be shared).
*
@@ -87,15 +114,19 @@ struct futex_q {
struct list_head list;
wait_queue_head_t waiters;
- /* Which hash list lock to use. */
+ /* Which hash list lock to use: */
spinlock_t *lock_ptr;
- /* Key which the futex is hashed on. */
+ /* Key which the futex is hashed on: */
union futex_key key;
- /* For fd, sigio sent using these. */
+ /* For fd, sigio sent using these: */
int fd;
struct file *filp;
+
+ /* Optional priority inheritance state: */
+ struct futex_pi_state *pi_state;
+ struct task_struct *task;
};
/*
@@ -144,8 +175,9 @@ static inline int match_futex(union futex_key *key1, union futex_key *key2)
*
* Should be called with &current->mm->mmap_sem but NOT any spinlocks.
*/
-static int get_futex_key(unsigned long uaddr, union futex_key *key)
+static int get_futex_key(u32 __user *uaddr, union futex_key *key)
{
+ unsigned long address = (unsigned long)uaddr;
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct page *page;
@@ -154,16 +186,16 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
/*
* The futex address must be "naturally" aligned.
*/
- key->both.offset = uaddr % PAGE_SIZE;
+ key->both.offset = address % PAGE_SIZE;
if (unlikely((key->both.offset % sizeof(u32)) != 0))
return -EINVAL;
- uaddr -= key->both.offset;
+ address -= key->both.offset;
/*
* The futex is hashed differently depending on whether
* it's in a shared or private mapping. So check vma first.
*/
- vma = find_extend_vma(mm, uaddr);
+ vma = find_extend_vma(mm, address);
if (unlikely(!vma))
return -EFAULT;
@@ -184,7 +216,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
*/
if (likely(!(vma->vm_flags & VM_MAYSHARE))) {
key->private.mm = mm;
- key->private.uaddr = uaddr;
+ key->private.address = address;
return 0;
}
@@ -194,7 +226,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
key->shared.inode = vma->vm_file->f_dentry->d_inode;
key->both.offset++; /* Bit 0 of offset indicates inode-based key. */
if (likely(!(vma->vm_flags & VM_NONLINEAR))) {
- key->shared.pgoff = (((uaddr - vma->vm_start) >> PAGE_SHIFT)
+ key->shared.pgoff = (((address - vma->vm_start) >> PAGE_SHIFT)
+ vma->vm_pgoff);
return 0;
}
@@ -205,7 +237,7 @@ static int get_futex_key(unsigned long uaddr, union futex_key *key)
* from swap. But that's a lot of code to duplicate here
* for a rare case, so we simply fetch the page.
*/
- err = get_user_pages(current, mm, uaddr, 1, 0, 0, &page, NULL);
+ err = get_user_pages(current, mm, address, 1, 0, 0, &page, NULL);
if (err >= 0) {
key->shared.pgoff =
page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
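
A user-space sketch of the address split that get_futex_key() performs above: the futex word must be naturally aligned, the in-page offset becomes part of the key, and the page-aligned remainder identifies the mapping. PAGE_SIZE is hard-coded here only for illustration; the helper name is not kernel API.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* illustrative; the kernel uses the real page size */

static int split_futex_address(uint32_t *uaddr,
			       unsigned long *page_base, unsigned long *offset)
{
	unsigned long address = (unsigned long)uaddr;

	*offset = address % PAGE_SIZE;
	if (*offset % sizeof(uint32_t))
		return -1;		/* -EINVAL in the kernel */
	*page_base = address - *offset;
	return 0;
}

int main(void)
{
	static uint32_t futex_word;
	unsigned long base, off;

	if (split_futex_address(&futex_word, &base, &off) == 0)
		printf("page base %#lx, offset %#lx\n", base, off);
	return 0;
}
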
@@ -246,18 +278,244 @@ static void drop_key_refs(union futex_key *key)
}
}
-static inline int get_futex_value_locked(int *dest, int __user *from)
+static inline int get_futex_value_locked(u32 *dest, u32 __user *from)
{
int ret;
inc_preempt_count();
- ret = __copy_from_user_inatomic(dest, from, sizeof(int));
+ ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
dec_preempt_count();
return ret ? -EFAULT : 0;
}
/*
+ * Fault handling. Called with current->mm->mmap_sem held.
+ */
+static int futex_handle_fault(unsigned long address, int attempt)
+{
+ struct vm_area_struct * vma;
+ struct mm_struct *mm = current->mm;
+
+ if (attempt >= 2 || !(vma = find_vma(mm, address)) ||
+ vma->vm_start > address || !(vma->vm_flags & VM_WRITE))
+ return -EFAULT;
+
+ switch (handle_mm_fault(mm, vma, address, 1)) {
+ case VM_FAULT_MINOR:
+ current->min_flt++;
+ break;
+ case VM_FAULT_MAJOR:
+ current->maj_flt++;
+ break;
+ default:
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/*
+ * PI code:
+ */
+static int refill_pi_state_cache(void)
+{
+ struct futex_pi_state *pi_state;
+
+ if (likely(current->pi_state_cache))
+ return 0;
+
+ pi_state = kmalloc(sizeof(*pi_state), GFP_KERNEL);
+
+ if (!pi_state)
+ return -ENOMEM;
+
+ memset(pi_state, 0, sizeof(*pi_state));
+ INIT_LIST_HEAD(&pi_state->list);
+ /* pi_mutex gets initialized later */
+ pi_state->owner = NULL;
+ atomic_set(&pi_state->refcount, 1);
+
+ current->pi_state_cache = pi_state;
+
+ return 0;
+}
+
+static struct futex_pi_state * alloc_pi_state(void)
+{
+ struct futex_pi_state *pi_state = current->pi_state_cache;
+
+ WARN_ON(!pi_state);
+ current->pi_state_cache = NULL;
+
+ return pi_state;
+}
+
+static void free_pi_state(struct futex_pi_state *pi_state)
+{
+ if (!atomic_dec_and_test(&pi_state->refcount))
+ return;
+
+ /*
+ * If pi_state->owner is NULL, the owner is most probably dying
+ * and has cleaned up the pi_state already
+ */
+ if (pi_state->owner) {
+ spin_lock_irq(&pi_state->owner->pi_lock);
+ list_del_init(&pi_state->list);
+ spin_unlock_irq(&pi_state->owner->pi_lock);
+
+ rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
+ }
+
+ if (current->pi_state_cache)
+ kfree(pi_state);
+ else {
+ /*
+ * pi_state->list is already empty.
+ * clear pi_state->owner.
+ * refcount is at 0 - put it back to 1.
+ */
+ pi_state->owner = NULL;
+ atomic_set(&pi_state->refcount, 1);
+ current->pi_state_cache = pi_state;
+ }
+}
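
The refill/alloc/free trio above implements a one-slot per-task cache: the allocation that may sleep or fail happens early (refill), while the later consumer runs in a context that must not fail. A user-space sketch of the same pattern, with illustrative names and a thread-local slot standing in for the task_struct field:

#include <stdlib.h>
#include <string.h>

struct demo_state { int dummy; };

static __thread struct demo_state *demo_cache;

static int refill_cache(void)
{
	if (demo_cache)
		return 0;
	demo_cache = calloc(1, sizeof(*demo_cache));
	return demo_cache ? 0 : -1;	/* -ENOMEM in the kernel */
}

static struct demo_state *alloc_from_cache(void)
{
	struct demo_state *s = demo_cache;

	demo_cache = NULL;		/* caller guaranteed a refill earlier */
	return s;
}

static void release_to_cache(struct demo_state *s)
{
	if (demo_cache)
		free(s);		/* slot already occupied */
	else {
		memset(s, 0, sizeof(*s));
		demo_cache = s;		/* recycle into the empty slot */
	}
}

int main(void)
{
	struct demo_state *s;

	if (refill_cache())
		return 1;
	s = alloc_from_cache();
	release_to_cache(s);
	return 0;
}
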
+
+/*
+ * Look up the task based on what TID userspace gave us.
+ * We don't trust it.
+ */
+static struct task_struct * futex_find_get_task(pid_t pid)
+{
+ struct task_struct *p;
+
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+ if ((current->euid != p->euid) && (current->euid != p->uid)) {
+ p = NULL;
+ goto out_unlock;
+ }
+ if (p->state == EXIT_ZOMBIE || p->exit_state == EXIT_ZOMBIE) {
+ p = NULL;
+ goto out_unlock;
+ }
+ get_task_struct(p);
+out_unlock:
+ read_unlock(&tasklist_lock);
+
+ return p;
+}
+
+/*
+ * This task is holding PI mutexes at exit time => bad.
+ * Kernel cleans up PI-state, but userspace is likely hosed.
+ * (Robust-futex cleanup is separate and might save the day for userspace.)
+ */
+void exit_pi_state_list(struct task_struct *curr)
+{
+ struct futex_hash_bucket *hb;
+ struct list_head *next, *head = &curr->pi_state_list;
+ struct futex_pi_state *pi_state;
+ union futex_key key;
+
+ /*
+ * We are a ZOMBIE and nobody can enqueue itself on
+ * pi_state_list anymore, but we have to be careful
+	 * versus waiters unqueueing themselves:
+ */
+ spin_lock_irq(&curr->pi_lock);
+ while (!list_empty(head)) {
+
+ next = head->next;
+ pi_state = list_entry(next, struct futex_pi_state, list);
+ key = pi_state->key;
+ spin_unlock_irq(&curr->pi_lock);
+
+ hb = hash_futex(&key);
+ spin_lock(&hb->lock);
+
+ spin_lock_irq(&curr->pi_lock);
+ if (head->next != next) {
+ spin_unlock(&hb->lock);
+ continue;
+ }
+
+ list_del_init(&pi_state->list);
+
+ WARN_ON(pi_state->owner != curr);
+
+ pi_state->owner = NULL;
+ spin_unlock_irq(&curr->pi_lock);
+
+ rt_mutex_unlock(&pi_state->pi_mutex);
+
+ spin_unlock(&hb->lock);
+
+ spin_lock_irq(&curr->pi_lock);
+ }
+ spin_unlock_irq(&curr->pi_lock);
+}
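
exit_pi_state_list() has to take the hash-bucket lock outside the task's pi_lock, so it drops the inner lock, takes the outer one, re-takes the inner one, and re-checks the list head before trusting the entry it sampled. A user-space sketch of that drop/retake/revalidate shape, with pthread mutexes standing in for the spinlocks and illustrative names:

#include <pthread.h>

struct node { struct node *next; };

static pthread_mutex_t task_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */
static pthread_mutex_t bucket_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static struct node head = { &head };

static void drain_list(void)
{
	pthread_mutex_lock(&task_lock);
	while (head.next != &head) {
		struct node *next = head.next;

		/* bucket_lock must be taken outside task_lock */
		pthread_mutex_unlock(&task_lock);
		pthread_mutex_lock(&bucket_lock);

		pthread_mutex_lock(&task_lock);
		if (head.next != next) {
			/* raced: someone dequeued the entry meanwhile */
			pthread_mutex_unlock(&bucket_lock);
			continue;
		}
		head.next = next->next;		/* dequeue under both locks */
		pthread_mutex_unlock(&task_lock);

		/* per-entry work would happen here under bucket_lock */
		pthread_mutex_unlock(&bucket_lock);

		pthread_mutex_lock(&task_lock);
	}
	pthread_mutex_unlock(&task_lock);
}

int main(void)
{
	drain_list();
	return 0;
}
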
+
+static int
+lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, struct futex_q *me)
+{
+ struct futex_pi_state *pi_state = NULL;
+ struct futex_q *this, *next;
+ struct list_head *head;
+ struct task_struct *p;
+ pid_t pid;
+
+ head = &hb->chain;
+
+ list_for_each_entry_safe(this, next, head, list) {
+ if (match_futex (&this->key, &me->key)) {
+ /*
+ * Another waiter already exists - bump up
+ * the refcount and return its pi_state:
+ */
+ pi_state = this->pi_state;
+ atomic_inc(&pi_state->refcount);
+ me->pi_state = pi_state;
+
+ return 0;
+ }
+ }
+
+ /*
+ * We are the first waiter - try to look up the real owner and
+ * attach the new pi_state to it:
+ */
+ pid = uval & FUTEX_TID_MASK;
+ p = futex_find_get_task(pid);
+ if (!p)
+ return -ESRCH;
+
+ pi_state = alloc_pi_state();
+
+ /*
+ * Initialize the pi_mutex in locked state and make 'p'
+ * the owner of it:
+ */
+ rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
+
+ /* Store the key for possible exit cleanups: */
+ pi_state->key = me->key;
+
+ spin_lock_irq(&p->pi_lock);
+ list_add(&pi_state->list, &p->pi_state_list);
+ pi_state->owner = p;
+ spin_unlock_irq(&p->pi_lock);
+
+ put_task_struct(p);
+
+ me->pi_state = pi_state;
+
+ return 0;
+}
+
+/*
* The hash bucket lock must be held when this is called.
* Afterwards, the futex_q must not be accessed.
*/
@@ -284,16 +542,80 @@ static void wake_futex(struct futex_q *q)
q->lock_ptr = NULL;
}
+static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+{
+ struct task_struct *new_owner;
+ struct futex_pi_state *pi_state = this->pi_state;
+ u32 curval, newval;
+
+ if (!pi_state)
+ return -EINVAL;
+
+ new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
+
+ /*
+ * This happens when we have stolen the lock and the original
+ * pending owner did not enqueue itself back on the rt_mutex.
+	 * That's not a tragedy; it just means that a lock waiter
+	 * is in flight. We make the futex_q waiter the pending owner.
+ */
+ if (!new_owner)
+ new_owner = this->task;
+
+ /*
+ * We pass it to the next owner. (The WAITERS bit is always
+ * kept enabled while there is PI state around. We must also
+ * preserve the owner died bit.)
+ */
+ newval = (uval & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_owner->pid;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+ dec_preempt_count();
+
+ if (curval == -EFAULT)
+ return -EFAULT;
+ if (curval != uval)
+ return -EINVAL;
+
+ list_del_init(&pi_state->owner->pi_state_list);
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+ rt_mutex_unlock(&pi_state->pi_mutex);
+
+ return 0;
+}
+
+static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
+{
+ u32 oldval;
+
+ /*
+ * There is no waiter, so we unlock the futex. The owner died
+	 * bit does not need to be preserved here. We are the owner:
+ */
+ inc_preempt_count();
+ oldval = futex_atomic_cmpxchg_inatomic(uaddr, uval, 0);
+ dec_preempt_count();
+
+ if (oldval == -EFAULT)
+ return oldval;
+ if (oldval != uval)
+ return -EAGAIN;
+
+ return 0;
+}
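
Both helpers above hinge on a single compare-and-swap of the 32-bit futex word. A user-space sketch of the hand-over done by wake_futex_pi(): keep FUTEX_WAITERS set, preserve FUTEX_OWNER_DIED, install the new owner's TID, and succeed only if the word still holds the value that was sampled. The bit values match the definitions in linux/futex.h; the helper name and the TID used are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define FUTEX_WAITERS		0x80000000
#define FUTEX_OWNER_DIED	0x40000000
#define FUTEX_TID_MASK		0x3fffffff

static int hand_over(uint32_t *futex_word, uint32_t expected, uint32_t new_tid)
{
	uint32_t newval = (expected & FUTEX_OWNER_DIED) | FUTEX_WAITERS | new_tid;
	uint32_t curval = __sync_val_compare_and_swap(futex_word, expected, newval);

	return curval == expected ? 0 : -1;	/* -EINVAL/-EFAULT in the kernel */
}

int main(void)
{
	uint32_t word = FUTEX_WAITERS | (uint32_t)syscall(SYS_gettid);

	if (hand_over(&word, word, 4242) == 0)
		printf("new owner tid: %u\n", word & FUTEX_TID_MASK);
	return 0;
}
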
+
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
-static int futex_wake(unsigned long uaddr, int nr_wake)
+static int futex_wake(u32 __user *uaddr, int nr_wake)
{
- union futex_key key;
- struct futex_hash_bucket *bh;
- struct list_head *head;
+ struct futex_hash_bucket *hb;
struct futex_q *this, *next;
+ struct list_head *head;
+ union futex_key key;
int ret;
down_read(&current->mm->mmap_sem);
@@ -302,19 +624,21 @@ static int futex_wake(unsigned long uaddr, int nr_wake)
if (unlikely(ret != 0))
goto out;
- bh = hash_futex(&key);
- spin_lock(&bh->lock);
- head = &bh->chain;
+ hb = hash_futex(&key);
+ spin_lock(&hb->lock);
+ head = &hb->chain;
list_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key)) {
+			if (this->pi_state) {
+				ret = -EINVAL;
+				break;
+			}
wake_futex(this);
if (++ret >= nr_wake)
break;
}
}
- spin_unlock(&bh->lock);
+ spin_unlock(&hb->lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
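
futex_wake() above is the kernel half of the usual user-space protocol: a waiter asks the kernel to sleep only if the futex word still holds the value it last saw, so a wakeup racing with that check cannot be lost. A minimal user-space sketch of that pairing through the real sys_futex() entry point; the wrapper names are illustrative.

#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static uint32_t word;

static long wait_on_word(uint32_t *uaddr, uint32_t expected,
			 const struct timespec *rel_timeout)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAIT, expected, rel_timeout,
		       NULL, 0);
}

static long wake_word(uint32_t *uaddr, int nr_wake)
{
	return syscall(SYS_futex, uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0);
}

int main(void)
{
	struct timespec rel = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };

	/* The word no longer holds the expected value: returns immediately. */
	word = 1;
	wait_on_word(&word, 0, &rel);

	/* No waiter is queued on the word, so this wakes nobody. */
	return wake_word(&word, 1) != 0;
}
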
@@ -324,10 +648,12 @@ out:
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
*/
-static int futex_wake_op(unsigned long uaddr1, unsigned long uaddr2, int nr_wake, int nr_wake2, int op)
+static int
+futex_wake_op(u32 __user *uaddr1, u32 __user *uaddr2,
+ int nr_wake, int nr_wake2, int op)
{
union futex_key key1, key2;
- struct futex_hash_bucket *bh1, *bh2;
+ struct futex_hash_bucket *hb1, *hb2;
struct list_head *head;
struct futex_q *this, *next;
int ret, op_ret, attempt = 0;
@@ -342,27 +668,29 @@ retryfull:
if (unlikely(ret != 0))
goto out;
- bh1 = hash_futex(&key1);
- bh2 = hash_futex(&key2);
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
retry:
- if (bh1 < bh2)
- spin_lock(&bh1->lock);
- spin_lock(&bh2->lock);
- if (bh1 > bh2)
- spin_lock(&bh1->lock);
+ if (hb1 < hb2)
+ spin_lock(&hb1->lock);
+ spin_lock(&hb2->lock);
+ if (hb1 > hb2)
+ spin_lock(&hb1->lock);
- op_ret = futex_atomic_op_inuser(op, (int __user *)uaddr2);
+ op_ret = futex_atomic_op_inuser(op, uaddr2);
if (unlikely(op_ret < 0)) {
- int dummy;
+ u32 dummy;
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
#ifndef CONFIG_MMU
- /* we don't get EFAULT from MMU faults if we don't have an MMU,
- * but we might get them from range checking */
+ /*
+ * we don't get EFAULT from MMU faults if we don't have an MMU,
+ * but we might get them from range checking
+ */
ret = op_ret;
goto out;
#endif
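
The hb1/hb2 dance above is the classic address-ordered double lock: always take the lower-addressed bucket first so two callers that name the buckets in opposite order cannot deadlock, and take only one lock when both keys hash to the same bucket. A user-space sketch of the same shape with pthread mutexes; the names are illustrative.

#include <pthread.h>
#include <stdint.h>

static void lock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if ((uintptr_t)a < (uintptr_t)b)
		pthread_mutex_lock(a);
	pthread_mutex_lock(b);
	if ((uintptr_t)a > (uintptr_t)b)
		pthread_mutex_lock(a);
}

static void unlock_two(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

int main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	lock_two(&m1, &m2);
	unlock_two(&m1, &m2);
	lock_two(&m2, &m2);		/* same bucket: locked exactly once */
	unlock_two(&m2, &m2);
	return 0;
}
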
@@ -372,47 +700,34 @@ retry:
goto out;
}
- /* futex_atomic_op_inuser needs to both read and write
+ /*
+ * futex_atomic_op_inuser needs to both read and write
* *(int __user *)uaddr2, but we can't modify it
* non-atomically. Therefore, if get_user below is not
* enough, we need to handle the fault ourselves, while
- * still holding the mmap_sem. */
+ * still holding the mmap_sem.
+ */
if (attempt++) {
- struct vm_area_struct * vma;
- struct mm_struct *mm = current->mm;
-
- ret = -EFAULT;
- if (attempt >= 2 ||
- !(vma = find_vma(mm, uaddr2)) ||
- vma->vm_start > uaddr2 ||
- !(vma->vm_flags & VM_WRITE))
- goto out;
-
- switch (handle_mm_fault(mm, vma, uaddr2, 1)) {
- case VM_FAULT_MINOR:
- current->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- current->maj_flt++;
- break;
- default:
+ if (futex_handle_fault((unsigned long)uaddr2,
+ attempt))
goto out;
- }
goto retry;
}
- /* If we would have faulted, release mmap_sem,
- * fault it in and start all over again. */
+ /*
+ * If we would have faulted, release mmap_sem,
+ * fault it in and start all over again.
+ */
up_read(&current->mm->mmap_sem);
- ret = get_user(dummy, (int __user *)uaddr2);
+ ret = get_user(dummy, uaddr2);
if (ret)
return ret;
goto retryfull;
}
- head = &bh1->chain;
+ head = &hb1->chain;
list_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key1)) {
@@ -423,7 +738,7 @@ retry:
}
if (op_ret > 0) {
- head = &bh2->chain;
+ head = &hb2->chain;
op_ret = 0;
list_for_each_entry_safe(this, next, head, list) {
@@ -436,9 +751,9 @@ retry:
ret += op_ret;
}
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
out:
up_read(&current->mm->mmap_sem);
return ret;
@@ -448,11 +763,11 @@ out:
* Requeue all waiters hashed on one physical page to another
* physical page.
*/
-static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
- int nr_wake, int nr_requeue, int *valp)
+static int futex_requeue(u32 __user *uaddr1, u32 __user *uaddr2,
+ int nr_wake, int nr_requeue, u32 *cmpval)
{
union futex_key key1, key2;
- struct futex_hash_bucket *bh1, *bh2;
+ struct futex_hash_bucket *hb1, *hb2;
struct list_head *head1;
struct futex_q *this, *next;
int ret, drop_count = 0;
@@ -467,68 +782,72 @@ static int futex_requeue(unsigned long uaddr1, unsigned long uaddr2,
if (unlikely(ret != 0))
goto out;
- bh1 = hash_futex(&key1);
- bh2 = hash_futex(&key2);
+ hb1 = hash_futex(&key1);
+ hb2 = hash_futex(&key2);
- if (bh1 < bh2)
- spin_lock(&bh1->lock);
- spin_lock(&bh2->lock);
- if (bh1 > bh2)
- spin_lock(&bh1->lock);
+ if (hb1 < hb2)
+ spin_lock(&hb1->lock);
+ spin_lock(&hb2->lock);
+ if (hb1 > hb2)
+ spin_lock(&hb1->lock);
- if (likely(valp != NULL)) {
- int curval;
+ if (likely(cmpval != NULL)) {
+ u32 curval;
- ret = get_futex_value_locked(&curval, (int __user *)uaddr1);
+ ret = get_futex_value_locked(&curval, uaddr1);
if (unlikely(ret)) {
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
- /* If we would have faulted, release mmap_sem, fault
+ /*
+ * If we would have faulted, release mmap_sem, fault
* it in and start all over again.
*/
up_read(&current->mm->mmap_sem);
- ret = get_user(curval, (int __user *)uaddr1);
+ ret = get_user(curval, uaddr1);
if (!ret)
goto retry;
return ret;
}
- if (curval != *valp) {
+ if (curval != *cmpval) {
ret = -EAGAIN;
goto out_unlock;
}
}
- head1 = &bh1->chain;
+ head1 = &hb1->chain;
list_for_each_entry_safe(this, next, head1, list) {
if (!match_futex (&this->key, &key1))
continue;
if (++ret <= nr_wake) {
wake_futex(this);
} else {
- list_move_tail(&this->list, &bh2->chain);
- this->lock_ptr = &bh2->lock;
+ /*
+ * If key1 and key2 hash to the same bucket, no need to
+ * requeue.
+ */
+ if (likely(head1 != &hb2->chain)) {
+ list_move_tail(&this->list, &hb2->chain);
+ this->lock_ptr = &hb2->lock;
+ }
this->key = key2;
get_key_refs(&key2);
drop_count++;
if (ret - nr_wake >= nr_requeue)
break;
- /* Make sure to stop if key1 == key2 */
- if (head1 == &bh2->chain && head1 != &next->list)
- head1 = &this->list;
}
}
out_unlock:
- spin_unlock(&bh1->lock);
- if (bh1 != bh2)
- spin_unlock(&bh2->lock);
+ spin_unlock(&hb1->lock);
+ if (hb1 != hb2)
+ spin_unlock(&hb2->lock);
/* drop_key_refs() must be called outside the spinlocks. */
while (--drop_count >= 0)
@@ -543,7 +862,7 @@ out:
static inline struct futex_hash_bucket *
queue_lock(struct futex_q *q, int fd, struct file *filp)
{
- struct futex_hash_bucket *bh;
+ struct futex_hash_bucket *hb;
q->fd = fd;
q->filp = filp;
@@ -551,23 +870,24 @@ queue_lock(struct futex_q *q, int fd, struct file *filp)
init_waitqueue_head(&q->waiters);
get_key_refs(&q->key);
- bh = hash_futex(&q->key);
- q->lock_ptr = &bh->lock;
+ hb = hash_futex(&q->key);
+ q->lock_ptr = &hb->lock;
- spin_lock(&bh->lock);
- return bh;
+ spin_lock(&hb->lock);
+ return hb;
}
-static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *bh)
+static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
{
- list_add_tail(&q->list, &bh->chain);
- spin_unlock(&bh->lock);
+ list_add_tail(&q->list, &hb->chain);
+ q->task = current;
+ spin_unlock(&hb->lock);
}
static inline void
-queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
+queue_unlock(struct futex_q *q, struct futex_hash_bucket *hb)
{
- spin_unlock(&bh->lock);
+ spin_unlock(&hb->lock);
drop_key_refs(&q->key);
}
@@ -579,16 +899,17 @@ queue_unlock(struct futex_q *q, struct futex_hash_bucket *bh)
/* The key must be already stored in q->key. */
static void queue_me(struct futex_q *q, int fd, struct file *filp)
{
- struct futex_hash_bucket *bh;
- bh = queue_lock(q, fd, filp);
- __queue_me(q, bh);
+ struct futex_hash_bucket *hb;
+
+ hb = queue_lock(q, fd, filp);
+ __queue_me(q, hb);
}
/* Return 1 if we were still queued (ie. 0 means we were woken) */
static int unqueue_me(struct futex_q *q)
{
- int ret = 0;
spinlock_t *lock_ptr;
+ int ret = 0;
/* In the common case we don't take the spinlock, which is nice. */
retry:
@@ -614,6 +935,9 @@ static int unqueue_me(struct futex_q *q)
}
WARN_ON(list_empty(&q->list));
list_del(&q->list);
+
+ BUG_ON(q->pi_state);
+
spin_unlock(lock_ptr);
ret = 1;
}
@@ -622,21 +946,42 @@ static int unqueue_me(struct futex_q *q)
return ret;
}
-static int futex_wait(unsigned long uaddr, int val, unsigned long time)
+/*
+ * PI futexes cannot be requeued and must remove themselves from the
+ * hash bucket. The hash bucket lock is held on entry and dropped here.
+ */
+static void unqueue_me_pi(struct futex_q *q, struct futex_hash_bucket *hb)
{
- DECLARE_WAITQUEUE(wait, current);
- int ret, curval;
+ WARN_ON(list_empty(&q->list));
+ list_del(&q->list);
+
+ BUG_ON(!q->pi_state);
+ free_pi_state(q->pi_state);
+ q->pi_state = NULL;
+
+ spin_unlock(&hb->lock);
+
+ drop_key_refs(&q->key);
+}
+
+static int futex_wait(u32 __user *uaddr, u32 val, unsigned long time)
+{
+ struct task_struct *curr = current;
+ DECLARE_WAITQUEUE(wait, curr);
+ struct futex_hash_bucket *hb;
struct futex_q q;
- struct futex_hash_bucket *bh;
+ u32 uval;
+ int ret;
+ q.pi_state = NULL;
retry:
- down_read(&current->mm->mmap_sem);
+ down_read(&curr->mm->mmap_sem);
ret = get_futex_key(uaddr, &q.key);
if (unlikely(ret != 0))
goto out_release_sem;
- bh = queue_lock(&q, -1, NULL);
+ hb = queue_lock(&q, -1, NULL);
/*
* Access the page AFTER the futex is queued.
@@ -658,37 +1003,35 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
* We hold the mmap semaphore, so the mapping cannot have changed
* since we looked it up in get_futex_key.
*/
-
- ret = get_futex_value_locked(&curval, (int __user *)uaddr);
+ ret = get_futex_value_locked(&uval, uaddr);
if (unlikely(ret)) {
- queue_unlock(&q, bh);
+ queue_unlock(&q, hb);
- /* If we would have faulted, release mmap_sem, fault it in and
+ /*
+ * If we would have faulted, release mmap_sem, fault it in and
* start all over again.
*/
- up_read(&current->mm->mmap_sem);
+ up_read(&curr->mm->mmap_sem);
- ret = get_user(curval, (int __user *)uaddr);
+ ret = get_user(uval, uaddr);
if (!ret)
goto retry;
return ret;
}
- if (curval != val) {
- ret = -EWOULDBLOCK;
- queue_unlock(&q, bh);
- goto out_release_sem;
- }
+ ret = -EWOULDBLOCK;
+ if (uval != val)
+ goto out_unlock_release_sem;
/* Only actually queue if *uaddr contained val. */
- __queue_me(&q, bh);
+ __queue_me(&q, hb);
/*
* Now the futex is queued and we have checked the data, we
* don't want to hold mmap_sem while we sleep.
- */
- up_read(&current->mm->mmap_sem);
+ */
+ up_read(&curr->mm->mmap_sem);
/*
* There might have been scheduling since the queue_me(), as we
@@ -720,12 +1063,421 @@ static int futex_wait(unsigned long uaddr, int val, unsigned long time)
return 0;
if (time == 0)
return -ETIMEDOUT;
- /* We expect signal_pending(current), but another thread may
- * have handled it for us already. */
+ /*
+ * We expect signal_pending(current), but another thread may
+ * have handled it for us already.
+ */
return -EINTR;
+ out_unlock_release_sem:
+ queue_unlock(&q, hb);
+
out_release_sem:
+ up_read(&curr->mm->mmap_sem);
+ return ret;
+}
+
+/*
+ * Userspace tried a 0 -> TID atomic transition of the futex value
+ * and failed. The kernel side here does the whole locking operation:
+ * if there are waiters then it will block, it does PI, etc. (Due to
+ * races the kernel might see a 0 value of the futex too.)
+ */
+static int do_futex_lock_pi(u32 __user *uaddr, int detect, int trylock,
+ struct hrtimer_sleeper *to)
+{
+ struct task_struct *curr = current;
+ struct futex_hash_bucket *hb;
+ u32 uval, newval, curval;
+ struct futex_q q;
+ int ret, attempt = 0;
+
+ if (refill_pi_state_cache())
+ return -ENOMEM;
+
+ q.pi_state = NULL;
+ retry:
+ down_read(&curr->mm->mmap_sem);
+
+ ret = get_futex_key(uaddr, &q.key);
+ if (unlikely(ret != 0))
+ goto out_release_sem;
+
+ hb = queue_lock(&q, -1, NULL);
+
+ retry_locked:
+ /*
+ * To avoid races, we attempt to take the lock here again
+ * (by doing a 0 -> TID atomic cmpxchg), while holding all
+ * the locks. It will most likely not succeed.
+ */
+ newval = current->pid;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr, 0, newval);
+ dec_preempt_count();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+
+ /* We own the lock already */
+ if (unlikely((curval & FUTEX_TID_MASK) == current->pid)) {
+ if (!detect && 0)
+ force_sig(SIGKILL, current);
+ ret = -EDEADLK;
+ goto out_unlock_release_sem;
+ }
+
+ /*
+ * Surprise - we got the lock. Just return
+ * to userspace:
+ */
+ if (unlikely(!curval))
+ goto out_unlock_release_sem;
+
+ uval = curval;
+ newval = uval | FUTEX_WAITERS;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval);
+ dec_preempt_count();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+ if (unlikely(curval != uval))
+ goto retry_locked;
+
+ /*
+	 * We don't have the lock. Look up the PI state (or create it if
+ * we are the first waiter):
+ */
+ ret = lookup_pi_state(uval, hb, &q);
+
+ if (unlikely(ret)) {
+ /*
+ * There were no waiters and the owner task lookup
+ * failed. When the OWNER_DIED bit is set, then we
+ * know that this is a robust futex and we actually
+ * take the lock. This is safe as we are protected by
+ * the hash bucket lock. We also set the waiters bit
+ * unconditionally here, to simplify glibc handling of
+ * multiple tasks racing to acquire the lock and
+		 * clean up the problems which were left by the dead
+ * owner.
+ */
+ if (curval & FUTEX_OWNER_DIED) {
+ uval = newval;
+ newval = current->pid |
+ FUTEX_OWNER_DIED | FUTEX_WAITERS;
+
+ inc_preempt_count();
+ curval = futex_atomic_cmpxchg_inatomic(uaddr,
+ uval, newval);
+ dec_preempt_count();
+
+ if (unlikely(curval == -EFAULT))
+ goto uaddr_faulted;
+ if (unlikely(curval != uval))
+ goto retry_locked;
+ ret = 0;
+ }
+ goto out_unlock_release_sem;
+ }
+
+ /*
+ * Only actually queue now that the atomic ops are done:
+ */
+ __queue_me(&q, hb);
+
+ /*
+ * Now the futex is queued and we have checked the data, we
+ * don't want to hold mmap_sem while we sleep.
+ */
+ up_read(&curr->mm->mmap_sem);
+
+ WARN_ON(!q.pi_state);
+ /*
+ * Block on the PI mutex:
+ */
+ if (!trylock)
+ ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
+ else {
+ ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
+ /* Fixup the trylock return value: */
+ ret = ret ? 0 : -EWOULDBLOCK;
+ }
+
+ down_read(&curr->mm->mmap_sem);
+ hb = queue_lock(&q, -1, NULL);
+
+ /*
+ * Got the lock. We might not be the anticipated owner if we
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (!ret && q.pi_state->owner != curr) {
+ u32 newtid = current->pid | FUTEX_WAITERS;
+
+ /* Owner died? */
+ if (q.pi_state->owner != NULL) {
+ spin_lock_irq(&q.pi_state->owner->pi_lock);
+ list_del_init(&q.pi_state->list);
+ spin_unlock_irq(&q.pi_state->owner->pi_lock);
+ } else
+ newtid |= FUTEX_OWNER_DIED;
+
+ q.pi_state->owner = current;
+
+ spin_lock_irq(&current->pi_lock);
+ list_add(&q.pi_state->list, &current->pi_state_list);
+ spin_unlock_irq(&current->pi_lock);
+
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+ /*
+ * We own it, so we have to replace the pending owner
+			 * TID. This must be atomic as we have to preserve the
+ * owner died bit here.
+ */
+ ret = get_user(uval, uaddr);
+ while (!ret) {
+ newval = (uval & FUTEX_OWNER_DIED) | newtid;
+ curval = futex_atomic_cmpxchg_inatomic(uaddr,
+ uval, newval);
+ if (curval == -EFAULT)
+ ret = -EFAULT;
+ if (curval == uval)
+ break;
+ uval = curval;
+ }
+ } else {
+ /*
+			 * Catch the rare case where the lock was released
+ * when we were on the way back before we locked
+ * the hash bucket.
+ */
+ if (ret && q.pi_state->owner == curr) {
+ if (rt_mutex_trylock(&q.pi_state->pi_mutex))
+ ret = 0;
+ }
+ /* Unqueue and drop the lock */
+ unqueue_me_pi(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+ }
+
+ if (!detect && ret == -EDEADLK && 0)
+ force_sig(SIGKILL, current);
+
+ return ret;
+
+ out_unlock_release_sem:
+ queue_unlock(&q, hb);
+
+ out_release_sem:
+ up_read(&curr->mm->mmap_sem);
+ return ret;
+
+ uaddr_faulted:
+ /*
+ * We have to r/w *(int __user *)uaddr, but we can't modify it
+ * non-atomically. Therefore, if get_user below is not
+ * enough, we need to handle the fault ourselves, while
+ * still holding the mmap_sem.
+ */
+ if (attempt++) {
+ if (futex_handle_fault((unsigned long)uaddr, attempt))
+ goto out_unlock_release_sem;
+
+ goto retry_locked;
+ }
+
+ queue_unlock(&q, hb);
+ up_read(&curr->mm->mmap_sem);
+
+ ret = get_user(uval, uaddr);
+ if (!ret && (uval != -EFAULT))
+ goto retry;
+
+ return ret;
+}
+
+/*
+ * Restart handler
+ */
+static long futex_lock_pi_restart(struct restart_block *restart)
+{
+ struct hrtimer_sleeper timeout, *to = NULL;
+ int ret;
+
+ restart->fn = do_no_restart_syscall;
+
+ if (restart->arg2 || restart->arg3) {
+ to = &timeout;
+ hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+ hrtimer_init_sleeper(to, current);
+		to->timer.expires.tv64 = ((u64)restart->arg3 << 32) |
+			(u64) restart->arg2;
+ }
+
+	pr_debug("lock_pi restart: %p, %d\n",
+ (u32 __user *)restart->arg0, current->pid);
+
+ ret = do_futex_lock_pi((u32 __user *)restart->arg0, restart->arg1,
+ 0, to);
+
+ if (ret != -EINTR)
+ return ret;
+
+ restart->fn = futex_lock_pi_restart;
+
+ /* The other values are filled in */
+ return -ERESTART_RESTARTBLOCK;
+}
+
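
The restart block only offers unsigned long slots, so the 64-bit absolute expiry is split across arg2/arg3 when futex_lock_pi() below arms the restart and glued back together in the handler above. A user-space sketch of that round trip; the names are illustrative.

#include <inttypes.h>
#include <stdio.h>

static void pack(uint64_t expires, unsigned long *lo, unsigned long *hi)
{
	*lo = expires & 0xFFFFFFFFUL;
	*hi = expires >> 32;
}

static uint64_t unpack(unsigned long lo, unsigned long hi)
{
	return ((uint64_t)hi << 32) | (uint32_t)lo;
}

int main(void)
{
	unsigned long lo, hi;
	uint64_t expires = 0x1234567890ABCDEFULL;

	pack(expires, &lo, &hi);
	printf("round trip ok: %d\n", unpack(lo, hi) == expires);
	return 0;
}
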
+/*
+ * Called from the syscall entry below.
+ */
+static int futex_lock_pi(u32 __user *uaddr, int detect, unsigned long sec,
+ long nsec, int trylock)
+{
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct restart_block *restart;
+ int ret;
+
+ if (sec != MAX_SCHEDULE_TIMEOUT) {
+ to = &timeout;
+ hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_ABS);
+ hrtimer_init_sleeper(to, current);
+ to->timer.expires = ktime_set(sec, nsec);
+ }
+
+ ret = do_futex_lock_pi(uaddr, detect, trylock, to);
+
+ if (ret != -EINTR)
+ return ret;
+
+	pr_debug("lock_pi interrupted: %p, %d\n", uaddr, current->pid);
+
+ restart = &current_thread_info()->restart_block;
+ restart->fn = futex_lock_pi_restart;
+ restart->arg0 = (unsigned long) uaddr;
+ restart->arg1 = detect;
+ if (to) {
+ restart->arg2 = to->timer.expires.tv64 & 0xFFFFFFFF;
+ restart->arg3 = to->timer.expires.tv64 >> 32;
+ } else
+ restart->arg2 = restart->arg3 = 0;
+
+ return -ERESTART_RESTARTBLOCK;
+}
+
+/*
+ * Userspace attempted a TID -> 0 atomic transition, and failed.
+ * This is the in-kernel slowpath: we look up the PI state (if any),
+ * and do the rt-mutex unlock.
+ */
+static int futex_unlock_pi(u32 __user *uaddr)
+{
+ struct futex_hash_bucket *hb;
+ struct futex_q *this, *next;
+ u32 uval;
+ struct list_head *head;
+ union futex_key key;
+ int ret, attempt = 0;
+
+retry:
+ if (get_user(uval, uaddr))
+ return -EFAULT;
+ /*
+ * We release only a lock we actually own:
+ */
+ if ((uval & FUTEX_TID_MASK) != current->pid)
+ return -EPERM;
+ /*
+ * First take all the futex related locks:
+ */
+ down_read(&current->mm->mmap_sem);
+
+ ret = get_futex_key(uaddr, &key);
+ if (unlikely(ret != 0))
+ goto out;
+
+ hb = hash_futex(&key);
+ spin_lock(&hb->lock);
+
+retry_locked:
+ /*
+ * To avoid races, try to do the TID -> 0 atomic transition
+ * again. If it succeeds then we can return without waking
+ * anyone else up:
+ */
+ inc_preempt_count();
+ uval = futex_atomic_cmpxchg_inatomic(uaddr, current->pid, 0);
+ dec_preempt_count();
+
+ if (unlikely(uval == -EFAULT))
+ goto pi_faulted;
+ /*
+ * Rare case: we managed to release the lock atomically,
+ * no need to wake anyone else up:
+ */
+ if (unlikely(uval == current->pid))
+ goto out_unlock;
+
+ /*
+ * Ok, other tasks may need to be woken up - check waiters
+ * and do the wakeup if necessary:
+ */
+ head = &hb->chain;
+
+ list_for_each_entry_safe(this, next, head, list) {
+ if (!match_futex (&this->key, &key))
+ continue;
+ ret = wake_futex_pi(uaddr, uval, this);
+ /*
+ * The atomic access to the futex value
+ * generated a pagefault, so retry the
+ * user-access and the wakeup:
+ */
+ if (ret == -EFAULT)
+ goto pi_faulted;
+ goto out_unlock;
+ }
+ /*
+ * No waiters - kernel unlocks the futex:
+ */
+ ret = unlock_futex_pi(uaddr, uval);
+ if (ret == -EFAULT)
+ goto pi_faulted;
+
+out_unlock:
+ spin_unlock(&hb->lock);
+out:
up_read(&current->mm->mmap_sem);
+
+ return ret;
+
+pi_faulted:
+ /*
+ * We have to r/w *(int __user *)uaddr, but we can't modify it
+ * non-atomically. Therefore, if get_user below is not
+ * enough, we need to handle the fault ourselves, while
+ * still holding the mmap_sem.
+ */
+ if (attempt++) {
+ if (futex_handle_fault((unsigned long)uaddr, attempt))
+ goto out_unlock;
+
+ goto retry_locked;
+ }
+
+ spin_unlock(&hb->lock);
+ up_read(&current->mm->mmap_sem);
+
+ ret = get_user(uval, uaddr);
+ if (!ret && (uval != -EFAULT))
+ goto retry;
+
return ret;
}
@@ -735,6 +1487,7 @@ static int futex_close(struct inode *inode, struct file *filp)
unqueue_me(q);
kfree(q);
+
return 0;
}
@@ -766,7 +1519,7 @@ static struct file_operations futex_fops = {
* Signal allows caller to avoid the race which would occur if they
* set the sigio stuff up afterwards.
*/
-static int futex_fd(unsigned long uaddr, int signal)
+static int futex_fd(u32 __user *uaddr, int signal)
{
struct futex_q *q;
struct file *filp;
@@ -803,6 +1556,7 @@ static int futex_fd(unsigned long uaddr, int signal)
err = -ENOMEM;
goto error;
}
+ q->pi_state = NULL;
down_read(&current->mm->mmap_sem);
err = get_futex_key(uaddr, &q->key);
@@ -840,7 +1594,7 @@ error:
* Implementation: user-space maintains a per-thread list of locks it
* is holding. Upon do_exit(), the kernel carefully walks this list,
* and marks all locks that are owned by this thread with the
- * FUTEX_OWNER_DEAD bit, and wakes up a waiter (if any). The list is
+ * FUTEX_OWNER_DIED bit, and wakes up a waiter (if any). The list is
* always manipulated with the lock held, so the list is private and
* per-thread. Userspace also maintains a per-thread 'list_op_pending'
* field, to allow the kernel to clean up if the thread dies after
@@ -915,7 +1669,7 @@ err_unlock:
*/
int handle_futex_death(u32 __user *uaddr, struct task_struct *curr)
{
- u32 uval;
+ u32 uval, nval;
retry:
if (get_user(uval, uaddr))
@@ -932,12 +1686,16 @@ retry:
* thread-death.) The rest of the cleanup is done in
* userspace.
*/
- if (futex_atomic_cmpxchg_inatomic(uaddr, uval,
- uval | FUTEX_OWNER_DIED) != uval)
+ nval = futex_atomic_cmpxchg_inatomic(uaddr, uval,
+ uval | FUTEX_OWNER_DIED);
+ if (nval == -EFAULT)
+ return -1;
+
+ if (nval != uval)
goto retry;
if (uval & FUTEX_WAITERS)
- futex_wake((unsigned long)uaddr, 1);
+ futex_wake(uaddr, 1);
}
return 0;
}
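
handle_futex_death() is the kernel half of the robust-futex contract: user space registers a per-thread list of held locks, and the kernel walks that list at exit, tagging each entry with FUTEX_OWNER_DIED and waking a waiter. A user-space sketch of the registration side only, using the real set_robust_list syscall; the list is registered but left empty here.

#include <linux/futex.h>
#include <stddef.h>
#include <sys/syscall.h>
#include <unistd.h>

static struct robust_list_head rhead = {
	.list		 = { .next = &rhead.list },
	.futex_offset	 = 0,
	.list_op_pending = NULL,
};

int main(void)
{
	/* One robust list per thread; the kernel walks it in do_exit(). */
	return syscall(SYS_set_robust_list, &rhead, sizeof(rhead)) != 0;
}
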
@@ -978,7 +1736,7 @@ void exit_robust_list(struct task_struct *curr)
while (entry != &head->list) {
/*
* A pending lock might already be on the list, so
- * dont process it twice:
+ * don't process it twice:
*/
if (entry != pending)
if (handle_futex_death((void *)entry + futex_offset,
@@ -999,8 +1757,8 @@ void exit_robust_list(struct task_struct *curr)
}
}
-long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
- unsigned long uaddr2, int val2, int val3)
+long do_futex(u32 __user *uaddr, int op, u32 val, unsigned long timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3)
{
int ret;
@@ -1024,6 +1782,15 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
case FUTEX_WAKE_OP:
ret = futex_wake_op(uaddr, uaddr2, val, val2, val3);
break;
+ case FUTEX_LOCK_PI:
+ ret = futex_lock_pi(uaddr, val, timeout, val2, 0);
+ break;
+ case FUTEX_UNLOCK_PI:
+ ret = futex_unlock_pi(uaddr);
+ break;
+ case FUTEX_TRYLOCK_PI:
+ ret = futex_lock_pi(uaddr, 0, timeout, val2, 1);
+ break;
default:
ret = -ENOSYS;
}
@@ -1031,29 +1798,33 @@ long do_futex(unsigned long uaddr, int op, int val, unsigned long timeout,
}
-asmlinkage long sys_futex(u32 __user *uaddr, int op, int val,
+asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val,
struct timespec __user *utime, u32 __user *uaddr2,
- int val3)
+ u32 val3)
{
struct timespec t;
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
- int val2 = 0;
+ u32 val2 = 0;
- if (utime && (op == FUTEX_WAIT)) {
+ if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
if (copy_from_user(&t, utime, sizeof(t)) != 0)
return -EFAULT;
if (!timespec_valid(&t))
return -EINVAL;
- timeout = timespec_to_jiffies(&t) + 1;
+ if (op == FUTEX_WAIT)
+ timeout = timespec_to_jiffies(&t) + 1;
+ else {
+ timeout = t.tv_sec;
+ val2 = t.tv_nsec;
+ }
}
/*
* requeue parameter in 'utime' if op == FUTEX_REQUEUE.
*/
- if (op >= FUTEX_REQUEUE)
- val2 = (int) (unsigned long) utime;
+ if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
+ val2 = (u32) (unsigned long) utime;
- return do_futex((unsigned long)uaddr, op, val, timeout,
- (unsigned long)uaddr2, val2, val3);
+ return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}
static int futexfs_get_sb(struct file_system_type *fs_type,
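The unlock path above cmpxchg()s the futex word from current->pid back to 0, i.e. the user-space word carries the owner's TID and the kernel is only entered when that fast path fails. A minimal user-space sketch of that split, assuming the FUTEX_LOCK_PI/FUTEX_UNLOCK_PI opcodes this series adds to <linux/futex.h>; a real implementation must also handle FUTEX_OWNER_DIED, timeouts and error returns:

/*
 * Hedged user-space sketch, not part of this patch: the TID-in-word
 * PI-futex protocol implied by futex_unlock_pi() above.
 */
#include <linux/futex.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdatomic.h>
#include <time.h>
#include <unistd.h>

static long futex(unsigned int *uaddr, int op, unsigned int val,
		  const struct timespec *timeout)
{
	return syscall(SYS_futex, uaddr, op, val, timeout, NULL, 0);
}

static void pi_lock(atomic_uint *f, pid_t tid)
{
	unsigned int expected = 0;

	/* Fast path: 0 -> TID means we took the lock without kernel help. */
	if (atomic_compare_exchange_strong(f, &expected, (unsigned int)tid))
		return;
	/* Slow path: the kernel queues us and sets FUTEX_WAITERS in the word. */
	futex((unsigned int *)f, FUTEX_LOCK_PI, 0, NULL);
}

static void pi_unlock(atomic_uint *f, pid_t tid)
{
	unsigned int expected = (unsigned int)tid;

	/* Fast path: TID -> 0 only succeeds while no waiter bits are set. */
	if (atomic_compare_exchange_strong(f, &expected, 0))
		return;
	/* Slow path: let the kernel hand the lock to the top waiter. */
	futex((unsigned int *)f, FUTEX_UNLOCK_PI, 0, NULL);
}

int main(void)
{
	static atomic_uint futex_word;	/* shared PI lock word, 0 == free */
	pid_t tid = (pid_t)syscall(SYS_gettid);

	pi_lock(&futex_word, tid);
	/* ... critical section protected by the PI futex ... */
	pi_unlock(&futex_word, tid);
	return 0;
}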
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 1ab6a0ea3d1477..d1d92b441fb7d7 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -129,16 +129,20 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
unsigned long timeout = MAX_SCHEDULE_TIMEOUT;
int val2 = 0;
- if (utime && (op == FUTEX_WAIT)) {
+ if (utime && (op == FUTEX_WAIT || op == FUTEX_LOCK_PI)) {
if (get_compat_timespec(&t, utime))
return -EFAULT;
if (!timespec_valid(&t))
return -EINVAL;
- timeout = timespec_to_jiffies(&t) + 1;
+ if (op == FUTEX_WAIT)
+ timeout = timespec_to_jiffies(&t) + 1;
+ else {
+ timeout = t.tv_sec;
+ val2 = t.tv_nsec;
+ }
}
- if (op >= FUTEX_REQUEUE)
+ if (op == FUTEX_REQUEUE || op == FUTEX_CMP_REQUEUE)
val2 = (int) (unsigned long) utime;
- return do_futex((unsigned long)uaddr, op, val, timeout,
- (unsigned long)uaddr2, val2, val3);
+ return do_futex(uaddr, op, val, timeout, uaddr2, val2, val3);
}
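Both the native and compat paths above carry an absolute deadline for FUTEX_LOCK_PI through the generic (timeout, val2) pair: tv_sec travels in the timeout slot, tv_nsec in val2. A small hedged sketch of the corresponding reassembly; unpack_pi_deadline() is a hypothetical helper, not a function from this patch:

#include <time.h>

/* Hypothetical helper mirroring the packing done in sys_futex()/compat_sys_futex(). */
static inline struct timespec unpack_pi_deadline(unsigned long timeout,
						 unsigned int val2)
{
	struct timespec ts;

	ts.tv_sec = (time_t)timeout;	/* tv_sec travelled in 'timeout' */
	ts.tv_nsec = (long)val2;	/* tv_nsec travelled in 'val2'   */
	return ts;
}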
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 55601b3ce60e92..8d3dc29ef41ae1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -833,7 +833,7 @@ static void migrate_hrtimers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int hrtimer_cpu_notify(struct notifier_block *self,
+static int __devinit hrtimer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -857,7 +857,7 @@ static int hrtimer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block hrtimers_nb = {
+static struct notifier_block __devinitdata hrtimers_nb = {
.notifier_call = hrtimer_cpu_notify,
};
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 036b6285b15ccb..e38e4bac97cac6 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -16,6 +16,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/module.h>
+#include <linux/poison.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
@@ -381,7 +382,7 @@ void debug_mutex_set_owner(struct mutex *lock,
void debug_mutex_init_waiter(struct mutex_waiter *waiter)
{
- memset(waiter, 0x11, sizeof(*waiter));
+ memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
waiter->magic = waiter;
INIT_LIST_HEAD(&waiter->list);
}
@@ -397,7 +398,7 @@ void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
void debug_mutex_free_waiter(struct mutex_waiter *waiter)
{
DEBUG_WARN_ON(!list_empty(&waiter->list));
- memset(waiter, 0x22, sizeof(*waiter));
+ memset(waiter, MUTEX_DEBUG_FREE, sizeof(*waiter));
}
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
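The hunk above only swaps the magic 0x11/0x22 memset patterns for named constants pulled in via the new <linux/poison.h> include; presumably MUTEX_DEBUG_INIT/MUTEX_DEBUG_FREE keep the same byte values, so behaviour is unchanged and only the naming is centralised. A minimal sketch of that poisoning pattern (the values and the struct below are stand-ins, not taken from the patch):

#include <string.h>

#define MUTEX_DEBUG_INIT	0x11	/* assumed: freshly initialised, never used */
#define MUTEX_DEBUG_FREE	0x22	/* assumed: released, must not be touched   */

struct waiter_stub {			/* stand-in for struct mutex_waiter */
	char payload[64];
};

static void poison_on_init(struct waiter_stub *w)
{
	/* Any read of an uninitialised field now shows a recognisable pattern. */
	memset(w, MUTEX_DEBUG_INIT, sizeof(*w));
}

static void poison_on_free(struct waiter_stub *w)
{
	/* Use-after-free shows up as 0x22 bytes instead of plausible data. */
	memset(w, MUTEX_DEBUG_FREE, sizeof(*w));
}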
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index fc311a4673a25e..857b4fa0912447 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -38,13 +38,22 @@ config PM_DEBUG
config PM_TRACE
bool "Suspend/resume event tracing"
- depends on PM && PM_DEBUG && X86_32
- default y
+ depends on PM && PM_DEBUG && X86_32 && EXPERIMENTAL
+ default n
---help---
This enables some cheesy code to save the last PM event point in the
RTC across reboots, so that you can debug a machine that just hangs
during suspend (or more commonly, during resume).
+ To use this debugging feature you should attempt to suspend the machine,
+ then reboot it, then run
+
+ dmesg -s 1000000 | grep 'hash matches'
+
+ CAUTION: this option will cause your machine's real-time clock to be
+ set to an invalid time after a resume.
+
+
config SOFTWARE_SUSPEND
bool "Software Suspend"
depends on PM && SWAP && (X86 && (!SMP || SUSPEND_SMP)) || ((FRV || PPC32) && !SMP)
diff --git a/kernel/profile.c b/kernel/profile.c
index 68afe121e5071f..5a730fdb1a2cec 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -299,7 +299,7 @@ out:
}
#ifdef CONFIG_HOTPLUG_CPU
-static int profile_cpu_callback(struct notifier_block *info,
+static int __devinit profile_cpu_callback(struct notifier_block *info,
unsigned long action, void *__cpu)
{
int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 20e9710fc21c5f..f464f5ae3f11a8 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -182,6 +182,15 @@ long rcu_batches_completed(void)
return rcu_ctrlblk.completed;
}
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_ctrlblk.completed;
+}
+
static void rcu_barrier_callback(struct rcu_head *notused)
{
if (atomic_dec_and_test(&rcu_barrier_cpu_count))
@@ -539,7 +548,7 @@ static void __devinit rcu_online_cpu(int cpu)
tasklet_init(&per_cpu(rcu_tasklet, cpu), rcu_process_callbacks, 0UL);
}
-static int rcu_cpu_notify(struct notifier_block *self,
+static int __devinit rcu_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -556,7 +565,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block rcu_nb = {
+static struct notifier_block __devinitdata rcu_nb = {
.notifier_call = rcu_cpu_notify,
};
@@ -619,6 +628,7 @@ module_param(qlowmark, int, 0);
module_param(rsinterval, int, 0);
#endif
EXPORT_SYMBOL_GPL(rcu_batches_completed);
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
EXPORT_SYMBOL_GPL(call_rcu);
EXPORT_SYMBOL_GPL(call_rcu_bh);
EXPORT_SYMBOL_GPL(synchronize_rcu);
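rcu_batches_completed_bh() is added and exported purely for debug and statistics use; a minimal hedged sketch of a consumer sampling it across an interval, assuming the matching declaration lands in <linux/rcupdate.h> as the export suggests. This mirrors how the rcutorture changes below consume it:

#include <linux/module.h>
#include <linux/rcupdate.h>

static long bh_batches_snapshot;

static void bh_sample_start(void)
{
	bh_batches_snapshot = rcu_batches_completed_bh();
}

static long bh_sample_delta(void)
{
	/* rcu_bh batches processed since bh_sample_start(). */
	return rcu_batches_completed_bh() - bh_batches_snapshot;
}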
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 8154e7589d1284..4d1c3d2471278e 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1,5 +1,5 @@
/*
- * Read-Copy Update /proc-based torture test facility
+ * Read-Copy Update module-based torture test facility
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -53,6 +53,7 @@ static int stat_interval; /* Interval between stats, in seconds. */
static int verbose; /* Print more debug info. */
static int test_no_idle_hz; /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec)*/
+static char *torture_type = "rcu"; /* What to torture. */
module_param(nreaders, int, 0);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
@@ -64,13 +65,16 @@ module_param(test_no_idle_hz, bool, 0);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-#define TORTURE_FLAG "rcutorture: "
+module_param(torture_type, charp, 0);
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh)");
+
+#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
- do { printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
+ do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
- do { if (verbose) printk(KERN_ALERT TORTURE_FLAG s "\n"); } while (0)
+ do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
- do { if (verbose) printk(KERN_ALERT TORTURE_FLAG "!!! " s "\n"); } while (0)
+ do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)
static char printk_buf[4096];
@@ -139,28 +143,6 @@ rcu_torture_free(struct rcu_torture *p)
spin_unlock_bh(&rcu_torture_lock);
}
-static void
-rcu_torture_cb(struct rcu_head *p)
-{
- int i;
- struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
-
- if (fullstop) {
- /* Test is ending, just drop callbacks on the floor. */
- /* The next initialization will pick up the pieces. */
- return;
- }
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- rcu_torture_free(rp);
- } else
- call_rcu(p, rcu_torture_cb);
-}
-
struct rcu_random_state {
unsigned long rrs_state;
unsigned long rrs_count;
@@ -191,6 +173,119 @@ rcu_random(struct rcu_random_state *rrsp)
}
/*
+ * Operations vector for selecting different types of tests.
+ */
+
+struct rcu_torture_ops {
+ void (*init)(void);
+ void (*cleanup)(void);
+ int (*readlock)(void);
+ void (*readunlock)(int idx);
+ int (*completed)(void);
+ void (*deferredfree)(struct rcu_torture *p);
+ int (*stats)(char *page);
+ char *name;
+};
+static struct rcu_torture_ops *cur_ops = NULL;
+
+/*
+ * Definitions for rcu torture testing.
+ */
+
+static int rcu_torture_read_lock(void)
+{
+ rcu_read_lock();
+ return 0;
+}
+
+static void rcu_torture_read_unlock(int idx)
+{
+ rcu_read_unlock();
+}
+
+static int rcu_torture_completed(void)
+{
+ return rcu_batches_completed();
+}
+
+static void
+rcu_torture_cb(struct rcu_head *p)
+{
+ int i;
+ struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
+
+ if (fullstop) {
+ /* Test is ending, just drop callbacks on the floor. */
+ /* The next initialization will pick up the pieces. */
+ return;
+ }
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ rcu_torture_free(rp);
+ } else
+ cur_ops->deferredfree(rp);
+}
+
+static void rcu_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops rcu_ops = {
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_torture_read_lock,
+ .readunlock = rcu_torture_read_unlock,
+ .completed = rcu_torture_completed,
+ .deferredfree = rcu_torture_deferred_free,
+ .stats = NULL,
+ .name = "rcu"
+};
+
+/*
+ * Definitions for rcu_bh torture testing.
+ */
+
+static int rcu_bh_torture_read_lock(void)
+{
+ rcu_read_lock_bh();
+ return 0;
+}
+
+static void rcu_bh_torture_read_unlock(int idx)
+{
+ rcu_read_unlock_bh();
+}
+
+static int rcu_bh_torture_completed(void)
+{
+ return rcu_batches_completed_bh();
+}
+
+static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
+{
+ call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
+}
+
+static struct rcu_torture_ops rcu_bh_ops = {
+ .init = NULL,
+ .cleanup = NULL,
+ .readlock = rcu_bh_torture_read_lock,
+ .readunlock = rcu_bh_torture_read_unlock,
+ .completed = rcu_bh_torture_completed,
+ .deferredfree = rcu_bh_torture_deferred_free,
+ .stats = NULL,
+ .name = "rcu_bh"
+};
+
+static struct rcu_torture_ops *torture_ops[] =
+ { &rcu_ops, &rcu_bh_ops, NULL };
+
+/*
* RCU torture writer kthread. Repeatedly substitutes a new structure
* for that pointed to by rcu_torture_current, freeing the old structure
* after a series of grace periods (the "pipeline").
@@ -209,8 +304,6 @@ rcu_torture_writer(void *arg)
do {
schedule_timeout_uninterruptible(1);
- if (rcu_batches_completed() == oldbatch)
- continue;
if ((rp = rcu_torture_alloc()) == NULL)
continue;
rp->rtort_pipe_count = 0;
@@ -225,10 +318,10 @@ rcu_torture_writer(void *arg)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
- call_rcu(&old_rp->rtort_rcu, rcu_torture_cb);
+ cur_ops->deferredfree(old_rp);
}
rcu_torture_current_version++;
- oldbatch = rcu_batches_completed();
+ oldbatch = cur_ops->completed();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
while (!kthread_should_stop())
@@ -246,6 +339,7 @@ static int
rcu_torture_reader(void *arg)
{
int completed;
+ int idx;
DEFINE_RCU_RANDOM(rand);
struct rcu_torture *p;
int pipe_count;
@@ -254,12 +348,12 @@ rcu_torture_reader(void *arg)
set_user_nice(current, 19);
do {
- rcu_read_lock();
- completed = rcu_batches_completed();
+ idx = cur_ops->readlock();
+ completed = cur_ops->completed();
p = rcu_dereference(rcu_torture_current);
if (p == NULL) {
/* Wait for rcu_torture_writer to get underway */
- rcu_read_unlock();
+ cur_ops->readunlock(idx);
schedule_timeout_interruptible(HZ);
continue;
}
@@ -273,14 +367,14 @@ rcu_torture_reader(void *arg)
pipe_count = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_count)[pipe_count];
- completed = rcu_batches_completed() - completed;
+ completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_batch)[completed];
preempt_enable();
- rcu_read_unlock();
+ cur_ops->readunlock(idx);
schedule();
} while (!kthread_should_stop() && !fullstop);
VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
@@ -311,7 +405,7 @@ rcu_torture_printk(char *page)
if (pipesummary[i] != 0)
break;
}
- cnt += sprintf(&page[cnt], "rcutorture: ");
+ cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt],
"rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
"rtmbe: %d",
@@ -324,7 +418,7 @@ rcu_torture_printk(char *page)
atomic_read(&n_rcu_torture_mberror));
if (atomic_read(&n_rcu_torture_mberror) != 0)
cnt += sprintf(&page[cnt], " !!!");
- cnt += sprintf(&page[cnt], "\nrcutorture: ");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
if (i > 1) {
cnt += sprintf(&page[cnt], "!!! ");
atomic_inc(&n_rcu_torture_error);
@@ -332,17 +426,19 @@ rcu_torture_printk(char *page)
cnt += sprintf(&page[cnt], "Reader Pipe: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
- cnt += sprintf(&page[cnt], "\nrcutorture: ");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt], "Reader Batch: ");
- for (i = 0; i < RCU_TORTURE_PIPE_LEN; i++)
+ for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
- cnt += sprintf(&page[cnt], "\nrcutorture: ");
+ cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
cnt += sprintf(&page[cnt], " %d",
atomic_read(&rcu_torture_wcount[i]));
}
cnt += sprintf(&page[cnt], "\n");
+ if (cur_ops->stats != NULL)
+ cnt += cur_ops->stats(&page[cnt]);
return cnt;
}
@@ -444,11 +540,11 @@ rcu_torture_shuffle(void *arg)
static inline void
rcu_torture_print_module_parms(char *tag)
{
- printk(KERN_ALERT TORTURE_FLAG "--- %s: nreaders=%d "
+ printk(KERN_ALERT "%s" TORTURE_FLAG "--- %s: nreaders=%d "
"stat_interval=%d verbose=%d test_no_idle_hz=%d "
"shuffle_interval = %d\n",
- tag, nrealreaders, stat_interval, verbose, test_no_idle_hz,
- shuffle_interval);
+ torture_type, tag, nrealreaders, stat_interval, verbose,
+ test_no_idle_hz, shuffle_interval);
}
static void
@@ -493,6 +589,9 @@ rcu_torture_cleanup(void)
rcu_barrier();
rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
+
+ if (cur_ops->cleanup != NULL)
+ cur_ops->cleanup();
if (atomic_read(&n_rcu_torture_error))
rcu_torture_print_module_parms("End of test: FAILURE");
else
@@ -508,6 +607,20 @@ rcu_torture_init(void)
/* Process args and tell the world that the torturer is on the job. */
+ for (i = 0; cur_ops = torture_ops[i], cur_ops != NULL; i++) {
+ cur_ops = torture_ops[i];
+ if (strcmp(torture_type, cur_ops->name) == 0) {
+ break;
+ }
+ }
+ if (cur_ops == NULL) {
+ printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
+ torture_type);
+ return (-EINVAL);
+ }
+ if (cur_ops->init != NULL)
+ cur_ops->init(); /* no "goto unwind" prior to this point!!! */
+
if (nreaders >= 0)
nrealreaders = nreaders;
else
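The ops vector above is what makes the torturer flavour-agnostic: the reader, writer and stats paths only ever go through cur_ops. A hedged sketch of what wiring a third flavour would look like; "rcu_foo" and its primitives are purely hypothetical stand-ins, not anything added by this patch:

static int rcu_foo_torture_read_lock(void)
{
	rcu_foo_read_lock();			/* hypothetical primitive */
	return 0;
}

static void rcu_foo_torture_read_unlock(int idx)
{
	rcu_foo_read_unlock();			/* hypothetical primitive */
}

static int rcu_foo_torture_completed(void)
{
	return rcu_foo_batches_completed();	/* hypothetical primitive */
}

static void rcu_foo_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_foo(&p->rtort_rcu, rcu_torture_cb);	/* hypothetical primitive */
}

static struct rcu_torture_ops rcu_foo_ops = {
	.init		= NULL,
	.cleanup	= NULL,
	.readlock	= rcu_foo_torture_read_lock,
	.readunlock	= rcu_foo_torture_read_unlock,
	.completed	= rcu_foo_torture_completed,
	.deferredfree	= rcu_foo_torture_deferred_free,
	.stats		= NULL,
	.name		= "rcu_foo"	/* selected with torture_type=rcu_foo */
};

/* torture_ops[] would then become { &rcu_ops, &rcu_bh_ops, &rcu_foo_ops, NULL }. */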
diff --git a/kernel/resource.c b/kernel/resource.c
index e3080fcc66a3b1..2404f9b0bc4772 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -232,6 +232,44 @@ int release_resource(struct resource *old)
EXPORT_SYMBOL(release_resource);
+#ifdef CONFIG_MEMORY_HOTPLUG
+/*
+ * Finds the lowest memory resource that exists within [res->start, res->end).
+ * The caller must specify res->start, res->end and res->flags.
+ * If found, returns 0 and res is overwritten; if not found, returns -1.
+ */
+int find_next_system_ram(struct resource *res)
+{
+ resource_size_t start, end;
+ struct resource *p;
+
+ BUG_ON(!res);
+
+ start = res->start;
+ end = res->end;
+
+ read_lock(&resource_lock);
+ for (p = iomem_resource.child; p ; p = p->sibling) {
+ /* system ram is just marked as IORESOURCE_MEM */
+ if (p->flags != res->flags)
+ continue;
+ if (p->start > end) {
+ p = NULL;
+ break;
+ }
+ if (p->start >= start)
+ break;
+ }
+ read_unlock(&resource_lock);
+ if (!p)
+ return -1;
+ /* copy data */
+ res->start = p->start;
+ res->end = p->end;
+ return 0;
+}
+#endif
+
/*
* Find empty slot in the resource tree given range and alignment.
*/
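find_next_system_ram() returns one matching range at a time and overwrites *res, so a memory-hotplug style caller walks a span by restarting the search just past each hit. A hedged sketch of such a walker; the function name and span bounds are illustrative, and per the comment above the ranges are matched purely on IORESOURCE_MEM:

static int walk_system_ram(resource_size_t span_start, resource_size_t span_end)
{
	struct resource res;
	int nr_ranges = 0;

	res.start = span_start;
	res.end = span_end;
	res.flags = IORESOURCE_MEM;	/* system RAM is plain IORESOURCE_MEM here */

	while (find_next_system_ram(&res) == 0) {
		nr_ranges++;		/* res now holds one RAM range: act on it */

		if (res.end >= span_end)
			break;
		/* Restart the search just past the range we handled. */
		res.start = res.end + 1;
		res.end = span_end;
		res.flags = IORESOURCE_MEM;
	}
	return nr_ranges;
}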
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
new file mode 100644
index 00000000000000..4aa8a2c9f45341
--- /dev/null
+++ b/kernel/rtmutex-debug.c
@@ -0,0 +1,513 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This code is based on the rt.c implementation in the preempt-rt tree.
+ * Portions of said code are
+ *
+ * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ * Copyright (C) 2006 Esben Nielsen
+ * Copyright (C) 2006 Kihon Technologies Inc.,
+ * Steven Rostedt <rostedt@goodmis.org>
+ *
+ * See rt.c in preempt-rt for proper credits and further information
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+
+#include "rtmutex_common.h"
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# include "rtmutex-debug.h"
+#else
+# include "rtmutex.h"
+#endif
+
+# define TRACE_WARN_ON(x) WARN_ON(x)
+# define TRACE_BUG_ON(x) BUG_ON(x)
+
+# define TRACE_OFF() \
+do { \
+ if (rt_trace_on) { \
+ rt_trace_on = 0; \
+ console_verbose(); \
+ if (spin_is_locked(&current->pi_lock)) \
+ spin_unlock(&current->pi_lock); \
+ if (spin_is_locked(&current->held_list_lock)) \
+ spin_unlock(&current->held_list_lock); \
+ } \
+} while (0)
+
+# define TRACE_OFF_NOLOCK() \
+do { \
+ if (rt_trace_on) { \
+ rt_trace_on = 0; \
+ console_verbose(); \
+ } \
+} while (0)
+
+# define TRACE_BUG_LOCKED() \
+do { \
+ TRACE_OFF(); \
+ BUG(); \
+} while (0)
+
+# define TRACE_WARN_ON_LOCKED(c) \
+do { \
+ if (unlikely(c)) { \
+ TRACE_OFF(); \
+ WARN_ON(1); \
+ } \
+} while (0)
+
+# define TRACE_BUG_ON_LOCKED(c) \
+do { \
+ if (unlikely(c)) \
+ TRACE_BUG_LOCKED(); \
+} while (0)
+
+#ifdef CONFIG_SMP
+# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
+#else
+# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
+#endif
+
+/*
+ * deadlock detection flag. We turn it off when we detect
+ * the first problem because we don't want to recurse back
+ * into the tracing code when doing error printk or
+ * executing a BUG():
+ */
+int rt_trace_on = 1;
+
+void deadlock_trace_off(void)
+{
+ rt_trace_on = 0;
+}
+
+static void printk_task(task_t *p)
+{
+ if (p)
+ printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_task_short(task_t *p)
+{
+ if (p)
+ printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
+ else
+ printk("<none>");
+}
+
+static void printk_lock(struct rt_mutex *lock, int print_owner)
+{
+ if (lock->name)
+ printk(" [%p] {%s}\n",
+ lock, lock->name);
+ else
+ printk(" [%p] {%s:%d}\n",
+ lock, lock->file, lock->line);
+
+ if (print_owner && rt_mutex_owner(lock)) {
+ printk(".. ->owner: %p\n", lock->owner);
+ printk(".. held by: ");
+ printk_task(rt_mutex_owner(lock));
+ printk("\n");
+ }
+ if (rt_mutex_owner(lock)) {
+ printk("... acquired at: ");
+ print_symbol("%s\n", lock->acquire_ip);
+ }
+}
+
+static void printk_waiter(struct rt_mutex_waiter *w)
+{
+ printk("-------------------------\n");
+ printk("| waiter struct %p:\n", w);
+ printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
+ w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
+ w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
+ w->list_entry.prio);
+ printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
+ w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
+ w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
+ w->pi_list_entry.prio);
+ printk("\n| lock:\n");
+ printk_lock(w->lock, 1);
+ printk("| w->ti->task:\n");
+ printk_task(w->task);
+ printk("| blocked at: ");
+ print_symbol("%s\n", w->ip);
+ printk("-------------------------\n");
+}
+
+static void show_task_locks(task_t *p)
+{
+ switch (p->state) {
+ case TASK_RUNNING: printk("R"); break;
+ case TASK_INTERRUPTIBLE: printk("S"); break;
+ case TASK_UNINTERRUPTIBLE: printk("D"); break;
+ case TASK_STOPPED: printk("T"); break;
+ case EXIT_ZOMBIE: printk("Z"); break;
+ case EXIT_DEAD: printk("X"); break;
+ default: printk("?"); break;
+ }
+ printk_task(p);
+ if (p->pi_blocked_on) {
+ struct rt_mutex *lock = p->pi_blocked_on->lock;
+
+ printk(" blocked on:");
+ printk_lock(lock, 1);
+ } else
+ printk(" (not blocked)\n");
+}
+
+void rt_mutex_show_held_locks(task_t *task, int verbose)
+{
+ struct list_head *curr, *cursor = NULL;
+ struct rt_mutex *lock;
+ task_t *t;
+ unsigned long flags;
+ int count = 0;
+
+ if (!rt_trace_on)
+ return;
+
+ if (verbose) {
+ printk("------------------------------\n");
+ printk("| showing all locks held by: | (");
+ printk_task_short(task);
+ printk("):\n");
+ printk("------------------------------\n");
+ }
+
+next:
+ spin_lock_irqsave(&task->held_list_lock, flags);
+ list_for_each(curr, &task->held_list_head) {
+ if (cursor && curr != cursor)
+ continue;
+ lock = list_entry(curr, struct rt_mutex, held_list_entry);
+ t = rt_mutex_owner(lock);
+ WARN_ON(t != task);
+ count++;
+ cursor = curr->next;
+ spin_unlock_irqrestore(&task->held_list_lock, flags);
+
+ printk("\n#%03d: ", count);
+ printk_lock(lock, 0);
+ goto next;
+ }
+ spin_unlock_irqrestore(&task->held_list_lock, flags);
+
+ printk("\n");
+}
+
+void rt_mutex_show_all_locks(void)
+{
+ task_t *g, *p;
+ int count = 10;
+ int unlock = 1;
+
+ printk("\n");
+ printk("----------------------\n");
+ printk("| showing all tasks: |\n");
+ printk("----------------------\n");
+
+ /*
+	 * Here we try to get the tasklist_lock as hard as possible;
+	 * if not successful after 2 seconds we ignore it (but keep
+ * trying). This is to enable a debug printout even if a
+ * tasklist_lock-holding task deadlocks or crashes.
+ */
+retry:
+ if (!read_trylock(&tasklist_lock)) {
+ if (count == 10)
+ printk("hm, tasklist_lock locked, retrying... ");
+ if (count) {
+ count--;
+ printk(" #%d", 10-count);
+ mdelay(200);
+ goto retry;
+ }
+ printk(" ignoring it.\n");
+ unlock = 0;
+ }
+ if (count != 10)
+ printk(" locked it.\n");
+
+ do_each_thread(g, p) {
+ show_task_locks(p);
+ if (!unlock)
+ if (read_trylock(&tasklist_lock))
+ unlock = 1;
+ } while_each_thread(g, p);
+
+ printk("\n");
+
+ printk("-----------------------------------------\n");
+ printk("| showing all locks held in the system: |\n");
+ printk("-----------------------------------------\n");
+
+ do_each_thread(g, p) {
+ rt_mutex_show_held_locks(p, 0);
+ if (!unlock)
+ if (read_trylock(&tasklist_lock))
+ unlock = 1;
+ } while_each_thread(g, p);
+
+
+ printk("=============================================\n\n");
+
+ if (unlock)
+ read_unlock(&tasklist_lock);
+}
+
+void rt_mutex_debug_check_no_locks_held(task_t *task)
+{
+ struct rt_mutex_waiter *w;
+ struct list_head *curr;
+ struct rt_mutex *lock;
+
+ if (!rt_trace_on)
+ return;
+ if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
+ printk("BUG: PI priority boost leaked!\n");
+ printk_task(task);
+ printk("\n");
+ }
+ if (list_empty(&task->held_list_head))
+ return;
+
+ spin_lock(&task->pi_lock);
+ plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
+ TRACE_OFF();
+
+ printk("hm, PI interest held at exit time? Task:\n");
+ printk_task(task);
+ printk_waiter(w);
+ return;
+ }
+ spin_unlock(&task->pi_lock);
+
+ list_for_each(curr, &task->held_list_head) {
+ lock = list_entry(curr, struct rt_mutex, held_list_entry);
+
+ printk("BUG: %s/%d, lock held at task exit time!\n",
+ task->comm, task->pid);
+ printk_lock(lock, 1);
+ if (rt_mutex_owner(lock) != task)
+ printk("exiting task is not even the owner??\n");
+ }
+}
+
+int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+ const void *to = from + len;
+ struct list_head *curr;
+ struct rt_mutex *lock;
+ unsigned long flags;
+ void *lock_addr;
+
+ if (!rt_trace_on)
+ return 0;
+
+ spin_lock_irqsave(&current->held_list_lock, flags);
+ list_for_each(curr, &current->held_list_head) {
+ lock = list_entry(curr, struct rt_mutex, held_list_entry);
+ lock_addr = lock;
+ if (lock_addr < from || lock_addr >= to)
+ continue;
+ TRACE_OFF();
+
+ printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
+ current->comm, current->pid, lock, from, to);
+ dump_stack();
+ printk_lock(lock, 1);
+ if (rt_mutex_owner(lock) != current)
+ printk("freeing task is not even the owner??\n");
+ return 1;
+ }
+ spin_unlock_irqrestore(&current->held_list_lock, flags);
+
+ return 0;
+}
+
+void rt_mutex_debug_task_free(struct task_struct *task)
+{
+ WARN_ON(!plist_head_empty(&task->pi_waiters));
+ WARN_ON(task->pi_blocked_on);
+}
+
+/*
+ * We fill out the fields in the waiter to store the information about
+ * the deadlock. We print when we return. act_waiter can be NULL in
+ * case of a remove waiter operation.
+ */
+void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+ struct rt_mutex *lock)
+{
+ struct task_struct *task;
+
+ if (!rt_trace_on || detect || !act_waiter)
+ return;
+
+ task = rt_mutex_owner(act_waiter->lock);
+ if (task && task != current) {
+ act_waiter->deadlock_task_pid = task->pid;
+ act_waiter->deadlock_lock = lock;
+ }
+}
+
+void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
+{
+ struct task_struct *task;
+
+ if (!waiter->deadlock_lock || !rt_trace_on)
+ return;
+
+ task = find_task_by_pid(waiter->deadlock_task_pid);
+ if (!task)
+ return;
+
+ TRACE_OFF_NOLOCK();
+
+ printk("\n============================================\n");
+ printk( "[ BUG: circular locking deadlock detected! ]\n");
+ printk( "--------------------------------------------\n");
+ printk("%s/%d is deadlocking current task %s/%d\n\n",
+ task->comm, task->pid, current->comm, current->pid);
+
+ printk("\n1) %s/%d is trying to acquire this lock:\n",
+ current->comm, current->pid);
+ printk_lock(waiter->lock, 1);
+
+ printk("... trying at: ");
+ print_symbol("%s\n", waiter->ip);
+
+ printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+ printk_lock(waiter->deadlock_lock, 1);
+
+ rt_mutex_show_held_locks(current, 1);
+ rt_mutex_show_held_locks(task, 1);
+
+ printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+ show_stack(task, NULL);
+ printk("\n%s/%d's [current] stackdump:\n\n",
+ current->comm, current->pid);
+ dump_stack();
+ rt_mutex_show_all_locks();
+ printk("[ turning off deadlock detection."
+ "Please report this trace. ]\n\n");
+ local_irq_disable();
+}
+
+void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&current->held_list_lock, flags);
+ list_add_tail(&lock->held_list_entry, &current->held_list_head);
+ spin_unlock_irqrestore(&current->held_list_lock, flags);
+
+ lock->acquire_ip = ip;
+ }
+}
+
+void debug_rt_mutex_unlock(struct rt_mutex *lock)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
+ TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&current->held_list_lock, flags);
+ list_del_init(&lock->held_list_entry);
+ spin_unlock_irqrestore(&current->held_list_lock, flags);
+ }
+}
+
+void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+ struct task_struct *powner __IP_DECL__)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&powner->held_list_lock, flags);
+ list_add_tail(&lock->held_list_entry, &powner->held_list_head);
+ spin_unlock_irqrestore(&powner->held_list_lock, flags);
+
+ lock->acquire_ip = ip;
+ }
+}
+
+void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+{
+ unsigned long flags;
+
+ if (rt_trace_on) {
+ struct task_struct *owner = rt_mutex_owner(lock);
+
+ TRACE_WARN_ON_LOCKED(!owner);
+ TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
+
+ spin_lock_irqsave(&owner->held_list_lock, flags);
+ list_del_init(&lock->held_list_entry);
+ spin_unlock_irqrestore(&owner->held_list_lock, flags);
+ }
+}
+
+void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+{
+ memset(waiter, 0x11, sizeof(*waiter));
+ plist_node_init(&waiter->list_entry, MAX_PRIO);
+ plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
+}
+
+void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+{
+ TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
+ TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+ TRACE_WARN_ON(waiter->task);
+ memset(waiter, 0x22, sizeof(*waiter));
+}
+
+void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+ void *addr = lock;
+
+ if (rt_trace_on) {
+ rt_mutex_debug_check_no_locks_freed(addr,
+ sizeof(struct rt_mutex));
+ INIT_LIST_HEAD(&lock->held_list_entry);
+ lock->name = name;
+ }
+}
+
+void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
+{
+}
+
+void rt_mutex_deadlock_account_unlock(struct task_struct *task)
+{
+}
+
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
new file mode 100644
index 00000000000000..7612fbc62d701b
--- /dev/null
+++ b/kernel/rtmutex-debug.h
@@ -0,0 +1,37 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains macros used solely by rtmutex.c. Debug version.
+ */
+
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+extern void
+rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
+extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
+extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+ struct task_struct *powner __IP_DECL__);
+extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
+extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+ struct rt_mutex *lock);
+extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
+# define debug_rt_mutex_reset_waiter(w) \
+ do { (w)->deadlock_lock = NULL; } while (0)
+
+static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+ int detect)
+{
+ return (waiter != NULL);
+}
diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
new file mode 100644
index 00000000000000..e82c2f84824946
--- /dev/null
+++ b/kernel/rtmutex-tester.c
@@ -0,0 +1,440 @@
+/*
+ * RT-Mutex-tester: scriptable tester for rt mutexes
+ *
+ * started by Thomas Gleixner:
+ *
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ */
+#include <linux/config.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/smp_lock.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/timer.h>
+
+#include "rtmutex.h"
+
+#define MAX_RT_TEST_THREADS 8
+#define MAX_RT_TEST_MUTEXES 8
+
+static spinlock_t rttest_lock;
+static atomic_t rttest_event;
+
+struct test_thread_data {
+ int opcode;
+ int opdata;
+ int mutexes[MAX_RT_TEST_MUTEXES];
+ int bkl;
+ int event;
+ struct sys_device sysdev;
+};
+
+static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
+static task_t *threads[MAX_RT_TEST_THREADS];
+static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
+
+enum test_opcodes {
+ RTTEST_NOP = 0,
+ RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
+ RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
+ RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
+ RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
+ RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
+ RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
+ RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
+ RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
+ RTTEST_LOCKBKL, /* 9 Lock BKL */
+ RTTEST_UNLOCKBKL, /* 10 Unlock BKL */
+ RTTEST_SIGNAL, /* 11 Signal other test thread, data = thread id */
+ RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
+ RTTEST_RESET = 99, /* 99 Reset all pending operations */
+};
+
+static int handle_op(struct test_thread_data *td, int lockwakeup)
+{
+ int i, id, ret = -EINVAL;
+
+ switch(td->opcode) {
+
+ case RTTEST_NOP:
+ return 0;
+
+ case RTTEST_LOCKCONT:
+ td->mutexes[td->opdata] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ return 0;
+
+ case RTTEST_RESET:
+ for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
+ if (td->mutexes[i] == 4) {
+ rt_mutex_unlock(&mutexes[i]);
+ td->mutexes[i] = 0;
+ }
+ }
+
+ if (!lockwakeup && td->bkl == 4) {
+ unlock_kernel();
+ td->bkl = 0;
+ }
+ return 0;
+
+ case RTTEST_RESETEVENT:
+ atomic_set(&rttest_event, 0);
+ return 0;
+
+ default:
+ if (lockwakeup)
+ return ret;
+ }
+
+ switch(td->opcode) {
+
+ case RTTEST_LOCK:
+ case RTTEST_LOCKNOWAIT:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
+ return ret;
+
+ td->mutexes[id] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ rt_mutex_lock(&mutexes[id]);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = 4;
+ return 0;
+
+ case RTTEST_LOCKINT:
+ case RTTEST_LOCKINTNOWAIT:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
+ return ret;
+
+ td->mutexes[id] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = ret ? 0 : 4;
+ return ret ? -EINTR : 0;
+
+ case RTTEST_UNLOCK:
+ id = td->opdata;
+ if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
+ return ret;
+
+ td->event = atomic_add_return(1, &rttest_event);
+ rt_mutex_unlock(&mutexes[id]);
+ td->event = atomic_add_return(1, &rttest_event);
+ td->mutexes[id] = 0;
+ return 0;
+
+ case RTTEST_LOCKBKL:
+ if (td->bkl)
+ return 0;
+ td->bkl = 1;
+ lock_kernel();
+ td->bkl = 4;
+ return 0;
+
+ case RTTEST_UNLOCKBKL:
+ if (td->bkl != 4)
+ break;
+ unlock_kernel();
+ td->bkl = 0;
+ return 0;
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Schedule replacement for rtsem_down(). Only called for threads with
+ * PF_MUTEX_TESTER set.
+ *
+ * This allows us to have fine-grained control over the event flow.
+ *
+ */
+void schedule_rt_mutex_test(struct rt_mutex *mutex)
+{
+ int tid, op, dat;
+ struct test_thread_data *td;
+
+ /* We have to lookup the task */
+ for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
+ if (threads[tid] == current)
+ break;
+ }
+
+ BUG_ON(tid == MAX_RT_TEST_THREADS);
+
+ td = &thread_data[tid];
+
+ op = td->opcode;
+ dat = td->opdata;
+
+ switch (op) {
+ case RTTEST_LOCK:
+ case RTTEST_LOCKINT:
+ case RTTEST_LOCKNOWAIT:
+ case RTTEST_LOCKINTNOWAIT:
+ if (mutex != &mutexes[dat])
+ break;
+
+ if (td->mutexes[dat] != 1)
+ break;
+
+ td->mutexes[dat] = 2;
+ td->event = atomic_add_return(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKBKL:
+ default:
+ break;
+ }
+
+ schedule();
+
+
+ switch (op) {
+ case RTTEST_LOCK:
+ case RTTEST_LOCKINT:
+ if (mutex != &mutexes[dat])
+ return;
+
+ if (td->mutexes[dat] != 2)
+ return;
+
+ td->mutexes[dat] = 3;
+ td->event = atomic_add_return(1, &rttest_event);
+ break;
+
+ case RTTEST_LOCKNOWAIT:
+ case RTTEST_LOCKINTNOWAIT:
+ if (mutex != &mutexes[dat])
+ return;
+
+ if (td->mutexes[dat] != 2)
+ return;
+
+ td->mutexes[dat] = 1;
+ td->event = atomic_add_return(1, &rttest_event);
+ return;
+
+ case RTTEST_LOCKBKL:
+ return;
+ default:
+ return;
+ }
+
+ td->opcode = 0;
+
+ for (;;) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (td->opcode > 0) {
+ int ret;
+
+ set_current_state(TASK_RUNNING);
+ ret = handle_op(td, 1);
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (td->opcode == RTTEST_LOCKCONT)
+ break;
+ td->opcode = ret;
+ }
+
+ /* Wait for the next command to be executed */
+ schedule();
+ }
+
+ /* Restore previous command and data */
+ td->opcode = op;
+ td->opdata = dat;
+}
+
+static int test_func(void *data)
+{
+ struct test_thread_data *td = data;
+ int ret;
+
+ current->flags |= PF_MUTEX_TESTER;
+ allow_signal(SIGHUP);
+
+ for(;;) {
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ if (td->opcode > 0) {
+ set_current_state(TASK_RUNNING);
+ ret = handle_op(td, 0);
+ set_current_state(TASK_INTERRUPTIBLE);
+ td->opcode = ret;
+ }
+
+ /* Wait for the next command to be executed */
+ schedule();
+
+ if (signal_pending(current))
+ flush_signals(current);
+
+ if(kthread_should_stop())
+ break;
+ }
+ return 0;
+}
+
+/**
+ * sysfs_test_command - interface for test commands
+ * @dev: thread reference
+ * @buf: command for actual step
+ * @count: length of buffer
+ *
+ * command syntax:
+ *
+ * opcode:data
+ */
+static ssize_t sysfs_test_command(struct sys_device *dev, const char *buf,
+ size_t count)
+{
+ struct sched_param schedpar;
+ struct test_thread_data *td;
+ char cmdbuf[32];
+ int op, dat, tid, ret;
+
+ td = container_of(dev, struct test_thread_data, sysdev);
+ tid = td->sysdev.id;
+
+ /* strings from sysfs write are not 0 terminated! */
+ if (count >= sizeof(cmdbuf))
+ return -EINVAL;
+
+	/* strip off the trailing \n: */
+ if (buf[count-1] == '\n')
+ count--;
+ if (count < 1)
+ return -EINVAL;
+
+ memcpy(cmdbuf, buf, count);
+ cmdbuf[count] = 0;
+
+ if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
+ return -EINVAL;
+
+ switch (op) {
+ case RTTEST_SCHEDOT:
+ schedpar.sched_priority = 0;
+ ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
+ if (ret)
+ return ret;
+ set_user_nice(current, 0);
+ break;
+
+ case RTTEST_SCHEDRT:
+ schedpar.sched_priority = dat;
+ ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
+ if (ret)
+ return ret;
+ break;
+
+ case RTTEST_SIGNAL:
+ send_sig(SIGHUP, threads[tid], 0);
+ break;
+
+ default:
+ if (td->opcode > 0)
+ return -EBUSY;
+ td->opdata = dat;
+ td->opcode = op;
+ wake_up_process(threads[tid]);
+ }
+
+ return count;
+}
+
+/**
+ * sysfs_test_status - sysfs interface for rt tester
+ * @dev: thread to query
+ * @buf: char buffer to be filled with thread status info
+ */
+static ssize_t sysfs_test_status(struct sys_device *dev, char *buf)
+{
+ struct test_thread_data *td;
+ char *curr = buf;
+ task_t *tsk;
+ int i;
+
+ td = container_of(dev, struct test_thread_data, sysdev);
+ tsk = threads[td->sysdev.id];
+
+ spin_lock(&rttest_lock);
+
+ curr += sprintf(curr,
+ "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, K: %d, M:",
+ td->opcode, td->event, tsk->state,
+ (MAX_RT_PRIO - 1) - tsk->prio,
+ (MAX_RT_PRIO - 1) - tsk->normal_prio,
+ tsk->pi_blocked_on, td->bkl);
+
+ for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
+ curr += sprintf(curr, "%d", td->mutexes[i]);
+
+ spin_unlock(&rttest_lock);
+
+ curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
+ mutexes[td->sysdev.id].owner);
+
+ return curr - buf;
+}
+
+static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
+static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
+
+static struct sysdev_class rttest_sysclass = {
+ set_kset_name("rttest"),
+};
+
+static int init_test_thread(int id)
+{
+ thread_data[id].sysdev.cls = &rttest_sysclass;
+ thread_data[id].sysdev.id = id;
+
+ threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
+ if (IS_ERR(threads[id]))
+ return PTR_ERR(threads[id]);
+
+ return sysdev_register(&thread_data[id].sysdev);
+}
+
+static int init_rttest(void)
+{
+ int ret, i;
+
+ spin_lock_init(&rttest_lock);
+
+ for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
+ rt_mutex_init(&mutexes[i]);
+
+ ret = sysdev_class_register(&rttest_sysclass);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
+ ret = init_test_thread(i);
+ if (ret)
+ break;
+ ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
+ if (ret)
+ break;
+ ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
+ if (ret)
+ break;
+ }
+
+ printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
+
+ return ret;
+}
+
+device_initcall(init_rttest);
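The tester is driven entirely through the per-thread sysfs files registered above, using the opcode:data syntax of sysfs_test_command(). A hedged user-space sketch; the /sys path is an assumption based on the "rttest" sysdev class and ids, and real test scripts would poll the status file between steps:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int rttest_command(int tid, const char *cmd)
{
	char path[128];
	int fd, ret;

	/* Assumed location of the per-thread command attribute. */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/rttest/rttest%d/command", tid);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	ret = write(fd, cmd, strlen(cmd)) < 0 ? -1 : 0;
	close(fd);
	return ret;
}

int main(void)
{
	rttest_command(0, "3:0");	/* thread 0: RTTEST_LOCK on mutex 0   */
	rttest_command(0, "8:0");	/* thread 0: RTTEST_UNLOCK of mutex 0 */
	return 0;
}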
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
new file mode 100644
index 00000000000000..45d61016da5761
--- /dev/null
+++ b/kernel/rtmutex.c
@@ -0,0 +1,990 @@
+/*
+ * RT-Mutexes: simple blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner.
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
+ * Copyright (C) 2006 Esben Nielsen
+ */
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+
+#include "rtmutex_common.h"
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# include "rtmutex-debug.h"
+#else
+# include "rtmutex.h"
+#endif
+
+/*
+ * lock->owner state tracking:
+ *
+ * lock->owner holds the task_struct pointer of the owner. Bit 0 and 1
+ * are used to keep track of the "owner is pending" and "lock has
+ * waiters" state.
+ *
+ * owner bit1 bit0
+ * NULL 0 0 lock is free (fast acquire possible)
+ * NULL 0 1 invalid state
+ * NULL 1 0 Transitional State*
+ * NULL 1 1 invalid state
+ * taskpointer 0 0 lock is held (fast release possible)
+ * taskpointer 0 1 task is pending owner
+ * taskpointer 1 0 lock is held and has waiters
+ * taskpointer 1 1 task is pending owner and lock has more waiters
+ *
+ * Pending ownership is assigned to the top (highest priority)
+ * waiter of the lock, when the lock is released. The thread is woken
+ * up and can now take the lock. Until the lock is taken (bit 0
+ * cleared) a competing higher priority thread can steal the lock
+ * which puts the woken up thread back on the waiters list.
+ *
+ * The fast atomic compare exchange based acquire and release is only
+ * possible when bit 0 and 1 of lock->owner are 0.
+ *
+ * (*) There's a short window where the owner can be NULL and the
+ * "lock has waiters" bit is set. This can happen when grabbing the lock.
+ * To prevent a cmpxchg of the owner releasing the lock, we need to set this
+ * bit before looking at the lock, hence the reason this is a transitional
+ * state.
+ */
+
+static void
+rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner,
+ unsigned long mask)
+{
+ unsigned long val = (unsigned long)owner | mask;
+
+ if (rt_mutex_has_waiters(lock))
+ val |= RT_MUTEX_HAS_WAITERS;
+
+ lock->owner = (struct task_struct *)val;
+}
+
+static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ lock->owner = (struct task_struct *)
+ ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ if (!rt_mutex_has_waiters(lock))
+ clear_rt_mutex_waiters(lock);
+}
+
+/*
+ * We can speed up the acquire/release, if the architecture
+ * supports cmpxchg and if there's no debugging state to be set up
+ */
+#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+# define rt_mutex_cmpxchg(l,c,n) (cmpxchg(&l->owner, c, n) == c)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+ do {
+ owner = *p;
+ } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
+}
+#else
+# define rt_mutex_cmpxchg(l,c,n) (0)
+static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
+{
+ lock->owner = (struct task_struct *)
+ ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
+}
+#endif
+
+/*
+ * Calculate task priority from the waiter list priority
+ *
+ * Return task->normal_prio when the waiter list is empty or when
+ * the waiter is not allowed to do priority boosting
+ */
+int rt_mutex_getprio(struct task_struct *task)
+{
+ if (likely(!task_has_pi_waiters(task)))
+ return task->normal_prio;
+
+ return min(task_top_pi_waiter(task)->pi_list_entry.prio,
+ task->normal_prio);
+}
+
+/*
+ * Adjust the priority of a task, after its pi_waiters got modified.
+ *
+ * This can be both boosting and unboosting. task->pi_lock must be held.
+ */
+static void __rt_mutex_adjust_prio(struct task_struct *task)
+{
+ int prio = rt_mutex_getprio(task);
+
+ if (task->prio != prio)
+ rt_mutex_setprio(task, prio);
+}
+
+/*
+ * Adjust task priority (undo boosting). Called from the exit path of
+ * rt_mutex_slowunlock() and rt_mutex_slowlock().
+ *
+ * (Note: We do this outside of the protection of lock->wait_lock to
+ * allow the lock to be taken while or before we readjust the priority
+ * of task. We do not use the spin_xx_mutex() variants here as we are
+ * outside of the debug path.)
+ */
+static void rt_mutex_adjust_prio(struct task_struct *task)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->pi_lock, flags);
+ __rt_mutex_adjust_prio(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+}
+
+/*
+ * Max number of times we'll walk the boosting chain:
+ */
+int max_lock_depth = 1024;
+
+/*
+ * Adjust the priority chain. Also used for deadlock detection.
+ * Decreases task's usage by one - may thus free the task.
+ * Returns 0 or -EDEADLK.
+ */
+static int rt_mutex_adjust_prio_chain(task_t *task,
+ int deadlock_detect,
+ struct rt_mutex *orig_lock,
+ struct rt_mutex_waiter *orig_waiter,
+ struct task_struct *top_task
+ __IP_DECL__)
+{
+ struct rt_mutex *lock;
+ struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
+ int detect_deadlock, ret = 0, depth = 0;
+ unsigned long flags;
+
+ detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
+ deadlock_detect);
+
+ /*
+ * The (de)boosting is a step by step approach with a lot of
+	 * pitfalls. We want this to be preemptible and we want to hold a
+ * maximum of two locks per step. So we have to check
+ * carefully whether things change under us.
+ */
+ again:
+ if (++depth > max_lock_depth) {
+ static int prev_max;
+
+ /*
+ * Print this only once. If the admin changes the limit,
+ * print a new message when reaching the limit again.
+ */
+ if (prev_max != max_lock_depth) {
+ prev_max = max_lock_depth;
+ printk(KERN_WARNING "Maximum lock depth %d reached "
+ "task: %s (%d)\n", max_lock_depth,
+ top_task->comm, top_task->pid);
+ }
+ put_task_struct(task);
+
+ return deadlock_detect ? -EDEADLK : 0;
+ }
+ retry:
+ /*
+	 * Task cannot go away as we did a get_task_struct() before!
+ */
+ spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+ /*
+ * Check whether the end of the boosting chain has been
+ * reached or the state of the chain has changed while we
+ * dropped the locks.
+ */
+ if (!waiter || !waiter->task)
+ goto out_unlock_pi;
+
+ if (top_waiter && (!task_has_pi_waiters(task) ||
+ top_waiter != task_top_pi_waiter(task)))
+ goto out_unlock_pi;
+
+ /*
+	 * When deadlock detection is off, we check whether further
+	 * priority adjustment is necessary.
+ */
+ if (!detect_deadlock && waiter->list_entry.prio == task->prio)
+ goto out_unlock_pi;
+
+ lock = waiter->lock;
+ if (!spin_trylock(&lock->wait_lock)) {
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ cpu_relax();
+ goto retry;
+ }
+
+ /* Deadlock detection */
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+ debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+ spin_unlock(&lock->wait_lock);
+ ret = deadlock_detect ? -EDEADLK : 0;
+ goto out_unlock_pi;
+ }
+
+ top_waiter = rt_mutex_top_waiter(lock);
+
+ /* Requeue the waiter */
+ plist_del(&waiter->list_entry, &lock->wait_list);
+ waiter->list_entry.prio = task->prio;
+ plist_add(&waiter->list_entry, &lock->wait_list);
+
+ /* Release the task */
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ put_task_struct(task);
+
+ /* Grab the next task */
+ task = rt_mutex_owner(lock);
+ spin_lock_irqsave(&task->pi_lock, flags);
+
+ if (waiter == rt_mutex_top_waiter(lock)) {
+ /* Boost the owner */
+ plist_del(&top_waiter->pi_list_entry, &task->pi_waiters);
+ waiter->pi_list_entry.prio = waiter->list_entry.prio;
+ plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+ __rt_mutex_adjust_prio(task);
+
+ } else if (top_waiter == waiter) {
+ /* Deboost the owner */
+ plist_del(&waiter->pi_list_entry, &task->pi_waiters);
+ waiter = rt_mutex_top_waiter(lock);
+ waiter->pi_list_entry.prio = waiter->list_entry.prio;
+ plist_add(&waiter->pi_list_entry, &task->pi_waiters);
+ __rt_mutex_adjust_prio(task);
+ }
+
+ get_task_struct(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ top_waiter = rt_mutex_top_waiter(lock);
+ spin_unlock(&lock->wait_lock);
+
+ if (!detect_deadlock && waiter != top_waiter)
+ goto out_put_task;
+
+ goto again;
+
+ out_unlock_pi:
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ out_put_task:
+ put_task_struct(task);
+ return ret;
+}
+
+/*
+ * Optimization: check if we can steal the lock from the
+ * assigned pending owner [which might not have taken the
+ * lock yet]:
+ */
+static inline int try_to_steal_lock(struct rt_mutex *lock)
+{
+ struct task_struct *pendowner = rt_mutex_owner(lock);
+ struct rt_mutex_waiter *next;
+ unsigned long flags;
+
+ if (!rt_mutex_owner_pending(lock))
+ return 0;
+
+ if (pendowner == current)
+ return 1;
+
+ spin_lock_irqsave(&pendowner->pi_lock, flags);
+ if (current->prio >= pendowner->prio) {
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ return 0;
+ }
+
+ /*
+	 * Check if a waiter is enqueued on the pending owner's
+	 * pi_waiters list. Remove it and readjust the pending owner's
+	 * priority.
+ */
+ if (likely(!rt_mutex_has_waiters(lock))) {
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+ return 1;
+ }
+
+ /* No chain handling, pending owner is not blocked on anything: */
+ next = rt_mutex_top_waiter(lock);
+ plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
+ __rt_mutex_adjust_prio(pendowner);
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+
+ /*
+	 * We are going to steal the lock and a waiter was
+	 * enqueued on the pending owner's pi_waiters queue. So
+	 * we have to enqueue this waiter into the
+	 * current->pi_waiters list. This covers the case
+	 * where current is boosted because it holds another
+	 * lock and gets unboosted because the booster is
+	 * interrupted, so we would delay a waiter with higher
+	 * priority than current->normal_prio.
+ *
+ * Note: in the rare case of a SCHED_OTHER task changing
+ * its priority and thus stealing the lock, next->task
+ * might be current:
+ */
+ if (likely(next->task != current)) {
+ spin_lock_irqsave(&current->pi_lock, flags);
+ plist_add(&next->pi_list_entry, &current->pi_waiters);
+ __rt_mutex_adjust_prio(current);
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+ }
+ return 1;
+}
+
+/*
+ * Try to take an rt-mutex
+ *
+ * This fails
+ * - when the lock has a real owner
+ * - when a different pending owner exists and has higher priority than current
+ *
+ * Must be called with lock->wait_lock held.
+ */
+static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
+{
+ /*
+ * We have to be careful here if the atomic speedups are
+ * enabled, such that, when
+ * - no other waiter is on the lock
+ * - the lock has been released since we did the cmpxchg
+ * the lock can be released or taken while we are doing the
+ * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+ *
+ * The atomic acquire/release aware variant of
+ * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
+ * the WAITERS bit, the atomic release / acquire can not
+ * happen anymore and lock->wait_lock protects us from the
+ * non-atomic case.
+ *
+ * Note, that this might set lock->owner =
+ * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
+ * any more. This is fixed up when we take the ownership.
+ * This is the transitional state explained at the top of this file.
+ */
+ mark_rt_mutex_waiters(lock);
+
+ if (rt_mutex_owner(lock) && !try_to_steal_lock(lock))
+ return 0;
+
+ /* We got the lock. */
+ debug_rt_mutex_lock(lock __IP__);
+
+ rt_mutex_set_owner(lock, current, 0);
+
+ rt_mutex_deadlock_account_lock(lock, current);
+
+ return 1;
+}
+
+/*
+ * Task blocks on lock.
+ *
+ * Prepare waiter and propagate pi chain
+ *
+ * This must be called with lock->wait_lock held.
+ */
+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ int detect_deadlock
+ __IP_DECL__)
+{
+ struct rt_mutex_waiter *top_waiter = waiter;
+ task_t *owner = rt_mutex_owner(lock);
+ int boost = 0, res;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->pi_lock, flags);
+ __rt_mutex_adjust_prio(current);
+ waiter->task = current;
+ waiter->lock = lock;
+ plist_node_init(&waiter->list_entry, current->prio);
+ plist_node_init(&waiter->pi_list_entry, current->prio);
+
+ /* Get the top priority waiter on the lock */
+ if (rt_mutex_has_waiters(lock))
+ top_waiter = rt_mutex_top_waiter(lock);
+ plist_add(&waiter->list_entry, &lock->wait_list);
+
+ current->pi_blocked_on = waiter;
+
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ if (waiter == rt_mutex_top_waiter(lock)) {
+ spin_lock_irqsave(&owner->pi_lock, flags);
+ plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
+ plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ __rt_mutex_adjust_prio(owner);
+ if (owner->pi_blocked_on) {
+ boost = 1;
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+ }
+ spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+ else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
+ spin_lock_irqsave(&owner->pi_lock, flags);
+ if (owner->pi_blocked_on) {
+ boost = 1;
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+ }
+ spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+ if (!boost)
+ return 0;
+
+ spin_unlock(&lock->wait_lock);
+
+ res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+ current __IP__);
+
+ spin_lock(&lock->wait_lock);
+
+ return res;
+}
+
+/*
+ * Wake up the next waiter on the lock.
+ *
+ * Remove the top waiter from the current task's waiter list and from
+ * the lock waiter list. Set it as pending owner. Then wake it up.
+ *
+ * Called with lock->wait_lock held.
+ */
+static void wakeup_next_waiter(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter *waiter;
+ struct task_struct *pendowner;
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->pi_lock, flags);
+
+ waiter = rt_mutex_top_waiter(lock);
+ plist_del(&waiter->list_entry, &lock->wait_list);
+
+ /*
+ * Remove it from current->pi_waiters. We do not adjust a
+ * possible priority boost right now. We execute wakeup in the
+ * boosted mode and go back to normal after releasing
+ * lock->wait_lock.
+ */
+ plist_del(&waiter->pi_list_entry, &current->pi_waiters);
+ pendowner = waiter->task;
+ waiter->task = NULL;
+
+ rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
+
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ /*
+ * Clear the pi_blocked_on variable and enqueue a possible
+ * waiter into the pi_waiters list of the pending owner. This
+ * ensures that, should the pending owner get unboosted, no waiter
+ * with higher priority than pending-owner->normal_prio is left
+ * blocked on the unboosted (pending) owner.
+ */
+ spin_lock_irqsave(&pendowner->pi_lock, flags);
+
+ WARN_ON(!pendowner->pi_blocked_on);
+ WARN_ON(pendowner->pi_blocked_on != waiter);
+ WARN_ON(pendowner->pi_blocked_on->lock != lock);
+
+ pendowner->pi_blocked_on = NULL;
+
+ if (rt_mutex_has_waiters(lock)) {
+ struct rt_mutex_waiter *next;
+
+ next = rt_mutex_top_waiter(lock);
+ plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
+ }
+ spin_unlock_irqrestore(&pendowner->pi_lock, flags);
+
+ wake_up_process(pendowner);
+}
+
+/*
+ * Remove a waiter from a lock
+ *
+ * Must be called with lock->wait_lock held
+ */
+static void remove_waiter(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter __IP_DECL__)
+{
+ int first = (waiter == rt_mutex_top_waiter(lock));
+ int boost = 0;
+ task_t *owner = rt_mutex_owner(lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&current->pi_lock, flags);
+ plist_del(&waiter->list_entry, &lock->wait_list);
+ waiter->task = NULL;
+ current->pi_blocked_on = NULL;
+ spin_unlock_irqrestore(&current->pi_lock, flags);
+
+ if (first && owner != current) {
+
+ spin_lock_irqsave(&owner->pi_lock, flags);
+
+ plist_del(&waiter->pi_list_entry, &owner->pi_waiters);
+
+ if (rt_mutex_has_waiters(lock)) {
+ struct rt_mutex_waiter *next;
+
+ next = rt_mutex_top_waiter(lock);
+ plist_add(&next->pi_list_entry, &owner->pi_waiters);
+ }
+ __rt_mutex_adjust_prio(owner);
+
+ if (owner->pi_blocked_on) {
+ boost = 1;
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+ }
+ spin_unlock_irqrestore(&owner->pi_lock, flags);
+ }
+
+ WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+
+ if (!boost)
+ return;
+
+ spin_unlock(&lock->wait_lock);
+
+ rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
+
+ spin_lock(&lock->wait_lock);
+}
+
+/*
+ * Recheck the pi chain, in case the task's priority has been changed
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+ struct rt_mutex_waiter *waiter;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task->pi_lock, flags);
+
+ waiter = task->pi_blocked_on;
+ if (!waiter || waiter->list_entry.prio == task->prio) {
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+ return;
+ }
+
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(task);
+ spin_unlock_irqrestore(&task->pi_lock, flags);
+
+ rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
+/*
+ * Slow path lock function:
+ */
+static int __sched
+rt_mutex_slowlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock __IP_DECL__)
+{
+ struct rt_mutex_waiter waiter;
+ int ret = 0;
+
+ debug_rt_mutex_init_waiter(&waiter);
+ waiter.task = NULL;
+
+ spin_lock(&lock->wait_lock);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock __IP__)) {
+ spin_unlock(&lock->wait_lock);
+ return 0;
+ }
+
+ set_current_state(state);
+
+ /* Setup the timer, when timeout != NULL */
+ if (unlikely(timeout))
+ hrtimer_start(&timeout->timer, timeout->timer.expires,
+ HRTIMER_ABS);
+
+ for (;;) {
+ /* Try to acquire the lock: */
+ if (try_to_take_rt_mutex(lock __IP__))
+ break;
+
+ /*
+ * TASK_INTERRUPTIBLE checks for signals and
+ * timeout. Ignored otherwise.
+ */
+ if (unlikely(state == TASK_INTERRUPTIBLE)) {
+ /* Signal pending? */
+ if (signal_pending(current))
+ ret = -EINTR;
+ if (timeout && !timeout->task)
+ ret = -ETIMEDOUT;
+ if (ret)
+ break;
+ }
+
+ /*
+ * waiter.task is NULL the first time we come here and
+ * when we have been woken up by the previous owner
+ * but the lock got stolen by a higher prio task.
+ */
+ if (!waiter.task) {
+ ret = task_blocks_on_rt_mutex(lock, &waiter,
+ detect_deadlock __IP__);
+ /*
+ * If we got woken up by the owner then start loop
+ * all over without going into schedule to try
+ * to get the lock now:
+ */
+ if (unlikely(!waiter.task))
+ continue;
+
+ if (unlikely(ret))
+ break;
+ }
+
+ spin_unlock(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (waiter.task)
+ schedule_rt_mutex(lock);
+
+ spin_lock(&lock->wait_lock);
+ set_current_state(state);
+ }
+
+ set_current_state(TASK_RUNNING);
+
+ if (unlikely(waiter.task))
+ remove_waiter(lock, &waiter __IP__);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
+ * unconditionally. We might have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
+
+ spin_unlock(&lock->wait_lock);
+
+ /* Remove pending timer: */
+ if (unlikely(timeout))
+ hrtimer_cancel(&timeout->timer);
+
+ /*
+ * Readjust priority, when we did not get the lock. We might
+ * have been the pending owner and boosted. Since we did not
+ * take the lock, the PI boost has to go.
+ */
+ if (unlikely(ret))
+ rt_mutex_adjust_prio(current);
+
+ debug_rt_mutex_free_waiter(&waiter);
+
+ return ret;
+}
+
+/*
+ * Slow path try-lock function:
+ */
+static inline int
+rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
+{
+ int ret = 0;
+
+ spin_lock(&lock->wait_lock);
+
+ if (likely(rt_mutex_owner(lock) != current)) {
+
+ ret = try_to_take_rt_mutex(lock __IP__);
+ /*
+ * try_to_take_rt_mutex() sets the lock waiters
+ * bit unconditionally. Clean this up.
+ */
+ fixup_rt_mutex_waiters(lock);
+ }
+
+ spin_unlock(&lock->wait_lock);
+
+ return ret;
+}
+
+/*
+ * Slow path to release a rt-mutex:
+ */
+static void __sched
+rt_mutex_slowunlock(struct rt_mutex *lock)
+{
+ spin_lock(&lock->wait_lock);
+
+ debug_rt_mutex_unlock(lock);
+
+ rt_mutex_deadlock_account_unlock(current);
+
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
+ spin_unlock(&lock->wait_lock);
+ return;
+ }
+
+ wakeup_next_waiter(lock);
+
+ spin_unlock(&lock->wait_lock);
+
+ /* Undo pi boosting if necessary: */
+ rt_mutex_adjust_prio(current);
+}
+
+/*
+ * debug aware fast / slowpath lock,trylock,unlock
+ *
+ * The atomic acquire/release ops are compiled away when either the
+ * architecture does not support cmpxchg or debugging is enabled.
+ */
+static inline int
+rt_mutex_fastlock(struct rt_mutex *lock, int state,
+ int detect_deadlock,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock __IP_DECL__))
+{
+ if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+ return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
+}
+
+static inline int
+rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout, int detect_deadlock,
+ int (*slowfn)(struct rt_mutex *lock, int state,
+ struct hrtimer_sleeper *timeout,
+ int detect_deadlock __IP_DECL__))
+{
+ if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 0;
+ } else
+ return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
+}
+
+static inline int
+rt_mutex_fasttrylock(struct rt_mutex *lock,
+ int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+{
+ if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+ rt_mutex_deadlock_account_lock(lock, current);
+ return 1;
+ }
+ return slowfn(lock __RET_IP__);
+}
+
+static inline void
+rt_mutex_fastunlock(struct rt_mutex *lock,
+ void (*slowfn)(struct rt_mutex *lock))
+{
+ if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+ rt_mutex_deadlock_account_unlock(current);
+ else
+ slowfn(lock);
+}
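
All four wrappers above share one shape: a single cmpxchg on lock->owner decides between the inlined fast path and the out-of-line slow function (the slow path is also forced when deadlock detection is requested). A hedged userspace sketch of that shape with made-up names; it models the structure only, not the kernel's rt_mutex_cmpxchg():

    #include <stdatomic.h>
    #include <stddef.h>

    struct toy_lock {
            _Atomic(void *) owner;                  /* NULL when unlocked */
    };

    /* Lock: one CAS from NULL to self, otherwise sleep in the slow path. */
    static int toy_fastlock(struct toy_lock *lock, void *self,
                            int (*slowfn)(struct toy_lock *, void *))
    {
            void *expected = NULL;

            if (atomic_compare_exchange_strong(&lock->owner, &expected, self))
                    return 0;                       /* uncontended fast path */
            return slowfn(lock, self);              /* contended slow path   */
    }

    /* Unlock: one CAS from self back to NULL, otherwise wake a waiter. */
    static void toy_fastunlock(struct toy_lock *lock, void *self,
                               void (*slowfn)(struct toy_lock *))
    {
            void *expected = self;

            if (!atomic_compare_exchange_strong(&lock->owner, &expected, NULL))
                    slowfn(lock);                   /* waiters bit was set   */
    }
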
+
+/**
+ * rt_mutex_lock - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ */
+void __sched rt_mutex_lock(struct rt_mutex *lock)
+{
+ might_sleep();
+
+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock);
+
+/**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptibly
+ *
+ * @lock: the rt_mutex to be locked
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
+ int detect_deadlock)
+{
+ might_sleep();
+
+ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
+ detect_deadlock, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+/**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptibly; the timeout
+ * structure is provided by the caller
+ *
+ * @lock: the rt_mutex to be locked
+ * @timeout: timeout structure or NULL (no timeout)
+ * @detect_deadlock: deadlock detection on/off
+ *
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+ * -ETIMEDOUT when the timeout expired
+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+int
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
+ int detect_deadlock)
+{
+ might_sleep();
+
+ return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+ detect_deadlock, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+
+/**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ *
+ * Returns 1 on success and 0 on contention
+ */
+int __sched rt_mutex_trylock(struct rt_mutex *lock)
+{
+ return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_trylock);
+
+/**
+ * rt_mutex_unlock - unlock a rt_mutex
+ *
+ * @lock: the rt_mutex to be unlocked
+ */
+void __sched rt_mutex_unlock(struct rt_mutex *lock)
+{
+ rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+
+/***
+ * rt_mutex_destroy - mark a mutex unusable
+ * @lock: the mutex to be destroyed
+ *
+ * This function marks the mutex uninitialized, and any subsequent
+ * use of the mutex is forbidden. The mutex must not be locked when
+ * this function is called.
+ */
+void rt_mutex_destroy(struct rt_mutex *lock)
+{
+ WARN_ON(rt_mutex_is_locked(lock));
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ lock->magic = NULL;
+#endif
+}
+
+EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+
+/**
+ * __rt_mutex_init - initialize the rt lock
+ *
+ * @lock: the rt lock to be initialized
+ *
+ * Initialize the rt lock to unlocked state.
+ *
+ * Initializing a locked rt lock is not allowed
+ */
+void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+ lock->owner = NULL;
+ spin_lock_init(&lock->wait_lock);
+ plist_head_init(&lock->wait_list, &lock->wait_lock);
+
+ debug_rt_mutex_init(lock, name);
+}
+EXPORT_SYMBOL_GPL(__rt_mutex_init);
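
For orientation, a short usage sketch of the interface added above (__rt_mutex_init, rt_mutex_lock_interruptible, rt_mutex_unlock). The surrounding driver context is invented for illustration and is not part of this patch:

    static struct rt_mutex demo_lock;

    static void demo_setup(void)
    {
            __rt_mutex_init(&demo_lock, "demo_lock");
    }

    static int demo_update(void)
    {
            int ret;

            /* Block with priority inheritance; bail out on a signal. */
            ret = rt_mutex_lock_interruptible(&demo_lock, 0);
            if (ret)
                    return ret;                     /* -EINTR */

            /* ... touch data protected by demo_lock ... */

            rt_mutex_unlock(&demo_lock);
            return 0;
    }
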
+
+/**
+ * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
+ * proxy owner
+ *
+ * @lock: the rt_mutex to be locked
+ * @proxy_owner: the task to set as owner
+ *
+ * No locking. Caller has to do serializing itself
+ * Special API call for PI-futex support
+ */
+void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+{
+ __rt_mutex_init(lock, NULL);
+ debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
+ rt_mutex_set_owner(lock, proxy_owner, 0);
+ rt_mutex_deadlock_account_lock(lock, proxy_owner);
+}
+
+/**
+ * rt_mutex_proxy_unlock - release a lock on behalf of owner
+ *
+ * @lock: the rt_mutex to be unlocked
+ *
+ * No locking. Caller has to do serializing itself
+ * Special API call for PI-futex support
+ */
+void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ struct task_struct *proxy_owner)
+{
+ debug_rt_mutex_proxy_unlock(lock);
+ rt_mutex_set_owner(lock, NULL, 0);
+ rt_mutex_deadlock_account_unlock(proxy_owner);
+}
+
+/**
+ * rt_mutex_next_owner - return the next owner of the lock
+ *
+ * @lock: the rt lock to query
+ *
+ * Returns the next owner of the lock or NULL
+ *
+ * Caller has to serialize against other accessors to the lock
+ * itself.
+ *
+ * Special API call for PI-futex support
+ */
+struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
+{
+ if (!rt_mutex_has_waiters(lock))
+ return NULL;
+
+ return rt_mutex_top_waiter(lock)->task;
+}
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
new file mode 100644
index 00000000000000..1e0fca13ff72b4
--- /dev/null
+++ b/kernel/rtmutex.h
@@ -0,0 +1,29 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains macros used solely by rtmutex.c.
+ * Non-debug version.
+ */
+
+#define __IP_DECL__
+#define __IP__
+#define __RET_IP__
+#define rt_mutex_deadlock_check(l) (0)
+#define rt_mutex_deadlock_account_lock(m, t) do { } while (0)
+#define rt_mutex_deadlock_account_unlock(l) do { } while (0)
+#define debug_rt_mutex_init_waiter(w) do { } while (0)
+#define debug_rt_mutex_free_waiter(w) do { } while (0)
+#define debug_rt_mutex_lock(l) do { } while (0)
+#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
+#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
+#define debug_rt_mutex_unlock(l) do { } while (0)
+#define debug_rt_mutex_init(m, n) do { } while (0)
+#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
+#define debug_rt_mutex_print_deadlock(w) do { } while (0)
+#define debug_rt_mutex_detect_deadlock(w,d) (d)
+#define debug_rt_mutex_reset_waiter(w) do { } while (0)
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
new file mode 100644
index 00000000000000..9c75856e791ee9
--- /dev/null
+++ b/kernel/rtmutex_common.h
@@ -0,0 +1,123 @@
+/*
+ * RT Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains the private data structure and API definitions.
+ */
+
+#ifndef __KERNEL_RTMUTEX_COMMON_H
+#define __KERNEL_RTMUTEX_COMMON_H
+
+#include <linux/rtmutex.h>
+
+/*
+ * The in-kernel rtmutex tester is independent of rtmutex debugging. We
+ * call schedule_rt_mutex_test() instead of schedule() for the tasks which
+ * belong to the tester. That way we can delay the wakeup path of those
+ * threads to provoke lock stealing and testing of complex boosting scenarios.
+ */
+#ifdef CONFIG_RT_MUTEX_TESTER
+
+extern void schedule_rt_mutex_test(struct rt_mutex *lock);
+
+#define schedule_rt_mutex(_lock) \
+ do { \
+ if (!(current->flags & PF_MUTEX_TESTER)) \
+ schedule(); \
+ else \
+ schedule_rt_mutex_test(_lock); \
+ } while (0)
+
+#else
+# define schedule_rt_mutex(_lock) schedule()
+#endif
+
+/*
+ * This is the control structure for tasks blocked on a rt_mutex,
+ * which is allocated on the kernel stack of the blocked task.
+ *
+ * @list_entry: pi node to enqueue into the mutex waiters list
+ * @pi_list_entry: pi node to enqueue into the mutex owner waiters list
+ * @task: task reference to the blocked task
+ */
+struct rt_mutex_waiter {
+ struct plist_node list_entry;
+ struct plist_node pi_list_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+ unsigned long ip;
+ pid_t deadlock_task_pid;
+ struct rt_mutex *deadlock_lock;
+#endif
+};
+
+/*
+ * Various helpers to access the waiters-plist:
+ */
+static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
+{
+ return !plist_head_empty(&lock->wait_list);
+}
+
+static inline struct rt_mutex_waiter *
+rt_mutex_top_waiter(struct rt_mutex *lock)
+{
+ struct rt_mutex_waiter *w;
+
+ w = plist_first_entry(&lock->wait_list, struct rt_mutex_waiter,
+ list_entry);
+ BUG_ON(w->lock != lock);
+
+ return w;
+}
+
+static inline int task_has_pi_waiters(struct task_struct *p)
+{
+ return !plist_head_empty(&p->pi_waiters);
+}
+
+static inline struct rt_mutex_waiter *
+task_top_pi_waiter(struct task_struct *p)
+{
+ return plist_first_entry(&p->pi_waiters, struct rt_mutex_waiter,
+ pi_list_entry);
+}
+
+/*
+ * lock->owner state tracking:
+ */
+#define RT_MUTEX_OWNER_PENDING 1UL
+#define RT_MUTEX_HAS_WAITERS 2UL
+#define RT_MUTEX_OWNER_MASKALL 3UL
+
+static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
+{
+ return (struct task_struct *)
+ ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+}
+
+static inline struct task_struct *rt_mutex_real_owner(struct rt_mutex *lock)
+{
+ return (struct task_struct *)
+ ((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
+}
+
+static inline unsigned long rt_mutex_owner_pending(struct rt_mutex *lock)
+{
+ return (unsigned long)lock->owner & RT_MUTEX_OWNER_PENDING;
+}
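
To make the owner encoding concrete: the two low bits of lock->owner carry state, and the helpers above strip them in slightly different ways (rt_mutex_owner() drops both, rt_mutex_real_owner() drops only the waiters bit). A standalone illustration of the same masking on a fake, suitably aligned pointer value:

    #include <stdio.h>

    #define OWNER_PENDING   1UL
    #define HAS_WAITERS     2UL
    #define MASKALL         3UL

    int main(void)
    {
            unsigned long task  = 0xffff810001234560UL;     /* fake task pointer */
            unsigned long owner = task | HAS_WAITERS | OWNER_PENDING;

            printf("rt_mutex_owner():         %#lx\n", owner & ~MASKALL);
            printf("rt_mutex_real_owner():    %#lx\n", owner & ~HAS_WAITERS);
            printf("rt_mutex_owner_pending(): %lu\n",  owner & OWNER_PENDING);
            return 0;
    }
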
+
+/*
+ * PI-futex support (proxy locking functions, etc.):
+ */
+extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
+extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
+ struct task_struct *proxy_owner);
+#endif
diff --git a/kernel/sched.c b/kernel/sched.c
index a856040c200a42..2629c1711fd62b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -168,15 +168,21 @@
*/
#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+ max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
-static unsigned int task_timeslice(task_t *p)
+static unsigned int static_prio_timeslice(int static_prio)
{
- if (p->static_prio < NICE_TO_PRIO(0))
- return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
+ if (static_prio < NICE_TO_PRIO(0))
+ return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
else
- return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+ return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
+
+static inline unsigned int task_timeslice(task_t *p)
+{
+ return static_prio_timeslice(p->static_prio);
+}
+
#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
< (long long) (sd)->cache_hot_time)
@@ -184,13 +190,11 @@ static unsigned int task_timeslice(task_t *p)
* These are the runqueue data structures:
*/
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
typedef struct runqueue runqueue_t;
struct prio_array {
unsigned int nr_active;
- unsigned long bitmap[BITMAP_SIZE];
+ DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
struct list_head queue[MAX_PRIO];
};
@@ -209,6 +213,7 @@ struct runqueue {
* remote CPUs use both these fields when doing load calculation.
*/
unsigned long nr_running;
+ unsigned long raw_weighted_load;
#ifdef CONFIG_SMP
unsigned long cpu_load[3];
#endif
@@ -239,7 +244,6 @@ struct runqueue {
task_t *migration_thread;
struct list_head migration_queue;
- int cpu;
#endif
#ifdef CONFIG_SCHEDSTATS
@@ -351,11 +355,30 @@ static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
/*
+ * __task_rq_lock - lock the runqueue a given task resides on.
+ * Must be called with interrupts disabled.
+ */
+static inline runqueue_t *__task_rq_lock(task_t *p)
+ __acquires(rq->lock)
+{
+ struct runqueue *rq;
+
+repeat_lock_task:
+ rq = task_rq(p);
+ spin_lock(&rq->lock);
+ if (unlikely(rq != task_rq(p))) {
+ spin_unlock(&rq->lock);
+ goto repeat_lock_task;
+ }
+ return rq;
+}
+
+/*
* task_rq_lock - lock the runqueue a given task resides on and disable
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
__acquires(rq->lock)
{
struct runqueue *rq;
@@ -371,6 +394,12 @@ repeat_lock_task:
return rq;
}
+static inline void __task_rq_unlock(runqueue_t *rq)
+ __releases(rq->lock)
+{
+ spin_unlock(&rq->lock);
+}
+
static inline void task_rq_unlock(runqueue_t *rq, unsigned long *flags)
__releases(rq->lock)
{
@@ -634,7 +663,7 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
}
/*
- * effective_prio - return the priority that is based on the static
+ * __normal_prio - return the priority that is based on the static
* priority but is modified by bonuses/penalties.
*
* We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
@@ -647,13 +676,11 @@ static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
*
* Both properties are important to certain workloads.
*/
-static int effective_prio(task_t *p)
+
+static inline int __normal_prio(task_t *p)
{
int bonus, prio;
- if (rt_task(p))
- return p->prio;
-
bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
prio = p->static_prio - bonus;
@@ -665,6 +692,106 @@ static int effective_prio(task_t *p)
}
/*
+ * To aid in avoiding the subversion of "niceness" due to uneven distribution
+ * of tasks with abnormal "nice" values across CPUs the contribution that
+ * each task makes to its run queue's load is weighted according to its
+ * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
+ * scaled version of the new time slice allocation that they receive on time
+ * slice expiry etc.
+ */
+
+/*
+ * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
+ * If static_prio_timeslice() is ever changed to break this assumption then
+ * this code will need modification
+ */
+#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
+#define LOAD_WEIGHT(lp) \
+ (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
+#define PRIO_TO_LOAD_WEIGHT(prio) \
+ LOAD_WEIGHT(static_prio_timeslice(prio))
+#define RTPRIO_TO_LOAD_WEIGHT(rp) \
+ (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
+
+static void set_load_weight(task_t *p)
+{
+ if (has_rt_policy(p)) {
+#ifdef CONFIG_SMP
+ if (p == task_rq(p)->migration_thread)
+ /*
+ * The migration thread does the actual balancing.
+ * Giving its load any weight will skew balancing
+ * adversely.
+ */
+ p->load_weight = 0;
+ else
+#endif
+ p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+ } else
+ p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
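
One fixed point of the weighting above is worth spelling out: because TIME_SLICE_NICE_ZERO equals DEF_TIMESLICE, a nice-0 task always weighs exactly SCHED_LOAD_SCALE, and every other nice level scales around that through its timeslice. The program below reruns the same arithmetic in userspace; the constants (MIN_TIMESLICE, DEF_TIMESLICE, SCHED_LOAD_SCALE and the priority ranges) are assumed typical values for this era, not taken from this patch:

    #include <stdio.h>

    #define MIN_TIMESLICE           5               /* assumed, in ms       */
    #define DEF_TIMESLICE           100             /* assumed nice-0 slice */
    #define SCHED_LOAD_SCALE        128UL           /* assumed              */
    #define MAX_RT_PRIO             100
    #define MAX_PRIO                (MAX_RT_PRIO + 40)
    #define MAX_USER_PRIO           40
    #define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)

    static unsigned long scale_prio(unsigned long x, int prio)
    {
            unsigned long ts = x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2);

            return ts > MIN_TIMESLICE ? ts : MIN_TIMESLICE;
    }

    static unsigned long static_prio_timeslice(int static_prio)
    {
            if (static_prio < NICE_TO_PRIO(0))
                    return scale_prio(DEF_TIMESLICE * 4, static_prio);
            return scale_prio(DEF_TIMESLICE, static_prio);
    }

    static unsigned long load_weight(unsigned long slice)
    {
            return slice * SCHED_LOAD_SCALE / DEF_TIMESLICE;
    }

    int main(void)
    {
            const int nices[] = { -20, 0, 19 };
            unsigned int i;

            /* Prints 1024, 128 (== SCHED_LOAD_SCALE) and 6 with these constants. */
            for (i = 0; i < sizeof(nices) / sizeof(nices[0]); i++)
                    printf("nice %3d -> load_weight %4lu\n", nices[i],
                           load_weight(static_prio_timeslice(NICE_TO_PRIO(nices[i]))));
            return 0;
    }
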
+
+static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+ rq->raw_weighted_load += p->load_weight;
+}
+
+static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
+{
+ rq->raw_weighted_load -= p->load_weight;
+}
+
+static inline void inc_nr_running(task_t *p, runqueue_t *rq)
+{
+ rq->nr_running++;
+ inc_raw_weighted_load(rq, p);
+}
+
+static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+{
+ rq->nr_running--;
+ dec_raw_weighted_load(rq, p);
+}
+
+/*
+ * Calculate the expected normal priority: i.e. priority
+ * without taking RT-inheritance into account. Might be
+ * boosted by interactivity modifiers. Changes upon fork,
+ * setprio syscalls, and whenever the interactivity
+ * estimator recalculates.
+ */
+static inline int normal_prio(task_t *p)
+{
+ int prio;
+
+ if (has_rt_policy(p))
+ prio = MAX_RT_PRIO-1 - p->rt_priority;
+ else
+ prio = __normal_prio(p);
+ return prio;
+}
+
+/*
+ * Calculate the current priority, i.e. the priority
+ * taken into account by the scheduler. This value might
+ * be boosted by RT tasks, or might be boosted by
+ * interactivity modifiers. Will be RT if the task got
+ * RT-boosted. If not then it returns p->normal_prio.
+ */
+static int effective_prio(task_t *p)
+{
+ p->normal_prio = normal_prio(p);
+ /*
+ * If we are RT tasks or we were boosted to RT priority,
+ * keep the priority unchanged. Otherwise, update priority
+ * to the normal priority:
+ */
+ if (!rt_prio(p->prio))
+ return p->normal_prio;
+ return p->prio;
+}
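
Taken together, the routines above maintain three related fields: p->static_prio (nice), p->normal_prio (the policy view without inheritance) and p->prio (what the scheduler actually queues on, possibly boosted). The sketch below restates that relationship in plain C; MAX_RT_PRIO and the bonus handling are simplified assumptions, and the real code additionally clamps the result to the valid range:

    #define MAX_RT_PRIO 100                         /* assumed */

    struct demo_task {
            int rt_priority;    /* 1..99 for RT policies, 0 otherwise     */
            int static_prio;    /* MAX_RT_PRIO + nice + 20 otherwise      */
            int normal_prio;    /* policy-derived, never PI-boosted       */
            int prio;           /* what the scheduler uses, maybe boosted */
    };

    static int demo_normal_prio(const struct demo_task *p, int bonus)
    {
            if (p->rt_priority)                 /* stands in for has_rt_policy() */
                    return MAX_RT_PRIO - 1 - p->rt_priority;
            return p->static_prio - bonus;      /* __normal_prio()               */
    }

    /* effective_prio(): keep an existing RT boost (prio < MAX_RT_PRIO),
     * otherwise follow the freshly computed normal priority. */
    static int demo_effective_prio(struct demo_task *p, int bonus)
    {
            p->normal_prio = demo_normal_prio(p, bonus);
            return p->prio < MAX_RT_PRIO ? p->prio : p->normal_prio;
    }
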
+
+/*
* __activate_task - move a task to the runqueue.
*/
static void __activate_task(task_t *p, runqueue_t *rq)
@@ -674,7 +801,7 @@ static void __activate_task(task_t *p, runqueue_t *rq)
if (batch_task(p))
target = rq->expired;
enqueue_task(p, target);
- rq->nr_running++;
+ inc_nr_running(p, rq);
}
/*
@@ -683,39 +810,45 @@ static void __activate_task(task_t *p, runqueue_t *rq)
static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
- rq->nr_running++;
+ inc_nr_running(p, rq);
}
+/*
+ * Recalculate p->normal_prio and p->prio after having slept,
+ * updating the sleep-average too:
+ */
static int recalc_task_prio(task_t *p, unsigned long long now)
{
/* Caller must always ensure 'now >= p->timestamp' */
- unsigned long long __sleep_time = now - p->timestamp;
- unsigned long sleep_time;
+ unsigned long sleep_time = now - p->timestamp;
if (batch_task(p))
sleep_time = 0;
- else {
- if (__sleep_time > NS_MAX_SLEEP_AVG)
- sleep_time = NS_MAX_SLEEP_AVG;
- else
- sleep_time = (unsigned long)__sleep_time;
- }
if (likely(sleep_time > 0)) {
/*
- * User tasks that sleep a long time are categorised as
- * idle. They will only have their sleep_avg increased to a
- * level that makes them just interactive priority to stay
- * active yet prevent them suddenly becoming cpu hogs and
- * starving other processes.
+ * This ceiling is set to the lowest priority that would allow
+ * a task to be reinserted into the active array on timeslice
+ * completion.
*/
- if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
- unsigned long ceiling;
+ unsigned long ceiling = INTERACTIVE_SLEEP(p);
- ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
- DEF_TIMESLICE);
- if (p->sleep_avg < ceiling)
- p->sleep_avg = ceiling;
+ if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
+ /*
+ * Prevents user tasks from achieving best priority
+ * with a single large enough sleep.
+ */
+ p->sleep_avg = ceiling;
+ /*
+ * Using INTERACTIVE_SLEEP() as a ceiling places a
+ * nice(0) task 1ms sleep away from promotion, and
+ * gives it 700ms to round-robin with no chance of
+ * being demoted. This is more than generous, so
+ * mark this sleep as non-interactive to prevent the
+ * on-runqueue bonus logic from intervening should
+ * this task not receive cpu immediately.
+ */
+ p->sleep_type = SLEEP_NONINTERACTIVE;
} else {
/*
* Tasks waking from uninterruptible sleep are
@@ -723,12 +856,12 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
* are likely to be waiting on I/O
*/
if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
- if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
+ if (p->sleep_avg >= ceiling)
sleep_time = 0;
else if (p->sleep_avg + sleep_time >=
- INTERACTIVE_SLEEP(p)) {
- p->sleep_avg = INTERACTIVE_SLEEP(p);
- sleep_time = 0;
+ ceiling) {
+ p->sleep_avg = ceiling;
+ sleep_time = 0;
}
}
@@ -742,9 +875,9 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
*/
p->sleep_avg += sleep_time;
- if (p->sleep_avg > NS_MAX_SLEEP_AVG)
- p->sleep_avg = NS_MAX_SLEEP_AVG;
}
+ if (p->sleep_avg > NS_MAX_SLEEP_AVG)
+ p->sleep_avg = NS_MAX_SLEEP_AVG;
}
return effective_prio(p);
@@ -805,7 +938,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
*/
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
- rq->nr_running--;
+ dec_nr_running(p, rq);
dequeue_task(p, p->array);
p->array = NULL;
}
@@ -860,6 +993,12 @@ inline int task_curr(const task_t *p)
return cpu_curr(task_cpu(p)) == p;
}
+/* Used instead of source_load when we know the type == 0 */
+unsigned long weighted_cpuload(const int cpu)
+{
+ return cpu_rq(cpu)->raw_weighted_load;
+}
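
The practical effect of reporting raw_weighted_load instead of nr_running * SCHED_LOAD_SCALE is easiest to see with numbers; a tiny worked example using the weights assumed in the earlier sketch (nice -20 = 1024, nice 0 = 128, nice 19 = 6):

    #include <stdio.h>

    int main(void)
    {
            unsigned long cpu_a = 1024;     /* one nice -20 task            */
            unsigned long cpu_b = 3 * 6;    /* three nice 19 tasks          */
            unsigned long old_a = 1 * 128;  /* old metric: nr_running * 128 */
            unsigned long old_b = 3 * 128;

            printf("weighted: A=%lu B=%lu -> A is the busier source\n", cpu_a, cpu_b);
            printf("old:      A=%lu B=%lu -> B used to look busier\n", old_a, old_b);
            return 0;
    }
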
+
#ifdef CONFIG_SMP
typedef struct {
struct list_head list;
@@ -949,7 +1088,8 @@ void kick_process(task_t *p)
}
/*
- * Return a low guess at the load of a migration-source cpu.
+ * Return a low guess at the load of a migration-source cpu weighted
+ * according to the scheduling class and "nice" value.
*
* We want to under-estimate the load of migration sources, to
* balance conservatively.
@@ -957,24 +1097,36 @@ void kick_process(task_t *p)
static inline unsigned long source_load(int cpu, int type)
{
runqueue_t *rq = cpu_rq(cpu);
- unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+
if (type == 0)
- return load_now;
+ return rq->raw_weighted_load;
- return min(rq->cpu_load[type-1], load_now);
+ return min(rq->cpu_load[type-1], rq->raw_weighted_load);
}
/*
- * Return a high guess at the load of a migration-target cpu
+ * Return a high guess at the load of a migration-target cpu weighted
+ * according to the scheduling class and "nice" value.
*/
static inline unsigned long target_load(int cpu, int type)
{
runqueue_t *rq = cpu_rq(cpu);
- unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
+
if (type == 0)
- return load_now;
+ return rq->raw_weighted_load;
+
+ return max(rq->cpu_load[type-1], rq->raw_weighted_load);
+}
+
+/*
+ * Return the average load per task on the cpu's run queue
+ */
+static inline unsigned long cpu_avg_load_per_task(int cpu)
+{
+ runqueue_t *rq = cpu_rq(cpu);
+ unsigned long n = rq->nr_running;
- return max(rq->cpu_load[type-1], load_now);
+ return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
}
/*
@@ -1047,7 +1199,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
cpus_and(tmp, group->cpumask, p->cpus_allowed);
for_each_cpu_mask(i, tmp) {
- load = source_load(i, 0);
+ load = weighted_cpuload(i);
if (load < min_load || (load == min_load && i == this_cpu)) {
min_load = load;
@@ -1074,9 +1226,15 @@ static int sched_balance_self(int cpu, int flag)
struct task_struct *t = current;
struct sched_domain *tmp, *sd = NULL;
- for_each_domain(cpu, tmp)
+ for_each_domain(cpu, tmp) {
+ /*
+ * If power savings logic is enabled for a domain, stop there.
+ */
+ if (tmp->flags & SD_POWERSAVINGS_BALANCE)
+ break;
if (tmp->flags & flag)
sd = tmp;
+ }
while (sd) {
cpumask_t span;
@@ -1226,17 +1384,19 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
if (this_sd->flags & SD_WAKE_AFFINE) {
unsigned long tl = this_load;
+ unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
+
/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
* of the current CPU:
*/
if (sync)
- tl -= SCHED_LOAD_SCALE;
+ tl -= current->load_weight;
if ((tl <= load &&
- tl + target_load(cpu, idx) <= SCHED_LOAD_SCALE) ||
- 100*(tl + SCHED_LOAD_SCALE) <= imbalance*load) {
+ tl + target_load(cpu, idx) <= tl_per_task) ||
+ 100*(tl + p->load_weight) <= imbalance*load) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
@@ -1353,6 +1513,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
+
+ /*
+ * Make sure we do not leak PI boosting priority to the child:
+ */
+ p->prio = current->normal_prio;
+
INIT_LIST_HEAD(&p->run_list);
p->array = NULL;
#ifdef CONFIG_SCHEDSTATS
@@ -1432,10 +1598,11 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
__activate_task(p, rq);
else {
p->prio = current->prio;
+ p->normal_prio = current->normal_prio;
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
- rq->nr_running++;
+ inc_nr_running(p, rq);
}
set_need_resched();
} else
@@ -1653,7 +1820,8 @@ unsigned long nr_uninterruptible(void)
unsigned long long nr_context_switches(void)
{
- unsigned long long i, sum = 0;
+ int i;
+ unsigned long long sum = 0;
for_each_possible_cpu(i)
sum += cpu_rq(i)->nr_switches;
@@ -1691,9 +1859,6 @@ unsigned long nr_active(void)
/*
* double_rq_lock - safely lock two runqueues
*
- * We must take them in cpu order to match code in
- * dependent_sleeper and wake_dependent_sleeper.
- *
* Note this does not disable interrupts like task_rq_lock,
* you need to do so manually before calling.
*/
@@ -1705,7 +1870,7 @@ static void double_rq_lock(runqueue_t *rq1, runqueue_t *rq2)
spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
- if (rq1->cpu < rq2->cpu) {
+ if (rq1 < rq2) {
spin_lock(&rq1->lock);
spin_lock(&rq2->lock);
} else {
@@ -1741,7 +1906,7 @@ static void double_lock_balance(runqueue_t *this_rq, runqueue_t *busiest)
__acquires(this_rq->lock)
{
if (unlikely(!spin_trylock(&busiest->lock))) {
- if (busiest->cpu < this_rq->cpu) {
+ if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
@@ -1804,9 +1969,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
- src_rq->nr_running--;
+ dec_nr_running(p, src_rq);
set_task_cpu(p, this_cpu);
- this_rq->nr_running++;
+ inc_nr_running(p, this_rq);
enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
@@ -1853,26 +2018,42 @@ int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
return 1;
}
+#define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
/*
- * move_tasks tries to move up to max_nr_move tasks from busiest to this_rq,
- * as part of a balancing operation within "domain". Returns the number of
- * tasks moved.
+ * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
+ * load from busiest to this_rq, as part of a balancing operation within
+ * "domain". Returns the number of tasks moved.
*
* Called with both runqueues locked.
*/
static int move_tasks(runqueue_t *this_rq, int this_cpu, runqueue_t *busiest,
- unsigned long max_nr_move, struct sched_domain *sd,
- enum idle_type idle, int *all_pinned)
+ unsigned long max_nr_move, unsigned long max_load_move,
+ struct sched_domain *sd, enum idle_type idle,
+ int *all_pinned)
{
prio_array_t *array, *dst_array;
struct list_head *head, *curr;
- int idx, pulled = 0, pinned = 0;
+ int idx, pulled = 0, pinned = 0, this_best_prio, busiest_best_prio;
+ int busiest_best_prio_seen;
+ int skip_for_load; /* skip the task based on weighted load issues */
+ long rem_load_move;
task_t *tmp;
- if (max_nr_move == 0)
+ if (max_nr_move == 0 || max_load_move == 0)
goto out;
+ rem_load_move = max_load_move;
pinned = 1;
+ this_best_prio = rq_best_prio(this_rq);
+ busiest_best_prio = rq_best_prio(busiest);
+ /*
+ * Enable handling of the case where there is more than one task
+ * with the best priority. If the currently running task is one
+ * of those with prio==busiest_best_prio we know it won't be moved
+ * and therefore it's safe to override the skip (based on load) of
+ * any task we find with that prio.
+ */
+ busiest_best_prio_seen = busiest_best_prio == busiest->curr->prio;
/*
* We first consider expired tasks. Those will likely not be
@@ -1912,7 +2093,17 @@ skip_queue:
curr = curr->prev;
- if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+ /*
+ * To help distribute high priority tasks across CPUs we don't
+ * skip a task if it will be the highest priority task (i.e. smallest
+ * prio value) on its new queue, regardless of its load weight.
+ */
+ skip_for_load = tmp->load_weight > rem_load_move;
+ if (skip_for_load && idx < this_best_prio)
+ skip_for_load = !busiest_best_prio_seen && idx == busiest_best_prio;
+ if (skip_for_load ||
+ !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
+ busiest_best_prio_seen |= idx == busiest_best_prio;
if (curr != head)
goto skip_queue;
idx++;
@@ -1926,9 +2117,15 @@ skip_queue:
pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
pulled++;
+ rem_load_move -= tmp->load_weight;
- /* We only want to steal up to the prescribed number of tasks. */
- if (pulled < max_nr_move) {
+ /*
+ * We only want to steal up to the prescribed number of tasks
+ * and the prescribed amount of weighted load.
+ */
+ if (pulled < max_nr_move && rem_load_move > 0) {
+ if (idx < this_best_prio)
+ this_best_prio = idx;
if (curr != head)
goto skip_queue;
idx++;
@@ -1949,7 +2146,7 @@ out:
/*
* find_busiest_group finds and returns the busiest CPU group within the
- * domain. It calculates and returns the number of tasks which should be
+ * domain. It calculates and returns the amount of weighted load which should be
* moved to restore balance via the imbalance parameter.
*/
static struct sched_group *
@@ -1959,9 +2156,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
unsigned long max_pull;
+ unsigned long busiest_load_per_task, busiest_nr_running;
+ unsigned long this_load_per_task, this_nr_running;
int load_idx;
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ int power_savings_balance = 1;
+ unsigned long leader_nr_running = 0, min_load_per_task = 0;
+ unsigned long min_nr_running = ULONG_MAX;
+ struct sched_group *group_min = NULL, *group_leader = NULL;
+#endif
max_load = this_load = total_load = total_pwr = 0;
+ busiest_load_per_task = busiest_nr_running = 0;
+ this_load_per_task = this_nr_running = 0;
if (idle == NOT_IDLE)
load_idx = sd->busy_idx;
else if (idle == NEWLY_IDLE)
@@ -1970,16 +2177,19 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load_idx = sd->idle_idx;
do {
- unsigned long load;
+ unsigned long load, group_capacity;
int local_group;
int i;
+ unsigned long sum_nr_running, sum_weighted_load;
local_group = cpu_isset(this_cpu, group->cpumask);
/* Tally up the load of all CPUs in the group */
- avg_load = 0;
+ sum_weighted_load = sum_nr_running = avg_load = 0;
for_each_cpu_mask(i, group->cpumask) {
+ runqueue_t *rq = cpu_rq(i);
+
if (*sd_idle && !idle_cpu(i))
*sd_idle = 0;
@@ -1990,6 +2200,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
load = source_load(i, load_idx);
avg_load += load;
+ sum_nr_running += rq->nr_running;
+ sum_weighted_load += rq->raw_weighted_load;
}
total_load += avg_load;
@@ -1998,17 +2210,80 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/* Adjust by relative CPU power of the group */
avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+ group_capacity = group->cpu_power / SCHED_LOAD_SCALE;
+
if (local_group) {
this_load = avg_load;
this = group;
- } else if (avg_load > max_load) {
+ this_nr_running = sum_nr_running;
+ this_load_per_task = sum_weighted_load;
+ } else if (avg_load > max_load &&
+ sum_nr_running > group_capacity) {
max_load = avg_load;
busiest = group;
+ busiest_nr_running = sum_nr_running;
+ busiest_load_per_task = sum_weighted_load;
}
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ /*
+ * Busy processors will not participate in power savings
+ * balance.
+ */
+ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ goto group_next;
+
+ /*
+ * If the local group is idle or completely loaded
+ * no need to do power savings balance at this domain
+ */
+ if (local_group && (this_nr_running >= group_capacity ||
+ !this_nr_running))
+ power_savings_balance = 0;
+
+ /*
+ * If a group is already running at full capacity or idle,
+ * don't include that group in power savings calculations
+ */
+ if (!power_savings_balance || sum_nr_running >= group_capacity
+ || !sum_nr_running)
+ goto group_next;
+
+ /*
+ * Calculate the group which has the least non-idle load.
+ * This is the group from where we need to pick up the load
+ * for saving power
+ */
+ if ((sum_nr_running < min_nr_running) ||
+ (sum_nr_running == min_nr_running &&
+ first_cpu(group->cpumask) <
+ first_cpu(group_min->cpumask))) {
+ group_min = group;
+ min_nr_running = sum_nr_running;
+ min_load_per_task = sum_weighted_load /
+ sum_nr_running;
+ }
+
+ /*
+ * Calculate the group which is nearly at its
+ * capacity but still has some space to pick up some load
+ * from another group and save more power
+ */
+ if (sum_nr_running <= group_capacity - 1)
+ if (sum_nr_running > leader_nr_running ||
+ (sum_nr_running == leader_nr_running &&
+ first_cpu(group->cpumask) >
+ first_cpu(group_leader->cpumask))) {
+ group_leader = group;
+ leader_nr_running = sum_nr_running;
+ }
+
+group_next:
+#endif
group = group->next;
} while (group != sd->groups);
- if (!busiest || this_load >= max_load || max_load <= SCHED_LOAD_SCALE)
+ if (!busiest || this_load >= max_load || busiest_nr_running == 0)
goto out_balanced;
avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
@@ -2017,6 +2292,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
100*max_load <= sd->imbalance_pct*this_load)
goto out_balanced;
+ busiest_load_per_task /= busiest_nr_running;
/*
* We're trying to get all the cpus to the average_load, so we don't
* want to push ourselves above the average load, nor do we wish to
@@ -2028,21 +2304,50 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* by pulling tasks to us. Be careful of negative numbers as they'll
* appear as very large values with unsigned longs.
*/
+ if (max_load <= busiest_load_per_task)
+ goto out_balanced;
+
+ /*
+ * In the presence of smp nice balancing, certain scenarios can have
+ * max load less than avg load (as we skip the groups at or below
+ * their cpu_power while calculating max_load).
+ */
+ if (max_load < avg_load) {
+ *imbalance = 0;
+ goto small_imbalance;
+ }
/* Don't want to pull so many tasks that a group would go idle */
- max_pull = min(max_load - avg_load, max_load - SCHED_LOAD_SCALE);
+ max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
/* How much load to actually move to equalise the imbalance */
*imbalance = min(max_pull * busiest->cpu_power,
(avg_load - this_load) * this->cpu_power)
/ SCHED_LOAD_SCALE;
- if (*imbalance < SCHED_LOAD_SCALE) {
- unsigned long pwr_now = 0, pwr_move = 0;
+ /*
+ * if *imbalance is less than the average load per runnable task
+ * there is no guarantee that any tasks will be moved, so we consider
+ * bumping its value to force at least one task to be
+ * moved
+ */
+ if (*imbalance < busiest_load_per_task) {
+ unsigned long pwr_now, pwr_move;
unsigned long tmp;
+ unsigned int imbn;
+
+small_imbalance:
+ pwr_move = pwr_now = 0;
+ imbn = 2;
+ if (this_nr_running) {
+ this_load_per_task /= this_nr_running;
+ if (busiest_load_per_task > this_load_per_task)
+ imbn = 1;
+ } else
+ this_load_per_task = SCHED_LOAD_SCALE;
- if (max_load - this_load >= SCHED_LOAD_SCALE*2) {
- *imbalance = 1;
+ if (max_load - this_load >= busiest_load_per_task * imbn) {
+ *imbalance = busiest_load_per_task;
return busiest;
}
@@ -2052,39 +2357,47 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
* moving them.
*/
- pwr_now += busiest->cpu_power*min(SCHED_LOAD_SCALE, max_load);
- pwr_now += this->cpu_power*min(SCHED_LOAD_SCALE, this_load);
+ pwr_now += busiest->cpu_power *
+ min(busiest_load_per_task, max_load);
+ pwr_now += this->cpu_power *
+ min(this_load_per_task, this_load);
pwr_now /= SCHED_LOAD_SCALE;
/* Amount of load we'd subtract */
- tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/busiest->cpu_power;
+ tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
if (max_load > tmp)
- pwr_move += busiest->cpu_power*min(SCHED_LOAD_SCALE,
- max_load - tmp);
+ pwr_move += busiest->cpu_power *
+ min(busiest_load_per_task, max_load - tmp);
/* Amount of load we'd add */
if (max_load*busiest->cpu_power <
- SCHED_LOAD_SCALE*SCHED_LOAD_SCALE)
+ busiest_load_per_task*SCHED_LOAD_SCALE)
tmp = max_load*busiest->cpu_power/this->cpu_power;
else
- tmp = SCHED_LOAD_SCALE*SCHED_LOAD_SCALE/this->cpu_power;
- pwr_move += this->cpu_power*min(SCHED_LOAD_SCALE, this_load + tmp);
+ tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
+ pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
pwr_move /= SCHED_LOAD_SCALE;
/* Move if we gain throughput */
if (pwr_move <= pwr_now)
goto out_balanced;
- *imbalance = 1;
- return busiest;
+ *imbalance = busiest_load_per_task;
}
- /* Get rid of the scaling factor, rounding down as we divide */
- *imbalance = *imbalance / SCHED_LOAD_SCALE;
return busiest;
out_balanced:
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
+ goto ret;
+ if (this == group_leader && group_leader != group_min) {
+ *imbalance = min_load_per_task;
+ return group_min;
+ }
+ret:
+#endif
*imbalance = 0;
return NULL;
}
@@ -2093,18 +2406,21 @@ out_balanced:
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static runqueue_t *find_busiest_queue(struct sched_group *group,
- enum idle_type idle)
+ enum idle_type idle, unsigned long imbalance)
{
- unsigned long load, max_load = 0;
- runqueue_t *busiest = NULL;
+ unsigned long max_load = 0;
+ runqueue_t *busiest = NULL, *rqi;
int i;
for_each_cpu_mask(i, group->cpumask) {
- load = source_load(i, 0);
+ rqi = cpu_rq(i);
+
+ if (rqi->nr_running == 1 && rqi->raw_weighted_load > imbalance)
+ continue;
- if (load > max_load) {
- max_load = load;
- busiest = cpu_rq(i);
+ if (rqi->raw_weighted_load > max_load) {
+ max_load = rqi->raw_weighted_load;
+ busiest = rqi;
}
}
@@ -2117,6 +2433,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
*/
#define MAX_PINNED_INTERVAL 512
+#define minus_1_or_zero(n) ((n) > 0 ? (n) - 1 : 0)
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
@@ -2133,7 +2450,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
int active_balance = 0;
int sd_idle = 0;
- if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER)
+ if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
+ !sched_smt_power_savings)
sd_idle = 1;
schedstat_inc(sd, lb_cnt[idle]);
@@ -2144,7 +2462,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}
- busiest = find_busiest_queue(group, idle);
+ busiest = find_busiest_queue(group, idle, imbalance);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[idle]);
goto out_balanced;
@@ -2164,6 +2482,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
*/
double_rq_lock(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
+ minus_1_or_zero(busiest->nr_running),
imbalance, sd, idle, &all_pinned);
double_rq_unlock(this_rq, busiest);
@@ -2221,7 +2540,8 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
sd->balance_interval *= 2;
}
- if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+ if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+ !sched_smt_power_savings)
return -1;
return nr_moved;
@@ -2236,7 +2556,7 @@ out_one_pinned:
(sd->balance_interval < sd->max_interval))
sd->balance_interval *= 2;
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
return -1;
return 0;
}
@@ -2257,7 +2577,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
int nr_moved = 0;
int sd_idle = 0;
- if (sd->flags & SD_SHARE_CPUPOWER)
+ if (sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
sd_idle = 1;
schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
@@ -2267,7 +2587,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
goto out_balanced;
}
- busiest = find_busiest_queue(group, NEWLY_IDLE);
+ busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance);
if (!busiest) {
schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
goto out_balanced;
@@ -2282,6 +2602,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
/* Attempt to move tasks */
double_lock_balance(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
+ minus_1_or_zero(busiest->nr_running),
imbalance, sd, NEWLY_IDLE, NULL);
spin_unlock(&busiest->lock);
}
@@ -2297,7 +2618,7 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
out_balanced:
schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
- if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER)
+ if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER && !sched_smt_power_savings)
return -1;
sd->nr_balance_failed = 0;
return 0;
@@ -2352,17 +2673,19 @@ static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
double_lock_balance(busiest_rq, target_rq);
/* Search for an sd spanning us and the target CPU. */
- for_each_domain(target_cpu, sd)
+ for_each_domain(target_cpu, sd) {
if ((sd->flags & SD_LOAD_BALANCE) &&
cpu_isset(busiest_cpu, sd->span))
break;
+ }
if (unlikely(sd == NULL))
goto out;
schedstat_inc(sd, alb_cnt);
- if (move_tasks(target_rq, target_cpu, busiest_rq, 1, sd, SCHED_IDLE, NULL))
+ if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
+ RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE, NULL))
schedstat_inc(sd, alb_pushed);
else
schedstat_inc(sd, alb_failed);
@@ -2390,7 +2713,7 @@ static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
struct sched_domain *sd;
int i;
- this_load = this_rq->nr_running * SCHED_LOAD_SCALE;
+ this_load = this_rq->raw_weighted_load;
/* Update our load */
for (i = 0; i < 3; i++) {
unsigned long new_load = this_load;
@@ -2691,48 +3014,35 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
resched_task(rq->idle);
}
-static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+/*
+ * Called with interrupt disabled and this_rq's runqueue locked.
+ */
+static void wake_sleeping_dependent(int this_cpu)
{
struct sched_domain *tmp, *sd = NULL;
- cpumask_t sibling_map;
int i;
- for_each_domain(this_cpu, tmp)
- if (tmp->flags & SD_SHARE_CPUPOWER)
+ for_each_domain(this_cpu, tmp) {
+ if (tmp->flags & SD_SHARE_CPUPOWER) {
sd = tmp;
+ break;
+ }
+ }
if (!sd)
return;
- /*
- * Unlock the current runqueue because we have to lock in
- * CPU order to avoid deadlocks. Caller knows that we might
- * unlock. We keep IRQs disabled.
- */
- spin_unlock(&this_rq->lock);
-
- sibling_map = sd->span;
-
- for_each_cpu_mask(i, sibling_map)
- spin_lock(&cpu_rq(i)->lock);
- /*
- * We clear this CPU from the mask. This both simplifies the
- * inner loop and keps this_rq locked when we exit:
- */
- cpu_clear(this_cpu, sibling_map);
-
- for_each_cpu_mask(i, sibling_map) {
+ for_each_cpu_mask(i, sd->span) {
runqueue_t *smt_rq = cpu_rq(i);
+ if (i == this_cpu)
+ continue;
+ if (unlikely(!spin_trylock(&smt_rq->lock)))
+ continue;
+
wakeup_busy_runqueue(smt_rq);
+ spin_unlock(&smt_rq->lock);
}
-
- for_each_cpu_mask(i, sibling_map)
- spin_unlock(&cpu_rq(i)->lock);
- /*
- * We exit with this_cpu's rq still held and IRQs
- * still disabled:
- */
}
/*
@@ -2745,52 +3055,46 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
return p->time_slice * (100 - sd->per_cpu_gain) / 100;
}
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+/*
+ * To minimise lock contention and not have to drop this_rq's runlock we only
+ * trylock the sibling runqueues and bypass those runqueues if we fail to
+ * acquire their lock. As we only trylock the normal locking order does not
+ * need to be obeyed.
+ */
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq, task_t *p)
{
struct sched_domain *tmp, *sd = NULL;
- cpumask_t sibling_map;
- prio_array_t *array;
int ret = 0, i;
- task_t *p;
- for_each_domain(this_cpu, tmp)
- if (tmp->flags & SD_SHARE_CPUPOWER)
+ /* kernel/rt threads do not participate in dependent sleeping */
+ if (!p->mm || rt_task(p))
+ return 0;
+
+ for_each_domain(this_cpu, tmp) {
+ if (tmp->flags & SD_SHARE_CPUPOWER) {
sd = tmp;
+ break;
+ }
+ }
if (!sd)
return 0;
- /*
- * The same locking rules and details apply as for
- * wake_sleeping_dependent():
- */
- spin_unlock(&this_rq->lock);
- sibling_map = sd->span;
- for_each_cpu_mask(i, sibling_map)
- spin_lock(&cpu_rq(i)->lock);
- cpu_clear(this_cpu, sibling_map);
+ for_each_cpu_mask(i, sd->span) {
+ runqueue_t *smt_rq;
+ task_t *smt_curr;
- /*
- * Establish next task to be run - it might have gone away because
- * we released the runqueue lock above:
- */
- if (!this_rq->nr_running)
- goto out_unlock;
- array = this_rq->active;
- if (!array->nr_active)
- array = this_rq->expired;
- BUG_ON(!array->nr_active);
+ if (i == this_cpu)
+ continue;
- p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
- task_t, run_list);
+ smt_rq = cpu_rq(i);
+ if (unlikely(!spin_trylock(&smt_rq->lock)))
+ continue;
- for_each_cpu_mask(i, sibling_map) {
- runqueue_t *smt_rq = cpu_rq(i);
- task_t *smt_curr = smt_rq->curr;
+ smt_curr = smt_rq->curr;
- /* Kernel threads do not participate in dependent sleeping */
- if (!p->mm || !smt_curr->mm || rt_task(p))
- goto check_smt_task;
+ if (!smt_curr->mm)
+ goto unlock;
/*
* If a user task with lower static priority than the
@@ -2808,49 +3112,24 @@ static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
if ((jiffies % DEF_TIMESLICE) >
(sd->per_cpu_gain * DEF_TIMESLICE / 100))
ret = 1;
- } else
+ } else {
if (smt_curr->static_prio < p->static_prio &&
!TASK_PREEMPTS_CURR(p, smt_rq) &&
smt_slice(smt_curr, sd) > task_timeslice(p))
ret = 1;
-
-check_smt_task:
- if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
- rt_task(smt_curr))
- continue;
- if (!p->mm) {
- wakeup_busy_runqueue(smt_rq);
- continue;
- }
-
- /*
- * Reschedule a lower priority task on the SMT sibling for
- * it to be put to sleep, or wake it up if it has been put to
- * sleep for priority reasons to see if it should run now.
- */
- if (rt_task(p)) {
- if ((jiffies % DEF_TIMESLICE) >
- (sd->per_cpu_gain * DEF_TIMESLICE / 100))
- resched_task(smt_curr);
- } else {
- if (TASK_PREEMPTS_CURR(p, smt_rq) &&
- smt_slice(p, sd) > task_timeslice(smt_curr))
- resched_task(smt_curr);
- else
- wakeup_busy_runqueue(smt_rq);
}
+unlock:
+ spin_unlock(&smt_rq->lock);
}
-out_unlock:
- for_each_cpu_mask(i, sibling_map)
- spin_unlock(&cpu_rq(i)->lock);
return ret;
}
#else
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static inline void wake_sleeping_dependent(int this_cpu)
{
}
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq,
+ task_t *p)
{
return 0;
}
@@ -2972,32 +3251,13 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
if (unlikely(!rq->nr_running)) {
-go_idle:
idle_balance(cpu, rq);
if (!rq->nr_running) {
next = rq->idle;
rq->expired_timestamp = 0;
- wake_sleeping_dependent(cpu, rq);
- /*
- * wake_sleeping_dependent() might have released
- * the runqueue, so break out if we got new
- * tasks meanwhile:
- */
- if (!rq->nr_running)
- goto switch_tasks;
- }
- } else {
- if (dependent_sleeper(cpu, rq)) {
- next = rq->idle;
+ wake_sleeping_dependent(cpu);
goto switch_tasks;
}
- /*
- * dependent_sleeper() releases and reacquires the runqueue
- * lock, hence go into the idle loop if the rq went
- * empty meanwhile:
- */
- if (unlikely(!rq->nr_running))
- goto go_idle;
}
array = rq->active;
@@ -3035,6 +3295,8 @@ go_idle:
}
}
next->sleep_type = SLEEP_NORMAL;
+ if (dependent_sleeper(cpu, rq, next))
+ next = rq->idle;
switch_tasks:
if (next == rq->idle)
schedstat_inc(rq, sched_goidle);
@@ -3478,12 +3740,65 @@ long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
EXPORT_SYMBOL(sleep_on_timeout);
+#ifdef CONFIG_RT_MUTEXES
+
+/*
+ * rt_mutex_setprio - set the current priority of a task
+ * @p: task
+ * @prio: prio value (kernel-internal form)
+ *
+ * This function changes the 'effective' priority of a task. Unlike
+ * __setscheduler(), it does not touch ->normal_prio.
+ *
+ * Used by the rt_mutex code to implement priority inheritance logic.
+ */
+void rt_mutex_setprio(task_t *p, int prio)
+{
+ unsigned long flags;
+ prio_array_t *array;
+ runqueue_t *rq;
+ int oldprio;
+
+ BUG_ON(prio < 0 || prio > MAX_PRIO);
+
+ rq = task_rq_lock(p, &flags);
+
+ oldprio = p->prio;
+ array = p->array;
+ if (array)
+ dequeue_task(p, array);
+ p->prio = prio;
+
+ if (array) {
+ /*
+ * If changing to an RT priority then queue it
+ * in the active array!
+ */
+ if (rt_task(p))
+ array = rq->active;
+ enqueue_task(p, array);
+ /*
+ * Reschedule if we are currently running on this runqueue and
+ * our priority decreased, or if we are not currently running on
+ * this runqueue and our priority is higher than the current's
+ */
+ if (task_running(rq, p)) {
+ if (p->prio > oldprio)
+ resched_task(rq->curr);
+ } else if (TASK_PREEMPTS_CURR(p, rq))
+ resched_task(rq->curr);
+ }
+ task_rq_unlock(rq, &flags);
+}
+
+#endif
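
For context: the rt-mutex code feeds rt_mutex_setprio() a value derived from the task's own normal priority and its highest-priority PI waiter; conceptually the boosted value is the numerically lowest (highest-priority) of the two. A hedged sketch of that calculation with invented names; the real helper lives in the rtmutex code rather than here:

    /* Lower numbers mean higher priority: boost to the top waiter,
     * but never below the task's own normal priority. */
    static int demo_pi_prio(int normal_prio, int top_waiter_prio, int has_waiters)
    {
            if (!has_waiters)
                    return normal_prio;
            return top_waiter_prio < normal_prio ? top_waiter_prio : normal_prio;
    }
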
+
void set_user_nice(task_t *p, long nice)
{
unsigned long flags;
prio_array_t *array;
runqueue_t *rq;
- int old_prio, new_prio, delta;
+ int old_prio, delta;
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
@@ -3498,22 +3813,25 @@ void set_user_nice(task_t *p, long nice)
* it wont have any effect on scheduling until the task is
* not SCHED_NORMAL/SCHED_BATCH:
*/
- if (rt_task(p)) {
+ if (has_rt_policy(p)) {
p->static_prio = NICE_TO_PRIO(nice);
goto out_unlock;
}
array = p->array;
- if (array)
+ if (array) {
dequeue_task(p, array);
+ dec_raw_weighted_load(rq, p);
+ }
- old_prio = p->prio;
- new_prio = NICE_TO_PRIO(nice);
- delta = new_prio - old_prio;
p->static_prio = NICE_TO_PRIO(nice);
- p->prio += delta;
+ set_load_weight(p);
+ old_prio = p->prio;
+ p->prio = effective_prio(p);
+ delta = p->prio - old_prio;
if (array) {
enqueue_task(p, array);
+ inc_raw_weighted_load(rq, p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
@@ -3524,7 +3842,6 @@ void set_user_nice(task_t *p, long nice)
out_unlock:
task_rq_unlock(rq, &flags);
}
-
EXPORT_SYMBOL(set_user_nice);
/*
@@ -3639,16 +3956,15 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
BUG_ON(p->array);
p->policy = policy;
p->rt_priority = prio;
- if (policy != SCHED_NORMAL && policy != SCHED_BATCH) {
- p->prio = MAX_RT_PRIO-1 - p->rt_priority;
- } else {
- p->prio = p->static_prio;
- /*
- * SCHED_BATCH tasks are treated as perpetual CPU hogs:
- */
- if (policy == SCHED_BATCH)
- p->sleep_avg = 0;
- }
+ p->normal_prio = normal_prio(p);
+ /* we are holding p->pi_lock already */
+ p->prio = rt_mutex_getprio(p);
+ /*
+ * SCHED_BATCH tasks are treated as perpetual CPU hogs:
+ */
+ if (policy == SCHED_BATCH)
+ p->sleep_avg = 0;
+ set_load_weight(p);
}
/**
@@ -3667,6 +3983,8 @@ int sched_setscheduler(struct task_struct *p, int policy,
unsigned long flags;
runqueue_t *rq;
+ /* may grab non-irq protected spin_locks */
+ BUG_ON(in_interrupt());
recheck:
/* double check policy once rq lock held */
if (policy < 0)
@@ -3715,14 +4033,20 @@ recheck:
if (retval)
return retval;
/*
+ * make sure no PI-waiters arrive (or leave) while we are
+ * changing the priority of the task:
+ */
+ spin_lock_irqsave(&p->pi_lock, flags);
+ /*
 * To be able to change p->policy safely, the appropriate
* runqueue lock must be held.
*/
- rq = task_rq_lock(p, &flags);
+ rq = __task_rq_lock(p);
/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
array = p->array;
@@ -3743,7 +4067,11 @@ recheck:
} else if (TASK_PREEMPTS_CURR(p, rq))
resched_task(rq->curr);
}
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ rt_mutex_adjust_pi(p);
+
return 0;
}
EXPORT_SYMBOL_GPL(sched_setscheduler);
@@ -3765,8 +4093,10 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
read_unlock_irq(&tasklist_lock);
return -ESRCH;
}
- retval = sched_setscheduler(p, policy, &lparam);
+ get_task_struct(p);
read_unlock_irq(&tasklist_lock);
+ retval = sched_setscheduler(p, policy, &lparam);
+ put_task_struct(p);
return retval;
}
@@ -4378,7 +4708,7 @@ void __devinit init_idle(task_t *idle, int cpu)
idle->timestamp = sched_clock();
idle->sleep_avg = 0;
idle->array = NULL;
- idle->prio = MAX_PRIO;
+ idle->prio = idle->normal_prio = MAX_PRIO;
idle->state = TASK_RUNNING;
idle->cpus_allowed = cpumask_of_cpu(cpu);
set_task_cpu(idle, cpu);
@@ -4474,13 +4804,16 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
*
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
+ *
+ * Returns non-zero if task was successfully migrated.
*/
-static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
runqueue_t *rq_dest, *rq_src;
+ int ret = 0;
if (unlikely(cpu_is_offline(dest_cpu)))
- return;
+ return ret;
rq_src = cpu_rq(src_cpu);
rq_dest = cpu_rq(dest_cpu);
@@ -4508,9 +4841,10 @@ static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
if (TASK_PREEMPTS_CURR(p, rq_dest))
resched_task(rq_dest->curr);
}
-
+ ret = 1;
out:
double_rq_unlock(rq_src, rq_dest);
+ return ret;
}
/*
@@ -4580,9 +4914,12 @@ wait_to_die:
/* Figure out where task on dead CPU should go, use force if necessary. */
static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
{
+ runqueue_t *rq;
+ unsigned long flags;
int dest_cpu;
cpumask_t mask;
+restart:
/* On same node? */
mask = node_to_cpumask(cpu_to_node(dead_cpu));
cpus_and(mask, mask, tsk->cpus_allowed);
@@ -4594,8 +4931,10 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
/* No more Mr. Nice Guy. */
if (dest_cpu == NR_CPUS) {
+ rq = task_rq_lock(tsk, &flags);
cpus_setall(tsk->cpus_allowed);
dest_cpu = any_online_cpu(tsk->cpus_allowed);
+ task_rq_unlock(rq, &flags);
/*
* Don't tell them about moving exiting tasks or
@@ -4607,7 +4946,8 @@ static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *tsk)
"longer affine to cpu%d\n",
tsk->pid, tsk->comm, dead_cpu);
}
- __migrate_task(tsk, dead_cpu, dest_cpu);
+ if (!__migrate_task(tsk, dead_cpu, dest_cpu))
+ goto restart;
}
/*
@@ -4734,8 +5074,9 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
* migration_call - callback that gets triggered when a CPU is added.
* Here we can start up the necessary migration thread for the new CPU.
*/
-static int migration_call(struct notifier_block *nfb, unsigned long action,
- void *hcpu)
+static int __cpuinit migration_call(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
{
int cpu = (long)hcpu;
struct task_struct *p;
@@ -4805,7 +5146,7 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
/* Register at highest priority so that task migration (migrate_all_tasks)
* happens before everything else.
*/
-static struct notifier_block migration_notifier = {
+static struct notifier_block __cpuinitdata migration_notifier = {
.notifier_call = migration_call,
.priority = 10
};
@@ -5606,6 +5947,7 @@ static cpumask_t sched_domain_node_span(int node)
}
#endif
+int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
/*
* At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
* can switch it on easily if needed.
@@ -5621,7 +5963,7 @@ static int cpu_to_cpu_group(int cpu)
#ifdef CONFIG_SCHED_MC
static DEFINE_PER_CPU(struct sched_domain, core_domains);
-static struct sched_group sched_group_core[NR_CPUS];
+static struct sched_group *sched_group_core_bycpu[NR_CPUS];
#endif
#if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
@@ -5637,7 +5979,7 @@ static int cpu_to_core_group(int cpu)
#endif
static DEFINE_PER_CPU(struct sched_domain, phys_domains);
-static struct sched_group sched_group_phys[NR_CPUS];
+static struct sched_group *sched_group_phys_bycpu[NR_CPUS];
static int cpu_to_phys_group(int cpu)
{
#if defined(CONFIG_SCHED_MC)
@@ -5694,13 +6036,74 @@ next_sg:
}
#endif
+/* Free memory allocated for various sched_group structures */
+static void free_sched_groups(const cpumask_t *cpu_map)
+{
+ int cpu;
+#ifdef CONFIG_NUMA
+ int i;
+
+ for_each_cpu_mask(cpu, *cpu_map) {
+ struct sched_group *sched_group_allnodes
+ = sched_group_allnodes_bycpu[cpu];
+ struct sched_group **sched_group_nodes
+ = sched_group_nodes_bycpu[cpu];
+
+ if (sched_group_allnodes) {
+ kfree(sched_group_allnodes);
+ sched_group_allnodes_bycpu[cpu] = NULL;
+ }
+
+ if (!sched_group_nodes)
+ continue;
+
+ for (i = 0; i < MAX_NUMNODES; i++) {
+ cpumask_t nodemask = node_to_cpumask(i);
+ struct sched_group *oldsg, *sg = sched_group_nodes[i];
+
+ cpus_and(nodemask, nodemask, *cpu_map);
+ if (cpus_empty(nodemask))
+ continue;
+
+ if (sg == NULL)
+ continue;
+ sg = sg->next;
+next_sg:
+ oldsg = sg;
+ sg = sg->next;
+ kfree(oldsg);
+ if (oldsg != sched_group_nodes[i])
+ goto next_sg;
+ }
+ kfree(sched_group_nodes);
+ sched_group_nodes_bycpu[cpu] = NULL;
+ }
+#endif
+ for_each_cpu_mask(cpu, *cpu_map) {
+ if (sched_group_phys_bycpu[cpu]) {
+ kfree(sched_group_phys_bycpu[cpu]);
+ sched_group_phys_bycpu[cpu] = NULL;
+ }
+#ifdef CONFIG_SCHED_MC
+ if (sched_group_core_bycpu[cpu]) {
+ kfree(sched_group_core_bycpu[cpu]);
+ sched_group_core_bycpu[cpu] = NULL;
+ }
+#endif
+ }
+}
+
/*
* Build sched domains for a given set of cpus and attach the sched domains
* to the individual cpus
*/
-void build_sched_domains(const cpumask_t *cpu_map)
+static int build_sched_domains(const cpumask_t *cpu_map)
{
int i;
+ struct sched_group *sched_group_phys = NULL;
+#ifdef CONFIG_SCHED_MC
+ struct sched_group *sched_group_core = NULL;
+#endif
#ifdef CONFIG_NUMA
struct sched_group **sched_group_nodes = NULL;
struct sched_group *sched_group_allnodes = NULL;
@@ -5708,11 +6111,11 @@ void build_sched_domains(const cpumask_t *cpu_map)
/*
* Allocate the per-node list of sched groups
*/
- sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
- GFP_ATOMIC);
+ sched_group_nodes = kzalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
+ GFP_KERNEL);
if (!sched_group_nodes) {
printk(KERN_WARNING "Can not alloc sched group node list\n");
- return;
+ return -ENOMEM;
}
sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif
@@ -5738,7 +6141,7 @@ void build_sched_domains(const cpumask_t *cpu_map)
if (!sched_group_allnodes) {
printk(KERN_WARNING
"Can not alloc allnodes sched group\n");
- break;
+ goto error;
}
sched_group_allnodes_bycpu[i]
= sched_group_allnodes;
@@ -5759,6 +6162,18 @@ void build_sched_domains(const cpumask_t *cpu_map)
cpus_and(sd->span, sd->span, *cpu_map);
#endif
+ if (!sched_group_phys) {
+ sched_group_phys
+ = kmalloc(sizeof(struct sched_group) * NR_CPUS,
+ GFP_KERNEL);
+ if (!sched_group_phys) {
+ printk(KERN_WARNING "Can not alloc phys sched "
+ "group\n");
+ goto error;
+ }
+ sched_group_phys_bycpu[i] = sched_group_phys;
+ }
+
p = sd;
sd = &per_cpu(phys_domains, i);
group = cpu_to_phys_group(i);
@@ -5768,6 +6183,18 @@ void build_sched_domains(const cpumask_t *cpu_map)
sd->groups = &sched_group_phys[group];
#ifdef CONFIG_SCHED_MC
+ if (!sched_group_core) {
+ sched_group_core
+ = kmalloc(sizeof(struct sched_group) * NR_CPUS,
+ GFP_KERNEL);
+ if (!sched_group_core) {
+ printk(KERN_WARNING "Can not alloc core sched "
+ "group\n");
+ goto error;
+ }
+ sched_group_core_bycpu[i] = sched_group_core;
+ }
+
p = sd;
sd = &per_cpu(core_domains, i);
group = cpu_to_core_group(i);
@@ -5851,24 +6278,21 @@ void build_sched_domains(const cpumask_t *cpu_map)
domainspan = sched_domain_node_span(i);
cpus_and(domainspan, domainspan, *cpu_map);
- sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+ sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
+ if (!sg) {
+ printk(KERN_WARNING "Can not alloc domain group for "
+ "node %d\n", i);
+ goto error;
+ }
sched_group_nodes[i] = sg;
for_each_cpu_mask(j, nodemask) {
struct sched_domain *sd;
sd = &per_cpu(node_domains, j);
sd->groups = sg;
- if (sd->groups == NULL) {
- /* Turn off balancing if we have no groups */
- sd->flags = 0;
- }
- }
- if (!sg) {
- printk(KERN_WARNING
- "Can not alloc domain group for node %d\n", i);
- continue;
}
sg->cpu_power = 0;
sg->cpumask = nodemask;
+ sg->next = sg;
cpus_or(covered, covered, nodemask);
prev = sg;
@@ -5887,54 +6311,90 @@ void build_sched_domains(const cpumask_t *cpu_map)
if (cpus_empty(tmp))
continue;
- sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
+ sg = kmalloc_node(sizeof(struct sched_group),
+ GFP_KERNEL, i);
if (!sg) {
printk(KERN_WARNING
"Can not alloc domain group for node %d\n", j);
- break;
+ goto error;
}
sg->cpu_power = 0;
sg->cpumask = tmp;
+ sg->next = prev->next;
cpus_or(covered, covered, tmp);
prev->next = sg;
prev = sg;
}
- prev->next = sched_group_nodes[i];
}
#endif
/* Calculate CPU power for physical packages and nodes */
+#ifdef CONFIG_SCHED_SMT
for_each_cpu_mask(i, *cpu_map) {
- int power;
struct sched_domain *sd;
-#ifdef CONFIG_SCHED_SMT
sd = &per_cpu(cpu_domains, i);
- power = SCHED_LOAD_SCALE;
- sd->groups->cpu_power = power;
+ sd->groups->cpu_power = SCHED_LOAD_SCALE;
+ }
#endif
#ifdef CONFIG_SCHED_MC
+ for_each_cpu_mask(i, *cpu_map) {
+ int power;
+ struct sched_domain *sd;
sd = &per_cpu(core_domains, i);
- power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
+ if (sched_smt_power_savings)
+ power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask);
+ else
+ power = SCHED_LOAD_SCALE + (cpus_weight(sd->groups->cpumask)-1)
* SCHED_LOAD_SCALE / 10;
sd->groups->cpu_power = power;
+ }
+#endif
+ for_each_cpu_mask(i, *cpu_map) {
+ struct sched_domain *sd;
+#ifdef CONFIG_SCHED_MC
sd = &per_cpu(phys_domains, i);
+ if (i != first_cpu(sd->groups->cpumask))
+ continue;
- /*
- * This has to be < 2 * SCHED_LOAD_SCALE
- * Lets keep it SCHED_LOAD_SCALE, so that
- * while calculating NUMA group's cpu_power
- * we can simply do
- * numa_group->cpu_power += phys_group->cpu_power;
- *
- * See "only add power once for each physical pkg"
- * comment below
- */
- sd->groups->cpu_power = SCHED_LOAD_SCALE;
+ sd->groups->cpu_power = 0;
+ if (sched_mc_power_savings || sched_smt_power_savings) {
+ int j;
+
+ for_each_cpu_mask(j, sd->groups->cpumask) {
+ struct sched_domain *sd1;
+ sd1 = &per_cpu(core_domains, j);
+ /*
+ * for each core we will add once
+ * to the group in physical domain
+ */
+ if (j != first_cpu(sd1->groups->cpumask))
+ continue;
+
+ if (sched_smt_power_savings)
+ sd->groups->cpu_power += sd1->groups->cpu_power;
+ else
+ sd->groups->cpu_power += SCHED_LOAD_SCALE;
+ }
+ } else
+ /*
+ * This has to be < 2 * SCHED_LOAD_SCALE
+ * Lets keep it SCHED_LOAD_SCALE, so that
+ * while calculating NUMA group's cpu_power
+ * we can simply do
+ * numa_group->cpu_power += phys_group->cpu_power;
+ *
+ * See "only add power once for each physical pkg"
+ * comment below
+ */
+ sd->groups->cpu_power = SCHED_LOAD_SCALE;
#else
+ int power;
sd = &per_cpu(phys_domains, i);
- power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
- (cpus_weight(sd->groups->cpumask)-1) / 10;
+ if (sched_smt_power_savings)
+ power = SCHED_LOAD_SCALE * cpus_weight(sd->groups->cpumask);
+ else
+ power = SCHED_LOAD_SCALE;
sd->groups->cpu_power = power;
#endif
}
@@ -5962,13 +6422,20 @@ void build_sched_domains(const cpumask_t *cpu_map)
* Tune cache-hot values:
*/
calibrate_migration_costs(cpu_map);
+
+ return 0;
+
+error:
+ free_sched_groups(cpu_map);
+ return -ENOMEM;
}
/*
* Set up scheduler domains and groups. Callers must hold the hotplug lock.
*/
-static void arch_init_sched_domains(const cpumask_t *cpu_map)
+static int arch_init_sched_domains(const cpumask_t *cpu_map)
{
cpumask_t cpu_default_map;
+ int err;
/*
* Setup mask for cpus without special case scheduling requirements.
@@ -5977,51 +6444,14 @@ static void arch_init_sched_domains(const cpumask_t *cpu_map)
*/
cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
- build_sched_domains(&cpu_default_map);
+ err = build_sched_domains(&cpu_default_map);
+
+ return err;
}
static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
{
-#ifdef CONFIG_NUMA
- int i;
- int cpu;
-
- for_each_cpu_mask(cpu, *cpu_map) {
- struct sched_group *sched_group_allnodes
- = sched_group_allnodes_bycpu[cpu];
- struct sched_group **sched_group_nodes
- = sched_group_nodes_bycpu[cpu];
-
- if (sched_group_allnodes) {
- kfree(sched_group_allnodes);
- sched_group_allnodes_bycpu[cpu] = NULL;
- }
-
- if (!sched_group_nodes)
- continue;
-
- for (i = 0; i < MAX_NUMNODES; i++) {
- cpumask_t nodemask = node_to_cpumask(i);
- struct sched_group *oldsg, *sg = sched_group_nodes[i];
-
- cpus_and(nodemask, nodemask, *cpu_map);
- if (cpus_empty(nodemask))
- continue;
-
- if (sg == NULL)
- continue;
- sg = sg->next;
-next_sg:
- oldsg = sg;
- sg = sg->next;
- kfree(oldsg);
- if (oldsg != sched_group_nodes[i])
- goto next_sg;
- }
- kfree(sched_group_nodes);
- sched_group_nodes_bycpu[cpu] = NULL;
- }
-#endif
+ free_sched_groups(cpu_map);
}
/*
@@ -6046,9 +6476,10 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
* correct sched domains
* Call with hotplug lock held
*/
-void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
+int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
{
cpumask_t change_map;
+ int err = 0;
cpus_and(*partition1, *partition1, cpu_online_map);
cpus_and(*partition2, *partition2, cpu_online_map);
@@ -6057,10 +6488,86 @@ void partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
/* Detach sched domains from all of the affected cpus */
detach_destroy_domains(&change_map);
if (!cpus_empty(*partition1))
- build_sched_domains(partition1);
- if (!cpus_empty(*partition2))
- build_sched_domains(partition2);
+ err = build_sched_domains(partition1);
+ if (!err && !cpus_empty(*partition2))
+ err = build_sched_domains(partition2);
+
+ return err;
+}
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+int arch_reinit_sched_domains(void)
+{
+ int err;
+
+ lock_cpu_hotplug();
+ detach_destroy_domains(&cpu_online_map);
+ err = arch_init_sched_domains(&cpu_online_map);
+ unlock_cpu_hotplug();
+
+ return err;
+}
+
+static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
+{
+ int ret;
+
+ if (buf[0] != '0' && buf[0] != '1')
+ return -EINVAL;
+
+ if (smt)
+ sched_smt_power_savings = (buf[0] == '1');
+ else
+ sched_mc_power_savings = (buf[0] == '1');
+
+ ret = arch_reinit_sched_domains();
+
+ return ret ? ret : count;
+}
+
+int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
+{
+ int err = 0;
+#ifdef CONFIG_SCHED_SMT
+ if (smt_capable())
+ err = sysfs_create_file(&cls->kset.kobj,
+ &attr_sched_smt_power_savings.attr);
+#endif
+#ifdef CONFIG_SCHED_MC
+ if (!err && mc_capable())
+ err = sysfs_create_file(&cls->kset.kobj,
+ &attr_sched_mc_power_savings.attr);
+#endif
+ return err;
+}
+#endif
+
+#ifdef CONFIG_SCHED_MC
+static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
+{
+ return sprintf(page, "%u\n", sched_mc_power_savings);
+}
+static ssize_t sched_mc_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
+{
+ return sched_power_savings_store(buf, count, 0);
+}
+SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
+ sched_mc_power_savings_store);
+#endif
+
+#ifdef CONFIG_SCHED_SMT
+static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
+{
+ return sprintf(page, "%u\n", sched_smt_power_savings);
+}
+static ssize_t sched_smt_power_savings_store(struct sys_device *dev, const char *buf, size_t count)
+{
+ return sched_power_savings_store(buf, count, 1);
}
+SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
+ sched_smt_power_savings_store);
+#endif
+
#ifdef CONFIG_HOTPLUG_CPU
/*
@@ -6143,7 +6650,6 @@ void __init sched_init(void)
rq->push_cpu = 0;
rq->migration_thread = NULL;
INIT_LIST_HEAD(&rq->migration_queue);
- rq->cpu = i;
#endif
atomic_set(&rq->nr_iowait, 0);
@@ -6158,6 +6664,7 @@ void __init sched_init(void)
}
}
+ set_load_weight(&init_task);
/*
* The boot idle thread does lazy MMU switching as well:
*/
@@ -6204,11 +6711,12 @@ void normalize_rt_tasks(void)
runqueue_t *rq;
read_lock_irq(&tasklist_lock);
- for_each_process (p) {
+ for_each_process(p) {
if (!rt_task(p))
continue;
- rq = task_rq_lock(p, &flags);
+ spin_lock_irqsave(&p->pi_lock, flags);
+ rq = __task_rq_lock(p);
array = p->array;
if (array)
@@ -6219,7 +6727,8 @@ void normalize_rt_tasks(void)
resched_task(rq->curr);
}
- task_rq_unlock(rq, &flags);
+ __task_rq_unlock(rq);
+ spin_unlock_irqrestore(&p->pi_lock, flags);
}
read_unlock_irq(&tasklist_lock);
}
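
The rt_mutex_setprio() helper added to kernel/sched.c above is the scheduler-side hook the rt-mutex code uses to raise and later restore a lock owner's effective priority without touching ->normal_prio. The fragment below is a hedged illustration of how a priority-inheritance boost might invoke it; the helper name and the surrounding logic are assumptions for illustration, not the actual kernel/rtmutex.c code.

	/* Illustrative sketch only -- not the real rt-mutex boosting code.
	 * Lower numeric prio means higher priority in the kernel, so the
	 * owner is boosted when a waiter outranks it. */
	static void example_pi_boost(task_t *owner, int top_waiter_prio)
	{
		if (top_waiter_prio < owner->prio)
			rt_mutex_setprio(owner, top_waiter_prio);
		/* on release the owner would be restored, e.g.
		 * rt_mutex_setprio(owner, owner->normal_prio); */
	}
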
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 9e2f1c6e73d7b3..8f03e3b89b5540 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -446,7 +446,7 @@ static void takeover_tasklets(unsigned int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -486,7 +486,7 @@ static int cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __devinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
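
This hunk, and several of the following ones (softlockup, timer, workqueue, page-writeback, page_alloc, slab, vmscan), apply the same pattern: CPU-hotplug notifier callbacks and their notifier_block structures gain __devinit/__cpuinit and __devinitdata/__cpuinitdata annotations so they can be discarded when CPU hotplug support is not built in. A minimal sketch of the annotated pattern, with illustrative names:

	#include <linux/cpu.h>
	#include <linux/notifier.h>

	static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
						  unsigned long action, void *hcpu)
	{
		long cpu = (long)hcpu;

		switch (action) {
		case CPU_ONLINE:
			printk(KERN_INFO "example: cpu %ld is now online\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block __cpuinitdata example_cpu_nfb = {
		.notifier_call = example_cpu_callback,
	};

	/* typically registered from an __init function:
	 *	register_cpu_notifier(&example_cpu_nfb);
	 */
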
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index b5c3b94e01ce74..6b76caa2298188 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -104,7 +104,7 @@ static int watchdog(void * __bind_cpu)
/*
* Create/destroy watchdog threads as CPUs come and go:
*/
-static int
+static int __devinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
int hotcpu = (unsigned long)hcpu;
@@ -142,7 +142,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
return NOTIFY_OK;
}
-static struct notifier_block cpu_nfb = {
+static struct notifier_block __devinitdata cpu_nfb = {
.notifier_call = cpu_callback
};
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f1a4eb1a655e31..93a2c53986488f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -133,6 +133,10 @@ extern int acct_parm[];
extern int no_unaligned_warning;
#endif
+#ifdef CONFIG_RT_MUTEXES
+extern int max_lock_depth;
+#endif
+
static int parse_table(int __user *, int, void __user *, size_t __user *, void __user *, size_t,
ctl_table *, void **);
static int proc_doutsstring(ctl_table *table, int write, struct file *filp,
@@ -688,6 +692,17 @@ static ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
+#ifdef CONFIG_RT_MUTEXES
+ {
+ .ctl_name = KERN_MAX_LOCK_DEPTH,
+ .procname = "max_lock_depth",
+ .data = &max_lock_depth,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+#endif
+
{ .ctl_name = 0 }
};
@@ -928,6 +943,18 @@ static ctl_table vm_table[] = {
.strategy = &sysctl_jiffies,
},
#endif
+#ifdef CONFIG_X86_32
+ {
+ .ctl_name = VM_VDSO_ENABLED,
+ .procname = "vdso_enabled",
+ .data = &vdso_enabled,
+ .maxlen = sizeof(vdso_enabled),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ },
+#endif
{ .ctl_name = 0 }
};
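
Since the new max_lock_depth entry lives in kern_table, it becomes visible as /proc/sys/kernel/max_lock_depth when CONFIG_RT_MUTEXES is set. A small, purely illustrative userspace sketch that reads it back:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/max_lock_depth", "r");
		int depth;

		if (f && fscanf(f, "%d", &depth) == 1)
			printf("max_lock_depth = %d\n", depth);
		if (f)
			fclose(f);
		return 0;
	}
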
diff --git a/kernel/timer.c b/kernel/timer.c
index 5bb6b7976eecf6..5a8960253063d0 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1652,7 +1652,7 @@ static void __devinit migrate_timers(int cpu)
}
#endif /* CONFIG_HOTPLUG_CPU */
-static int timer_cpu_notify(struct notifier_block *self,
+static int __devinit timer_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1672,7 +1672,7 @@ static int timer_cpu_notify(struct notifier_block *self,
return NOTIFY_OK;
}
-static struct notifier_block timers_nb = {
+static struct notifier_block __devinitdata timers_nb = {
.notifier_call = timer_cpu_notify,
};
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 565cf7a1febda9..59f0b42bd89e0e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -559,7 +559,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
}
/* We're holding the cpucontrol mutex here */
-static int workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/lib/Kconfig b/lib/Kconfig
index 3de93357f5ab1e..f6299342b882d8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -86,4 +86,10 @@ config TEXTSEARCH_BM
config TEXTSEARCH_FSM
tristate
+#
+# plist support is select#ed if needed
+#
+config PLIST
+ boolean
+
endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8bab0102ac739d..5330911ebd303e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -107,6 +107,24 @@ config DEBUG_MUTEXES
This allows mutex semantics violations and mutex related deadlocks
(lockups) to be detected and reported automatically.
+config DEBUG_RT_MUTEXES
+ bool "RT Mutex debugging, deadlock detection"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This allows rt mutex semantics violations and rt mutex related
+ deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_PI_LIST
+ bool
+ default y
+ depends on DEBUG_RT_MUTEXES
+
+config RT_MUTEX_TESTER
+ bool "Built-in scriptable tester for rt-mutexes"
+ depends on DEBUG_KERNEL && RT_MUTEXES
+ help
+ This option enables an rt-mutex tester.
+
config DEBUG_SPINLOCK
bool "Spinlock debugging"
depends on DEBUG_KERNEL
diff --git a/lib/Makefile b/lib/Makefile
index 79358ad1f11353..10c13c9d7824d2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -25,6 +25,7 @@ lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
diff --git a/lib/plist.c b/lib/plist.c
new file mode 100644
index 00000000000000..3074a02272f3c4
--- /dev/null
+++ b/lib/plist.c
@@ -0,0 +1,118 @@
+/*
+ * lib/plist.c
+ *
+ * Descending-priority-sorted double-linked list
+ *
+ * (C) 2002-2003 Intel Corp
+ * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>.
+ *
+ * 2001-2005 (c) MontaVista Software, Inc.
+ * Daniel Walker <dwalker@mvista.com>
+ *
+ * (C) 2005 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Simplifications of the original code by
+ * Oleg Nesterov <oleg@tv-sign.ru>
+ *
+ * Licensed under the FSF's GNU Public License v2 or later.
+ *
+ * Based on simple lists (include/linux/list.h).
+ *
+ * This file contains the add / del functions which are considered to
+ * be too large to inline. See include/linux/plist.h for further
+ * information.
+ */
+
+#include <linux/plist.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_DEBUG_PI_LIST
+
+static void plist_check_prev_next(struct list_head *t, struct list_head *p,
+ struct list_head *n)
+{
+ if (n->prev != p || p->next != n) {
+ printk("top: %p, n: %p, p: %p\n", t, t->next, t->prev);
+ printk("prev: %p, n: %p, p: %p\n", p, p->next, p->prev);
+ printk("next: %p, n: %p, p: %p\n", n, n->next, n->prev);
+ WARN_ON(1);
+ }
+}
+
+static void plist_check_list(struct list_head *top)
+{
+ struct list_head *prev = top, *next = top->next;
+
+ plist_check_prev_next(top, prev, next);
+ while (next != top) {
+ prev = next;
+ next = prev->next;
+ plist_check_prev_next(top, prev, next);
+ }
+}
+
+static void plist_check_head(struct plist_head *head)
+{
+ WARN_ON(!head->lock);
+ if (head->lock)
+ WARN_ON_SMP(!spin_is_locked(head->lock));
+ plist_check_list(&head->prio_list);
+ plist_check_list(&head->node_list);
+}
+
+#else
+# define plist_check_head(h) do { } while (0)
+#endif
+
+/**
+ * plist_add - add @node to @head
+ *
+ * @node: &struct plist_node pointer
+ * @head: &struct plist_head pointer
+ */
+void plist_add(struct plist_node *node, struct plist_head *head)
+{
+ struct plist_node *iter;
+
+ plist_check_head(head);
+ WARN_ON(!plist_node_empty(node));
+
+ list_for_each_entry(iter, &head->prio_list, plist.prio_list) {
+ if (node->prio < iter->prio)
+ goto lt_prio;
+ else if (node->prio == iter->prio) {
+ iter = list_entry(iter->plist.prio_list.next,
+ struct plist_node, plist.prio_list);
+ goto eq_prio;
+ }
+ }
+
+lt_prio:
+ list_add_tail(&node->plist.prio_list, &iter->plist.prio_list);
+eq_prio:
+ list_add_tail(&node->plist.node_list, &iter->plist.node_list);
+
+ plist_check_head(head);
+}
+
+/**
+ * plist_del - Remove a @node from plist.
+ *
+ * @node: &struct plist_node pointer - entry to be removed
+ * @head: &struct plist_head pointer - list head
+ */
+void plist_del(struct plist_node *node, struct plist_head *head)
+{
+ plist_check_head(head);
+
+ if (!list_empty(&node->plist.prio_list)) {
+ struct plist_node *next = plist_first(&node->plist);
+
+ list_move_tail(&next->plist.prio_list, &node->plist.prio_list);
+ list_del_init(&node->plist.prio_list);
+ }
+
+ list_del_init(&node->plist.node_list);
+
+ plist_check_head(head);
+}
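
For readers unfamiliar with the new library, a hedged usage sketch of the plist primitives follows; plist_head_init() and plist_node_init() are assumed to have the initializer signatures declared in include/linux/plist.h, and the lock passed at init time is the one plist_check_head() expects to be held:

	#include <linux/plist.h>
	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(example_lock);
	static struct plist_head example_head;
	static struct plist_node example_node;

	static void example_plist_usage(void)
	{
		plist_head_init(&example_head, &example_lock);
		plist_node_init(&example_node, 10);	/* priority 10 */

		spin_lock(&example_lock);
		plist_add(&example_node, &example_head);
		/* plist_first(&example_head) now yields the highest-priority node */
		plist_del(&example_node, &example_head);
		spin_unlock(&example_lock);
	}
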
diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c
index 02a16eacb72dab..d84560c076d83c 100644
--- a/lib/zlib_inflate/inffast.c
+++ b/lib/zlib_inflate/inffast.c
@@ -63,10 +63,10 @@
bytes, which is the maximum length that can be coded. inflate_fast()
requires strm->avail_out >= 258 for each loop to avoid checking for
output space.
+
+ - @start: inflate()'s starting value for strm->avail_out
*/
-void inflate_fast(strm, start)
-z_streamp strm;
-unsigned start; /* inflate()'s starting value for strm->avail_out */
+void inflate_fast(z_streamp strm, unsigned start)
{
struct inflate_state *state;
unsigned char *in; /* local strm->next_in */
diff --git a/lib/zlib_inflate/inftrees.c b/lib/zlib_inflate/inftrees.c
index da665fbb16aaf4..3fe6ce5b53e519 100644
--- a/lib/zlib_inflate/inftrees.c
+++ b/lib/zlib_inflate/inftrees.c
@@ -20,13 +20,8 @@
table index bits. It will differ if the request is greater than the
longest code or if it is less than the shortest code.
*/
-int zlib_inflate_table(type, lens, codes, table, bits, work)
-codetype type;
-unsigned short *lens;
-unsigned codes;
-code **table;
-unsigned *bits;
-unsigned short *work;
+int zlib_inflate_table(codetype type, unsigned short *lens, unsigned codes,
+ code **table, unsigned *bits, unsigned short *work)
{
unsigned len; /* a code's length in bits */
unsigned sym; /* index of code symbols */
diff --git a/mm/Kconfig b/mm/Kconfig
index 66e65ab3942651..e76c023eb0bb46 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -116,6 +116,7 @@ config SPARSEMEM_EXTREME
config MEMORY_HOTPLUG
bool "Allow for memory hot-add"
depends on SPARSEMEM && HOTPLUG && !SOFTWARE_SUSPEND
+ depends on (IA64 || X86 || PPC64)
comment "Memory hotplug is currently incompatible with Software Suspend"
depends on SPARSEMEM && HOTPLUG && SOFTWARE_SUSPEND
diff --git a/mm/filemap.c b/mm/filemap.c
index 9c7334bafda8da..d504d6e98886f2 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2095,14 +2095,21 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
do {
unsigned long index;
unsigned long offset;
- unsigned long maxlen;
size_t copied;
offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
index = pos >> PAGE_CACHE_SHIFT;
bytes = PAGE_CACHE_SIZE - offset;
- if (bytes > count)
- bytes = count;
+
+ /* Limit the size of the copy to the caller's write size */
+ bytes = min(bytes, count);
+
+ /*
+ * Limit the size of the copy to that of the current segment,
+ * because fault_in_pages_readable() doesn't know how to walk
+ * segments.
+ */
+ bytes = min(bytes, cur_iov->iov_len - iov_base);
/*
* Bring in the user page that we will copy from _first_.
@@ -2110,10 +2117,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
* same page as we're writing to, without it being marked
* up-to-date.
*/
- maxlen = cur_iov->iov_len - iov_base;
- if (maxlen > bytes)
- maxlen = bytes;
- fault_in_pages_readable(buf, maxlen);
+ fault_in_pages_readable(buf, bytes);
page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec);
if (!page) {
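
The filemap change above clamps each iteration's copy to the page, to the caller's remaining byte count, and to the current iovec segment, so fault_in_pages_readable() never has to cross a segment boundary. A standalone sketch of the same clamping, with invented parameter names:

	static size_t example_clamp_copy(size_t page_room, size_t count,
					 size_t seg_len, size_t seg_off)
	{
		size_t bytes = page_room;

		if (bytes > count)
			bytes = count;			/* caller's write size */
		if (bytes > seg_len - seg_off)
			bytes = seg_len - seg_off;	/* current segment */
		return bytes;
	}
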
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 841a077d5aeb0d..ea4038838b0a2b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -21,6 +21,7 @@
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
+#include <linux/ioport.h>
#include <asm/tlbflush.h>
@@ -126,6 +127,9 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
unsigned long i;
unsigned long flags;
unsigned long onlined_pages = 0;
+ struct resource res;
+ u64 section_end;
+ unsigned long start_pfn;
struct zone *zone;
int need_zonelists_rebuild = 0;
@@ -148,10 +152,27 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
if (!populated_zone(zone))
need_zonelists_rebuild = 1;
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pfn_to_page(pfn + i);
- online_page(page);
- onlined_pages++;
+ res.start = (u64)pfn << PAGE_SHIFT;
+ res.end = res.start + ((u64)nr_pages << PAGE_SHIFT) - 1;
+ res.flags = IORESOURCE_MEM; /* we just need system ram */
+ section_end = res.end;
+
+ while (find_next_system_ram(&res) >= 0) {
+ start_pfn = (unsigned long)(res.start >> PAGE_SHIFT);
+ nr_pages = (unsigned long)
+ ((res.end + 1 - res.start) >> PAGE_SHIFT);
+
+ if (PageReserved(pfn_to_page(start_pfn))) {
+ /* this region's page is not onlined now */
+ for (i = 0; i < nr_pages; i++) {
+ struct page *page = pfn_to_page(start_pfn + i);
+ online_page(page);
+ onlined_pages++;
+ }
+ }
+
+ res.start = res.end + 1;
+ res.end = section_end;
}
zone->present_pages += onlined_pages;
zone->zone_pgdat->node_present_pages += onlined_pages;
@@ -163,3 +184,100 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
vm_total_pages = nr_free_pagecache_pages();
return 0;
}
+
+static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+{
+ struct pglist_data *pgdat;
+ unsigned long zones_size[MAX_NR_ZONES] = {0};
+ unsigned long zholes_size[MAX_NR_ZONES] = {0};
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+
+ pgdat = arch_alloc_nodedata(nid);
+ if (!pgdat)
+ return NULL;
+
+ arch_refresh_nodedata(nid, pgdat);
+
+ /* we can use NODE_DATA(nid) from here */
+
+ /* init node's zones as empty zones, we don't have any present pages.*/
+ free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);
+
+ return pgdat;
+}
+
+static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
+{
+ arch_refresh_nodedata(nid, NULL);
+ arch_free_nodedata(pgdat);
+ return;
+}
+
+/* add this memory to iomem resource */
+static void register_memory_resource(u64 start, u64 size)
+{
+ struct resource *res;
+
+ res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+ BUG_ON(!res);
+
+ res->name = "System RAM";
+ res->start = start;
+ res->end = start + size - 1;
+ res->flags = IORESOURCE_MEM;
+ if (request_resource(&iomem_resource, res) < 0) {
+ printk("System RAM resource %llx - %llx cannot be added\n",
+ (unsigned long long)res->start, (unsigned long long)res->end);
+ kfree(res);
+ }
+}
+
+
+
+int add_memory(int nid, u64 start, u64 size)
+{
+ pg_data_t *pgdat = NULL;
+ int new_pgdat = 0;
+ int ret;
+
+ if (!node_online(nid)) {
+ pgdat = hotadd_new_pgdat(nid, start);
+ if (!pgdat)
+ return -ENOMEM;
+ new_pgdat = 1;
+ ret = kswapd_run(nid);
+ if (ret)
+ goto error;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, start, size);
+
+ if (ret < 0)
+ goto error;
+
+ /* we online node here. we can't roll back from here. */
+ node_set_online(nid);
+
+ if (new_pgdat) {
+ ret = register_one_node(nid);
+ /*
+ * If the sysfs file for the new node cannot be created,
+ * CPUs on that node cannot be hot-added. There is no
+ * rollback path yet, so catch this with BUG_ON() for now.
+ */
+ BUG_ON(ret);
+ }
+
+ /* register this memory as resource */
+ register_memory_resource(start, size);
+
+ return ret;
+error:
+ /* rollback pgdat allocation and others */
+ if (new_pgdat)
+ rollback_node_hotadd(nid, pgdat);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(add_memory);
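
A hedged sketch of a caller of the new add_memory() interface follows; the node id and physical range are made-up values, and a real user (such as the ACPI memory hotplug driver touched elsewhere in this patch) would derive them from firmware tables:

	static int example_hot_add(void)
	{
		int nid   = 1;			/* assumed target node        */
		u64 start = 0x100000000ULL;	/* assumed physical base      */
		u64 size  = 128ULL << 20;	/* assumed 128 MB region size */

		return add_memory(nid, start, size);	/* 0 on success */
	}
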
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 8ccf6f1b1473c1..4ec7026c7bab14 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -516,14 +516,14 @@ static void set_ratelimit(void)
ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
-static int
+static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
set_ratelimit();
return 0;
}
-static struct notifier_block ratelimit_nb = {
+static struct notifier_block __cpuinitdata ratelimit_nb = {
.notifier_call = ratelimit_handler,
.next = NULL,
};
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f86191bb63295..084a2de7e52a8c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -446,8 +446,8 @@ static void __free_pages_ok(struct page *page, unsigned int order)
arch_free_page(page, order);
if (!PageHighMem(page))
- mutex_debug_check_no_locks_freed(page_address(page),
- PAGE_SIZE<<order);
+ debug_check_no_locks_freed(page_address(page),
+ PAGE_SIZE<<order);
for (i = 0 ; i < (1 << order) ; ++i)
reserved += free_pages_check(page + i);
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -2031,7 +2031,7 @@ static int pageset_cpuup_callback(struct notifier_block *nfb,
return ret;
}
-static struct notifier_block pageset_notifier =
+static struct notifier_block __cpuinitdata pageset_notifier =
{ &pageset_cpuup_callback, NULL, 0 };
void __init setup_per_cpu_pageset(void)
diff --git a/mm/slab.c b/mm/slab.c
index 98ac20bc0de9a3..233e39d14caf5a 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -89,6 +89,7 @@
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
@@ -106,6 +107,7 @@
#include <linux/nodemask.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
+#include <linux/rtmutex.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -492,17 +494,6 @@ struct kmem_cache {
#endif
#if DEBUG
-/*
- * Magic nums for obj red zoning.
- * Placed in the first word before and the first word after an obj.
- */
-#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
-#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
-
-/* ...and for poisoning */
-#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
-#define POISON_FREE 0x6b /* for use-after-free poisoning */
-#define POISON_END 0xa5 /* end-byte of poisoning */
/*
* memory layout of objects:
@@ -1083,7 +1074,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
#endif
-static int cpuup_callback(struct notifier_block *nfb,
+static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
@@ -1265,7 +1256,9 @@ bad:
return NOTIFY_BAD;
}
-static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
+static struct notifier_block __cpuinitdata cpucache_notifier = {
+ &cpuup_callback, NULL, 0
+};
/*
* swap the static kmem_list3 with kmalloced memory
@@ -3405,7 +3398,7 @@ void kfree(const void *objp)
local_irq_save(flags);
kfree_debugcheck(objp);
c = virt_to_cache(objp);
- mutex_debug_check_no_locks_freed(objp, obj_size(c));
+ debug_check_no_locks_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
diff --git a/mm/sparse.c b/mm/sparse.c
index e0a3fe48aa3745..c7a2b3a0e46b29 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -45,7 +45,7 @@ static struct mem_section *sparse_index_alloc(int nid)
static int sparse_index_init(unsigned long section_nr, int nid)
{
- static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(index_init_lock);
unsigned long root = SECTION_NR_TO_ROOT(section_nr);
struct mem_section *section;
int ret = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72babac71deaba..eeacb0d695c352 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -34,6 +34,7 @@
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
+#include <linux/kthread.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -1223,7 +1224,6 @@ static int kswapd(void *p)
};
cpumask_t cpumask;
- daemonize("kswapd%d", pgdat->node_id);
cpumask = node_to_cpumask(pgdat->node_id);
if (!cpus_empty(cpumask))
set_cpus_allowed(tsk, cpumask);
@@ -1450,7 +1450,7 @@ out:
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
pg_data_t *pgdat;
@@ -1468,20 +1468,35 @@ static int cpu_callback(struct notifier_block *nfb,
}
#endif /* CONFIG_HOTPLUG_CPU */
+/*
+ * This kswapd start function will be called by init and node-hot-add.
+ * On node hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
+ */
+int kswapd_run(int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int ret = 0;
+
+ if (pgdat->kswapd)
+ return 0;
+
+ pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+ if (IS_ERR(pgdat->kswapd)) {
+ /* failure at boot is fatal */
+ BUG_ON(system_state == SYSTEM_BOOTING);
+ printk("Failed to start kswapd on node %d\n",nid);
+ ret = -1;
+ }
+ return ret;
+}
+
static int __init kswapd_init(void)
{
- pg_data_t *pgdat;
+ int nid;
swap_setup();
- for_each_online_pgdat(pgdat) {
- pid_t pid;
-
- pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
- BUG_ON(pid < 0);
- read_lock(&tasklist_lock);
- pgdat->kswapd = find_task_by_pid(pid);
- read_unlock(&tasklist_lock);
- }
+ for_each_online_node(nid)
+ kswapd_run(nid);
hotcpu_notifier(cpu_callback, 0);
return 0;
}
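
kswapd_run() above replaces the old daemonize()-based startup with kthread_run(), which creates and wakes the thread in one step and returns ERR_PTR() on failure. A minimal sketch of that pattern, with illustrative names:

	#include <linux/kthread.h>
	#include <linux/err.h>

	static int example_threadfn(void *data)
	{
		/* loop until kthread_stop() is called ... */
		return 0;
	}

	static struct task_struct *example_start(int nid)
	{
		struct task_struct *t;

		t = kthread_run(example_threadfn, NULL, "example%d", nid);
		return IS_ERR(t) ? NULL : t;
	}
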
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8a777932786d7d..e728980160d224 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -349,7 +349,7 @@ static struct rt6_info *rt6_select(struct rt6_info **head, int oif,
(strict & RT6_SELECT_F_REACHABLE) &&
last && last != rt0) {
/* no entries matched; do round-robin */
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ static DEFINE_SPINLOCK(lock);
spin_lock(&lock);
*head = rt0->u.next;
rt0->u.next = last->u.next;
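
This hunk and the sunrpc/TIPC hunks that follow all make the same conversion: statically allocated locks drop the deprecated SPIN_LOCK_UNLOCKED / RW_LOCK_UNLOCKED initializers in favour of DEFINE_SPINLOCK()/DEFINE_RWLOCK(), while locks embedded in runtime-initialized structures are set up with spin_lock_init(). A minimal sketch of both forms, with illustrative names:

	#include <linux/spinlock.h>

	/* static locks: definition-time initializers */
	static DEFINE_SPINLOCK(example_lock);
	static DEFINE_RWLOCK(example_rwlock);

	/* locks embedded in objects that are set up at runtime */
	struct example_obj {
		spinlock_t lock;
	};

	static void example_obj_init(struct example_obj *obj)
	{
		spin_lock_init(&obj->lock);
	}
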
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index f43311221a72b9..2f312164d6d561 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,7 +70,7 @@
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
-spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(krb5_seq_lock);
u32
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 54128040a1245a..1bb75703f3848b 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -117,7 +117,7 @@ struct bclink {
static struct bcbearer *bcbearer = NULL;
static struct bclink *bclink = NULL;
static struct link *bcl = NULL;
-static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(bc_lock);
char tipc_bclink_name[] = "multicast-link";
@@ -796,7 +796,7 @@ int tipc_bclink_init(void)
memset(bclink, 0, sizeof(struct bclink));
INIT_LIST_HEAD(&bcl->waiting_ports);
bcl->next_out_no = 1;
- bclink->node.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&bclink->node.lock);
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 4fa24b5e8914a5..7ef17a449cfdb3 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -566,7 +566,7 @@ restart:
b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
bcast_scope, 2);
}
- b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&b_ptr->publ.lock);
write_unlock_bh(&tipc_net_lock);
info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
name, addr_string_fill(addr_string, bcast_scope), priority);
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 3ec502fac8c34b..285e1bc2d88085 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -63,7 +63,7 @@ struct manager {
static struct manager mng = { 0};
-static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(config_lock);
static const void *req_tlv_area; /* request message TLV area */
static int req_tlv_space; /* request message TLV area size */
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 26ef95d5fe3860..55130655e1edbe 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -41,7 +41,7 @@
#define MAX_STRING 512
static char print_string[MAX_STRING];
-static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(print_lock);
static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
struct print_buf *TIPC_CONS = &cons_buf;
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index 966f70a1b60800..ae6ddf00a1aaea 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -44,7 +44,7 @@ struct queue_item {
static kmem_cache_t *tipc_queue_item_cache;
static struct list_head signal_queue_head;
-static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(qitem_lock);
static int handler_enabled = 0;
static void process_signal_queue(unsigned long dummy);
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 38571306aba5ab..a6926ff07bcc3e 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -101,7 +101,7 @@ struct name_table {
static struct name_table table = { NULL } ;
static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
-rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(tipc_nametbl_lock);
static int hash(int x)
@@ -172,7 +172,7 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
}
memset(nseq, 0, sizeof(*nseq));
- nseq->lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&nseq->lock);
nseq->type = type;
nseq->sseqs = sseq;
dbg("tipc_nameseq_create(): nseq = %p, type %u, ssseqs %p, ff: %u\n",
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f7c8223ddf7dd0..e5a359ab493080 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -115,7 +115,7 @@
* - A local spin_lock protecting the queue of subscriber events.
*/
-rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
+DEFINE_RWLOCK(tipc_net_lock);
struct network tipc_net = { NULL };
struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index ce9678efa98a58..861322b935daf1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -77,7 +77,7 @@ struct node *tipc_node_create(u32 addr)
memset(n_ptr, 0, sizeof(*n_ptr));
n_ptr->addr = addr;
- n_ptr->lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&n_ptr->lock);
INIT_LIST_HEAD(&n_ptr->nsub);
n_ptr->owner = c_ptr;
tipc_cltr_attach_node(c_ptr, n_ptr);
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 47d97404e3ee06..3251c8d8e53c3b 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -57,8 +57,8 @@
static struct sk_buff *msg_queue_head = NULL;
static struct sk_buff *msg_queue_tail = NULL;
-spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
-static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
+DEFINE_SPINLOCK(tipc_port_list_lock);
+static DEFINE_SPINLOCK(queue_lock);
static LIST_HEAD(ports);
static void port_handle_node_down(unsigned long ref);
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index d2f0cce10e2046..596d3c8ff75006 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -63,7 +63,7 @@
struct ref_table tipc_ref_table = { NULL };
-static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
+static DEFINE_RWLOCK(ref_table_lock);
/**
* tipc_ref_table_init - create reference table for objects
@@ -87,7 +87,7 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
index_mask = sz - 1;
for (i = sz - 1; i >= 0; i--) {
table[i].object = NULL;
- table[i].lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&table[i].lock);
table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
}
tipc_ref_table.entries = table;
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index fc171875660c5e..e19b4bcd67ec2b 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -457,7 +457,7 @@ int tipc_subscr_start(void)
int res = -1;
memset(&topsrv, 0, sizeof (topsrv));
- topsrv.lock = SPIN_LOCK_UNLOCKED;
+ spin_lock_init(&topsrv.lock);
INIT_LIST_HEAD(&topsrv.subscriber_list);
spin_lock_bh(&topsrv.lock);
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 3f3f933976e9dc..1e3ae57c722872 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -67,7 +67,7 @@ struct tipc_user {
static struct tipc_user *users = NULL;
static u32 next_free_user = MAX_USERID + 1;
-static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(reg_lock);
/**
* reg_init - create TIPC user registry (but don't activate it)
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index ac5f275b0283f1..b0d067be739056 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -13,11 +13,6 @@ space := $(empty) $(empty)
depfile = $(subst $(comma),_,$(@D)/.$(@F).d)
###
-# basetarget equals the filename of the target with no extension.
-# So 'foo/bar.o' becomes 'bar'
-basetarget = $(basename $(notdir $@))
-
-###
# Escape single quote for use in echo statements
escsq = $(subst $(squote),'\$(squote)',$1)
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 3cb445cc7432fd..02a7eea5fdbcb7 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -117,7 +117,7 @@ $(real-objs-m:.o=.lst): quiet_modtag := [M]
$(obj-m) : quiet_modtag := [M]
# Default for not multi-part modules
-modname = $(basetarget)
+modname = $(*F)
$(multi-objs-m) : modname = $(modname-multi)
$(multi-objs-m:.o=.i) : modname = $(modname-multi)
diff --git a/scripts/Makefile.host b/scripts/Makefile.host
index 18ecd4d5df7fe4..2b066d12af2c30 100644
--- a/scripts/Makefile.host
+++ b/scripts/Makefile.host
@@ -80,10 +80,8 @@ obj-dirs += $(host-objdirs)
#####
# Handle options to gcc. Support building with separate output directory
-_hostc_flags = $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) \
- $(HOSTCFLAGS_$(basetarget).o)
-_hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) \
- $(HOSTCXXFLAGS_$(basetarget).o)
+_hostc_flags = $(HOSTCFLAGS) $(HOST_EXTRACFLAGS) $(HOSTCFLAGS_$(*F).o)
+_hostcxx_flags = $(HOSTCXXFLAGS) $(HOST_EXTRACXXFLAGS) $(HOSTCXXFLAGS_$(*F).o)
ifeq ($(KBUILD_SRC),)
__hostc_flags = $(_hostc_flags)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index fc498fee68edef..2cb4935e85d1b0 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -82,12 +82,12 @@ obj-dirs := $(addprefix $(obj)/,$(obj-dirs))
# than one module. In that case KBUILD_MODNAME will be set to foo_bar,
# where foo and bar are the name of the modules.
name-fix = $(subst $(comma),_,$(subst -,_,$1))
-basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
+basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(*F)))"
modname_flags = $(if $(filter 1,$(words $(modname))),\
-D"KBUILD_MODNAME=KBUILD_STR($(call name-fix,$(modname)))")
-_c_flags = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(basetarget).o)
-_a_flags = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
+_c_flags = $(CFLAGS) $(EXTRA_CFLAGS) $(CFLAGS_$(*F).o)
+_a_flags = $(AFLAGS) $(EXTRA_AFLAGS) $(AFLAGS_$(*F).o)
_cpp_flags = $(CPPFLAGS) $(EXTRA_CPPFLAGS) $(CPPFLAGS_$(@F))
# If building the kernel in a separate objtree expand all occurrences
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index e83613e0e82726..576cce5e387f44 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -72,7 +72,7 @@ $(modules:.ko=.mod.c): __modpost ;
# Step 5), compile all *.mod.c files
# modname is set to make c_flags define KBUILD_MODNAME
-modname = $(basetarget)
+modname = $(*F)
quiet_cmd_cc_o_c = CC $@
cmd_cc_o_c = $(CC) $(c_flags) $(CFLAGS_MODULE) \
diff --git a/scripts/rt-tester/check-all.sh b/scripts/rt-tester/check-all.sh
new file mode 100644
index 00000000000000..43098afe743116
--- /dev/null
+++ b/scripts/rt-tester/check-all.sh
@@ -0,0 +1,22 @@
+
+
+function testit ()
+{
+ printf "%-30s: " $1
+ ./rt-tester.py $1 | grep Pass
+}
+
+testit t2-l1-2rt-sameprio.tst
+testit t2-l1-pi.tst
+testit t2-l1-signal.tst
+#testit t2-l2-2rt-deadlock.tst
+testit t3-l1-pi-1rt.tst
+testit t3-l1-pi-2rt.tst
+testit t3-l1-pi-3rt.tst
+testit t3-l1-pi-signal.tst
+testit t3-l1-pi-steal.tst
+testit t3-l2-pi.tst
+testit t4-l2-pi-deboost.tst
+testit t5-l4-pi-boost-deboost.tst
+testit t5-l4-pi-boost-deboost-setsched.tst
+
diff --git a/scripts/rt-tester/rt-tester.py b/scripts/rt-tester/rt-tester.py
new file mode 100644
index 00000000000000..4c79660793cf31
--- /dev/null
+++ b/scripts/rt-tester/rt-tester.py
@@ -0,0 +1,222 @@
+#!/usr/bin/env python
+#
+# rt-mutex tester
+#
+# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 2 as
+# published by the Free Software Foundation.
+#
+import os
+import sys
+import getopt
+import shutil
+import string
+
+# Globals
+quiet = 0
+test = 0
+comments = 0
+
+sysfsprefix = "/sys/devices/system/rttest/rttest"
+statusfile = "/status"
+commandfile = "/command"
+
+# Command opcodes
+cmd_opcodes = {
+ "schedother" : "1",
+ "schedfifo" : "2",
+ "lock" : "3",
+ "locknowait" : "4",
+ "lockint" : "5",
+ "lockintnowait" : "6",
+ "lockcont" : "7",
+ "unlock" : "8",
+ "lockbkl" : "9",
+ "unlockbkl" : "10",
+ "signal" : "11",
+ "resetevent" : "98",
+ "reset" : "99",
+ }
+
+test_opcodes = {
+ "prioeq" : ["P" , "eq" , None],
+ "priolt" : ["P" , "lt" , None],
+ "priogt" : ["P" , "gt" , None],
+ "nprioeq" : ["N" , "eq" , None],
+ "npriolt" : ["N" , "lt" , None],
+ "npriogt" : ["N" , "gt" , None],
+ "unlocked" : ["M" , "eq" , 0],
+ "trylock" : ["M" , "eq" , 1],
+ "blocked" : ["M" , "eq" , 2],
+ "blockedwake" : ["M" , "eq" , 3],
+ "locked" : ["M" , "eq" , 4],
+ "opcodeeq" : ["O" , "eq" , None],
+ "opcodelt" : ["O" , "lt" , None],
+ "opcodegt" : ["O" , "gt" , None],
+ "eventeq" : ["E" , "eq" , None],
+ "eventlt" : ["E" , "lt" , None],
+ "eventgt" : ["E" , "gt" , None],
+ }
+
+# Print usage information
+def usage():
+ print "rt-tester.py <-c -h -q -t> <testfile>"
+ print " -c display comments after first command"
+ print " -h help"
+ print " -q quiet mode"
+ print " -t test mode (syntax check)"
+ print " testfile: read test specification from testfile"
+ print " otherwise from stdin"
+ return
+
+# Print progress when not in quiet mode
+def progress(str):
+ if not quiet:
+ print str
+
+# Analyse a status value
+def analyse(val, top, arg):
+
+ intval = int(val)
+
+ if top[0] == "M":
+ intval = intval / (10 ** int(arg))
+ intval = intval % 10
+ argval = top[2]
+ elif top[0] == "O":
+ argval = int(cmd_opcodes.get(arg, arg))
+ else:
+ argval = int(arg)
+
+ # progress("%d %s %d" %(intval, top[1], argval))
+
+ if top[1] == "eq" and intval == argval:
+ return 1
+ if top[1] == "lt" and intval < argval:
+ return 1
+ if top[1] == "gt" and intval > argval:
+ return 1
+ return 0
+
+# Parse the commandline
+try:
+ (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
+except getopt.GetoptError, ex:
+ usage()
+ sys.exit(1)
+
+# Parse commandline options
+for option, value in options:
+ if option == "-c":
+ comments = 1
+ elif option == "-q":
+ quiet = 1
+ elif option == "-t":
+ test = 1
+ elif option == '-h':
+ usage()
+ sys.exit(0)
+
+# Select the input source
+if arguments:
+ try:
+ fd = open(arguments[0])
+ except Exception,ex:
+ sys.stderr.write("File not found %s\n" %(arguments[0]))
+ sys.exit(1)
+else:
+ fd = sys.stdin
+
+linenr = 0
+
+# Read the test patterns
+while 1:
+
+ linenr = linenr + 1
+ line = fd.readline()
+ if not len(line):
+ break
+
+ line = line.strip()
+ parts = line.split(":")
+
+ if not parts or len(parts) < 1:
+ continue
+
+ if len(parts[0]) == 0:
+ continue
+
+ if parts[0].startswith("#"):
+ if comments > 1:
+ progress(line)
+ continue
+
+ if comments == 1:
+ comments = 2
+
+ progress(line)
+
+ cmd = parts[0].strip().lower()
+ opc = parts[1].strip().lower()
+ tid = parts[2].strip()
+ dat = parts[3].strip()
+
+ try:
+ # Test or wait for a status value
+ if cmd == "t" or cmd == "w":
+ testop = test_opcodes[opc]
+
+ fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
+ if test:
+ print fname
+ continue
+
+ while 1:
+ query = 1
+ fsta = open(fname, 'r')
+ status = fsta.readline().strip()
+ fsta.close()
+ stat = status.split(",")
+ for s in stat:
+ s = s.strip()
+ if s.startswith(testop[0]):
+ # Separate the status value
+ val = s[2:].strip()
+ query = analyse(val, testop, dat)
+ break
+ if query or cmd == "t":
+ break
+
+ progress(" " + status)
+
+ if not query:
+ sys.stderr.write("Test failed in line %d\n" %(linenr))
+ sys.exit(1)
+
+ # Issue a command to the tester
+ elif cmd == "c":
+ cmdnr = cmd_opcodes[opc]
+ # Build command string and sys filename
+ cmdstr = "%s:%s" %(cmdnr, dat)
+ fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
+ if test:
+ print fname
+ continue
+ fcmd = open(fname, 'w')
+ fcmd.write(cmdstr)
+ fcmd.close()
+
+ except Exception,ex:
+ sys.stderr.write(str(ex))
+ sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
+ if not test:
+ fd.close()
+ sys.exit(1)
+
+# Normal exit pass
+print "Pass"
+sys.exit(0)
+
+
diff --git a/scripts/rt-tester/t2-l1-2rt-sameprio.tst b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
new file mode 100644
index 00000000000000..8821f27cc8be6a
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-2rt-sameprio.tst
@@ -0,0 +1,99 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl don't care
+# blockedbkl don't care
+# unlockedbkl don't care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 1 lock
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedfifo: 0: 80
+C: schedfifo: 1: 80
+
+# T0 lock L0
+C: locknowait: 0: 0
+C: locknowait: 1: 0
+W: locked: 0: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+
+# T0 unlock L0
+C: unlock: 0: 0
+W: locked: 1: 0
+
+# Verify T0
+W: unlocked: 0: 0
+T: prioeq: 0: 80
+
+# Unlock
+C: unlock: 1: 0
+W: unlocked: 1: 0
+
+# T1,T0 lock L0
+C: locknowait: 1: 0
+C: locknowait: 0: 0
+W: locked: 1: 0
+W: blocked: 0: 0
+T: prioeq: 1: 80
+
+# T1 unlock L0
+C: unlock: 1: 0
+W: locked: 0: 0
+
+# Verify T1
+W: unlocked: 1: 0
+T: prioeq: 1: 80
+
+# Unlock and exit
+C: unlock: 0: 0
+W: unlocked: 0: 0
+
diff --git a/scripts/rt-tester/t2-l1-pi.tst b/scripts/rt-tester/t2-l1-pi.tst
new file mode 100644
index 00000000000000..cde1f189a02bf2
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-pi.tst
@@ -0,0 +1,82 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 1 lock with priority inversion
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 80
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+
+# T0 unlock L0
+C: unlock: 0: 0
+W: locked: 1: 0
+
+# Verify T1
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# Unlock and exit
+C: unlock: 1: 0
+W: unlocked: 1: 0
+
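The prioeq/priolt checks in the scenario above follow from the basic priority-inheritance rule; a minimal model of that rule, for illustration only and not the kernel implementation, is sketched below. In the rt-tester convention a larger number means a higher priority, and a SCHED_OTHER thread reports priority 0.

    def boosted_prio(own_prio, waiter_prios):
        # a lock owner runs at the highest priority among itself and all waiters
        return max([own_prio] + list(waiter_prios))

    # While the prio-80 T1 blocks on L0, its SCHED_OTHER owner T0 runs at 80:
    assert boosted_prio(0, [80]) == 80
    # After T0 unlocks L0 no waiter remains, so "priolt: 0: 1" holds again:
    assert boosted_prio(0, []) < 1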
diff --git a/scripts/rt-tester/t2-l1-signal.tst b/scripts/rt-tester/t2-l1-signal.tst
new file mode 100644
index 00000000000000..3ab0bfc49950ee
--- /dev/null
+++ b/scripts/rt-tester/t2-l1-signal.tst
@@ -0,0 +1,77 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 1 lock with priority inversion
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+
+# Interrupt T1
+C: signal: 1: 0
+W: unlocked: 1: 0
+T: opcodeeq: 1: -4
+
+# Unlock and exit
+C: unlock: 0: 0
+W: unlocked: 0: 0
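A short editorial note on the magic number above: the "-4" expected by the opcodeeq check is read here as the return value of the interrupted lock attempt, i.e. -EINTR. A one-line sanity check of that reading:

    import errno

    # a lockintnowait aborted by a signal is expected to report -EINTR (-4)
    assert errno.EINTR == 4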
diff --git a/scripts/rt-tester/t2-l2-2rt-deadlock.tst b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
new file mode 100644
index 00000000000000..f4b5d5d6215fd9
--- /dev/null
+++ b/scripts/rt-tester/t2-l2-2rt-deadlock.tst
@@ -0,0 +1,89 @@
+#
+# RT-Mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal 0
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 2 threads 2 lock
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedfifo: 0: 80
+C: schedfifo: 1: 80
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T0 lock L1
+C: lockintnowait: 0: 1
+W: blocked: 0: 1
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+
+# Make deadlock go away
+C: signal: 1: 0
+W: unlocked: 1: 0
+C: signal: 0: 0
+W: unlocked: 0: 1
+
+# Unlock and exit
+C: unlock: 0: 0
+W: unlocked: 0: 0
+C: unlock: 1: 1
+W: unlocked: 1: 1
+
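Sketch for illustration, not part of this patch: the scenario above builds the classic ABBA deadlock, where T0 holds L0 and blocks on L1 while T1 holds L1 and blocks on L0. The walk below is a simplified wait-for-graph check (an editorial model, not the kernel rt_mutex chain walk); the test itself uses the interruptible lock variants so that the signals can break the cycle.

    def has_deadlock(blocked_on, owner_of, task):
        # follow task -> lock it blocks on -> owner of that lock -> ...
        seen = set()
        while task in blocked_on:
            if task in seen:
                return True
            seen.add(task)
            task = owner_of[blocked_on[task]]
        return False

    blocked_on = {"T0": "L1", "T1": "L0"}
    owner_of = {"L0": "T0", "L1": "T1"}
    assert has_deadlock(blocked_on, owner_of, "T0")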
diff --git a/scripts/rt-tester/t3-l1-pi-1rt.tst b/scripts/rt-tester/t3-l1-pi-1rt.tst
new file mode 100644
index 00000000000000..63440ca2cce9e8
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-1rt.tst
@@ -0,0 +1,92 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: priolt: 0: 1
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: unlocked: 2: 0
+W: locked: 1: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-2rt.tst b/scripts/rt-tester/t3-l1-pi-2rt.tst
new file mode 100644
index 00000000000000..e5816fe67df324
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-2rt.tst
@@ -0,0 +1,93 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+T: prioeq: 1: 81
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: unlocked: 2: 0
+W: locked: 1: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-3rt.tst b/scripts/rt-tester/t3-l1-pi-3rt.tst
new file mode 100644
index 00000000000000..718b82b5d3bbe0
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-3rt.tst
@@ -0,0 +1,92 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedfifo: 0: 80
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: prioeq: 0: 80
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: locked: 1: 0
+W: unlocked: 2: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t3-l1-pi-signal.tst b/scripts/rt-tester/t3-l1-pi-signal.tst
new file mode 100644
index 00000000000000..c6e2135634985f
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-signal.tst
@@ -0,0 +1,98 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+# Reset event counter
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set priorities
+C: schedother: 0: 0
+C: schedfifo: 1: 80
+C: schedfifo: 2: 81
+
+# T0 lock L0
+C: lock: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0, no wait in the wakeup path
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+T: prioeq: 1: 80
+
+# T2 lock L0 interruptible, no wait in the wakeup path
+C: lockintnowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 81
+T: prioeq: 1: 80
+
+# Interrupt T2
+C: signal: 2: 2
+W: unlocked: 2: 0
+T: prioeq: 1: 80
+T: prioeq: 0: 80
+
+T: locked: 0: 0
+T: blocked: 1: 0
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T1 has locked L0 and exit
+W: locked: 1: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
+
+
+
diff --git a/scripts/rt-tester/t3-l1-pi-steal.tst b/scripts/rt-tester/t3-l1-pi-steal.tst
new file mode 100644
index 00000000000000..f53749d59d79d2
--- /dev/null
+++ b/scripts/rt-tester/t3-l1-pi-steal.tst
@@ -0,0 +1,96 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 1 lock PI steal pending ownership
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 80
+C: schedfifo: 2: 81
+
+# T0 lock L0
+C: lock: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: lock: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 80
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T1 is in the wakeup loop
+W: blockedwake: 1: 0
+T: priolt: 0: 1
+
+# T2 lock L0
+C: lock: 2: 0
+# T1 leave wakeup loop
+C: lockcont: 1: 0
+
+# T2 must have the lock and T1 must be blocked
+W: locked: 2: 0
+W: blocked: 1: 0
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+# Wait until T1 is in the wakeup loop and let it run
+W: blockedwake: 1: 0
+C: lockcont: 1: 0
+W: locked: 1: 0
+C: unlock: 1: 0
+W: unlocked: 1: 0
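In the scenario above, T1 is only the pending owner of L0 after T0's unlock (the blockedwake state) and has not completed the acquisition. The rule this test exercises, modelled below for illustration rather than taken from the kernel source, is that a contender may steal the lock from a lower-priority pending owner.

    def can_steal(contender_prio, pending_owner_prio):
        # higher number means higher priority in the rt-tester convention
        return contender_prio > pending_owner_prio

    # T2 (prio 81) steals L0 from the pending owner T1 (prio 80):
    assert can_steal(81, 80)
    # T1 could not steal it back from T2 the same way:
    assert not can_steal(80, 81)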
diff --git a/scripts/rt-tester/t3-l2-pi.tst b/scripts/rt-tester/t3-l2-pi.tst
new file mode 100644
index 00000000000000..cdc3e4fd7bac63
--- /dev/null
+++ b/scripts/rt-tester/t3-l2-pi.tst
@@ -0,0 +1,92 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 3 threads 2 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+C: schedfifo: 2: 82
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L0
+C: locknowait: 1: 0
+W: blocked: 1: 0
+T: priolt: 0: 1
+
+# T2 lock L0
+C: locknowait: 2: 0
+W: blocked: 2: 0
+T: prioeq: 0: 82
+
+# T0 unlock L0
+C: unlock: 0: 0
+
+# Wait until T2 got the lock
+W: locked: 2: 0
+W: unlocked: 0: 0
+T: priolt: 0: 1
+
+# T2 unlock L0
+C: unlock: 2: 0
+
+W: unlocked: 2: 0
+W: locked: 1: 0
+
+C: unlock: 1: 0
+W: unlocked: 1: 0
diff --git a/scripts/rt-tester/t4-l2-pi-deboost.tst b/scripts/rt-tester/t4-l2-pi-deboost.tst
new file mode 100644
index 00000000000000..baa14137f47354
--- /dev/null
+++ b/scripts/rt-tester/t4-l2-pi-deboost.tst
@@ -0,0 +1,123 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 4 threads 2 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedother: 1: 0
+C: schedfifo: 2: 82
+C: schedfifo: 3: 83
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T3 lock L0
+C: lockintnowait: 3: 0
+W: blocked: 3: 0
+T: prioeq: 0: 83
+
+# T0 lock L1
+C: lock: 0: 1
+W: blocked: 0: 1
+T: prioeq: 1: 83
+
+# T1 unlock L1
+C: unlock: 1: 1
+
+# Wait until T0 is in the wakeup code
+W: blockedwake: 0: 1
+
+# Verify that T1 is unboosted
+W: unlocked: 1: 1
+T: priolt: 1: 1
+
+# T2 lock L1 (T0 is boosted and pending owner!)
+C: locknowait: 2: 1
+W: blocked: 2: 1
+T: prioeq: 0: 83
+
+# Interrupt T3 and wait until T3 returned
+C: signal: 3: 0
+W: unlocked: 3: 0
+
+# Verify prio of T0 (still pending owner,
+# but T2 is enqueued due to the previous boost by T3)
+T: prioeq: 0: 82
+
+# Let T0 continue
+C: lockcont: 0: 1
+W: locked: 0: 1
+
+# Unlock L1 and let T2 get L1
+C: unlock: 0: 1
+W: locked: 2: 1
+
+# Verify that T0 is unboosted
+W: unlocked: 0: 1
+T: priolt: 0: 1
+
+# Unlock everything and exit
+C: unlock: 2: 1
+W: unlocked: 2: 1
+
+C: unlock: 0: 0
+W: unlocked: 0: 0
+
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
new file mode 100644
index 00000000000000..e6ec0c81b54dc9
--- /dev/null
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost-setsched.tst
@@ -0,0 +1,183 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 5 threads 4 lock PI - modify priority of blocked threads
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+C: schedfifo: 3: 83
+C: schedfifo: 4: 84
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L2
+C: locknowait: 2: 2
+W: locked: 2: 2
+
+# T2 lock L1
+C: lockintnowait: 2: 1
+W: blocked: 2: 1
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+
+# T3 lock L3
+C: locknowait: 3: 3
+W: locked: 3: 3
+
+# T3 lock L2
+C: lockintnowait: 3: 2
+W: blocked: 3: 2
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+
+# T4 lock L3
+C: lockintnowait: 4: 3
+W: blocked: 4: 3
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+
+# Reduce prio of T4
+C: schedfifo: 4: 80
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+T: prioeq: 3: 83
+T: prioeq: 4: 80
+
+# Increase prio of T4
+C: schedfifo: 4: 84
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+T: prioeq: 4: 84
+
+# Reduce prio of T3
+C: schedfifo: 3: 80
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+T: prioeq: 4: 84
+
+# Increase prio of T3
+C: schedfifo: 3: 85
+T: prioeq: 0: 85
+T: prioeq: 1: 85
+T: prioeq: 2: 85
+T: prioeq: 3: 85
+T: prioeq: 4: 84
+
+# Reduce prio of T3
+C: schedfifo: 3: 83
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+T: prioeq: 4: 84
+
+# Signal T4
+C: signal: 4: 0
+W: unlocked: 4: 3
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+T: prioeq: 3: 83
+
+# Signal T3
+C: signal: 3: 0
+W: unlocked: 3: 2
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+T: prioeq: 2: 82
+
+# Signal T2
+C: signal: 2: 0
+W: unlocked: 2: 1
+T: prioeq: 0: 81
+T: prioeq: 1: 81
+
+# Signal T1
+C: signal: 1: 0
+W: unlocked: 1: 0
+T: priolt: 0: 1
+
+# Unlock and exit
+C: unlock: 3: 3
+C: unlock: 2: 2
+C: unlock: 1: 1
+C: unlock: 0: 0
+
+W: unlocked: 3: 3
+W: unlocked: 2: 2
+W: unlocked: 1: 1
+W: unlocked: 0: 0
+
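Every prioeq block in the test above follows from propagating priority along the blocking chain T4 -> L3 -> T3 -> L2 -> T2 -> L1 -> T1 -> L0 -> T0. The helper below is an editorial model of that propagation, not the kernel chain walk: each owner runs at the maximum of its own priority and the effective priority of everything blocked behind it, and changing a blocked thread's priority with schedfifo re-propagates through the chain.

    def effective_prios(base, chain):
        # chain lists the threads from the tail blocker down to the final owner
        eff = dict(base)
        carry = 0
        for t in chain:
            carry = max(carry, base[t])
            eff[t] = carry
        return eff

    base = {"T4": 84, "T3": 83, "T2": 82, "T1": 81, "T0": 0}
    chain = ["T4", "T3", "T2", "T1", "T0"]
    assert effective_prios(base, chain)["T0"] == 84

    # "C: schedfifo: 3: 85" on the blocked T3 boosts the whole chain below it:
    base["T3"] = 85
    assert effective_prios(base, chain)["T0"] == 85
    assert effective_prios(base, chain)["T4"] == 84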
diff --git a/scripts/rt-tester/t5-l4-pi-boost-deboost.tst b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
new file mode 100644
index 00000000000000..ca64f8bbf4bc7a
--- /dev/null
+++ b/scripts/rt-tester/t5-l4-pi-boost-deboost.tst
@@ -0,0 +1,143 @@
+#
+# rt-mutex test
+#
+# Op: C(ommand)/T(est)/W(ait)
+# | opcode
+# | | threadid: 0-7
+# | | | opcode argument
+# | | | |
+# C: lock: 0: 0
+#
+# Commands
+#
+# opcode opcode argument
+# schedother nice value
+# schedfifo priority
+# lock lock nr (0-7)
+# locknowait lock nr (0-7)
+# lockint lock nr (0-7)
+# lockintnowait lock nr (0-7)
+# lockcont lock nr (0-7)
+# unlock lock nr (0-7)
+# lockbkl lock nr (0-7)
+# unlockbkl lock nr (0-7)
+# signal thread to signal (0-7)
+# reset 0
+# resetevent 0
+#
+# Tests / Wait
+#
+# opcode opcode argument
+#
+# prioeq priority
+# priolt priority
+# priogt priority
+# nprioeq normal priority
+# npriolt normal priority
+# npriogt normal priority
+# locked lock nr (0-7)
+# blocked lock nr (0-7)
+# blockedwake lock nr (0-7)
+# unlocked lock nr (0-7)
+# lockedbkl dont care
+# blockedbkl dont care
+# unlockedbkl dont care
+# opcodeeq command opcode or number
+# opcodelt number
+# opcodegt number
+# eventeq number
+# eventgt number
+# eventlt number
+
+#
+# 5 threads 4 lock PI
+#
+C: resetevent: 0: 0
+W: opcodeeq: 0: 0
+
+# Set schedulers
+C: schedother: 0: 0
+C: schedfifo: 1: 81
+C: schedfifo: 2: 82
+C: schedfifo: 3: 83
+C: schedfifo: 4: 84
+
+# T0 lock L0
+C: locknowait: 0: 0
+W: locked: 0: 0
+
+# T1 lock L1
+C: locknowait: 1: 1
+W: locked: 1: 1
+
+# T1 lock L0
+C: lockintnowait: 1: 0
+W: blocked: 1: 0
+T: prioeq: 0: 81
+
+# T2 lock L2
+C: locknowait: 2: 2
+W: locked: 2: 2
+
+# T2 lock L1
+C: lockintnowait: 2: 1
+W: blocked: 2: 1
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+
+# T3 lock L3
+C: locknowait: 3: 3
+W: locked: 3: 3
+
+# T3 lock L2
+C: lockintnowait: 3: 2
+W: blocked: 3: 2
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+
+# T4 lock L3
+C: lockintnowait: 4: 3
+W: blocked: 4: 3
+T: prioeq: 0: 84
+T: prioeq: 1: 84
+T: prioeq: 2: 84
+T: prioeq: 3: 84
+
+# Signal T4
+C: signal: 4: 0
+W: unlocked: 4: 3
+T: prioeq: 0: 83
+T: prioeq: 1: 83
+T: prioeq: 2: 83
+T: prioeq: 3: 83
+
+# Signal T3
+C: signal: 3: 0
+W: unlocked: 3: 2
+T: prioeq: 0: 82
+T: prioeq: 1: 82
+T: prioeq: 2: 82
+
+# Signal T2
+C: signal: 2: 0
+W: unlocked: 2: 1
+T: prioeq: 0: 81
+T: prioeq: 1: 81
+
+# Signal T1
+C: signal: 1: 0
+W: unlocked: 1: 0
+T: priolt: 0: 1
+
+# Unlock and exit
+C: unlock: 3: 3
+C: unlock: 2: 2
+C: unlock: 1: 1
+C: unlock: 0: 0
+
+W: unlocked: 3: 3
+W: unlocked: 2: 2
+W: unlocked: 1: 1
+W: unlocked: 0: 0
+
diff --git a/security/keys/key.c b/security/keys/key.c
index 43295ca37b5dcb..80de8c3e9cc3ea 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
@@ -988,7 +989,7 @@ void unregister_key_type(struct key_type *ktype)
if (key->type == ktype) {
if (ktype->destroy)
ktype->destroy(key);
- memset(&key->payload, 0xbd, sizeof(key->payload));
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
}
}
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index ac7f2b2e392400..28832e689800b9 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1532,8 +1532,9 @@ static int selinux_bprm_set_security(struct linux_binprm *bprm)
/* Default to the current task SID. */
bsec->sid = tsec->sid;
- /* Reset create and sockcreate SID on execve. */
+ /* Reset fs, key, and sock SIDs on execve. */
tsec->create_sid = 0;
+ tsec->keycreate_sid = 0;
tsec->sockcreate_sid = 0;
if (tsec->exec_sid) {
@@ -2586,9 +2587,10 @@ static int selinux_task_alloc_security(struct task_struct *tsk)
tsec2->osid = tsec1->osid;
tsec2->sid = tsec1->sid;
- /* Retain the exec, create, and sock SIDs across fork */
+ /* Retain the exec, fs, key, and sock SIDs across fork */
tsec2->exec_sid = tsec1->exec_sid;
tsec2->create_sid = tsec1->create_sid;
+ tsec2->keycreate_sid = tsec1->keycreate_sid;
tsec2->sockcreate_sid = tsec1->sockcreate_sid;
/* Retain ptracer SID across fork, if any.
diff --git a/sound/oss/via82cxxx_audio.c b/sound/oss/via82cxxx_audio.c
index 1a921ee71aba2d..2343dedd44ae30 100644
--- a/sound/oss/via82cxxx_audio.c
+++ b/sound/oss/via82cxxx_audio.c
@@ -24,6 +24,7 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pci.h>
+#include <linux/poison.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
@@ -3522,7 +3523,7 @@ err_out_have_mixer:
err_out_kfree:
#ifndef VIA_NDEBUG
- memset (card, 0xAB, sizeof (*card)); /* poison memory */
+ memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
#endif
kfree (card);
@@ -3559,7 +3560,7 @@ static void __devexit via_remove_one (struct pci_dev *pdev)
via_ac97_cleanup (card);
#ifndef VIA_NDEBUG
- memset (card, 0xAB, sizeof (*card)); /* poison memory */
+ memset (card, OSS_POISON_FREE, sizeof (*card)); /* poison memory */
#endif
kfree (card);
diff --git a/sound/ppc/toonie.c b/sound/ppc/toonie.c
deleted file mode 100644
index e69de29bb2d1d6..00000000000000
--- a/sound/ppc/toonie.c
+++ /dev/null