diff --git a/[refs] b/[refs] index 15f4023ef49a..1eb593bac743 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: b924f9599dfd4a604761e84b1e920e480fb57f66 +refs/heads/master: fe8e5b5a60f8427940d33b205e127aecfb0bca10 diff --git a/trunk/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/trunk/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss index 4f29e5f1ebfa..0a92a7c93a62 100644 --- a/trunk/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss +++ b/trunk/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss @@ -31,31 +31,3 @@ Date: March 2009 Kernel Version: 2.6.30 Contact: iss_storagedev@hp.com Description: A symbolic link to /sys/block/cciss!cXdY - -Where: /sys/bus/pci/devices//ccissX/rescan -Date: August 2009 -Kernel Version: 2.6.31 -Contact: iss_storagedev@hp.com -Description: Kicks of a rescan of the controller to discover logical - drive topology changes. - -Where: /sys/bus/pci/devices//ccissX/cXdY/lunid -Date: August 2009 -Kernel Version: 2.6.31 -Contact: iss_storagedev@hp.com -Description: Displays the 8-byte LUN ID used to address logical - drive Y of controller X. - -Where: /sys/bus/pci/devices//ccissX/cXdY/raid_level -Date: August 2009 -Kernel Version: 2.6.31 -Contact: iss_storagedev@hp.com -Description: Displays the RAID level of logical drive Y of - controller X. - -Where: /sys/bus/pci/devices//ccissX/cXdY/usage_count -Date: August 2009 -Kernel Version: 2.6.31 -Contact: iss_storagedev@hp.com -Description: Displays the usage count (number of opens) of logical drive Y - of controller X. diff --git a/trunk/Documentation/cgroups/cgroups.txt b/trunk/Documentation/cgroups/cgroups.txt index 0b33bfe7dde9..455d4e6d346d 100644 --- a/trunk/Documentation/cgroups/cgroups.txt +++ b/trunk/Documentation/cgroups/cgroups.txt @@ -227,14 +227,7 @@ as the path relative to the root of the cgroup file system. Each cgroup is represented by a directory in the cgroup file system containing the following files describing that cgroup: - - tasks: list of tasks (by pid) attached to that cgroup. This list - is not guaranteed to be sorted. Writing a thread id into this file - moves the thread into this cgroup. - - cgroup.procs: list of tgids in the cgroup. This list is not - guaranteed to be sorted or free of duplicate tgids, and userspace - should sort/uniquify the list if this property is required. - Writing a tgid into this file moves all threads with that tgid into - this cgroup. + - tasks: list of tasks (by pid) attached to that cgroup - notify_on_release flag: run the release agent on exit? - release_agent: the path to use for release notifications (this file exists in the top cgroup only) @@ -381,7 +374,7 @@ Now you want to do something with this cgroup. In this directory you can find several files: # ls -cgroup.procs notify_on_release tasks +notify_on_release tasks (plus whatever files added by the attached subsystems) Now attach your shell to this cgroup: diff --git a/trunk/Documentation/hwmon/ltc4215 b/trunk/Documentation/hwmon/ltc4215 index c196a1846259..2e6a21eb656c 100644 --- a/trunk/Documentation/hwmon/ltc4215 +++ b/trunk/Documentation/hwmon/ltc4215 @@ -22,13 +22,12 @@ Usage Notes ----------- This driver does not probe for LTC4215 devices, due to the fact that some -of the possible addresses are unfriendly to probing. You will have to -instantiate the devices explicitly. +of the possible addresses are unfriendly to probing. You will need to use +the "force" parameter to tell the driver where to find the device. 
Example: the following will load the driver for an LTC4215 at address 0x44 on I2C bus #0: -$ modprobe ltc4215 -$ echo ltc4215 0x44 > /sys/bus/i2c/devices/i2c-0/new_device +$ modprobe ltc4215 force=0,0x44 Sysfs entries diff --git a/trunk/Documentation/hwmon/ltc4245 b/trunk/Documentation/hwmon/ltc4245 index 02838a47d862..bae7a3adc5d8 100644 --- a/trunk/Documentation/hwmon/ltc4245 +++ b/trunk/Documentation/hwmon/ltc4245 @@ -23,13 +23,12 @@ Usage Notes ----------- This driver does not probe for LTC4245 devices, due to the fact that some -of the possible addresses are unfriendly to probing. You will have to -instantiate the devices explicitly. +of the possible addresses are unfriendly to probing. You will need to use +the "force" parameter to tell the driver where to find the device. Example: the following will load the driver for an LTC4245 at address 0x23 on I2C bus #1: -$ modprobe ltc4245 -$ echo ltc4245 0x23 > /sys/bus/i2c/devices/i2c-1/new_device +$ modprobe ltc4245 force=1,0x23 Sysfs entries diff --git a/trunk/Documentation/misc-devices/eeprom b/trunk/Documentation/i2c/chips/eeprom similarity index 100% rename from trunk/Documentation/misc-devices/eeprom rename to trunk/Documentation/i2c/chips/eeprom diff --git a/trunk/Documentation/misc-devices/max6875 b/trunk/Documentation/i2c/chips/max6875 similarity index 94% rename from trunk/Documentation/misc-devices/max6875 rename to trunk/Documentation/i2c/chips/max6875 index 1e89ee3ccc1b..10ca43cd1a72 100644 --- a/trunk/Documentation/misc-devices/max6875 +++ b/trunk/Documentation/i2c/chips/max6875 @@ -42,12 +42,10 @@ General Remarks Valid addresses for the MAX6875 are 0x50 and 0x52. Valid addresses for the MAX6874 are 0x50, 0x52, 0x54 and 0x56. -The driver does not probe any address, so you explicitly instantiate the -devices. +The driver does not probe any address, so you must force the address. Example: -$ modprobe max6875 -$ echo max6875 0x50 > /sys/bus/i2c/devices/i2c-0/new_device +$ modprobe max6875 force=0,0x50 The MAX6874/MAX6875 ignores address bit 0, so this driver attaches to multiple addresses. For example, for address 0x50, it also reserves 0x51. diff --git a/trunk/Documentation/i2c/instantiating-devices b/trunk/Documentation/i2c/instantiating-devices index e89490270aba..c740b7b41088 100644 --- a/trunk/Documentation/i2c/instantiating-devices +++ b/trunk/Documentation/i2c/instantiating-devices @@ -188,7 +188,7 @@ segment, the address is sufficient to uniquely identify the device to be deleted. Example: -# echo eeprom 0x50 > /sys/bus/i2c/devices/i2c-3/new_device +# echo eeprom 0x50 > /sys/class/i2c-adapter/i2c-3/new_device While this interface should only be used when in-kernel device declaration can't be done, there is a variety of cases where it can be helpful: diff --git a/trunk/Documentation/isdn/INTERFACE.CAPI b/trunk/Documentation/isdn/INTERFACE.CAPI index 5fe8de5cc727..686e107923ec 100644 --- a/trunk/Documentation/isdn/INTERFACE.CAPI +++ b/trunk/Documentation/isdn/INTERFACE.CAPI @@ -60,9 +60,10 @@ open() operation on regular files or character devices. After a successful return from register_appl(), CAPI messages from the application may be passed to the driver for the device via calls to the -send_message() callback function. Conversely, the driver may call Kernel -CAPI's capi_ctr_handle_message() function to pass a received CAPI message to -Kernel CAPI for forwarding to an application, specifying its ApplID. +send_message() callback function. The CAPI message to send is stored in the +data portion of an skb. 
Conversely, the driver may call Kernel CAPI's +capi_ctr_handle_message() function to pass a received CAPI message to Kernel +CAPI for forwarding to an application, specifying its ApplID. Deregistration requests (CAPI operation CAPI_RELEASE) from applications are forwarded as calls to the release_appl() callback function, passing the same @@ -141,7 +142,6 @@ u16 (*send_message)(struct capi_ctr *ctrlr, struct sk_buff *skb) to accepting or queueing the message. Errors occurring during the actual processing of the message should be signaled with an appropriate reply message. - May be called in process or interrupt context. Calls to this function are not serialized by Kernel CAPI, ie. it must be prepared to be re-entered. @@ -154,8 +154,7 @@ read_proc_t *ctr_read_proc system entry, /proc/capi/controllers/; will be called with a pointer to the device's capi_ctr structure as the last (data) argument -Note: Callback functions except send_message() are never called in interrupt -context. +Note: Callback functions are never called in interrupt context. - to be filled in before calling capi_ctr_ready(): @@ -172,40 +171,14 @@ u8 serial[CAPI_SERIAL_LEN] value to return for CAPI_GET_SERIAL -4.3 SKBs - -CAPI messages are passed between Kernel CAPI and the driver via send_message() -and capi_ctr_handle_message(), stored in the data portion of a socket buffer -(skb). Each skb contains a single CAPI message coded according to the CAPI 2.0 -standard. - -For the data transfer messages, DATA_B3_REQ and DATA_B3_IND, the actual -payload data immediately follows the CAPI message itself within the same skb. -The Data and Data64 parameters are not used for processing. The Data64 -parameter may be omitted by setting the length field of the CAPI message to 22 -instead of 30. - - -4.4 The _cmsg Structure +4.3 The _cmsg Structure (declared in ) The _cmsg structure stores the contents of a CAPI 2.0 message in an easily -accessible form. It contains members for all possible CAPI 2.0 parameters, -including subparameters of the Additional Info and B Protocol structured -parameters, with the following exceptions: - -* second Calling party number (CONNECT_IND) - -* Data64 (DATA_B3_REQ and DATA_B3_IND) - -* Sending complete (subparameter of Additional Info, CONNECT_REQ and INFO_REQ) - -* Global Configuration (subparameter of B Protocol, CONNECT_REQ, CONNECT_RESP - and SELECT_B_PROTOCOL_REQ) - -Only those parameters appearing in the message type currently being processed -are actually used. Unused members should be set to zero. +accessible form. It contains members for all possible CAPI 2.0 parameters, of +which only those appearing in the message type currently being processed are +actually used. Unused members should be set to zero. Members are named after the CAPI 2.0 standard names of the parameters they represent. See for the exact spelling. Member data @@ -217,19 +190,18 @@ u16 for CAPI parameters of type 'word' u32 for CAPI parameters of type 'dword' -_cstruct for CAPI parameters of type 'struct' +_cstruct for CAPI parameters of type 'struct' not containing any + variably-sized (struct) subparameters (eg. 'Called Party Number') The member is a pointer to a buffer containing the parameter in CAPI encoding (length + content). It may also be NULL, which will be taken to represent an empty (zero length) parameter. - Subparameters are stored in encoded form within the content part. 
-_cmstruct alternative representation for CAPI parameters of type 'struct' - (used only for the 'Additional Info' and 'B Protocol' parameters) +_cmstruct for CAPI parameters of type 'struct' containing 'struct' + subparameters ('Additional Info' and 'B Protocol') The representation is a single byte containing one of the values: - CAPI_DEFAULT: The parameter is empty/absent. - CAPI_COMPOSE: The parameter is present. - Subparameter values are stored individually in the corresponding - _cmsg structure members. + CAPI_DEFAULT: the parameter is empty + CAPI_COMPOSE: the values of the subparameters are stored + individually in the corresponding _cmsg structure members Functions capi_cmsg2message() and capi_message2cmsg() are provided to convert messages between their transport encoding described in the CAPI 2.0 standard @@ -325,26 +297,3 @@ char *capi_cmd2str(u8 Command, u8 Subcommand) be NULL if the command/subcommand is not one of those defined in the CAPI 2.0 standard. - -7. Debugging - -The module kernelcapi has a module parameter showcapimsgs controlling some -debugging output produced by the module. It can only be set when the module is -loaded, via a parameter "showcapimsgs=" to the modprobe command, either on -the command line or in the configuration file. - -If the lowest bit of showcapimsgs is set, kernelcapi logs controller and -application up and down events. - -In addition, every registered CAPI controller has an associated traceflag -parameter controlling how CAPI messages sent from and to tha controller are -logged. The traceflag parameter is initialized with the value of the -showcapimsgs parameter when the controller is registered, but can later be -changed via the MANUFACTURER_REQ command KCAPI_CMD_TRACE. - -If the value of traceflag is non-zero, CAPI messages are logged. -DATA_B3 messages are only logged if the value of traceflag is > 2. - -If the lowest bit of traceflag is set, only the command/subcommand and message -length are logged. Otherwise, kernelcapi logs a readable representation of -the entire message. diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 9107b387e91f..6fa7292947e5 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -671,7 +671,6 @@ and is between 256 and 4096 characters. It is defined in the file earlyprintk= [X86,SH,BLACKFIN] earlyprintk=vga earlyprintk=serial[,ttySn[,baudrate]] - earlyprintk=ttySn[,baudrate] earlyprintk=dbgp[debugController#] Append ",keep" to not disable it when the real console diff --git a/trunk/Documentation/networking/pktgen.txt b/trunk/Documentation/networking/pktgen.txt index 61bb645d50e0..c6cf4a3c16e0 100644 --- a/trunk/Documentation/networking/pktgen.txt +++ b/trunk/Documentation/networking/pktgen.txt @@ -90,11 +90,6 @@ Examples: pgset "dstmac 00:00:00:00:00:00" sets MAC destination address pgset "srcmac 00:00:00:00:00:00" sets MAC source address - pgset "queue_map_min 0" Sets the min value of tx queue interval - pgset "queue_map_max 7" Sets the max value of tx queue interval, for multiqueue devices - To select queue 1 of a given device, - use queue_map_min=1 and queue_map_max=1 - pgset "src_mac_count 1" Sets the number of MACs we'll range through. The 'minimum' MAC is what you set with srcmac. 
@@ -106,9 +101,6 @@ Examples: IPDST_RND, UDPSRC_RND, UDPDST_RND, MACSRC_RND, MACDST_RND MPLS_RND, VID_RND, SVID_RND - QUEUE_MAP_RND # queue map random - QUEUE_MAP_CPU # queue map mirrors smp_processor_id() - pgset "udp_src_min 9" set UDP source port min, If < udp_src_max, then cycle through the port range. diff --git a/trunk/Documentation/vm/ksm.txt b/trunk/Documentation/vm/ksm.txt index 262d8e6793a3..72a22f65960e 100644 --- a/trunk/Documentation/vm/ksm.txt +++ b/trunk/Documentation/vm/ksm.txt @@ -52,15 +52,15 @@ The KSM daemon is controlled by sysfs files in /sys/kernel/mm/ksm/, readable by all but writable only by root: max_kernel_pages - set to maximum number of kernel pages that KSM may use - e.g. "echo 100000 > /sys/kernel/mm/ksm/max_kernel_pages" + e.g. "echo 2000 > /sys/kernel/mm/ksm/max_kernel_pages" Value 0 imposes no limit on the kernel pages KSM may use; but note that any process using MADV_MERGEABLE can cause KSM to allocate these pages, unswappable until it exits. - Default: quarter of memory (chosen to not pin too much) + Default: 2000 (chosen for demonstration purposes) pages_to_scan - how many present pages to scan before ksmd goes to sleep - e.g. "echo 100 > /sys/kernel/mm/ksm/pages_to_scan" - Default: 100 (chosen for demonstration purposes) + e.g. "echo 200 > /sys/kernel/mm/ksm/pages_to_scan" + Default: 200 (chosen for demonstration purposes) sleep_millisecs - how many milliseconds ksmd should sleep before next scan e.g. "echo 20 > /sys/kernel/mm/ksm/sleep_millisecs" @@ -70,8 +70,7 @@ run - set 0 to stop ksmd from running but keep merged pages, set 1 to run ksmd e.g. "echo 1 > /sys/kernel/mm/ksm/run", set 2 to stop ksmd and unmerge all pages currently merged, but leave mergeable areas registered for next run - Default: 0 (must be changed to 1 to activate KSM, - except if CONFIG_SYSFS is disabled) + Default: 1 (for immediate use by apps which register) The effectiveness of KSM and MADV_MERGEABLE is shown in /sys/kernel/mm/ksm/: @@ -87,4 +86,4 @@ pages_volatile embraces several different kinds of activity, but a high proportion there would also indicate poor use of madvise MADV_MERGEABLE. Izik Eidus, -Hugh Dickins, 24 Sept 2009 +Hugh Dickins, 30 July 2009 diff --git a/trunk/Documentation/vm/page-types.c b/trunk/Documentation/vm/page-types.c index 3ec4f2a22585..fa1a30d9e9d5 100644 --- a/trunk/Documentation/vm/page-types.c +++ b/trunk/Documentation/vm/page-types.c @@ -2,10 +2,7 @@ * page-types: Tool for querying page flags * * Copyright (C) 2009 Intel corporation - * - * Authors: Wu Fengguang - * - * Released under the General Public License (GPL). 
+ * Copyright (C) 2009 Wu Fengguang */ #define _LARGEFILE64_SOURCE @@ -72,9 +69,7 @@ #define KPF_COMPOUND_TAIL 16 #define KPF_HUGE 17 #define KPF_UNEVICTABLE 18 -#define KPF_HWPOISON 19 #define KPF_NOPAGE 20 -#define KPF_KSM 21 /* [32-] kernel hacking assistances */ #define KPF_RESERVED 32 @@ -121,9 +116,7 @@ static char *page_flag_names[] = { [KPF_COMPOUND_TAIL] = "T:compound_tail", [KPF_HUGE] = "G:huge", [KPF_UNEVICTABLE] = "u:unevictable", - [KPF_HWPOISON] = "X:hwpoison", [KPF_NOPAGE] = "n:nopage", - [KPF_KSM] = "x:ksm", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] = "m:mlocked", @@ -159,6 +152,9 @@ static unsigned long opt_size[MAX_ADDR_RANGES]; static int nr_vmas; static unsigned long pg_start[MAX_VMAS]; static unsigned long pg_end[MAX_VMAS]; +static unsigned long voffset; + +static int pagemap_fd; #define MAX_BIT_FILTERS 64 static int nr_bit_filters; @@ -167,16 +163,9 @@ static uint64_t opt_bits[MAX_BIT_FILTERS]; static int page_size; -static int pagemap_fd; +#define PAGES_BATCH (64 << 10) /* 64k pages */ static int kpageflags_fd; -static int opt_hwpoison; -static int opt_unpoison; - -static char *hwpoison_debug_fs = "/debug/hwpoison"; -static int hwpoison_inject_fd; -static int hwpoison_forget_fd; - #define HASH_SHIFT 13 #define HASH_SIZE (1 << HASH_SHIFT) #define HASH_MASK (HASH_SIZE - 1) @@ -218,74 +207,6 @@ static void fatal(const char *x, ...) exit(EXIT_FAILURE); } -int checked_open(const char *pathname, int flags) -{ - int fd = open(pathname, flags); - - if (fd < 0) { - perror(pathname); - exit(EXIT_FAILURE); - } - - return fd; -} - -/* - * pagemap/kpageflags routines - */ - -static unsigned long do_u64_read(int fd, char *name, - uint64_t *buf, - unsigned long index, - unsigned long count) -{ - long bytes; - - if (index > ULONG_MAX / 8) - fatal("index overflow: %lu\n", index); - - if (lseek(fd, index * 8, SEEK_SET) < 0) { - perror(name); - exit(EXIT_FAILURE); - } - - bytes = read(fd, buf, count * 8); - if (bytes < 0) { - perror(name); - exit(EXIT_FAILURE); - } - if (bytes % 8) - fatal("partial read: %lu bytes\n", bytes); - - return bytes / 8; -} - -static unsigned long kpageflags_read(uint64_t *buf, - unsigned long index, - unsigned long pages) -{ - return do_u64_read(kpageflags_fd, PROC_KPAGEFLAGS, buf, index, pages); -} - -static unsigned long pagemap_read(uint64_t *buf, - unsigned long index, - unsigned long pages) -{ - return do_u64_read(pagemap_fd, "/proc/pid/pagemap", buf, index, pages); -} - -static unsigned long pagemap_pfn(uint64_t val) -{ - unsigned long pfn; - - if (val & PM_PRESENT) - pfn = PM_PFRAME(val); - else - pfn = 0; - - return pfn; -} - /* * page flag names @@ -334,8 +255,7 @@ static char *page_flag_longname(uint64_t flags) * page list and summary */ -static void show_page_range(unsigned long voffset, - unsigned long offset, uint64_t flags) +static void show_page_range(unsigned long offset, uint64_t flags) { static uint64_t flags0; static unsigned long voff; @@ -361,8 +281,7 @@ static void show_page_range(unsigned long voffset, count = 1; } -static void show_page(unsigned long voffset, - unsigned long offset, uint64_t flags) +static void show_page(unsigned long offset, uint64_t flags) { if (opt_pid) printf("%lx\t", voffset); @@ -443,62 +362,6 @@ static uint64_t well_known_flags(uint64_t flags) return flags; } -static uint64_t kpageflags_flags(uint64_t flags) -{ - flags = expand_overloaded_flags(flags); - - if (!opt_raw) - flags = well_known_flags(flags); - - return flags; -} - -/* - * page actions - */ - -static void prepare_hwpoison_fd(void) -{ - char 
buf[100]; - - if (opt_hwpoison && !hwpoison_inject_fd) { - sprintf(buf, "%s/corrupt-pfn", hwpoison_debug_fs); - hwpoison_inject_fd = checked_open(buf, O_WRONLY); - } - - if (opt_unpoison && !hwpoison_forget_fd) { - sprintf(buf, "%s/renew-pfn", hwpoison_debug_fs); - hwpoison_forget_fd = checked_open(buf, O_WRONLY); - } -} - -static int hwpoison_page(unsigned long offset) -{ - char buf[100]; - int len; - - len = sprintf(buf, "0x%lx\n", offset); - len = write(hwpoison_inject_fd, buf, len); - if (len < 0) { - perror("hwpoison inject"); - return len; - } - return 0; -} - -static int unpoison_page(unsigned long offset) -{ - char buf[100]; - int len; - - len = sprintf(buf, "0x%lx\n", offset); - len = write(hwpoison_forget_fd, buf, len); - if (len < 0) { - perror("hwpoison forget"); - return len; - } - return 0; -} /* * page frame walker @@ -531,83 +394,104 @@ static int hash_slot(uint64_t flags) exit(EXIT_FAILURE); } -static void add_page(unsigned long voffset, - unsigned long offset, uint64_t flags) +static void add_page(unsigned long offset, uint64_t flags) { - flags = kpageflags_flags(flags); + flags = expand_overloaded_flags(flags); + + if (!opt_raw) + flags = well_known_flags(flags); if (!bit_mask_ok(flags)) return; - if (opt_hwpoison) - hwpoison_page(offset); - if (opt_unpoison) - unpoison_page(offset); - if (opt_list == 1) - show_page_range(voffset, offset, flags); + show_page_range(offset, flags); else if (opt_list == 2) - show_page(voffset, offset, flags); + show_page(offset, flags); nr_pages[hash_slot(flags)]++; total_pages++; } -#define KPAGEFLAGS_BATCH (64 << 10) /* 64k pages */ -static void walk_pfn(unsigned long voffset, - unsigned long index, - unsigned long count) +static void walk_pfn(unsigned long index, unsigned long count) { - uint64_t buf[KPAGEFLAGS_BATCH]; unsigned long batch; - unsigned long pages; + unsigned long n; unsigned long i; + if (index > ULONG_MAX / KPF_BYTES) + fatal("index overflow: %lu\n", index); + + lseek(kpageflags_fd, index * KPF_BYTES, SEEK_SET); + while (count) { - batch = min_t(unsigned long, count, KPAGEFLAGS_BATCH); - pages = kpageflags_read(buf, index, batch); - if (pages == 0) + uint64_t kpageflags_buf[KPF_BYTES * PAGES_BATCH]; + + batch = min_t(unsigned long, count, PAGES_BATCH); + n = read(kpageflags_fd, kpageflags_buf, batch * KPF_BYTES); + if (n == 0) break; + if (n < 0) { + perror(PROC_KPAGEFLAGS); + exit(EXIT_FAILURE); + } - for (i = 0; i < pages; i++) - add_page(voffset + i, index + i, buf[i]); + if (n % KPF_BYTES != 0) + fatal("partial read: %lu bytes\n", n); + n = n / KPF_BYTES; - index += pages; - count -= pages; + for (i = 0; i < n; i++) + add_page(index + i, kpageflags_buf[i]); + + index += batch; + count -= batch; } } -#define PAGEMAP_BATCH (64 << 10) -static void walk_vma(unsigned long index, unsigned long count) -{ - uint64_t buf[PAGEMAP_BATCH]; - unsigned long batch; - unsigned long pages; - unsigned long pfn; - unsigned long i; - while (count) { - batch = min_t(unsigned long, count, PAGEMAP_BATCH); - pages = pagemap_read(buf, index, batch); - if (pages == 0) - break; +#define PAGEMAP_BATCH 4096 +static unsigned long task_pfn(unsigned long pgoff) +{ + static uint64_t buf[PAGEMAP_BATCH]; + static unsigned long start; + static long count; + uint64_t pfn; - for (i = 0; i < pages; i++) { - pfn = pagemap_pfn(buf[i]); - if (pfn) - walk_pfn(index + i, pfn, 1); + if (pgoff < start || pgoff >= start + count) { + if (lseek64(pagemap_fd, + (uint64_t)pgoff * PM_ENTRY_BYTES, + SEEK_SET) < 0) { + perror("pagemap seek"); + exit(EXIT_FAILURE); } - - 
index += pages; - count -= pages; + count = read(pagemap_fd, buf, sizeof(buf)); + if (count == 0) + return 0; + if (count < 0) { + perror("pagemap read"); + exit(EXIT_FAILURE); + } + if (count % PM_ENTRY_BYTES) { + fatal("pagemap read not aligned.\n"); + exit(EXIT_FAILURE); + } + count /= PM_ENTRY_BYTES; + start = pgoff; } + + pfn = buf[pgoff - start]; + if (pfn & PM_PRESENT) + pfn = PM_PFRAME(pfn); + else + pfn = 0; + + return pfn; } static void walk_task(unsigned long index, unsigned long count) { - const unsigned long end = index + count; - unsigned long start; int i = 0; + const unsigned long end = index + count; while (index < end) { @@ -617,11 +501,15 @@ static void walk_task(unsigned long index, unsigned long count) if (pg_start[i] >= end) return; - start = max_t(unsigned long, pg_start[i], index); - index = min_t(unsigned long, pg_end[i], end); + voffset = max_t(unsigned long, pg_start[i], index); + index = min_t(unsigned long, pg_end[i], end); - assert(start < index); - walk_vma(start, index - start); + assert(voffset < index); + for (; voffset < index; voffset++) { + unsigned long pfn = task_pfn(voffset); + if (pfn) + walk_pfn(pfn, 1); + } } } @@ -639,14 +527,18 @@ static void walk_addr_ranges(void) { int i; - kpageflags_fd = checked_open(PROC_KPAGEFLAGS, O_RDONLY); + kpageflags_fd = open(PROC_KPAGEFLAGS, O_RDONLY); + if (kpageflags_fd < 0) { + perror(PROC_KPAGEFLAGS); + exit(EXIT_FAILURE); + } if (!nr_addr_ranges) add_addr_range(0, ULONG_MAX); for (i = 0; i < nr_addr_ranges; i++) if (!opt_pid) - walk_pfn(0, opt_offset[i], opt_size[i]); + walk_pfn(opt_offset[i], opt_size[i]); else walk_task(opt_offset[i], opt_size[i]); @@ -683,8 +575,6 @@ static void usage(void) " -l|--list Show page details in ranges\n" " -L|--list-each Show page details one by one\n" " -N|--no-summary Don't show summay info\n" -" -X|--hwpoison hwpoison pages\n" -" -x|--unpoison unpoison pages\n" " -h|--help Show this usage message\n" "addr-spec:\n" " N one page at offset N (unit: pages)\n" @@ -734,7 +624,11 @@ static void parse_pid(const char *str) opt_pid = parse_number(str); sprintf(buf, "/proc/%d/pagemap", opt_pid); - pagemap_fd = checked_open(buf, O_RDONLY); + pagemap_fd = open(buf, O_RDONLY); + if (pagemap_fd < 0) { + perror(buf); + exit(EXIT_FAILURE); + } sprintf(buf, "/proc/%d/maps", opt_pid); file = fopen(buf, "r"); @@ -894,8 +788,6 @@ static struct option opts[] = { { "list" , 0, NULL, 'l' }, { "list-each" , 0, NULL, 'L' }, { "no-summary", 0, NULL, 'N' }, - { "hwpoison" , 0, NULL, 'X' }, - { "unpoison" , 0, NULL, 'x' }, { "help" , 0, NULL, 'h' }, { NULL , 0, NULL, 0 } }; @@ -907,7 +799,7 @@ int main(int argc, char *argv[]) page_size = getpagesize(); while ((c = getopt_long(argc, argv, - "rp:f:a:b:lLNXxh", opts, NULL)) != -1) { + "rp:f:a:b:lLNh", opts, NULL)) != -1) { switch (c) { case 'r': opt_raw = 1; @@ -933,14 +825,6 @@ int main(int argc, char *argv[]) case 'N': opt_no_summary = 1; break; - case 'X': - opt_hwpoison = 1; - prepare_hwpoison_fd(); - break; - case 'x': - opt_unpoison = 1; - prepare_hwpoison_fd(); - break; case 'h': usage(); exit(0); @@ -960,7 +844,7 @@ int main(int argc, char *argv[]) walk_addr_ranges(); if (opt_list == 1) - show_page_range(0, 0, 0); /* drain the buffer */ + show_page_range(0, 0); /* drain the buffer */ if (opt_no_summary) return 0; diff --git a/trunk/Documentation/vm/pagemap.txt b/trunk/Documentation/vm/pagemap.txt index df09b9650a81..600a304a828c 100644 --- a/trunk/Documentation/vm/pagemap.txt +++ b/trunk/Documentation/vm/pagemap.txt @@ -57,9 +57,7 @@ There are three 
components to pagemap: 16. COMPOUND_TAIL 16. HUGE 18. UNEVICTABLE - 19. HWPOISON 20. NOPAGE - 21. KSM Short descriptions to the page flags: @@ -88,15 +86,9 @@ Short descriptions to the page flags: 17. HUGE this is an integral part of a HugeTLB page -19. HWPOISON - hardware detected memory corruption on this page: don't touch the data! - 20. NOPAGE no page frame exists at the requested address -21. KSM - identical memory pages dynamically shared between one or more processes - [IO related page flags] 1. ERROR IO error occurred 3. UPTODATE page has up-to-date data diff --git a/trunk/Documentation/w1/masters/ds2482 b/trunk/Documentation/w1/masters/ds2482 index 299b91c7609f..9210d6fa5024 100644 --- a/trunk/Documentation/w1/masters/ds2482 +++ b/trunk/Documentation/w1/masters/ds2482 @@ -24,8 +24,8 @@ General Remarks Valid addresses are 0x18, 0x19, 0x1a, and 0x1b. However, the device cannot be detected without writing to the i2c bus, so no -detection is done. You should instantiate the device explicitly. +detection is done. +You should force the device address. -$ modprobe ds2482 -$ echo ds2482 0x18 > /sys/bus/i2c/devices/i2c-0/new_device +$ modprobe ds2482 force=0,0x18 diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index e1da925b38c8..737a9b2c532d 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -257,13 +257,6 @@ W: http://www.lesswatts.org/projects/acpi/ S: Supported F: drivers/acpi/fan.c -ACPI PROCESSOR AGGREGATOR DRIVER -M: Shaohua Li -L: linux-acpi@vger.kernel.org -W: http://www.lesswatts.org/projects/acpi/ -S: Supported -F: drivers/acpi/acpi_pad.c - ACPI THERMAL DRIVER M: Zhang Rui L: linux-acpi@vger.kernel.org @@ -3643,13 +3636,6 @@ F: Documentation/blockdev/nbd.txt F: drivers/block/nbd.c F: include/linux/nbd.h -NETWORK DROP MONITOR -M: Neil Horman -L: netdev@vger.kernel.org -S: Maintained -W: https://fedorahosted.org/dropwatch/ -F: net/core/drop_monitor.c - NETWORKING [GENERAL] M: "David S. Miller" L: netdev@vger.kernel.org @@ -3980,7 +3966,6 @@ F: drivers/block/paride/ PARISC ARCHITECTURE M: Kyle McMartin M: Helge Deller -M: "James E.J. Bottomley" L: linux-parisc@vger.kernel.org W: http://www.parisc-linux.org/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kyle/parisc-2.6.git diff --git a/trunk/Makefile b/trunk/Makefile index e50569ab5fe8..00444a8e304f 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 2 PATCHLEVEL = 6 SUBLEVEL = 32 -EXTRAVERSION = -rc3 +EXTRAVERSION = -rc2 NAME = Man-Eating Seals of Antiquity # *DOCUMENTATION* diff --git a/trunk/arch/arm/mach-omap2/clock34xx.c b/trunk/arch/arm/mach-omap2/clock34xx.c index 489556eecbd1..fafcd32e6907 100644 --- a/trunk/arch/arm/mach-omap2/clock34xx.c +++ b/trunk/arch/arm/mach-omap2/clock34xx.c @@ -338,13 +338,6 @@ static struct omap_clk omap34xx_clks[] = { */ #define SDRC_MPURATE_LOOPS 96 -/* - * DPLL5_FREQ_FOR_USBHOST: USBHOST and USBTLL are the only clocks - * that are sourced by DPLL5, and both of these require this clock - * to be at 120 MHz for proper operation. 
- */ -#define DPLL5_FREQ_FOR_USBHOST 120000000 - /** * omap3430es2_clk_ssi_find_idlest - return CM_IDLEST info for SSI * @clk: struct clk * being enabled @@ -1063,28 +1056,6 @@ void omap2_clk_prepare_for_reboot(void) #endif } -static void omap3_clk_lock_dpll5(void) -{ - struct clk *dpll5_clk; - struct clk *dpll5_m2_clk; - - dpll5_clk = clk_get(NULL, "dpll5_ck"); - clk_set_rate(dpll5_clk, DPLL5_FREQ_FOR_USBHOST); - clk_enable(dpll5_clk); - - /* Enable autoidle to allow it to enter low power bypass */ - omap3_dpll_allow_idle(dpll5_clk); - - /* Program dpll5_m2_clk divider for no division */ - dpll5_m2_clk = clk_get(NULL, "dpll5_m2_ck"); - clk_enable(dpll5_m2_clk); - clk_set_rate(dpll5_m2_clk, DPLL5_FREQ_FOR_USBHOST); - - clk_disable(dpll5_m2_clk); - clk_disable(dpll5_clk); - return; -} - /* REVISIT: Move this init stuff out into clock.c */ /* @@ -1177,12 +1148,6 @@ int __init omap2_clk_init(void) */ clk_enable_init_clocks(); - /* - * Lock DPLL5 and put it in autoidle. - */ - if (omap_rev() >= OMAP3430_REV_ES2_0) - omap3_clk_lock_dpll5(); - /* Avoid sleeping during omap2_clk_prepare_for_reboot() */ /* REVISIT: not yet ready for 343x */ #if 0 diff --git a/trunk/arch/arm/mach-omap2/pm-debug.c b/trunk/arch/arm/mach-omap2/pm-debug.c index 2fc4d6abbd0a..1b4c1600f8d8 100644 --- a/trunk/arch/arm/mach-omap2/pm-debug.c +++ b/trunk/arch/arm/mach-omap2/pm-debug.c @@ -541,7 +541,7 @@ static int __init pm_dbg_init(void) printk(KERN_ERR "%s: only OMAP3 supported\n", __func__); return -ENODEV; } - + d = debugfs_create_dir("pm_debug", NULL); if (IS_ERR(d)) return PTR_ERR(d); @@ -551,7 +551,7 @@ static int __init pm_dbg_init(void) (void) debugfs_create_file("time", S_IRUGO, d, (void *)DEBUG_FILE_TIMERS, &debug_fops); - pwrdm_for_each_nolock(pwrdms_setup, (void *)d); + pwrdm_for_each(pwrdms_setup, (void *)d); pm_dbg_dir = debugfs_create_dir("registers", d); if (IS_ERR(pm_dbg_dir)) diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c b/trunk/arch/arm/mach-omap2/pm34xx.c index 378c2f618358..0ff5a6c53aa0 100644 --- a/trunk/arch/arm/mach-omap2/pm34xx.c +++ b/trunk/arch/arm/mach-omap2/pm34xx.c @@ -51,112 +51,97 @@ static void (*_omap_sram_idle)(u32 *addr, int save_state); static struct powerdomain *mpu_pwrdm; -/* - * PRCM Interrupt Handler Helper Function - * - * The purpose of this function is to clear any wake-up events latched - * in the PRCM PM_WKST_x registers. It is possible that a wake-up event - * may occur whilst attempting to clear a PM_WKST_x register and thus - * set another bit in this register. A while loop is used to ensure - * that any peripheral wake-up events occurring while attempting to - * clear the PM_WKST_x are detected and cleared. - */ -static int prcm_clear_mod_irqs(s16 module, u8 regs) +/* PRCM Interrupt Handler for wakeups */ +static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) { - u32 wkst, fclk, iclk, clken; - u16 wkst_off = (regs == 3) ? OMAP3430ES2_PM_WKST3 : PM_WKST1; - u16 fclk_off = (regs == 3) ? OMAP3430ES2_CM_FCLKEN3 : CM_FCLKEN1; - u16 iclk_off = (regs == 3) ? CM_ICLKEN3 : CM_ICLKEN1; - u16 grpsel_off = (regs == 3) ? 
- OMAP3430ES2_PM_MPUGRPSEL3 : OMAP3430_PM_MPUGRPSEL; - int c = 0; - - wkst = prm_read_mod_reg(module, wkst_off); - wkst &= prm_read_mod_reg(module, grpsel_off); + u32 wkst, irqstatus_mpu; + u32 fclk, iclk; + + /* WKUP */ + wkst = prm_read_mod_reg(WKUP_MOD, PM_WKST); if (wkst) { - iclk = cm_read_mod_reg(module, iclk_off); - fclk = cm_read_mod_reg(module, fclk_off); - while (wkst) { - clken = wkst; - cm_set_mod_reg_bits(clken, module, iclk_off); - /* - * For USBHOST, we don't know whether HOST1 or - * HOST2 woke us up, so enable both f-clocks - */ - if (module == OMAP3430ES2_USBHOST_MOD) - clken |= 1 << OMAP3430ES2_EN_USBHOST2_SHIFT; - cm_set_mod_reg_bits(clken, module, fclk_off); - prm_write_mod_reg(wkst, module, wkst_off); - wkst = prm_read_mod_reg(module, wkst_off); - c++; - } - cm_write_mod_reg(iclk, module, iclk_off); - cm_write_mod_reg(fclk, module, fclk_off); + iclk = cm_read_mod_reg(WKUP_MOD, CM_ICLKEN); + fclk = cm_read_mod_reg(WKUP_MOD, CM_FCLKEN); + cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_ICLKEN); + cm_set_mod_reg_bits(wkst, WKUP_MOD, CM_FCLKEN); + prm_write_mod_reg(wkst, WKUP_MOD, PM_WKST); + while (prm_read_mod_reg(WKUP_MOD, PM_WKST)) + cpu_relax(); + cm_write_mod_reg(iclk, WKUP_MOD, CM_ICLKEN); + cm_write_mod_reg(fclk, WKUP_MOD, CM_FCLKEN); } - return c; -} - -static int _prcm_int_handle_wakeup(void) -{ - int c; - - c = prcm_clear_mod_irqs(WKUP_MOD, 1); - c += prcm_clear_mod_irqs(CORE_MOD, 1); - c += prcm_clear_mod_irqs(OMAP3430_PER_MOD, 1); - if (omap_rev() > OMAP3430_REV_ES1_0) { - c += prcm_clear_mod_irqs(CORE_MOD, 3); - c += prcm_clear_mod_irqs(OMAP3430ES2_USBHOST_MOD, 1); + /* CORE */ + wkst = prm_read_mod_reg(CORE_MOD, PM_WKST1); + if (wkst) { + iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN1); + fclk = cm_read_mod_reg(CORE_MOD, CM_FCLKEN1); + cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN1); + cm_set_mod_reg_bits(wkst, CORE_MOD, CM_FCLKEN1); + prm_write_mod_reg(wkst, CORE_MOD, PM_WKST1); + while (prm_read_mod_reg(CORE_MOD, PM_WKST1)) + cpu_relax(); + cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN1); + cm_write_mod_reg(fclk, CORE_MOD, CM_FCLKEN1); + } + wkst = prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3); + if (wkst) { + iclk = cm_read_mod_reg(CORE_MOD, CM_ICLKEN3); + fclk = cm_read_mod_reg(CORE_MOD, OMAP3430ES2_CM_FCLKEN3); + cm_set_mod_reg_bits(wkst, CORE_MOD, CM_ICLKEN3); + cm_set_mod_reg_bits(wkst, CORE_MOD, OMAP3430ES2_CM_FCLKEN3); + prm_write_mod_reg(wkst, CORE_MOD, OMAP3430ES2_PM_WKST3); + while (prm_read_mod_reg(CORE_MOD, OMAP3430ES2_PM_WKST3)) + cpu_relax(); + cm_write_mod_reg(iclk, CORE_MOD, CM_ICLKEN3); + cm_write_mod_reg(fclk, CORE_MOD, OMAP3430ES2_CM_FCLKEN3); } - return c; -} + /* PER */ + wkst = prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST); + if (wkst) { + iclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_ICLKEN); + fclk = cm_read_mod_reg(OMAP3430_PER_MOD, CM_FCLKEN); + cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_ICLKEN); + cm_set_mod_reg_bits(wkst, OMAP3430_PER_MOD, CM_FCLKEN); + prm_write_mod_reg(wkst, OMAP3430_PER_MOD, PM_WKST); + while (prm_read_mod_reg(OMAP3430_PER_MOD, PM_WKST)) + cpu_relax(); + cm_write_mod_reg(iclk, OMAP3430_PER_MOD, CM_ICLKEN); + cm_write_mod_reg(fclk, OMAP3430_PER_MOD, CM_FCLKEN); + } -/* - * PRCM Interrupt Handler - * - * The PRM_IRQSTATUS_MPU register indicates if there are any pending - * interrupts from the PRCM for the MPU. These bits must be cleared in - * order to clear the PRCM interrupt. The PRCM interrupt handler is - * implemented to simply clear the PRM_IRQSTATUS_MPU in order to clear - * the PRCM interrupt. 
Please note that bit 0 of the PRM_IRQSTATUS_MPU - * register indicates that a wake-up event is pending for the MPU and - * this bit can only be cleared if the all the wake-up events latched - * in the various PM_WKST_x registers have been cleared. The interrupt - * handler is implemented using a do-while loop so that if a wake-up - * event occurred during the processing of the prcm interrupt handler - * (setting a bit in the corresponding PM_WKST_x register and thus - * preventing us from clearing bit 0 of the PRM_IRQSTATUS_MPU register) - * this would be handled. - */ -static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id) -{ - u32 irqstatus_mpu; - int c = 0; - - do { - irqstatus_mpu = prm_read_mod_reg(OCP_MOD, - OMAP3_PRM_IRQSTATUS_MPU_OFFSET); - - if (irqstatus_mpu & (OMAP3430_WKUP_ST | OMAP3430_IO_ST)) { - c = _prcm_int_handle_wakeup(); - - /* - * Is the MPU PRCM interrupt handler racing with the - * IVA2 PRCM interrupt handler ? - */ - WARN(c == 0, "prcm: WARNING: PRCM indicated MPU wakeup " - "but no wakeup sources are marked\n"); - } else { - /* XXX we need to expand our PRCM interrupt handler */ - WARN(1, "prcm: WARNING: PRCM interrupt received, but " - "no code to handle it (%08x)\n", irqstatus_mpu); + if (omap_rev() > OMAP3430_REV_ES1_0) { + /* USBHOST */ + wkst = prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, PM_WKST); + if (wkst) { + iclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, + CM_ICLKEN); + fclk = cm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, + CM_FCLKEN); + cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD, + CM_ICLKEN); + cm_set_mod_reg_bits(wkst, OMAP3430ES2_USBHOST_MOD, + CM_FCLKEN); + prm_write_mod_reg(wkst, OMAP3430ES2_USBHOST_MOD, + PM_WKST); + while (prm_read_mod_reg(OMAP3430ES2_USBHOST_MOD, + PM_WKST)) + cpu_relax(); + cm_write_mod_reg(iclk, OMAP3430ES2_USBHOST_MOD, + CM_ICLKEN); + cm_write_mod_reg(fclk, OMAP3430ES2_USBHOST_MOD, + CM_FCLKEN); } + } - prm_write_mod_reg(irqstatus_mpu, OCP_MOD, - OMAP3_PRM_IRQSTATUS_MPU_OFFSET); + irqstatus_mpu = prm_read_mod_reg(OCP_MOD, + OMAP3_PRM_IRQSTATUS_MPU_OFFSET); + prm_write_mod_reg(irqstatus_mpu, OCP_MOD, + OMAP3_PRM_IRQSTATUS_MPU_OFFSET); - } while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)); + while (prm_read_mod_reg(OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET)) + cpu_relax(); return IRQ_HANDLED; } @@ -639,16 +624,6 @@ static void __init prcm_setup_regs(void) prm_write_mod_reg(OMAP3430_IO_EN | OMAP3430_WKUP_EN, OCP_MOD, OMAP3_PRM_IRQENABLE_MPU_OFFSET); - /* Enable GPIO wakeups in PER */ - prm_write_mod_reg(OMAP3430_EN_GPIO2 | OMAP3430_EN_GPIO3 | - OMAP3430_EN_GPIO4 | OMAP3430_EN_GPIO5 | - OMAP3430_EN_GPIO6, OMAP3430_PER_MOD, PM_WKEN); - /* and allow them to wake up MPU */ - prm_write_mod_reg(OMAP3430_GRPSEL_GPIO2 | OMAP3430_EN_GPIO3 | - OMAP3430_GRPSEL_GPIO4 | OMAP3430_EN_GPIO5 | - OMAP3430_GRPSEL_GPIO6, - OMAP3430_PER_MOD, OMAP3430_PM_MPUGRPSEL); - /* Don't attach IVA interrupts */ prm_write_mod_reg(0, WKUP_MOD, OMAP3430_PM_IVAGRPSEL); prm_write_mod_reg(0, CORE_MOD, OMAP3430_PM_IVAGRPSEL1); diff --git a/trunk/arch/arm/mach-omap2/powerdomain.c b/trunk/arch/arm/mach-omap2/powerdomain.c index f00289abd30f..2594cbff3947 100644 --- a/trunk/arch/arm/mach-omap2/powerdomain.c +++ b/trunk/arch/arm/mach-omap2/powerdomain.c @@ -273,50 +273,35 @@ struct powerdomain *pwrdm_lookup(const char *name) } /** - * pwrdm_for_each_nolock - call function on each registered clockdomain + * pwrdm_for_each - call function on each registered clockdomain * @fn: callback function * * * Call the supplied function for each registered 
powerdomain. The * callback function can return anything but 0 to bail out early from - * the iterator. Returns the last return value of the callback function, which - * should be 0 for success or anything else to indicate failure; or -EINVAL if - * the function pointer is null. + * the iterator. The callback function is called with the pwrdm_rwlock + * held for reading, so no powerdomain structure manipulation + * functions should be called from the callback, although hardware + * powerdomain control functions are fine. Returns the last return + * value of the callback function, which should be 0 for success or + * anything else to indicate failure; or -EINVAL if the function + * pointer is null. */ -int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user), - void *user) +int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), + void *user) { struct powerdomain *temp_pwrdm; + unsigned long flags; int ret = 0; if (!fn) return -EINVAL; + read_lock_irqsave(&pwrdm_rwlock, flags); list_for_each_entry(temp_pwrdm, &pwrdm_list, node) { ret = (*fn)(temp_pwrdm, user); if (ret) break; } - - return ret; -} - -/** - * pwrdm_for_each - call function on each registered clockdomain - * @fn: callback function * - * - * This function is the same as 'pwrdm_for_each_nolock()', but keeps the - * &pwrdm_rwlock locked for reading, so no powerdomain structure manipulation - * functions should be called from the callback, although hardware powerdomain - * control functions are fine. - */ -int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), - void *user) -{ - unsigned long flags; - int ret; - - read_lock_irqsave(&pwrdm_rwlock, flags); - ret = pwrdm_for_each_nolock(fn, user); read_unlock_irqrestore(&pwrdm_rwlock, flags); return ret; diff --git a/trunk/arch/arm/plat-omap/include/mach/cpu.h b/trunk/arch/arm/plat-omap/include/mach/cpu.h index f129efb3075e..11e73d9e8928 100644 --- a/trunk/arch/arm/plat-omap/include/mach/cpu.h +++ b/trunk/arch/arm/plat-omap/include/mach/cpu.h @@ -303,21 +303,32 @@ IS_OMAP_TYPE(3430, 0x3430) #define cpu_is_omap2430() 0 #define cpu_is_omap3430() 0 -/* - * Whether we have MULTI_OMAP1 or not, we still need to distinguish - * between 730 vs 850, 330 vs. 1510 and 1611B/5912 vs. 1710. - */ - -#if defined(CONFIG_ARCH_OMAP730) -# undef cpu_is_omap730 -# define cpu_is_omap730() is_omap730() +#if defined(MULTI_OMAP1) +# if defined(CONFIG_ARCH_OMAP730) +# undef cpu_is_omap730 +# define cpu_is_omap730() is_omap730() +# endif +# if defined(CONFIG_ARCH_OMAP850) +# undef cpu_is_omap850 +# define cpu_is_omap850() is_omap850() +# endif +#else +# if defined(CONFIG_ARCH_OMAP730) +# undef cpu_is_omap730 +# define cpu_is_omap730() 1 +# endif #endif - -#if defined(CONFIG_ARCH_OMAP850) -# undef cpu_is_omap850 -# define cpu_is_omap850() is_omap850() +#else +# if defined(CONFIG_ARCH_OMAP850) +# undef cpu_is_omap850 +# define cpu_is_omap850() 1 +# endif #endif +/* + * Whether we have MULTI_OMAP1 or not, we still need to distinguish + * between 330 vs. 1510 and 1611B/5912 vs. 1710. 
+ */ #if defined(CONFIG_ARCH_OMAP15XX) # undef cpu_is_omap310 # undef cpu_is_omap1510 @@ -422,5 +433,3 @@ IS_OMAP_TYPE(3430, 0x3430) int omap_chip_is(struct omap_chip_id oci); void omap2_check_revision(void); - -#endif diff --git a/trunk/arch/arm/plat-omap/include/mach/powerdomain.h b/trunk/arch/arm/plat-omap/include/mach/powerdomain.h index fa6461423bd0..6271d8556a40 100644 --- a/trunk/arch/arm/plat-omap/include/mach/powerdomain.h +++ b/trunk/arch/arm/plat-omap/include/mach/powerdomain.h @@ -135,8 +135,6 @@ struct powerdomain *pwrdm_lookup(const char *name); int pwrdm_for_each(int (*fn)(struct powerdomain *pwrdm, void *user), void *user); -int pwrdm_for_each_nolock(int (*fn)(struct powerdomain *pwrdm, void *user), - void *user); int pwrdm_add_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); int pwrdm_del_clkdm(struct powerdomain *pwrdm, struct clockdomain *clkdm); diff --git a/trunk/arch/arm/plat-omap/iovmm.c b/trunk/arch/arm/plat-omap/iovmm.c index dc3fac3dd0ea..57f7122a0919 100644 --- a/trunk/arch/arm/plat-omap/iovmm.c +++ b/trunk/arch/arm/plat-omap/iovmm.c @@ -47,7 +47,7 @@ * 'va': mpu virtual address * * 'c': contiguous memory area - * 'd': discontiguous memory area + * 'd': dicontiguous memory area * 'a': anonymous memory allocation * '()': optional feature * @@ -363,9 +363,8 @@ void *da_to_va(struct iommu *obj, u32 da) goto out; } va = area->va; -out: mutex_unlock(&obj->mmap_lock); - +out: return va; } EXPORT_SYMBOL_GPL(da_to_va); @@ -399,7 +398,7 @@ static inline void sgtable_drain_vmalloc(struct sg_table *sgt) { /* * Actually this is not necessary at all, just exists for - * consistency of the code readability. + * consistency of the code readibility. */ BUG_ON(!sgt); } @@ -435,7 +434,7 @@ static inline void sgtable_drain_kmalloc(struct sg_table *sgt) { /* * Actually this is not necessary at all, just exists for - * consistency of the code readability + * consistency of the code readibility */ BUG_ON(!sgt); } diff --git a/trunk/arch/arm/plat-omap/sram.c b/trunk/arch/arm/plat-omap/sram.c index 75d1f26e5b17..925f64711c37 100644 --- a/trunk/arch/arm/plat-omap/sram.c +++ b/trunk/arch/arm/plat-omap/sram.c @@ -270,8 +270,7 @@ void * omap_sram_push(void * start, unsigned long size) omap_sram_ceil -= size; omap_sram_ceil = ROUND_DOWN(omap_sram_ceil, sizeof(void *)); memcpy((void *)omap_sram_ceil, start, size); - flush_icache_range((unsigned long)omap_sram_ceil, - (unsigned long)(omap_sram_ceil + size)); + flush_icache_range((unsigned long)start, (unsigned long)(start + size)); return (void *)omap_sram_ceil; } diff --git a/trunk/arch/m68knommu/kernel/asm-offsets.c b/trunk/arch/m68knommu/kernel/asm-offsets.c index 9a8876f715d8..594ee0e657fe 100644 --- a/trunk/arch/m68knommu/kernel/asm-offsets.c +++ b/trunk/arch/m68knommu/kernel/asm-offsets.c @@ -45,25 +45,25 @@ int main(void) DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); /* offsets into the pt_regs */ - DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); - DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); - DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); - DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); - DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); - DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); - DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); - DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); - DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); - DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); - DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); - DEFINE(PT_OFF_SR, offsetof(struct pt_regs, 
sr)); + DEFINE(PT_D0, offsetof(struct pt_regs, d0)); + DEFINE(PT_ORIG_D0, offsetof(struct pt_regs, orig_d0)); + DEFINE(PT_D1, offsetof(struct pt_regs, d1)); + DEFINE(PT_D2, offsetof(struct pt_regs, d2)); + DEFINE(PT_D3, offsetof(struct pt_regs, d3)); + DEFINE(PT_D4, offsetof(struct pt_regs, d4)); + DEFINE(PT_D5, offsetof(struct pt_regs, d5)); + DEFINE(PT_A0, offsetof(struct pt_regs, a0)); + DEFINE(PT_A1, offsetof(struct pt_regs, a1)); + DEFINE(PT_A2, offsetof(struct pt_regs, a2)); + DEFINE(PT_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_SR, offsetof(struct pt_regs, sr)); #ifdef CONFIG_COLDFIRE /* bitfields are a bit difficult */ - DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, sr) - 2); + DEFINE(PT_FORMATVEC, offsetof(struct pt_regs, sr) - 2); #else /* bitfields are a bit difficult */ - DEFINE(PT_OFF_VECTOR, offsetof(struct pt_regs, pc) + 4); + DEFINE(PT_VECTOR, offsetof(struct pt_regs, pc) + 4); #endif /* signal defines */ diff --git a/trunk/arch/m68knommu/kernel/entry.S b/trunk/arch/m68knommu/kernel/entry.S index 56043ade3941..f56faa5c9cd9 100644 --- a/trunk/arch/m68knommu/kernel/entry.S +++ b/trunk/arch/m68knommu/kernel/entry.S @@ -46,7 +46,7 @@ ENTRY(buserr) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_OFF_ORIG_D0) + movel %d0,%sp@(PT_ORIG_D0) movel %sp,%sp@- /* stack frame pointer argument */ jsr buserr_c addql #4,%sp @@ -55,7 +55,7 @@ ENTRY(buserr) ENTRY(trap) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_OFF_ORIG_D0) + movel %d0,%sp@(PT_ORIG_D0) movel %sp,%sp@- /* stack frame pointer argument */ jsr trap_c addql #4,%sp @@ -67,7 +67,7 @@ ENTRY(trap) ENTRY(dbginterrupt) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_OFF_ORIG_D0) + movel %d0,%sp@(PT_ORIG_D0) movel %sp,%sp@- /* stack frame pointer argument */ jsr dbginterrupt_c addql #4,%sp diff --git a/trunk/arch/m68knommu/mm/init.c b/trunk/arch/m68knommu/mm/init.c index f3236d0b522d..b1703c67a4f1 100644 --- a/trunk/arch/m68knommu/mm/init.c +++ b/trunk/arch/m68knommu/mm/init.c @@ -162,7 +162,7 @@ void free_initrd_mem(unsigned long start, unsigned long end) totalram_pages++; pages++; } - printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages * (PAGE_SIZE / 1024)); + printk (KERN_NOTICE "Freeing initrd memory: %dk freed\n", pages); } #endif diff --git a/trunk/arch/m68knommu/platform/5206e/config.c b/trunk/arch/m68knommu/platform/5206e/config.c index 942397984c66..0f41ba82a3b5 100644 --- a/trunk/arch/m68knommu/platform/5206e/config.c +++ b/trunk/arch/m68knommu/platform/5206e/config.c @@ -17,6 +17,7 @@ #include #include #include +#include /***************************************************************************/ diff --git a/trunk/arch/m68knommu/platform/68328/entry.S b/trunk/arch/m68knommu/platform/68328/entry.S index 9d80d2c42866..b1aef72f3baf 100644 --- a/trunk/arch/m68knommu/platform/68328/entry.S +++ b/trunk/arch/m68knommu/platform/68328/entry.S @@ -39,17 +39,17 @@ .globl inthandler7 badsys: - movel #-ENOSYS,%sp@(PT_OFF_D0) + movel #-ENOSYS,%sp@(PT_D0) jra ret_from_exception do_trace: - movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ + movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ subql #4,%sp SAVE_SWITCH_STACK jbsr syscall_trace RESTORE_SWITCH_STACK addql #4,%sp - movel %sp@(PT_OFF_ORIG_D0),%d1 + movel %sp@(PT_ORIG_D0),%d1 movel #-ENOSYS,%d0 cmpl #NR_syscalls,%d1 jcc 1f @@ -57,7 +57,7 @@ do_trace: lea sys_call_table, %a0 jbsr %a0@(%d1) -1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ +1: movel %d0,%sp@(PT_D0) /* save the return value */ subql #4,%sp /* dummy return address */ SAVE_SWITCH_STACK jbsr 
syscall_trace @@ -75,7 +75,7 @@ ENTRY(system_call) jbsr set_esp0 addql #4,%sp - movel %sp@(PT_OFF_ORIG_D0),%d0 + movel %sp@(PT_ORIG_D0),%d0 movel %sp,%d1 /* get thread_info pointer */ andl #-THREAD_SIZE,%d1 @@ -88,10 +88,10 @@ ENTRY(system_call) lea sys_call_table,%a0 movel %a0@(%d0), %a0 jbsr %a0@ - movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ + movel %d0,%sp@(PT_D0) /* save the return value*/ ret_from_exception: - btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ + btst #5,%sp@(PT_SR) /* check if returning to kernel*/ jeq Luser_return /* if so, skip resched, signals*/ Lkernel_return: @@ -133,7 +133,7 @@ Lreturn: */ inthandler1: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -144,7 +144,7 @@ inthandler1: inthandler2: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -155,7 +155,7 @@ inthandler2: inthandler3: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -166,7 +166,7 @@ inthandler3: inthandler4: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -177,7 +177,7 @@ inthandler4: inthandler5: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -188,7 +188,7 @@ inthandler5: inthandler6: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -199,7 +199,7 @@ inthandler6: inthandler7: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -210,7 +210,7 @@ inthandler7: inthandler: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and #0x3ff, %d0 movel %sp,%sp@- @@ -224,7 +224,7 @@ ret_from_interrupt: 2: RESTORE_ALL 1: - moveb %sp@(PT_OFF_SR), %d0 + moveb %sp@(PT_SR), %d0 and #7, %d0 jhi 2b diff --git a/trunk/arch/m68knommu/platform/68360/entry.S b/trunk/arch/m68knommu/platform/68360/entry.S index 6d3460a39cac..55dfefe38642 100644 --- a/trunk/arch/m68knommu/platform/68360/entry.S +++ b/trunk/arch/m68knommu/platform/68360/entry.S @@ -35,17 +35,17 @@ .globl inthandler badsys: - movel #-ENOSYS,%sp@(PT_OFF_D0) + movel #-ENOSYS,%sp@(PT_D0) jra ret_from_exception do_trace: - movel #-ENOSYS,%sp@(PT_OFF_D0) /* needed for strace*/ + movel #-ENOSYS,%sp@(PT_D0) /* needed for strace*/ subql #4,%sp SAVE_SWITCH_STACK jbsr syscall_trace RESTORE_SWITCH_STACK addql #4,%sp - movel %sp@(PT_OFF_ORIG_D0),%d1 + movel %sp@(PT_ORIG_D0),%d1 movel #-ENOSYS,%d0 cmpl #NR_syscalls,%d1 jcc 1f @@ -53,7 +53,7 @@ do_trace: lea sys_call_table, %a0 jbsr %a0@(%d1) -1: movel %d0,%sp@(PT_OFF_D0) /* save the return value */ +1: movel %d0,%sp@(PT_D0) /* save the return value */ subql #4,%sp /* dummy return address */ SAVE_SWITCH_STACK jbsr syscall_trace @@ -79,10 +79,10 @@ ENTRY(system_call) lea sys_call_table,%a0 movel %a0@(%d0), %a0 jbsr %a0@ - movel %d0,%sp@(PT_OFF_D0) /* save the return value*/ + movel %d0,%sp@(PT_D0) /* save the return value*/ ret_from_exception: - btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel*/ + btst #5,%sp@(PT_SR) /* check if returning to kernel*/ jeq Luser_return /* if so, skip resched, signals*/ Lkernel_return: @@ -124,7 +124,7 @@ Lreturn: */ inthandler: SAVE_ALL - movew %sp@(PT_OFF_VECTOR), %d0 + movew %sp@(PT_VECTOR), %d0 and.l #0x3ff, %d0 lsr.l #0x02, %d0 @@ -139,7 +139,7 @@ ret_from_interrupt: 2: RESTORE_ALL 1: - moveb %sp@(PT_OFF_SR), %d0 + moveb %sp@(PT_SR), %d0 and #7, %d0 jhi 2b 
/* check if we need to do software interrupts */ diff --git a/trunk/arch/m68knommu/platform/coldfire/entry.S b/trunk/arch/m68knommu/platform/coldfire/entry.S index dd7d591f70ea..3b471c0da24a 100644 --- a/trunk/arch/m68knommu/platform/coldfire/entry.S +++ b/trunk/arch/m68knommu/platform/coldfire/entry.S @@ -81,11 +81,11 @@ ENTRY(system_call) movel %d3,%a0 jbsr %a0@ - movel %d0,%sp@(PT_OFF_D0) /* save the return value */ + movel %d0,%sp@(PT_D0) /* save the return value */ jra ret_from_exception 1: - movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_OFF_D0 */ - movel %d2,PT_OFF_D0(%sp) /* on syscall entry */ + movel #-ENOSYS,%d2 /* strace needs -ENOSYS in PT_D0 */ + movel %d2,PT_D0(%sp) /* on syscall entry */ subql #4,%sp SAVE_SWITCH_STACK jbsr syscall_trace @@ -93,7 +93,7 @@ ENTRY(system_call) addql #4,%sp movel %d3,%a0 jbsr %a0@ - movel %d0,%sp@(PT_OFF_D0) /* save the return value */ + movel %d0,%sp@(PT_D0) /* save the return value */ subql #4,%sp /* dummy return address */ SAVE_SWITCH_STACK jbsr syscall_trace @@ -104,7 +104,7 @@ ret_from_signal: ret_from_exception: move #0x2700,%sr /* disable intrs */ - btst #5,%sp@(PT_OFF_SR) /* check if returning to kernel */ + btst #5,%sp@(PT_SR) /* check if returning to kernel */ jeq Luser_return /* if so, skip resched, signals */ #ifdef CONFIG_PREEMPT @@ -142,8 +142,8 @@ Luser_return: Lreturn: move #0x2700,%sr /* disable intrs */ movel sw_usp,%a0 /* get usp */ - movel %sp@(PT_OFF_PC),%a0@- /* copy exception program counter */ - movel %sp@(PT_OFF_FORMATVEC),%a0@- /* copy exception format/vector/sr */ + movel %sp@(PT_PC),%a0@- /* copy exception program counter */ + movel %sp@(PT_FORMATVEC),%a0@-/* copy exception format/vector/sr */ moveml %sp@,%d1-%d5/%a0-%a2 lea %sp@(32),%sp /* space for 8 regs */ movel %sp@+,%d0 @@ -181,9 +181,9 @@ Lsignal_return: ENTRY(inthandler) SAVE_ALL moveq #-1,%d0 - movel %d0,%sp@(PT_OFF_ORIG_D0) + movel %d0,%sp@(PT_ORIG_D0) - movew %sp@(PT_OFF_FORMATVEC),%d0 /* put exception # in d0 */ + movew %sp@(PT_FORMATVEC),%d0 /* put exception # in d0 */ andl #0x03fc,%d0 /* mask out vector only */ movel %sp,%sp@- /* push regs arg */ @@ -203,7 +203,7 @@ ENTRY(inthandler) ENTRY(fasthandler) SAVE_LOCAL - movew %sp@(PT_OFF_FORMATVEC),%d0 + movew %sp@(PT_FORMATVEC),%d0 andl #0x03fc,%d0 /* mask out vector only */ movel %sp,%sp@- /* push regs arg */ diff --git a/trunk/arch/microblaze/kernel/entry.S b/trunk/arch/microblaze/kernel/entry.S index e3ecb36dd554..acc1f05d1e2c 100644 --- a/trunk/arch/microblaze/kernel/entry.S +++ b/trunk/arch/microblaze/kernel/entry.S @@ -592,8 +592,6 @@ C_ENTRY(full_exception_trap): nop mfs r7, rfsr; /* save FSR */ nop - mts rfsr, r0; /* Clear sticky fsr */ - nop la r12, r0, full_exception set_vms; rtbd r12, 0; diff --git a/trunk/arch/microblaze/kernel/hw_exception_handler.S b/trunk/arch/microblaze/kernel/hw_exception_handler.S index 2b86c03aa841..6b0288ebccd6 100644 --- a/trunk/arch/microblaze/kernel/hw_exception_handler.S +++ b/trunk/arch/microblaze/kernel/hw_exception_handler.S @@ -384,7 +384,7 @@ handle_other_ex: /* Handle Other exceptions here */ addk r8, r17, r0; /* Load exception address */ bralid r15, full_exception; /* Branch to the handler */ nop; - mts rfsr, r0; /* Clear sticky fsr */ + mts r0, rfsr; /* Clear sticky fsr */ nop /* diff --git a/trunk/arch/microblaze/kernel/process.c b/trunk/arch/microblaze/kernel/process.c index c592d475b3d8..4201c743cc9f 100644 --- a/trunk/arch/microblaze/kernel/process.c +++ b/trunk/arch/microblaze/kernel/process.c @@ -235,9 +235,7 @@ void start_thread(struct pt_regs *regs, 
unsigned long pc, unsigned long usp) regs->pc = pc; regs->r1 = usp; regs->pt_mode = 0; -#ifdef CONFIG_MMU regs->msr |= MSR_UMS; -#endif } #ifdef CONFIG_MMU diff --git a/trunk/arch/parisc/Kconfig b/trunk/arch/parisc/Kconfig index 524d9352f17e..f388dc68f605 100644 --- a/trunk/arch/parisc/Kconfig +++ b/trunk/arch/parisc/Kconfig @@ -18,7 +18,6 @@ config PARISC select BUG select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT - select HAVE_ARCH_TRACEHOOK help The PA-RISC microprocessor is designed by Hewlett-Packard and used in many of their workstations & servers (HP9000 700 and 800 series, diff --git a/trunk/arch/parisc/include/asm/fixmap.h b/trunk/arch/parisc/include/asm/fixmap.h index 6fec4d4a1a18..de3fe3a18229 100644 --- a/trunk/arch/parisc/include/asm/fixmap.h +++ b/trunk/arch/parisc/include/asm/fixmap.h @@ -21,9 +21,9 @@ #define KERNEL_MAP_END (TMPALIAS_MAP_START) #ifndef __ASSEMBLY__ -extern void *parisc_vmalloc_start; +extern void *vmalloc_start; #define PCXL_DMA_MAP_SIZE (8*1024*1024) -#define VMALLOC_START ((unsigned long)parisc_vmalloc_start) +#define VMALLOC_START ((unsigned long)vmalloc_start) #define VMALLOC_END (KERNEL_MAP_END) #endif /*__ASSEMBLY__*/ diff --git a/trunk/arch/parisc/include/asm/hardirq.h b/trunk/arch/parisc/include/asm/hardirq.h index 0d68184a76cb..ce93133d5112 100644 --- a/trunk/arch/parisc/include/asm/hardirq.h +++ b/trunk/arch/parisc/include/asm/hardirq.h @@ -1,11 +1,29 @@ /* hardirq.h: PA-RISC hard IRQ support. * * Copyright (C) 2001 Matthew Wilcox + * + * The locking is really quite interesting. There's a cpu-local + * count of how many interrupts are being handled, and a global + * lock. An interrupt can only be serviced if the global lock + * is free. You can't be sure no more interrupts are being + * serviced until you've acquired the lock and then checked + * all the per-cpu interrupt counts are all zero. It's a specialised + * br_lock, and that's exactly how Sparc does it. We don't because + * it's more locking for us. This way is lock-free in the interrupt path. */ #ifndef _PARISC_HARDIRQ_H #define _PARISC_HARDIRQ_H -#include +#include +#include + +typedef struct { + unsigned long __softirq_pending; /* set_bit is used on this */ +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + +void ack_bad_irq(unsigned int irq); #endif /* _PARISC_HARDIRQ_H */ diff --git a/trunk/arch/parisc/include/asm/ptrace.h b/trunk/arch/parisc/include/asm/ptrace.h index aead40b16dd8..302f68dc889c 100644 --- a/trunk/arch/parisc/include/asm/ptrace.h +++ b/trunk/arch/parisc/include/asm/ptrace.h @@ -59,11 +59,8 @@ void user_enable_block_step(struct task_struct *task); #define user_mode(regs) (((regs)->iaoq[0] & 3) ? 1 : 0) #define user_space(regs) (((regs)->iasq[1] != 0) ? 
1 : 0) #define instruction_pointer(regs) ((regs)->iaoq[0] & ~3) -#define user_stack_pointer(regs) ((regs)->gr[30]) unsigned long profile_pc(struct pt_regs *); extern void show_regs(struct pt_regs *); - - -#endif /* __KERNEL__ */ +#endif #endif diff --git a/trunk/arch/parisc/include/asm/syscall.h b/trunk/arch/parisc/include/asm/syscall.h deleted file mode 100644 index 8bdfd2c8c39f..000000000000 --- a/trunk/arch/parisc/include/asm/syscall.h +++ /dev/null @@ -1,40 +0,0 @@ -/* syscall.h */ - -#ifndef _ASM_PARISC_SYSCALL_H_ -#define _ASM_PARISC_SYSCALL_H_ - -#include -#include - -static inline long syscall_get_nr(struct task_struct *tsk, - struct pt_regs *regs) -{ - return regs->gr[20]; -} - -static inline void syscall_get_arguments(struct task_struct *tsk, - struct pt_regs *regs, unsigned int i, - unsigned int n, unsigned long *args) -{ - BUG_ON(i); - - switch (n) { - case 6: - args[5] = regs->gr[21]; - case 5: - args[4] = regs->gr[22]; - case 4: - args[3] = regs->gr[23]; - case 3: - args[2] = regs->gr[24]; - case 2: - args[1] = regs->gr[25]; - case 1: - args[0] = regs->gr[26]; - break; - default: - BUG(); - } -} - -#endif /*_ASM_PARISC_SYSCALL_H_*/ diff --git a/trunk/arch/parisc/include/asm/thread_info.h b/trunk/arch/parisc/include/asm/thread_info.h index 7ecc1039cfed..ac775a76bff7 100644 --- a/trunk/arch/parisc/include/asm/thread_info.h +++ b/trunk/arch/parisc/include/asm/thread_info.h @@ -32,11 +32,6 @@ struct thread_info { #define init_thread_info (init_thread_union.thread_info) #define init_stack (init_thread_union.stack) -/* how to get the thread information struct from C */ -#define current_thread_info() ((struct thread_info *)mfctl(30)) - -#endif /* !__ASSEMBLY */ - /* thread information allocation */ #define THREAD_SIZE_ORDER 2 @@ -45,6 +40,11 @@ struct thread_info { #define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) +/* how to get the thread information struct from C */ +#define current_thread_info() ((struct thread_info *)mfctl(30)) + +#endif /* !__ASSEMBLY */ + #define PREEMPT_ACTIVE_BIT 28 #define PREEMPT_ACTIVE (1 << PREEMPT_ACTIVE_BIT) @@ -60,8 +60,6 @@ struct thread_info { #define TIF_RESTORE_SIGMASK 6 /* restore saved signal mask */ #define TIF_FREEZE 7 /* is freezing for suspend */ #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ -#define TIF_SINGLESTEP 9 /* single stepping? */ -#define TIF_BLOCKSTEP 10 /* branch stepping? 
*/ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -71,8 +69,6 @@ struct thread_info { #define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) #define _TIF_FREEZE (1 << TIF_FREEZE) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) -#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) diff --git a/trunk/arch/parisc/kernel/asm-offsets.c b/trunk/arch/parisc/kernel/asm-offsets.c index fcd3c707bf12..699cf8ef2118 100644 --- a/trunk/arch/parisc/kernel/asm-offsets.c +++ b/trunk/arch/parisc/kernel/asm-offsets.c @@ -270,8 +270,8 @@ int main(void) DEFINE(DTLB_OFF_COUNT, offsetof(struct pdc_cache_info, dt_off_count)); DEFINE(DTLB_LOOP, offsetof(struct pdc_cache_info, dt_loop)); BLANK(); - DEFINE(TIF_BLOCKSTEP_PA_BIT, 31-TIF_BLOCKSTEP); - DEFINE(TIF_SINGLESTEP_PA_BIT, 31-TIF_SINGLESTEP); + DEFINE(PA_BLOCKSTEP_BIT, 31-PT_BLOCKSTEP_BIT); + DEFINE(PA_SINGLESTEP_BIT, 31-PT_SINGLESTEP_BIT); BLANK(); DEFINE(ASM_PMD_SHIFT, PMD_SHIFT); DEFINE(ASM_PGDIR_SHIFT, PGDIR_SHIFT); diff --git a/trunk/arch/parisc/kernel/entry.S b/trunk/arch/parisc/kernel/entry.S index 3a44f7f704fa..8c4712b74dc1 100644 --- a/trunk/arch/parisc/kernel/entry.S +++ b/trunk/arch/parisc/kernel/entry.S @@ -2047,13 +2047,12 @@ syscall_do_signal: b,n syscall_check_sig syscall_restore: + /* Are we being ptraced? */ LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 - /* Are we being ptraced? */ - ldw TASK_FLAGS(%r1),%r19 - ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 - and,COND(=) %r19,%r2,%r0 - b,n syscall_restore_rfi + ldw TASK_PTRACE(%r1), %r19 + bb,< %r19,31,syscall_restore_rfi + nop ldo TASK_PT_FR31(%r1),%r19 /* reload fpregs */ rest_fp %r19 @@ -2114,16 +2113,16 @@ syscall_restore_rfi: ldi 0x0b,%r20 /* Create new PSW */ depi -1,13,1,%r20 /* C, Q, D, and I bits */ - /* The values of SINGLESTEP_BIT and BLOCKSTEP_BIT are - * set in thread_info.h and converted to PA bitmap + /* The values of PA_SINGLESTEP_BIT and PA_BLOCKSTEP_BIT are + * set in include/linux/ptrace.h and converted to PA bitmap * numbers in asm-offsets.c */ - /* if ((%r19.SINGLESTEP_BIT)) { %r20.27=1} */ - extru,= %r19,TIF_SINGLESTEP_PA_BIT,1,%r0 + /* if ((%r19.PA_SINGLESTEP_BIT)) { %r20.27=1} */ + extru,= %r19,PA_SINGLESTEP_BIT,1,%r0 depi -1,27,1,%r20 /* R bit */ - /* if ((%r19.BLOCKSTEP_BIT)) { %r20.7=1} */ - extru,= %r19,TIF_BLOCKSTEP_PA_BIT,1,%r0 + /* if ((%r19.PA_BLOCKSTEP_BIT)) { %r20.7=1} */ + extru,= %r19,PA_BLOCKSTEP_BIT,1,%r0 depi -1,7,1,%r20 /* T bit */ STREG %r20,TASK_PT_PSW(%r1) diff --git a/trunk/arch/parisc/kernel/irq.c b/trunk/arch/parisc/kernel/irq.c index 2e7610cb33d5..330f536a9324 100644 --- a/trunk/arch/parisc/kernel/irq.c +++ b/trunk/arch/parisc/kernel/irq.c @@ -423,3 +423,8 @@ void __init init_IRQ(void) set_eiem(cpu_eiem); /* EIEM : enable all external intr */ } + +void ack_bad_irq(unsigned int irq) +{ + printk(KERN_WARNING "unexpected IRQ %d\n", irq); +} diff --git a/trunk/arch/parisc/kernel/module.c b/trunk/arch/parisc/kernel/module.c index 212074653df7..61ee0eec4e69 100644 --- a/trunk/arch/parisc/kernel/module.c +++ b/trunk/arch/parisc/kernel/module.c @@ -893,7 +893,7 @@ int module_finalize(const Elf_Ehdr *hdr, * ourselves */ for (i = 1; i < hdr->e_shnum; i++) { if(sechdrs[i].sh_type == SHT_SYMTAB - && (sechdrs[i].sh_flags & SHF_ALLOC)) { + && (sechdrs[i].sh_type & SHF_ALLOC)) { int strindex = sechdrs[i].sh_link; /* FIXME: AWFUL HACK * The cast is to drop the 
const from diff --git a/trunk/arch/parisc/kernel/ptrace.c b/trunk/arch/parisc/kernel/ptrace.c index c4f49e45129d..927db3668b6f 100644 --- a/trunk/arch/parisc/kernel/ptrace.c +++ b/trunk/arch/parisc/kernel/ptrace.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -36,8 +35,7 @@ */ void ptrace_disable(struct task_struct *task) { - clear_tsk_thread_flag(task, TIF_SINGLESTEP); - clear_tsk_thread_flag(task, TIF_BLOCKSTEP); + task->ptrace &= ~(PT_SINGLESTEP|PT_BLOCKSTEP); /* make sure the trap bits are not set */ pa_psw(task)->r = 0; @@ -57,8 +55,8 @@ void user_disable_single_step(struct task_struct *task) void user_enable_single_step(struct task_struct *task) { - clear_tsk_thread_flag(task, TIF_BLOCKSTEP); - set_tsk_thread_flag(task, TIF_SINGLESTEP); + task->ptrace &= ~PT_BLOCKSTEP; + task->ptrace |= PT_SINGLESTEP; if (pa_psw(task)->n) { struct siginfo si; @@ -100,8 +98,8 @@ void user_enable_single_step(struct task_struct *task) void user_enable_block_step(struct task_struct *task) { - clear_tsk_thread_flag(task, TIF_SINGLESTEP); - set_tsk_thread_flag(task, TIF_BLOCKSTEP); + task->ptrace &= ~PT_SINGLESTEP; + task->ptrace |= PT_BLOCKSTEP; /* Enable taken branch trap. */ pa_psw(task)->r = 0; @@ -265,20 +263,22 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, } #endif -long do_syscall_trace_enter(struct pt_regs *regs) -{ - if (test_thread_flag(TIF_SYSCALL_TRACE) && - tracehook_report_syscall_entry(regs)) - return -1L; - - return regs->gr[20]; -} -void do_syscall_trace_exit(struct pt_regs *regs) +void syscall_trace(void) { - int stepping = test_thread_flag(TIF_SINGLESTEP) || - test_thread_flag(TIF_BLOCKSTEP); - - if (stepping || test_thread_flag(TIF_SYSCALL_TRACE)) - tracehook_report_syscall_exit(regs, stepping); + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return; + if (!(current->ptrace & PT_PTRACED)) + return; + ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) + ? 0x80 : 0)); + /* + * this isn't the same as continuing with a signal, but it will do + * for normal use. strace only continues with a signal if the + * stopping signal is not SIGTRAP. 
-brl + */ + if (current->exit_code) { + send_sig(current->exit_code, current, 1); + current->exit_code = 0; + } } diff --git a/trunk/arch/parisc/kernel/signal.c b/trunk/arch/parisc/kernel/signal.c index e8467e4aa8d1..8eb3c63c407a 100644 --- a/trunk/arch/parisc/kernel/signal.c +++ b/trunk/arch/parisc/kernel/signal.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include @@ -35,6 +34,7 @@ #include #ifdef CONFIG_COMPAT +#include #include "signal32.h" #endif @@ -468,9 +468,6 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, sigaddset(&current->blocked,sig); recalc_sigpending(); spin_unlock_irq(&current->sighand->siglock); - - tracehook_signal_handler(sig, info, ka, regs, 0); - return 1; } diff --git a/trunk/arch/parisc/kernel/syscall.S b/trunk/arch/parisc/kernel/syscall.S index f5f96021caa0..59fc1a43ec3e 100644 --- a/trunk/arch/parisc/kernel/syscall.S +++ b/trunk/arch/parisc/kernel/syscall.S @@ -288,23 +288,18 @@ tracesys: STREG %r18,PT_GR18(%r2) /* Finished saving things for the debugger */ - copy %r2,%r26 - ldil L%do_syscall_trace_enter,%r1 + ldil L%syscall_trace,%r1 ldil L%tracesys_next,%r2 - be R%do_syscall_trace_enter(%sr7,%r1) + be R%syscall_trace(%sr7,%r1) ldo R%tracesys_next(%r2),%r2 -tracesys_next: - /* do_syscall_trace_enter either returned the syscallno, or -1L, - * so we skip restoring the PT_GR20 below, since we pulled it from - * task->thread.regs.gr[20] above. - */ - copy %ret0,%r20 +tracesys_next: ldil L%sys_call_table,%r1 ldo R%sys_call_table(%r1), %r19 ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 + LDREG TASK_PT_GR20(%r1), %r20 LDREG TASK_PT_GR26(%r1), %r26 /* Restore the users args */ LDREG TASK_PT_GR25(%r1), %r25 LDREG TASK_PT_GR24(%r1), %r24 @@ -341,8 +336,7 @@ tracesys_exit: #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - ldo TASK_REGS(%r1),%r26 - bl do_syscall_trace_exit,%r2 + bl syscall_trace, %r2 STREG %r28,TASK_PT_GR28(%r1) /* save return value now */ ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ LDREG TI_TASK(%r1), %r1 @@ -359,12 +353,12 @@ tracesys_exit: tracesys_sigexit: ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */ - LDREG TI_TASK(%r1), %r1 + LDREG 0(%r1), %r1 #ifdef CONFIG_64BIT ldo -16(%r30),%r29 /* Reference param save area */ #endif - bl do_syscall_trace_exit,%r2 - ldo TASK_REGS(%r1),%r26 + bl syscall_trace, %r2 + nop ldil L%syscall_exit_rfi,%r1 be,n R%syscall_exit_rfi(%sr7,%r1) diff --git a/trunk/arch/parisc/kernel/vmlinux.lds.S b/trunk/arch/parisc/kernel/vmlinux.lds.S index fda4baa059b5..775be2791bc2 100644 --- a/trunk/arch/parisc/kernel/vmlinux.lds.S +++ b/trunk/arch/parisc/kernel/vmlinux.lds.S @@ -28,7 +28,6 @@ #include #include #include -#include /* ld script to make hppa Linux kernel */ #ifndef CONFIG_64BIT @@ -135,15 +134,6 @@ SECTIONS __init_begin = .; INIT_TEXT_SECTION(16384) INIT_DATA_SECTION(16) - /* we have to discard exit text and such at runtime, not link time */ - .exit.text : - { - EXIT_TEXT - } - .exit.data : - { - EXIT_DATA - } PERCPU(PAGE_SIZE) . 
= ALIGN(PAGE_SIZE); diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c index 13b6e3e59b99..d5aca31fddbb 100644 --- a/trunk/arch/parisc/mm/init.c +++ b/trunk/arch/parisc/mm/init.c @@ -434,8 +434,8 @@ void mark_rodata_ro(void) #define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \ & ~(VM_MAP_OFFSET-1))) -void *parisc_vmalloc_start __read_mostly; -EXPORT_SYMBOL(parisc_vmalloc_start); +void *vmalloc_start __read_mostly; +EXPORT_SYMBOL(vmalloc_start); #ifdef CONFIG_PA11 unsigned long pcxl_dma_start __read_mostly; @@ -496,14 +496,13 @@ void __init mem_init(void) #ifdef CONFIG_PA11 if (hppa_dma_ops == &pcxl_dma_ops) { pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START); - parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start - + PCXL_DMA_MAP_SIZE); + vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start + PCXL_DMA_MAP_SIZE); } else { pcxl_dma_start = 0; - parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); + vmalloc_start = SET_MAP_OFFSET(MAP_START); } #else - parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); + vmalloc_start = SET_MAP_OFFSET(MAP_START); #endif printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init)\n", diff --git a/trunk/arch/s390/kvm/kvm-s390.h b/trunk/arch/s390/kvm/kvm-s390.h index 06cce8285ba0..ec5eee7c25d8 100644 --- a/trunk/arch/s390/kvm/kvm-s390.h +++ b/trunk/arch/s390/kvm/kvm-s390.h @@ -58,7 +58,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code); int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action); -static inline long kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) +static inline int kvm_s390_vcpu_get_memsize(struct kvm_vcpu *vcpu) { return vcpu->arch.sie_block->gmslm - vcpu->arch.sie_block->gmsor diff --git a/trunk/arch/sparc/Kconfig b/trunk/arch/sparc/Kconfig index 05ef5380a687..ac45aab741a5 100644 --- a/trunk/arch/sparc/Kconfig +++ b/trunk/arch/sparc/Kconfig @@ -26,7 +26,6 @@ config SPARC select RTC_CLASS select RTC_DRV_M48T59 select HAVE_PERF_EVENTS - select PERF_USE_VMALLOC select HAVE_DMA_ATTRS select HAVE_DMA_API_DEBUG @@ -49,7 +48,6 @@ config SPARC64 select RTC_DRV_SUN4V select RTC_DRV_STARFIRE select HAVE_PERF_EVENTS - select PERF_USE_VMALLOC config ARCH_DEFCONFIG string diff --git a/trunk/arch/sparc/include/asm/hardirq_32.h b/trunk/arch/sparc/include/asm/hardirq_32.h index 162007643cdc..4f63ed8df551 100644 --- a/trunk/arch/sparc/include/asm/hardirq_32.h +++ b/trunk/arch/sparc/include/asm/hardirq_32.h @@ -7,7 +7,17 @@ #ifndef __SPARC_HARDIRQ_H #define __SPARC_HARDIRQ_H +#include +#include +#include + +/* entry.S is sensitive to the offsets of these fields */ /* XXX P3 Is it? 
*/ +typedef struct { + unsigned int __softirq_pending; +} ____cacheline_aligned irq_cpustat_t; + +#include /* Standard mappings for irq_cpustat_t above */ + #define HARDIRQ_BITS 8 -#include #endif /* __SPARC_HARDIRQ_H */ diff --git a/trunk/arch/sparc/include/asm/irq_32.h b/trunk/arch/sparc/include/asm/irq_32.h index cbf4801deaaf..ea43057d4763 100644 --- a/trunk/arch/sparc/include/asm/irq_32.h +++ b/trunk/arch/sparc/include/asm/irq_32.h @@ -6,10 +6,10 @@ #ifndef _SPARC_IRQ_H #define _SPARC_IRQ_H -#define NR_IRQS 16 - #include +#define NR_IRQS 16 + #define irq_canonicalize(irq) (irq) extern void __init init_IRQ(void); diff --git a/trunk/arch/sparc/include/asm/pgtable_64.h b/trunk/arch/sparc/include/asm/pgtable_64.h index f3cb790fa2ae..0ff92fa22064 100644 --- a/trunk/arch/sparc/include/asm/pgtable_64.h +++ b/trunk/arch/sparc/include/asm/pgtable_64.h @@ -41,8 +41,8 @@ #define LOW_OBP_ADDRESS _AC(0x00000000f0000000,UL) #define HI_OBP_ADDRESS _AC(0x0000000100000000,UL) #define VMALLOC_START _AC(0x0000000100000000,UL) -#define VMALLOC_END _AC(0x0000010000000000,UL) -#define VMEMMAP_BASE _AC(0x0000010000000000,UL) +#define VMALLOC_END _AC(0x0000000200000000,UL) +#define VMEMMAP_BASE _AC(0x0000000200000000,UL) #define vmemmap ((struct page *)VMEMMAP_BASE) diff --git a/trunk/arch/sparc/kernel/ktlb.S b/trunk/arch/sparc/kernel/ktlb.S index 1d361477d7d6..3ea6e8cde8c5 100644 --- a/trunk/arch/sparc/kernel/ktlb.S +++ b/trunk/arch/sparc/kernel/ktlb.S @@ -280,8 +280,8 @@ kvmap_dtlb_nonlinear: #ifdef CONFIG_SPARSEMEM_VMEMMAP /* Do not use the TSB for vmemmap. */ - mov (VMEMMAP_BASE >> 40), %g5 - sllx %g5, 40, %g5 + mov (VMEMMAP_BASE >> 24), %g5 + sllx %g5, 24, %g5 cmp %g4,%g5 bgeu,pn %xcc, kvmap_vmemmap nop @@ -293,8 +293,8 @@ kvmap_dtlb_tsbmiss: sethi %hi(MODULES_VADDR), %g5 cmp %g4, %g5 blu,pn %xcc, kvmap_dtlb_longpath - mov (VMALLOC_END >> 40), %g5 - sllx %g5, 40, %g5 + mov (VMALLOC_END >> 24), %g5 + sllx %g5, 24, %g5 cmp %g4, %g5 bgeu,pn %xcc, kvmap_dtlb_longpath nop diff --git a/trunk/arch/sparc/kernel/perf_event.c b/trunk/arch/sparc/kernel/perf_event.c index 04db92743896..2d6a1b10c81d 100644 --- a/trunk/arch/sparc/kernel/perf_event.c +++ b/trunk/arch/sparc/kernel/perf_event.c @@ -56,8 +56,7 @@ struct cpu_hw_events { struct perf_event *events[MAX_HWEVENTS]; unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; - u64 pcr; - int enabled; + int enabled; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; @@ -69,30 +68,8 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; -static unsigned long perf_event_encode(const struct perf_event_map *pmap) -{ - return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; -} - -static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) -{ - *msk = val & 0xff; - *enc = val >> 16; -} - -#define C(x) PERF_COUNT_HW_CACHE_##x - -#define CACHE_OP_UNSUPPORTED 0xfffe -#define CACHE_OP_NONSENSE 0xffff - -typedef struct perf_event_map cache_map_t - [PERF_COUNT_HW_CACHE_MAX] - [PERF_COUNT_HW_CACHE_OP_MAX] - [PERF_COUNT_HW_CACHE_RESULT_MAX]; - struct sparc_pmu { const struct perf_event_map *(*event_map)(int); - const cache_map_t *cache_map; int max_events; int upper_shift; int lower_shift; @@ -103,109 +80,21 @@ struct sparc_pmu { int lower_nop; }; -static const struct perf_event_map ultra3_perfmon_event_map[] = { +static const struct perf_event_map ultra3i_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, 
PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER }, [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER }, }; -static const struct perf_event_map *ultra3_event_map(int event_id) +static const struct perf_event_map *ultra3i_event_map(int event_id) { - return &ultra3_perfmon_event_map[event_id]; + return &ultra3i_perfmon_event_map[event_id]; } -static const cache_map_t ultra3_cache_map = { -[C(L1D)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, - }, - [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER }, - [C(RESULT_MISS)] = { 0x0a, PIC_UPPER }, - }, - [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(L1I)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x09, PIC_UPPER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, - [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(LL)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, }, - }, - [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER }, - [C(RESULT_MISS)] = { 0x0c, PIC_UPPER }, - }, - [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(DTLB)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x12, PIC_UPPER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(ITLB)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x11, PIC_UPPER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(BPU)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -}; - -static const struct sparc_pmu ultra3_pmu = { - .event_map = ultra3_event_map, - .cache_map = &ultra3_cache_map, - .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), +static const struct sparc_pmu ultra3i_pmu = { + .event_map = ultra3i_event_map, + .max_events = ARRAY_SIZE(ultra3i_perfmon_event_map), .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, @@ -213,121 +102,6 @@ static const struct sparc_pmu ultra3_pmu = { .lower_nop = 0x14, }; -/* Niagara1 is very limited. The upper PIC is hard-locked to count - * only instructions, so it is free running which creates all kinds of - * problems. Some hardware designs make one wonder if the creator - * even looked at how this stuff gets used by software. 
- */ -static const struct perf_event_map niagara1_perfmon_event_map[] = { - [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER }, - [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER }, - [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE }, - [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER }, -}; - -static const struct perf_event_map *niagara1_event_map(int event_id) -{ - return &niagara1_perfmon_event_map[event_id]; -} - -static const cache_map_t niagara1_cache_map = { -[C(L1D)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, - }, - [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x03, PIC_LOWER, }, - }, - [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(L1I)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER }, - [C(RESULT_MISS)] = { 0x02, PIC_LOWER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, - [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(LL)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, - }, - [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x07, PIC_LOWER, }, - }, - [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(DTLB)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x05, PIC_LOWER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(ITLB)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x04, PIC_LOWER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(BPU)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -}; - -static const struct sparc_pmu niagara1_pmu = { - .event_map = niagara1_event_map, - .cache_map = &niagara1_cache_map, - .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), - .upper_shift = 0, - .lower_shift = 4, - .event_mask = 0x7, - .upper_nop = 0x0, - .lower_nop = 0x0, -}; - static const struct perf_event_map niagara2_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER }, @@ -342,96 +116,8 @@ static const struct perf_event_map *niagara2_event_map(int event_id) return &niagara2_perfmon_event_map[event_id]; } -static const cache_map_t niagara2_cache_map = { -[C(L1D)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, - 
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, - }, - [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, }, - }, - [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(L1I)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, - [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(LL)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, }, - }, - [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, }, - [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, }, - }, - [C(OP_PREFETCH)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(DTLB)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(ITLB)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -[C(BPU)] = { - [C(OP_READ)] = { - [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, - [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_WRITE) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, - [ C(OP_PREFETCH) ] = { - [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, - [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, - }, -}, -}; - static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, - .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), .upper_shift = 19, .lower_shift = 6, @@ -465,30 +151,23 @@ static u64 nop_for_index(int idx) sparc_pmu->lower_nop, idx); } -static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) +static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, + int idx) { u64 val, mask = mask_for_index(idx); - val = cpuc->pcr; - val &= ~mask; - val |= hwc->config; - cpuc->pcr = val; - - pcr_ops->write(cpuc->pcr); + val = pcr_ops->read(); + pcr_ops->write((val & ~mask) | hwc->config); } -static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) +static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, + int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); - u64 val; + u64 val = pcr_ops->read(); - val = cpuc->pcr; - val &= ~mask; - val |= nop; - cpuc->pcr = val; - - pcr_ops->write(cpuc->pcr); + pcr_ops->write((val & ~mask) | nop); } void hw_perf_enable(void) @@ -503,7 +182,7 @@ 
void hw_perf_enable(void) cpuc->enabled = 1; barrier(); - val = cpuc->pcr; + val = pcr_ops->read(); for (i = 0; i < MAX_HWEVENTS; i++) { struct perf_event *cp = cpuc->events[i]; @@ -515,9 +194,7 @@ void hw_perf_enable(void) val |= hwc->config_base; } - cpuc->pcr = val; - - pcr_ops->write(cpuc->pcr); + pcr_ops->write(val); } void hw_perf_disable(void) @@ -530,12 +207,10 @@ void hw_perf_disable(void) cpuc->enabled = 0; - val = cpuc->pcr; + val = pcr_ops->read(); val &= ~(PCR_UTRACE | PCR_STRACE | sparc_pmu->hv_bit | sparc_pmu->irq_bit); - cpuc->pcr = val; - - pcr_ops->write(cpuc->pcr); + pcr_ops->write(val); } static u32 read_pmc(int idx) @@ -567,7 +242,7 @@ static void write_pmc(int idx, u64 val) } static int sparc_perf_event_set_period(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { s64 left = atomic64_read(&hwc->period_left); s64 period = hwc->sample_period; @@ -607,19 +282,19 @@ static int sparc_pmu_enable(struct perf_event *event) if (test_and_set_bit(idx, cpuc->used_mask)) return -EAGAIN; - sparc_pmu_disable_event(cpuc, hwc, idx); + sparc_pmu_disable_event(hwc, idx); cpuc->events[idx] = event; set_bit(idx, cpuc->active_mask); sparc_perf_event_set_period(event, hwc, idx); - sparc_pmu_enable_event(cpuc, hwc, idx); + sparc_pmu_enable_event(hwc, idx); perf_event_update_userpage(event); return 0; } static u64 sparc_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) + struct hw_perf_event *hwc, int idx) { int shift = 64 - 32; u64 prev_raw_count, new_raw_count; @@ -649,7 +324,7 @@ static void sparc_pmu_disable(struct perf_event *event) int idx = hwc->idx; clear_bit(idx, cpuc->active_mask); - sparc_pmu_disable_event(cpuc, hwc, idx); + sparc_pmu_disable_event(hwc, idx); barrier(); @@ -663,29 +338,18 @@ static void sparc_pmu_disable(struct perf_event *event) static void sparc_pmu_read(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; - sparc_perf_event_update(event, hwc, hwc->idx); } static void sparc_pmu_unthrottle(struct perf_event *event) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - - sparc_pmu_enable_event(cpuc, hwc, hwc->idx); + sparc_pmu_enable_event(hwc, hwc->idx); } static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmc_grab_mutex); -static void perf_stop_nmi_watchdog(void *unused) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - - stop_nmi_watchdog(NULL); - cpuc->pcr = pcr_ops->read(); -} - void perf_event_grab_pmc(void) { if (atomic_inc_not_zero(&active_events)) @@ -694,7 +358,7 @@ void perf_event_grab_pmc(void) mutex_lock(&pmc_grab_mutex); if (atomic_read(&active_events) == 0) { if (atomic_read(&nmi_active) > 0) { - on_each_cpu(perf_stop_nmi_watchdog, NULL, 1); + on_each_cpu(stop_nmi_watchdog, NULL, 1); BUG_ON(atomic_read(&nmi_active) != 0); } atomic_inc(&active_events); @@ -711,160 +375,30 @@ void perf_event_release_pmc(void) } } -static const struct perf_event_map *sparc_map_cache_event(u64 config) -{ - unsigned int cache_type, cache_op, cache_result; - const struct perf_event_map *pmap; - - if (!sparc_pmu->cache_map) - return ERR_PTR(-ENOENT); - - cache_type = (config >> 0) & 0xff; - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) - return ERR_PTR(-EINVAL); - - cache_op = (config >> 8) & 0xff; - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) - return ERR_PTR(-EINVAL); - - cache_result = (config >> 16) & 0xff; - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) - return ERR_PTR(-EINVAL); - - 
pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]); - - if (pmap->encoding == CACHE_OP_UNSUPPORTED) - return ERR_PTR(-ENOENT); - - if (pmap->encoding == CACHE_OP_NONSENSE) - return ERR_PTR(-EINVAL); - - return pmap; -} - static void hw_perf_event_destroy(struct perf_event *event) { perf_event_release_pmc(); } -/* Make sure all events can be scheduled into the hardware at - * the same time. This is simplified by the fact that we only - * need to support 2 simultaneous HW events. - */ -static int sparc_check_constraints(unsigned long *events, int n_ev) -{ - if (n_ev <= perf_max_events) { - u8 msk1, msk2; - u16 dummy; - - if (n_ev == 1) - return 0; - BUG_ON(n_ev != 2); - perf_event_decode(events[0], &dummy, &msk1); - perf_event_decode(events[1], &dummy, &msk2); - - /* If both events can go on any counter, OK. */ - if (msk1 == (PIC_UPPER | PIC_LOWER) && - msk2 == (PIC_UPPER | PIC_LOWER)) - return 0; - - /* If one event is limited to a specific counter, - * and the other can go on both, OK. - */ - if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && - msk2 == (PIC_UPPER | PIC_LOWER)) - return 0; - if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && - msk1 == (PIC_UPPER | PIC_LOWER)) - return 0; - - /* If the events are fixed to different counters, OK. */ - if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || - (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) - return 0; - - /* Otherwise, there is a conflict. */ - } - - return -1; -} - -static int check_excludes(struct perf_event **evts, int n_prev, int n_new) -{ - int eu = 0, ek = 0, eh = 0; - struct perf_event *event; - int i, n, first; - - n = n_prev + n_new; - if (n <= 1) - return 0; - - first = 1; - for (i = 0; i < n; i++) { - event = evts[i]; - if (first) { - eu = event->attr.exclude_user; - ek = event->attr.exclude_kernel; - eh = event->attr.exclude_hv; - first = 0; - } else if (event->attr.exclude_user != eu || - event->attr.exclude_kernel != ek || - event->attr.exclude_hv != eh) { - return -EAGAIN; - } - } - - return 0; -} - -static int collect_events(struct perf_event *group, int max_count, - struct perf_event *evts[], unsigned long *events) -{ - struct perf_event *event; - int n = 0; - - if (!is_software_event(group)) { - if (n >= max_count) - return -1; - evts[n] = group; - events[n++] = group->hw.event_base; - } - list_for_each_entry(event, &group->sibling_list, group_entry) { - if (!is_software_event(event) && - event->state != PERF_EVENT_STATE_OFF) { - if (n >= max_count) - return -1; - evts[n] = event; - events[n++] = event->hw.event_base; - } - } - return n; -} - static int __hw_perf_event_init(struct perf_event *event) { struct perf_event_attr *attr = &event->attr; - struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; - unsigned long events[MAX_HWEVENTS]; const struct perf_event_map *pmap; u64 enc; - int n; if (atomic_read(&nmi_active) < 0) return -ENODEV; - if (attr->type == PERF_TYPE_HARDWARE) { - if (attr->config >= sparc_pmu->max_events) - return -EINVAL; - pmap = sparc_pmu->event_map(attr->config); - } else if (attr->type == PERF_TYPE_HW_CACHE) { - pmap = sparc_map_cache_event(attr->config); - if (IS_ERR(pmap)) - return PTR_ERR(pmap); - } else + if (attr->type != PERF_TYPE_HARDWARE) return -EOPNOTSUPP; + if (attr->config >= sparc_pmu->max_events) + return -EINVAL; + + perf_event_grab_pmc(); + event->destroy = hw_perf_event_destroy; + /* We save the enable bits in the config_base. So to * turn off sampling just write 'config', and to enable * things write 'config | config_base'. 
@@ -877,39 +411,15 @@ static int __hw_perf_event_init(struct perf_event *event) if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; - hwc->event_base = perf_event_encode(pmap); - - enc = pmap->encoding; - - n = 0; - if (event->group_leader != event) { - n = collect_events(event->group_leader, - perf_max_events - 1, - evts, events); - if (n < 0) - return -EINVAL; - } - events[n] = hwc->event_base; - evts[n] = event; - - if (check_excludes(evts, n, 1)) - return -EINVAL; - - if (sparc_check_constraints(events, n + 1)) - return -EINVAL; - - /* Try to do all error checking before this point, as unwinding - * state after grabbing the PMC is difficult. - */ - perf_event_grab_pmc(); - event->destroy = hw_perf_event_destroy; - if (!hwc->sample_period) { hwc->sample_period = MAX_PERIOD; hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); } + pmap = sparc_pmu->event_map(attr->config); + + enc = pmap->encoding; if (pmap->pic_mask & PIC_UPPER) { hwc->idx = PIC_UPPER_INDEX; enc <<= sparc_pmu->upper_shift; @@ -962,7 +472,7 @@ void perf_event_print_debug(void) } static int __kprobes perf_event_nmi_handler(struct notifier_block *self, - unsigned long cmd, void *__args) + unsigned long cmd, void *__args) { struct die_args *args = __args; struct perf_sample_data data; @@ -1003,7 +513,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, continue; if (perf_event_overflow(event, 1, &data, regs)) - sparc_pmu_disable_event(cpuc, hwc, idx); + sparc_pmu_disable_event(hwc, idx); } return NOTIFY_STOP; @@ -1015,15 +525,8 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = { static bool __init supported_pmu(void) { - if (!strcmp(sparc_pmu_type, "ultra3") || - !strcmp(sparc_pmu_type, "ultra3+") || - !strcmp(sparc_pmu_type, "ultra3i") || - !strcmp(sparc_pmu_type, "ultra4+")) { - sparc_pmu = &ultra3_pmu; - return true; - } - if (!strcmp(sparc_pmu_type, "niagara")) { - sparc_pmu = &niagara1_pmu; + if (!strcmp(sparc_pmu_type, "ultra3i")) { + sparc_pmu = &ultra3i_pmu; return true; } if (!strcmp(sparc_pmu_type, "niagara2")) { diff --git a/trunk/arch/sparc/oprofile/init.c b/trunk/arch/sparc/oprofile/init.c index f9024bccff16..f97cb8b6ee5f 100644 --- a/trunk/arch/sparc/oprofile/init.c +++ b/trunk/arch/sparc/oprofile/init.c @@ -11,7 +11,6 @@ #include #include #include -#include /* for HZ */ #ifdef CONFIG_SPARC64 #include diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index c876bace8fdc..8da93745c087 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -86,6 +86,10 @@ config STACKTRACE_SUPPORT config HAVE_LATENCYTOP_SUPPORT def_bool y +config FAST_CMPXCHG_LOCAL + bool + default y + config MMU def_bool y diff --git a/trunk/arch/x86/Kconfig.cpu b/trunk/arch/x86/Kconfig.cpu index f2824fb8c79c..527519b8a9f9 100644 --- a/trunk/arch/x86/Kconfig.cpu +++ b/trunk/arch/x86/Kconfig.cpu @@ -400,7 +400,7 @@ config X86_TSC config X86_CMPXCHG64 def_bool y - depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM + depends on X86_PAE || X86_64 # this should be set for all -march=.. options where the compiler # generates cmov. 
@@ -412,7 +412,6 @@ config X86_MINIMUM_CPU_FAMILY int default "64" if X86_64 default "6" if X86_32 && X86_P6_NOP - default "5" if X86_32 && X86_CMPXCHG64 default "4" if X86_32 && (X86_XADD || X86_CMPXCHG || X86_BSWAP || X86_WP_WORKS_OK) default "3" diff --git a/trunk/arch/x86/ia32/ia32entry.S b/trunk/arch/x86/ia32/ia32entry.S index 1733f9f65e82..74619c4f9fda 100644 --- a/trunk/arch/x86/ia32/ia32entry.S +++ b/trunk/arch/x86/ia32/ia32entry.S @@ -21,8 +21,8 @@ #define __AUDIT_ARCH_LE 0x40000000 #ifndef CONFIG_AUDITSYSCALL -#define sysexit_audit ia32_ret_from_sys_call -#define sysretl_audit ia32_ret_from_sys_call +#define sysexit_audit int_ret_from_sys_call +#define sysretl_audit int_ret_from_sys_call #endif #define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8) @@ -39,12 +39,12 @@ .endm /* clobbers %eax */ - .macro CLEAR_RREGS offset=0, _r9=rax + .macro CLEAR_RREGS _r9=rax xorl %eax,%eax - movq %rax,\offset+R11(%rsp) - movq %rax,\offset+R10(%rsp) - movq %\_r9,\offset+R9(%rsp) - movq %rax,\offset+R8(%rsp) + movq %rax,R11(%rsp) + movq %rax,R10(%rsp) + movq %\_r9,R9(%rsp) + movq %rax,R8(%rsp) .endm /* @@ -172,10 +172,6 @@ sysexit_from_sys_call: movl RIP-R11(%rsp),%edx /* User %eip */ CFI_REGISTER rip,rdx RESTORE_ARGS 1,24,1,1,1,1 - xorq %r8,%r8 - xorq %r9,%r9 - xorq %r10,%r10 - xorq %r11,%r11 popfq CFI_ADJUST_CFA_OFFSET -8 /*CFI_RESTORE rflags*/ @@ -206,7 +202,7 @@ sysexit_from_sys_call: .macro auditsys_exit exit,ebpsave=RBP testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10) - jnz ia32_ret_from_sys_call + jnz int_ret_from_sys_call TRACE_IRQS_ON sti movl %eax,%esi /* second arg, syscall return value */ @@ -222,9 +218,8 @@ sysexit_from_sys_call: cli TRACE_IRQS_OFF testl %edi,TI_flags(%r10) - jz \exit - CLEAR_RREGS -ARGOFFSET - jmp int_with_check + jnz int_with_check + jmp \exit .endm sysenter_auditsys: @@ -334,9 +329,6 @@ sysretl_from_sys_call: CFI_REGISTER rip,rcx movl EFLAGS-ARGOFFSET(%rsp),%r11d /*CFI_REGISTER rflags,r11*/ - xorq %r10,%r10 - xorq %r9,%r9 - xorq %r8,%r8 TRACE_IRQS_ON movl RSP-ARGOFFSET(%rsp),%esp CFI_RESTORE rsp @@ -361,7 +353,7 @@ cstar_tracesys: #endif xchgl %r9d,%ebp SAVE_REST - CLEAR_RREGS 0, r9 + CLEAR_RREGS r9 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */ movq %rsp,%rdi /* &pt_regs -> arg1 */ call syscall_trace_enter @@ -433,8 +425,6 @@ ia32_do_call: call *ia32_sys_call_table(,%rax,8) # xxx: rip relative ia32_sysret: movq %rax,RAX-ARGOFFSET(%rsp) -ia32_ret_from_sys_call: - CLEAR_RREGS -ARGOFFSET jmp int_ret_from_sys_call ia32_tracesys: @@ -452,8 +442,8 @@ END(ia32_syscall) ia32_badsys: movq $0,ORIG_RAX-ARGOFFSET(%rsp) - movq $-ENOSYS,%rax - jmp ia32_sysret + movq $-ENOSYS,RAX-ARGOFFSET(%rsp) + jmp int_ret_from_sys_call quiet_ni_syscall: movq $-ENOSYS,%rax diff --git a/trunk/arch/x86/include/asm/kvm_host.h b/trunk/arch/x86/include/asm/kvm_host.h index d83892226f73..3be000435fad 100644 --- a/trunk/arch/x86/include/asm/kvm_host.h +++ b/trunk/arch/x86/include/asm/kvm_host.h @@ -796,7 +796,6 @@ asmlinkage void kvm_handle_fault_on_reboot(void); #define KVM_ARCH_WANT_MMU_NOTIFIER int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); int kvm_age_hva(struct kvm *kvm, unsigned long hva); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); int cpuid_maxphyaddr(struct kvm_vcpu *vcpu); int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu); int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu); diff --git a/trunk/arch/x86/kernel/early_printk.c b/trunk/arch/x86/kernel/early_printk.c index b9c830c12b4a..41fd965c80c6 
100644 --- a/trunk/arch/x86/kernel/early_printk.c +++ b/trunk/arch/x86/kernel/early_printk.c @@ -206,11 +206,8 @@ static int __init setup_early_printk(char *buf) while (*buf != '\0') { if (!strncmp(buf, "serial", 6)) { - buf += 6; - early_serial_init(buf); + early_serial_init(buf + 6); early_console_register(&early_serial_console, keep); - if (!strncmp(buf, ",ttyS", 5)) - buf += 5; } if (!strncmp(buf, "ttyS", 4)) { early_serial_init(buf + 4); diff --git a/trunk/arch/x86/kernel/i386_ksyms_32.c b/trunk/arch/x86/kernel/i386_ksyms_32.c index 9c3bd4a2050e..1736c5a725aa 100644 --- a/trunk/arch/x86/kernel/i386_ksyms_32.c +++ b/trunk/arch/x86/kernel/i386_ksyms_32.c @@ -15,10 +15,8 @@ EXPORT_SYMBOL(mcount); * the export, but dont use it from C code, it is used * by assembly code and is not using C calling convention! */ -#ifndef CONFIG_X86_CMPXCHG64 extern void cmpxchg8b_emu(void); EXPORT_SYMBOL(cmpxchg8b_emu); -#endif /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_generic); diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 391206199515..74656d1d4e30 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs) __func__, smp_processor_id(), vector, irq); } - run_local_timers(); irq_exit(); set_irq_regs(old_regs); @@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs) if (generic_interrupt_extension) generic_interrupt_extension(); - run_local_timers(); irq_exit(); set_irq_regs(old_regs); diff --git a/trunk/arch/x86/kernel/smp.c b/trunk/arch/x86/kernel/smp.c index d915d956e66d..ec1de97600e7 100644 --- a/trunk/arch/x86/kernel/smp.c +++ b/trunk/arch/x86/kernel/smp.c @@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs) { ack_APIC_irq(); inc_irq_stat(irq_resched_count); - run_local_timers(); /* * KVM uses this interrupt to force a cpu out of guest mode */ diff --git a/trunk/arch/x86/kvm/lapic.c b/trunk/arch/x86/kvm/lapic.c index 7024224f0fc8..1ae5ceba7eb2 100644 --- a/trunk/arch/x86/kvm/lapic.c +++ b/trunk/arch/x86/kvm/lapic.c @@ -664,7 +664,7 @@ static void start_apic_timer(struct kvm_lapic *apic) { ktime_t now = apic->lapic_timer.timer.base->get_time(); - apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) * + apic->lapic_timer.period = apic_get_reg(apic, APIC_TMICT) * APIC_BUS_CYCLE_NS * apic->divide_count; atomic_set(&apic->lapic_timer.pending, 0); diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c index 685a4ffac8e6..eca41ae9f453 100644 --- a/trunk/arch/x86/kvm/mmu.c +++ b/trunk/arch/x86/kvm/mmu.c @@ -156,8 +156,6 @@ module_param(oos_shadow, bool, 0644); #define CREATE_TRACE_POINTS #include "mmutrace.h" -#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT) - #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) struct kvm_rmap_desc { @@ -636,7 +634,9 @@ static void rmap_remove(struct kvm *kvm, u64 *spte) if (*spte & shadow_accessed_mask) kvm_set_pfn_accessed(pfn); if (is_writeble_pte(*spte)) - kvm_set_pfn_dirty(pfn); + kvm_release_pfn_dirty(pfn); + else + kvm_release_pfn_clean(pfn); rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level); if (!*rmapp) { printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte); @@ -748,7 +748,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn) return write_protected; } -static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) +static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp) { u64 *spte; int 
need_tlb_flush = 0; @@ -763,45 +763,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) return need_tlb_flush; } -static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) -{ - int need_flush = 0; - u64 *spte, new_spte; - pte_t *ptep = (pte_t *)data; - pfn_t new_pfn; - - WARN_ON(pte_huge(*ptep)); - new_pfn = pte_pfn(*ptep); - spte = rmap_next(kvm, rmapp, NULL); - while (spte) { - BUG_ON(!is_shadow_present_pte(*spte)); - rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); - need_flush = 1; - if (pte_write(*ptep)) { - rmap_remove(kvm, spte); - __set_spte(spte, shadow_trap_nonpresent_pte); - spte = rmap_next(kvm, rmapp, NULL); - } else { - new_spte = *spte &~ (PT64_BASE_ADDR_MASK); - new_spte |= (u64)new_pfn << PAGE_SHIFT; - - new_spte &= ~PT_WRITABLE_MASK; - new_spte &= ~SPTE_HOST_WRITEABLE; - if (is_writeble_pte(*spte)) - kvm_set_pfn_dirty(spte_to_pfn(*spte)); - __set_spte(spte, new_spte); - spte = rmap_next(kvm, rmapp, spte); - } - } - if (need_flush) - kvm_flush_remote_tlbs(kvm); - - return 0; -} - -static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data, - int (*handler)(struct kvm *kvm, unsigned long *rmapp, - u64 data)) +static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, + int (*handler)(struct kvm *kvm, unsigned long *rmapp)) { int i, j; int retval = 0; @@ -823,15 +786,13 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data, if (hva >= start && hva < end) { gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT; - retval |= handler(kvm, &memslot->rmap[gfn_offset], - data); + retval |= handler(kvm, &memslot->rmap[gfn_offset]); for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) { int idx = gfn_offset; idx /= KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL + j); retval |= handler(kvm, - &memslot->lpage_info[j][idx].rmap_pde, - data); + &memslot->lpage_info[j][idx].rmap_pde); } } } @@ -841,15 +802,10 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva, u64 data, int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) { - return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp); + return kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); } -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - kvm_handle_hva(kvm, hva, (u64)&pte, kvm_set_pte_rmapp); -} - -static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, u64 data) +static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp) { u64 *spte; int young = 0; @@ -885,13 +841,13 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn) gfn = unalias_gfn(vcpu->kvm, gfn); rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level); - kvm_unmap_rmapp(vcpu->kvm, rmapp, 0); + kvm_unmap_rmapp(vcpu->kvm, rmapp); kvm_flush_remote_tlbs(vcpu->kvm); } int kvm_age_hva(struct kvm *kvm, unsigned long hva) { - return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp); + return kvm_handle_hva(kvm, hva, kvm_age_rmapp); } #ifdef MMU_DEBUG @@ -1800,7 +1756,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access, int user_fault, int write_fault, int dirty, int level, gfn_t gfn, pfn_t pfn, bool speculative, - bool can_unsync, bool reset_host_protection) + bool can_unsync) { u64 spte; int ret = 0; @@ -1827,9 +1783,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, kvm_is_mmio_pfn(pfn)); - if (reset_host_protection) - spte |= SPTE_HOST_WRITEABLE; - spte |= (u64)pfn << PAGE_SHIFT; if ((pte_access & ACC_WRITE_MASK) @@ -1875,8 +1828,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, 
u64 *sptep, unsigned pt_access, unsigned pte_access, int user_fault, int write_fault, int dirty, int *ptwrite, int level, gfn_t gfn, - pfn_t pfn, bool speculative, - bool reset_host_protection) + pfn_t pfn, bool speculative) { int was_rmapped = 0; int was_writeble = is_writeble_pte(*sptep); @@ -1908,8 +1860,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, } if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, - dirty, level, gfn, pfn, speculative, true, - reset_host_protection)) { + dirty, level, gfn, pfn, speculative, true)) { if (write_fault) *ptwrite = 1; kvm_x86_ops->tlb_flush(vcpu); @@ -1926,7 +1877,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, page_header_update_slot(vcpu->kvm, sptep, gfn); if (!was_rmapped) { rmap_count = rmap_add(vcpu, sptep, gfn); - kvm_release_pfn_clean(pfn); + if (!is_rmap_spte(*sptep)) + kvm_release_pfn_clean(pfn); if (rmap_count > RMAP_RECYCLE_THRESHOLD) rmap_recycle(vcpu, sptep, gfn); } else { @@ -1957,7 +1909,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, if (iterator.level == level) { mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL, 0, write, 1, &pt_write, - level, gfn, pfn, false, true); + level, gfn, pfn, false); ++vcpu->stat.pf_fixed; break; } diff --git a/trunk/arch/x86/kvm/paging_tmpl.h b/trunk/arch/x86/kvm/paging_tmpl.h index 72558f8ff3f5..d2fec9c12d22 100644 --- a/trunk/arch/x86/kvm/paging_tmpl.h +++ b/trunk/arch/x86/kvm/paging_tmpl.h @@ -273,13 +273,9 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page, if (mmu_notifier_retry(vcpu, vcpu->arch.update_pte.mmu_seq)) return; kvm_get_pfn(pfn); - /* - * we call mmu_set_spte() with reset_host_protection = true beacuse that - * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). 
- */ mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0, gpte & PT_DIRTY_MASK, NULL, PT_PAGE_TABLE_LEVEL, - gpte_to_gfn(gpte), pfn, true, true); + gpte_to_gfn(gpte), pfn, true); } /* @@ -312,7 +308,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr, user_fault, write_fault, gw->ptes[gw->level-1] & PT_DIRTY_MASK, ptwrite, level, - gw->gfn, pfn, false, true); + gw->gfn, pfn, false); break; } @@ -562,7 +558,6 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu, static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) { int i, offset, nr_present; - bool reset_host_protection; offset = nr_present = 0; @@ -600,16 +595,9 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) nr_present++; pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); - if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { - pte_access &= ~ACC_WRITE_MASK; - reset_host_protection = 0; - } else { - reset_host_protection = 1; - } set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, - spte_to_pfn(sp->spt[i]), true, false, - reset_host_protection); + spte_to_pfn(sp->spt[i]), true, false); } return !nr_present; diff --git a/trunk/arch/x86/kvm/svm.c b/trunk/arch/x86/kvm/svm.c index c17404add91f..944cc9c04b3c 100644 --- a/trunk/arch/x86/kvm/svm.c +++ b/trunk/arch/x86/kvm/svm.c @@ -767,8 +767,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) rdtscll(tsc_this); delta = vcpu->arch.host_tsc - tsc_this; svm->vmcb->control.tsc_offset += delta; - if (is_nested(svm)) - svm->nested.hsave->control.tsc_offset += delta; vcpu->cpu = cpu; kvm_migrate_timers(vcpu); svm->asid_generation = 0; @@ -2059,14 +2057,10 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data) switch (ecx) { case MSR_IA32_TSC: { - u64 tsc_offset; - - if (is_nested(svm)) - tsc_offset = svm->nested.hsave->control.tsc_offset; - else - tsc_offset = svm->vmcb->control.tsc_offset; + u64 tsc; - *data = tsc_offset + native_read_tsc(); + rdtscll(tsc); + *data = svm->vmcb->control.tsc_offset + tsc; break; } case MSR_K6_STAR: @@ -2152,17 +2146,10 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data) switch (ecx) { case MSR_IA32_TSC: { - u64 tsc_offset = data - native_read_tsc(); - u64 g_tsc_offset = 0; - - if (is_nested(svm)) { - g_tsc_offset = svm->vmcb->control.tsc_offset - - svm->nested.hsave->control.tsc_offset; - svm->nested.hsave->control.tsc_offset = tsc_offset; - } - - svm->vmcb->control.tsc_offset = tsc_offset + g_tsc_offset; + u64 tsc; + rdtscll(tsc); + svm->vmcb->control.tsc_offset = data - tsc; break; } case MSR_K6_STAR: diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index ed53b42caba1..f3812014bd0b 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -709,7 +709,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (vcpu->cpu != cpu) { vcpu_clear(vmx); kvm_migrate_timers(vcpu); - set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests); + vpid_sync_vcpu_all(vmx); local_irq_disable(); list_add(&vmx->local_vcpus_link, &per_cpu(vcpus_on_cpu, cpu)); diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 9b9695322f56..be451ee44249 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -1591,8 +1591,6 @@ static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, if (cpuid->nent < 1) goto out; - if (cpuid->nent > KVM_MAX_CPUID_ENTRIES) - cpuid->nent = KVM_MAX_CPUID_ENTRIES; r = -ENOMEM; cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry2) 
* cpuid->nent); if (!cpuid_entries) diff --git a/trunk/arch/x86/lib/Makefile b/trunk/arch/x86/lib/Makefile index 85f5db95c60f..3e549b8ec8c9 100644 --- a/trunk/arch/x86/lib/Makefile +++ b/trunk/arch/x86/lib/Makefile @@ -15,10 +15,8 @@ ifeq ($(CONFIG_X86_32),y) obj-y += atomic64_32.o lib-y += checksum_32.o lib-y += strstr_32.o - lib-y += semaphore_32.o string_32.o -ifneq ($(CONFIG_X86_CMPXCHG64),y) - lib-y += cmpxchg8b_emu.o -endif + lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o + lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o else obj-y += io_64.o iomap_copy_64.o diff --git a/trunk/block/blk-barrier.c b/trunk/block/blk-barrier.c index 8873b9b439ff..6593ab39cfe9 100644 --- a/trunk/block/blk-barrier.c +++ b/trunk/block/blk-barrier.c @@ -350,7 +350,6 @@ static void blkdev_discard_end_io(struct bio *bio, int err) if (bio->bi_private) complete(bio->bi_private); - __free_page(bio_page(bio)); bio_put(bio); } @@ -373,50 +372,30 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, struct request_queue *q = bdev_get_queue(bdev); int type = flags & DISCARD_FL_BARRIER ? DISCARD_BARRIER : DISCARD_NOBARRIER; - struct bio *bio; - struct page *page; int ret = 0; if (!q) return -ENXIO; - if (!blk_queue_discard(q)) + if (!q->prepare_discard_fn) return -EOPNOTSUPP; while (nr_sects && !ret) { - unsigned int sector_size = q->limits.logical_block_size; - unsigned int max_discard_sectors = - min(q->limits.max_discard_sectors, UINT_MAX >> 9); - - bio = bio_alloc(gfp_mask, 1); + struct bio *bio = bio_alloc(gfp_mask, 0); if (!bio) - goto out; - bio->bi_sector = sector; + return -ENOMEM; + bio->bi_end_io = blkdev_discard_end_io; bio->bi_bdev = bdev; if (flags & DISCARD_FL_WAIT) bio->bi_private = &wait; - /* - * Add a zeroed one-sector payload as that's what - * our current implementations need. If we'll ever need - * more the interface will need revisiting. - */ - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto out_free_bio; - if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) - goto out_free_page; + bio->bi_sector = sector; - /* - * And override the bio size - the way discard works we - * touch many more blocks on disk than the actual payload - * length. 
- */ - if (nr_sects > max_discard_sectors) { - bio->bi_size = max_discard_sectors << 9; - nr_sects -= max_discard_sectors; - sector += max_discard_sectors; + if (nr_sects > queue_max_hw_sectors(q)) { + bio->bi_size = queue_max_hw_sectors(q) << 9; + nr_sects -= queue_max_hw_sectors(q); + sector += queue_max_hw_sectors(q); } else { bio->bi_size = nr_sects << 9; nr_sects = 0; @@ -435,11 +414,5 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, bio_put(bio); } return ret; -out_free_page: - __free_page(page); -out_free_bio: - bio_put(bio); -out: - return -ENOMEM; } EXPORT_SYMBOL(blkdev_issue_discard); diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 81f34311659a..8135228e4b29 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -34,7 +34,6 @@ #include "blk.h" EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); -EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); static int __make_request(struct request_queue *q, struct bio *bio); @@ -70,7 +69,7 @@ static void drive_stat_acct(struct request *rq, int new_io) part_stat_inc(cpu, part, merges[rw]); else { part_round_stats(cpu, part); - part_inc_in_flight(part); + part_inc_in_flight(part, rw); } part_stat_unlock(); @@ -1032,7 +1031,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part, if (part->in_flight) { __part_stat_add(cpu, part, time_in_queue, - part->in_flight * (now - part->stamp)); + part_in_flight(part) * (now - part->stamp)); __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); } part->stamp = now; @@ -1125,6 +1124,7 @@ void init_request_from_bio(struct request *req, struct bio *bio) req->cmd_flags |= REQ_DISCARD; if (bio_rw_flagged(bio, BIO_RW_BARRIER)) req->cmd_flags |= REQ_SOFTBARRIER; + req->q->prepare_discard_fn(req->q, req); } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) req->cmd_flags |= REQ_HARDBARRIER; @@ -1437,8 +1437,7 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; } - if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && - nr_sectors > queue_max_hw_sectors(q))) { + if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { printk(KERN_ERR "bio too big device %s (%u > %u)\n", bdevname(bio->bi_bdev, b), bio_sectors(bio), @@ -1471,7 +1470,7 @@ static inline void __generic_make_request(struct bio *bio) goto end_io; if (bio_rw_flagged(bio, BIO_RW_DISCARD) && - !blk_queue_discard(q)) { + !q->prepare_discard_fn) { err = -EOPNOTSUPP; goto end_io; } @@ -1739,7 +1738,7 @@ static void blk_account_io_done(struct request *req) part_stat_inc(cpu, part, ios[rw]); part_stat_add(cpu, part, ticks[rw], duration); part_round_stats(cpu, part); - part_dec_in_flight(part); + part_dec_in_flight(part, rw); part_stat_unlock(); } @@ -2492,14 +2491,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) } EXPORT_SYMBOL(kblockd_schedule_work); -int kblockd_schedule_delayed_work(struct request_queue *q, - struct delayed_work *work, - unsigned long delay) -{ - return queue_delayed_work(kblockd_workqueue, work, delay); -} -EXPORT_SYMBOL(kblockd_schedule_delayed_work); - int __init blk_dev_init(void) { BUILD_BUG_ON(__REQ_NR_BITS > 8 * diff --git a/trunk/block/blk-merge.c b/trunk/block/blk-merge.c index b0de8574fdc8..99cb5cf1f447 100644 --- a/trunk/block/blk-merge.c +++ b/trunk/block/blk-merge.c @@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req) part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); part_round_stats(cpu, part); - part_dec_in_flight(part); + 
part_dec_in_flight(part, rq_data_dir(req)); part_stat_unlock(); } diff --git a/trunk/block/blk-settings.c b/trunk/block/blk-settings.c index e0695bca7027..83413ff83739 100644 --- a/trunk/block/blk-settings.c +++ b/trunk/block/blk-settings.c @@ -33,6 +33,23 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) } EXPORT_SYMBOL(blk_queue_prep_rq); +/** + * blk_queue_set_discard - set a discard_sectors function for queue + * @q: queue + * @dfn: prepare_discard function + * + * It's possible for a queue to register a discard callback which is used + * to transform a discard request into the appropriate type for the + * hardware. If none is registered, then discard requests are failed + * with %EOPNOTSUPP. + * + */ +void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn) +{ + q->prepare_discard_fn = dfn; +} +EXPORT_SYMBOL(blk_queue_set_discard); + /** * blk_queue_merge_bvec - set a merge_bvec function for queue * @q: queue @@ -94,9 +111,7 @@ void blk_set_default_limits(struct queue_limits *lim) lim->max_hw_segments = MAX_HW_SEGMENTS; lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; lim->max_segment_size = MAX_SEGMENT_SIZE; - lim->max_sectors = BLK_DEF_MAX_SECTORS; - lim->max_hw_sectors = INT_MAX; - lim->max_discard_sectors = SAFE_MAX_SECTORS; + lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS; lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); lim->alignment_offset = 0; @@ -149,7 +164,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) q->unplug_timer.data = (unsigned long)q; blk_set_default_limits(&q->limits); - blk_queue_max_sectors(q, SAFE_MAX_SECTORS); /* * If the caller didn't supply a lock, fall back to our embedded @@ -239,18 +253,6 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) } EXPORT_SYMBOL(blk_queue_max_hw_sectors); -/** - * blk_queue_max_discard_sectors - set max sectors for a single discard - * @q: the request queue for the device - * @max_discard: maximum number of sectors to discard - **/ -void blk_queue_max_discard_sectors(struct request_queue *q, - unsigned int max_discard_sectors) -{ - q->limits.max_discard_sectors = max_discard_sectors; -} -EXPORT_SYMBOL(blk_queue_max_discard_sectors); - /** * blk_queue_max_phys_segments - set max phys segments for a request for this queue * @q: the request queue for the device diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c index 8a6d81afb284..b78c9c3e2670 100644 --- a/trunk/block/blk-sysfs.c +++ b/trunk/block/blk-sysfs.c @@ -452,7 +452,6 @@ int blk_register_queue(struct gendisk *disk) if (ret) { kobject_uevent(&q->kobj, KOBJ_REMOVE); kobject_del(&q->kobj); - blk_trace_remove_sysfs(disk_to_dev(disk)); return ret; } @@ -466,11 +465,11 @@ void blk_unregister_queue(struct gendisk *disk) if (WARN_ON(!q)) return; - if (q->request_fn) + if (q->request_fn) { elv_unregister_queue(q); - kobject_uevent(&q->kobj, KOBJ_REMOVE); - kobject_del(&q->kobj); - blk_trace_remove_sysfs(disk_to_dev(disk)); - kobject_put(&disk_to_dev(disk)->kobj); + kobject_uevent(&q->kobj, KOBJ_REMOVE); + kobject_del(&q->kobj); + kobject_put(&disk_to_dev(disk)->kobj); + } } diff --git a/trunk/block/cfq-iosched.c b/trunk/block/cfq-iosched.c index 9c4b679908f4..1ca813b16e78 100644 --- a/trunk/block/cfq-iosched.c +++ b/trunk/block/cfq-iosched.c @@ -150,7 +150,7 @@ struct cfq_data { * idle window management */ struct timer_list idle_slice_timer; - struct delayed_work 
unplug_work; + struct work_struct unplug_work; struct cfq_queue *active_queue; struct cfq_io_context *active_cic; @@ -173,7 +173,6 @@ struct cfq_data { unsigned int cfq_slice[2]; unsigned int cfq_slice_async_rq; unsigned int cfq_slice_idle; - unsigned int cfq_latency; struct list_head cic_list; @@ -181,8 +180,6 @@ struct cfq_data { * Fallback dummy cfqq for extreme OOM conditions */ struct cfq_queue oom_cfqq; - - unsigned long last_end_sync_rq; }; enum cfqq_state_flags { @@ -268,13 +265,11 @@ static inline int cfq_bio_sync(struct bio *bio) * scheduler run of queue, if there are requests pending and no one in the * driver that will restart queueing */ -static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, - unsigned long delay) +static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) { if (cfqd->busy_queues) { cfq_log(cfqd, "schedule dispatch"); - kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, - delay); + kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); } } @@ -1331,30 +1326,12 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) return 0; /* - * Sole queue user, allow bigger slice + * we are the only queue, allow up to 4 times of 'quantum' */ - max_dispatch *= 4; - } - - /* - * Async queues must wait a bit before being allowed dispatch. - * We also ramp up the dispatch depth gradually for async IO, - * based on the last sync IO we serviced - */ - if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { - unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; - unsigned int depth; - - depth = last_sync / cfqd->cfq_slice[1]; - if (!depth && !cfqq->dispatched) - depth = 1; - if (depth < max_dispatch) - max_dispatch = depth; + if (cfqq->dispatched >= 4 * max_dispatch) + return 0; } - if (cfqq->dispatched >= max_dispatch) - return 0; - /* * Dispatch a request from this cfqq */ @@ -1399,7 +1376,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq) if (unlikely(cfqd->active_queue == cfqq)) { __cfq_slice_expired(cfqd, cfqq, 0); - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); } kmem_cache_free(cfq_pool, cfqq); @@ -1494,7 +1471,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) { if (unlikely(cfqq == cfqd->active_queue)) { __cfq_slice_expired(cfqd, cfqq, 0); - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); } cfq_put_queue(cfqq); @@ -1974,7 +1951,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || - (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) + (cfqd->hw_tag && CIC_SEEKY(cic))) enable_idle = 0; else if (sample_valid(cic->ttime_samples)) { if (cic->ttime_mean > cfqd->cfq_slice_idle) @@ -2180,10 +2157,8 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) if (cfq_cfqq_sync(cfqq)) cfqd->sync_flight--; - if (sync) { + if (sync) RQ_CIC(rq)->last_end_request = now; - cfqd->last_end_sync_rq = now; - } /* * If this is the active queue, check if it needs to be expired, @@ -2211,7 +2186,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) } if (!rq_in_driver(cfqd)) - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); } /* @@ -2341,7 +2316,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) if (cic) put_io_context(cic->ioc); - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); spin_unlock_irqrestore(q->queue_lock, flags); 
cfq_log(cfqd, "set_request fail"); return 1; @@ -2350,7 +2325,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) static void cfq_kick_queue(struct work_struct *work) { struct cfq_data *cfqd = - container_of(work, struct cfq_data, unplug_work.work); + container_of(work, struct cfq_data, unplug_work); struct request_queue *q = cfqd->queue; spin_lock_irq(q->queue_lock); @@ -2404,7 +2379,7 @@ static void cfq_idle_slice_timer(unsigned long data) expire: cfq_slice_expired(cfqd, timed_out); out_kick: - cfq_schedule_dispatch(cfqd, 0); + cfq_schedule_dispatch(cfqd); out_cont: spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); } @@ -2412,7 +2387,7 @@ static void cfq_idle_slice_timer(unsigned long data) static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) { del_timer_sync(&cfqd->idle_slice_timer); - cancel_delayed_work_sync(&cfqd->unplug_work); + cancel_work_sync(&cfqd->unplug_work); } static void cfq_put_async_queues(struct cfq_data *cfqd) @@ -2494,7 +2469,7 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->idle_slice_timer.function = cfq_idle_slice_timer; cfqd->idle_slice_timer.data = (unsigned long) cfqd; - INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); cfqd->cfq_quantum = cfq_quantum; cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; @@ -2505,9 +2480,8 @@ static void *cfq_init_queue(struct request_queue *q) cfqd->cfq_slice[1] = cfq_slice_sync; cfqd->cfq_slice_async_rq = cfq_slice_async_rq; cfqd->cfq_slice_idle = cfq_slice_idle; - cfqd->cfq_latency = 1; cfqd->hw_tag = 1; - cfqd->last_end_sync_rq = jiffies; + return cfqd; } @@ -2575,7 +2549,6 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); -SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -2607,7 +2580,6 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); -STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); #undef STORE_FUNCTION #define CFQ_ATTR(name) \ @@ -2623,7 +2595,6 @@ static struct elv_fs_entry cfq_attrs[] = { CFQ_ATTR(slice_async), CFQ_ATTR(slice_async_rq), CFQ_ATTR(slice_idle), - CFQ_ATTR(low_latency), __ATTR_NULL }; diff --git a/trunk/block/compat_ioctl.c b/trunk/block/compat_ioctl.c index 9bd086c1a4d5..7865a34e0faa 100644 --- a/trunk/block/compat_ioctl.c +++ b/trunk/block/compat_ioctl.c @@ -21,11 +21,6 @@ static int compat_put_int(unsigned long arg, int val) return put_user(val, (compat_int_t __user *)compat_ptr(arg)); } -static int compat_put_uint(unsigned long arg, unsigned int val) -{ - return put_user(val, (compat_uint_t __user *)compat_ptr(arg)); -} - static int compat_put_long(unsigned long arg, long val) { return put_user(val, (compat_long_t __user *)compat_ptr(arg)); @@ -739,14 +734,6 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) switch (cmd) { case HDIO_GETGEO: return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); - case BLKPBSZGET: - return compat_put_uint(arg, bdev_physical_block_size(bdev)); - case BLKIOMIN: - return compat_put_uint(arg, bdev_io_min(bdev)); - case BLKIOOPT: - return 
compat_put_uint(arg, bdev_io_opt(bdev)); - case BLKALIGNOFF: - return compat_put_int(arg, bdev_alignment_offset(bdev)); case BLKFLSBUF: case BLKROSET: case BLKDISCARD: diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c index 5a0861da324d..517e4332cb37 100644 --- a/trunk/block/genhd.c +++ b/trunk/block/genhd.c @@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); +static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); @@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_alignment_offset.attr, &dev_attr_capability.attr, &dev_attr_stat.attr, + &dev_attr_inflight.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif @@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) part_stat_read(hd, merges[1]), (unsigned long long)part_stat_read(hd, sectors[1]), jiffies_to_msecs(part_stat_read(hd, ticks[1])), - hd->in_flight, + part_in_flight(hd), jiffies_to_msecs(part_stat_read(hd, io_ticks)), jiffies_to_msecs(part_stat_read(hd, time_in_queue)) ); diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c index 1f4d1de12b09..d3e6b5827a34 100644 --- a/trunk/block/ioctl.c +++ b/trunk/block/ioctl.c @@ -138,11 +138,6 @@ static int put_int(unsigned long arg, int val) return put_user(val, (int __user *)arg); } -static int put_uint(unsigned long arg, unsigned int val) -{ - return put_user(val, (unsigned int __user *)arg); -} - static int put_long(unsigned long arg, long val) { return put_user(val, (long __user *)arg); @@ -268,18 +263,10 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); case BLKROGET: return put_int(arg, bdev_read_only(bdev) != 0); - case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ + case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ return put_int(arg, block_size(bdev)); - case BLKSSZGET: /* get block device logical block size */ + case BLKSSZGET: /* get block device hardware sector size */ return put_int(arg, bdev_logical_block_size(bdev)); - case BLKPBSZGET: /* get block device physical block size */ - return put_uint(arg, bdev_physical_block_size(bdev)); - case BLKIOMIN: - return put_uint(arg, bdev_io_min(bdev)); - case BLKIOOPT: - return put_uint(arg, bdev_io_opt(bdev)); - case BLKALIGNOFF: - return put_int(arg, bdev_alignment_offset(bdev)); case BLKSECTGET: return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); case BLKRASET: diff --git a/trunk/drivers/acpi/Kconfig b/trunk/drivers/acpi/Kconfig index 0ed42d8870c7..dd8729d674e5 100644 --- a/trunk/drivers/acpi/Kconfig +++ b/trunk/drivers/acpi/Kconfig @@ -211,18 +211,6 @@ config ACPI_HOTPLUG_CPU select ACPI_CONTAINER default y -config ACPI_PROCESSOR_AGGREGATOR - tristate "Processor Aggregator" - depends on ACPI_PROCESSOR - depends on EXPERIMENTAL - depends on X86 - help - ACPI 4.0 defines processor Aggregator, which enables OS to perform - specfic processor configuration and control that applies to all - processors in the platform. Currently only logical processor idling - is defined, which is to reduce power consumption. This driver - support the new device. 
- config ACPI_THERMAL tristate "Thermal Zone" depends on ACPI_PROCESSOR diff --git a/trunk/drivers/acpi/Makefile b/trunk/drivers/acpi/Makefile index 7702118509a0..82cd49dc603b 100644 --- a/trunk/drivers/acpi/Makefile +++ b/trunk/drivers/acpi/Makefile @@ -62,5 +62,3 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o processor-y := processor_core.o processor_throttling.o processor-y += processor_idle.o processor_thermal.o processor-$(CONFIG_CPU_FREQ) += processor_perflib.o - -obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o diff --git a/trunk/drivers/acpi/acpi_pad.c b/trunk/drivers/acpi/acpi_pad.c deleted file mode 100644 index 0d2cdb86158b..000000000000 --- a/trunk/drivers/acpi/acpi_pad.c +++ /dev/null @@ -1,514 +0,0 @@ -/* - * acpi_pad.c ACPI Processor Aggregator Driver - * - * Copyright (c) 2009, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator" -#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" -#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 -static DEFINE_MUTEX(isolated_cpus_lock); - -#define MWAIT_SUBSTATE_MASK (0xf) -#define MWAIT_CSTATE_MASK (0xf) -#define MWAIT_SUBSTATE_SIZE (4) -#define CPUID_MWAIT_LEAF (5) -#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) -#define CPUID5_ECX_INTERRUPT_BREAK (0x2) -static unsigned long power_saving_mwait_eax; -static void power_saving_mwait_init(void) -{ - unsigned int eax, ebx, ecx, edx; - unsigned int highest_cstate = 0; - unsigned int highest_subcstate = 0; - int i; - - if (!boot_cpu_has(X86_FEATURE_MWAIT)) - return; - if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF) - return; - - cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx); - - if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) || - !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) - return; - - edx >>= MWAIT_SUBSTATE_SIZE; - for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { - if (edx & MWAIT_SUBSTATE_MASK) { - highest_cstate = i; - highest_subcstate = edx & MWAIT_SUBSTATE_MASK; - } - } - power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | - (highest_subcstate - 1); - - for_each_online_cpu(i) - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i); - -#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86) - switch (boot_cpu_data.x86_vendor) { - case X86_VENDOR_AMD: - case X86_VENDOR_INTEL: - /* - * AMD Fam10h TSC will tick in all - * C/P/S0/S1 states when this bit is set. - */ - if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC)) - return; - - /*FALL THROUGH*/ - default: - /* TSC could halt in idle, so notify users */ - mark_tsc_unstable("TSC halts in idle"); - } -#endif -} - -static unsigned long cpu_weight[NR_CPUS]; -static int tsk_in_cpu[NR_CPUS] = {[0 ... 
NR_CPUS-1] = -1}; -static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS); -static void round_robin_cpu(unsigned int tsk_index) -{ - struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); - cpumask_var_t tmp; - int cpu; - unsigned long min_weight = -1, preferred_cpu; - - if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) - return; - - mutex_lock(&isolated_cpus_lock); - cpumask_clear(tmp); - for_each_cpu(cpu, pad_busy_cpus) - cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); - cpumask_andnot(tmp, cpu_online_mask, tmp); - /* avoid HT sibilings if possible */ - if (cpumask_empty(tmp)) - cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); - if (cpumask_empty(tmp)) { - mutex_unlock(&isolated_cpus_lock); - return; - } - for_each_cpu(cpu, tmp) { - if (cpu_weight[cpu] < min_weight) { - min_weight = cpu_weight[cpu]; - preferred_cpu = cpu; - } - } - - if (tsk_in_cpu[tsk_index] != -1) - cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); - tsk_in_cpu[tsk_index] = preferred_cpu; - cpumask_set_cpu(preferred_cpu, pad_busy_cpus); - cpu_weight[preferred_cpu]++; - mutex_unlock(&isolated_cpus_lock); - - set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); -} - -static void exit_round_robin(unsigned int tsk_index) -{ - struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits); - cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus); - tsk_in_cpu[tsk_index] = -1; -} - -static unsigned int idle_pct = 5; /* percentage */ -static unsigned int round_robin_time = 10; /* second */ -static int power_saving_thread(void *data) -{ - struct sched_param param = {.sched_priority = 1}; - int do_sleep; - unsigned int tsk_index = (unsigned long)data; - u64 last_jiffies = 0; - - sched_setscheduler(current, SCHED_RR, ¶m); - - while (!kthread_should_stop()) { - int cpu; - u64 expire_time; - - try_to_freeze(); - - /* round robin to cpus */ - if (last_jiffies + round_robin_time * HZ < jiffies) { - last_jiffies = jiffies; - round_robin_cpu(tsk_index); - } - - do_sleep = 0; - - current_thread_info()->status &= ~TS_POLLING; - /* - * TS_POLLING-cleared state must be visible before we test - * NEED_RESCHED: - */ - smp_mb(); - - expire_time = jiffies + HZ * (100 - idle_pct) / 100; - - while (!need_resched()) { - local_irq_disable(); - cpu = smp_processor_id(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, - &cpu); - stop_critical_timings(); - - __monitor((void *)¤t_thread_info()->flags, 0, 0); - smp_mb(); - if (!need_resched()) - __mwait(power_saving_mwait_eax, 1); - - start_critical_timings(); - clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, - &cpu); - local_irq_enable(); - - if (jiffies > expire_time) { - do_sleep = 1; - break; - } - } - - current_thread_info()->status |= TS_POLLING; - - /* - * current sched_rt has threshold for rt task running time. - * When a rt task uses 95% CPU time, the rt thread will be - * scheduled out for 5% CPU time to not starve other tasks. But - * the mechanism only works when all CPUs have RT task running, - * as if one CPU hasn't RT task, RT task from other CPUs will - * borrow CPU time from this CPU and cause RT task use > 95% - * CPU time. To make 'avoid staration' work, takes a nap here. 
- */ - if (do_sleep) - schedule_timeout_killable(HZ * idle_pct / 100); - } - - exit_round_robin(tsk_index); - return 0; -} - -static struct task_struct *ps_tsks[NR_CPUS]; -static unsigned int ps_tsk_num; -static int create_power_saving_task(void) -{ - ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread, - (void *)(unsigned long)ps_tsk_num, - "power_saving/%d", ps_tsk_num); - if (ps_tsks[ps_tsk_num]) { - ps_tsk_num++; - return 0; - } - return -EINVAL; -} - -static void destroy_power_saving_task(void) -{ - if (ps_tsk_num > 0) { - ps_tsk_num--; - kthread_stop(ps_tsks[ps_tsk_num]); - } -} - -static void set_power_saving_task_num(unsigned int num) -{ - if (num > ps_tsk_num) { - while (ps_tsk_num < num) { - if (create_power_saving_task()) - return; - } - } else if (num < ps_tsk_num) { - while (ps_tsk_num > num) - destroy_power_saving_task(); - } -} - -static int acpi_pad_idle_cpus(unsigned int num_cpus) -{ - get_online_cpus(); - - num_cpus = min_t(unsigned int, num_cpus, num_online_cpus()); - set_power_saving_task_num(num_cpus); - - put_online_cpus(); - return 0; -} - -static uint32_t acpi_pad_idle_cpus_num(void) -{ - return ps_tsk_num; -} - -static ssize_t acpi_pad_rrtime_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - unsigned long num; - if (strict_strtoul(buf, 0, &num)) - return -EINVAL; - if (num < 1 || num >= 100) - return -EINVAL; - mutex_lock(&isolated_cpus_lock); - round_robin_time = num; - mutex_unlock(&isolated_cpus_lock); - return count; -} - -static ssize_t acpi_pad_rrtime_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time); -} -static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR, - acpi_pad_rrtime_show, - acpi_pad_rrtime_store); - -static ssize_t acpi_pad_idlepct_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - unsigned long num; - if (strict_strtoul(buf, 0, &num)) - return -EINVAL; - if (num < 1 || num >= 100) - return -EINVAL; - mutex_lock(&isolated_cpus_lock); - idle_pct = num; - mutex_unlock(&isolated_cpus_lock); - return count; -} - -static ssize_t acpi_pad_idlepct_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return scnprintf(buf, PAGE_SIZE, "%d", idle_pct); -} -static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR, - acpi_pad_idlepct_show, - acpi_pad_idlepct_store); - -static ssize_t acpi_pad_idlecpus_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - unsigned long num; - if (strict_strtoul(buf, 0, &num)) - return -EINVAL; - mutex_lock(&isolated_cpus_lock); - acpi_pad_idle_cpus(num); - mutex_unlock(&isolated_cpus_lock); - return count; -} - -static ssize_t acpi_pad_idlecpus_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return cpumask_scnprintf(buf, PAGE_SIZE, - to_cpumask(pad_busy_cpus_bits)); -} -static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR, - acpi_pad_idlecpus_show, - acpi_pad_idlecpus_store); - -static int acpi_pad_add_sysfs(struct acpi_device *device) -{ - int result; - - result = device_create_file(&device->dev, &dev_attr_idlecpus); - if (result) - return -ENODEV; - result = device_create_file(&device->dev, &dev_attr_idlepct); - if (result) { - device_remove_file(&device->dev, &dev_attr_idlecpus); - return -ENODEV; - } - result = device_create_file(&device->dev, &dev_attr_rrtime); - if (result) { - device_remove_file(&device->dev, &dev_attr_idlecpus); - device_remove_file(&device->dev, &dev_attr_idlepct); - return 
-ENODEV; - } - return 0; -} - -static void acpi_pad_remove_sysfs(struct acpi_device *device) -{ - device_remove_file(&device->dev, &dev_attr_idlecpus); - device_remove_file(&device->dev, &dev_attr_idlepct); - device_remove_file(&device->dev, &dev_attr_rrtime); -} - -/* Query firmware how many CPUs should be idle */ -static int acpi_pad_pur(acpi_handle handle, int *num_cpus) -{ - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - acpi_status status; - union acpi_object *package; - int rev, num, ret = -EINVAL; - - status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer); - if (ACPI_FAILURE(status)) - return -EINVAL; - package = buffer.pointer; - if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2) - goto out; - rev = package->package.elements[0].integer.value; - num = package->package.elements[1].integer.value; - if (rev != 1) - goto out; - *num_cpus = num; - ret = 0; -out: - kfree(buffer.pointer); - return ret; -} - -/* Notify firmware how many CPUs are idle */ -static void acpi_pad_ost(acpi_handle handle, int stat, - uint32_t idle_cpus) -{ - union acpi_object params[3] = { - {.type = ACPI_TYPE_INTEGER,}, - {.type = ACPI_TYPE_INTEGER,}, - {.type = ACPI_TYPE_BUFFER,}, - }; - struct acpi_object_list arg_list = {3, params}; - - params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY; - params[1].integer.value = stat; - params[2].buffer.length = 4; - params[2].buffer.pointer = (void *)&idle_cpus; - acpi_evaluate_object(handle, "_OST", &arg_list, NULL); -} - -static void acpi_pad_handle_notify(acpi_handle handle) -{ - int num_cpus, ret; - uint32_t idle_cpus; - - mutex_lock(&isolated_cpus_lock); - if (acpi_pad_pur(handle, &num_cpus)) { - mutex_unlock(&isolated_cpus_lock); - return; - } - ret = acpi_pad_idle_cpus(num_cpus); - idle_cpus = acpi_pad_idle_cpus_num(); - if (!ret) - acpi_pad_ost(handle, 0, idle_cpus); - else - acpi_pad_ost(handle, 1, 0); - mutex_unlock(&isolated_cpus_lock); -} - -static void acpi_pad_notify(acpi_handle handle, u32 event, - void *data) -{ - struct acpi_device *device = data; - - switch (event) { - case ACPI_PROCESSOR_AGGREGATOR_NOTIFY: - acpi_pad_handle_notify(handle); - acpi_bus_generate_proc_event(device, event, 0); - acpi_bus_generate_netlink_event(device->pnp.device_class, - dev_name(&device->dev), event, 0); - break; - default: - printk(KERN_WARNING"Unsupported event [0x%x]\n", event); - break; - } -} - -static int acpi_pad_add(struct acpi_device *device) -{ - acpi_status status; - - strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME); - strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS); - - if (acpi_pad_add_sysfs(device)) - return -ENODEV; - - status = acpi_install_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, acpi_pad_notify, device); - if (ACPI_FAILURE(status)) { - acpi_pad_remove_sysfs(device); - return -ENODEV; - } - - return 0; -} - -static int acpi_pad_remove(struct acpi_device *device, - int type) -{ - mutex_lock(&isolated_cpus_lock); - acpi_pad_idle_cpus(0); - mutex_unlock(&isolated_cpus_lock); - - acpi_remove_notify_handler(device->handle, - ACPI_DEVICE_NOTIFY, acpi_pad_notify); - acpi_pad_remove_sysfs(device); - return 0; -} - -static const struct acpi_device_id pad_device_ids[] = { - {"ACPI000C", 0}, - {"", 0}, -}; -MODULE_DEVICE_TABLE(acpi, pad_device_ids); - -static struct acpi_driver acpi_pad_driver = { - .name = "processor_aggregator", - .class = ACPI_PROCESSOR_AGGREGATOR_CLASS, - .ids = pad_device_ids, - .ops = { - .add = acpi_pad_add, - .remove = acpi_pad_remove, - }, -}; 
- -static int __init acpi_pad_init(void) -{ - power_saving_mwait_init(); - if (power_saving_mwait_eax == 0) - return -EINVAL; - - return acpi_bus_register_driver(&acpi_pad_driver); -} - -static void __exit acpi_pad_exit(void) -{ - acpi_bus_unregister_driver(&acpi_pad_driver); -} - -module_init(acpi_pad_init); -module_exit(acpi_pad_exit); -MODULE_AUTHOR("Shaohua Li"); -MODULE_DESCRIPTION("ACPI Processor Aggregator Driver"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/acpi/dock.c b/trunk/drivers/acpi/dock.c index 7338b6a3e049..3a2cfefc71ab 100644 --- a/trunk/drivers/acpi/dock.c +++ b/trunk/drivers/acpi/dock.c @@ -67,7 +67,7 @@ struct dock_station { struct list_head dependent_devices; struct list_head hotplug_devices; - struct list_head sibling; + struct list_head sibiling; struct platform_device *dock_device; }; static LIST_HEAD(dock_stations); @@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle) if (is_dock(handle)) return 1; - list_for_each_entry(dock_station, &dock_stations, sibling) { + list_for_each_entry(dock_station, &dock_stations, sibiling) { if (find_dock_dependent_device(dock_station, handle)) return 1; } @@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, * make sure this handle is for a device dependent on the dock, * this would include the dock station itself */ - list_for_each_entry(dock_station, &dock_stations, sibling) { + list_for_each_entry(dock_station, &dock_stations, sibiling) { /* * An ATA bay can be in a dock and itself can be ejected * seperately, so there are two 'dock stations' which need the @@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle) if (!dock_station_count) return; - list_for_each_entry(dock_station, &dock_stations, sibling) { + list_for_each_entry(dock_station, &dock_stations, sibiling) { dd = find_dock_dependent_device(dock_station, handle); if (dd) dock_del_hotplug_device(dock_station, dd); @@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this, if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK && event != ACPI_NOTIFY_EJECT_REQUEST) return 0; - list_for_each_entry(dock_station, &dock_stations, sibling) { + list_for_each_entry(dock_station, &dock_stations, sibiling) { if (dock_station->handle == handle) { struct dock_data *dock_data; @@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle) dock_station->last_dock_time = jiffies - HZ; INIT_LIST_HEAD(&dock_station->dependent_devices); INIT_LIST_HEAD(&dock_station->hotplug_devices); - INIT_LIST_HEAD(&dock_station->sibling); + INIT_LIST_HEAD(&dock_station->sibiling); spin_lock_init(&dock_station->dd_lock); mutex_init(&dock_station->hp_lock); ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); @@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle) add_dock_dependent_device(dock_station, dd); dock_station_count++; - list_add(&dock_station->sibling, &dock_stations); + list_add(&dock_station->sibiling, &dock_stations); return 0; dock_add_err_unregister: @@ -1149,7 +1149,7 @@ static void __exit dock_exit(void) struct dock_station *tmp; unregister_acpi_bus_notifier(&dock_acpi_notifier); - list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling) + list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) dock_remove(dock_station); } diff --git a/trunk/drivers/acpi/ec.c b/trunk/drivers/acpi/ec.c index baef28c1e630..f70796081c4c 100644 --- a/trunk/drivers/acpi/ec.c +++ b/trunk/drivers/acpi/ec.c @@ -119,8 +119,6 @@ static struct acpi_ec { } *boot_ec, 
*first_ec; static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ -static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ -static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ /* -------------------------------------------------------------------------- Transaction Management @@ -234,8 +232,10 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) + if (!ec->curr->irq_count || + (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)) break; + /* try restart command if we get any false interrupts */ pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->curr_lock, flags); start_transaction(ec); @@ -899,44 +899,6 @@ static const struct acpi_device_id ec_device_ids[] = { {"", 0}, }; -/* Some BIOS do not survive early DSDT scan, skip it */ -static int ec_skip_dsdt_scan(const struct dmi_system_id *id) -{ - EC_FLAGS_SKIP_DSDT_SCAN = 1; - return 0; -} - -/* ASUStek often supplies us with broken ECDT, validate it */ -static int ec_validate_ecdt(const struct dmi_system_id *id) -{ - EC_FLAGS_VALIDATE_ECDT = 1; - return 0; -} - -/* MSI EC needs special treatment, enable it */ -static int ec_flag_msi(const struct dmi_system_id *id) -{ - EC_FLAGS_MSI = 1; - EC_FLAGS_VALIDATE_ECDT = 1; - return 0; -} - -static struct dmi_system_id __initdata ec_dmi_table[] = { - { - ec_skip_dsdt_scan, "Compal JFL92", { - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), - DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL}, - { - ec_flag_msi, "MSI hardware", { - DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"), - DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL}, - { - ec_validate_ecdt, "ASUS hardware", { - DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, - {}, -}; - - int __init acpi_ec_ecdt_probe(void) { acpi_status status; @@ -949,7 +911,11 @@ int __init acpi_ec_ecdt_probe(void) /* * Generate a boot ec context */ - dmi_check_system(ec_dmi_table); + if (dmi_name_in_vendors("Micro-Star") || + dmi_name_in_vendors("Notebook")) { + pr_info(PREFIX "Enabling special treatment for EC from MSI.\n"); + EC_FLAGS_MSI = 1; + } status = acpi_get_table(ACPI_SIG_ECDT, 1, (struct acpi_table_header **)&ecdt_ptr); if (ACPI_SUCCESS(status)) { @@ -960,7 +926,7 @@ int __init acpi_ec_ecdt_probe(void) boot_ec->handle = ACPI_ROOT_OBJECT; acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); /* Don't trust ECDT, which comes from ASUSTek */ - if (!EC_FLAGS_VALIDATE_ECDT) + if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0) goto install; saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); if (!saved_ec) @@ -968,10 +934,6 @@ int __init acpi_ec_ecdt_probe(void) memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); /* fall through */ } - - if (EC_FLAGS_SKIP_DSDT_SCAN) - return -ENODEV; - /* This workaround is needed only on some broken machines, * which require early EC, but fail to provide ECDT */ printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); diff --git a/trunk/drivers/acpi/proc.c b/trunk/drivers/acpi/proc.c index f8b6f555ba52..d0d550d22a6d 100644 --- a/trunk/drivers/acpi/proc.c +++ b/trunk/drivers/acpi/proc.c @@ -398,8 +398,6 @@ acpi_system_write_wakeup_device(struct file *file, if (len > 4) len = 4; - if (len < 0) - return -EFAULT; if (copy_from_user(strbuf, buffer, len)) return -EFAULT; diff --git a/trunk/drivers/acpi/processor_core.c b/trunk/drivers/acpi/processor_core.c index c567b46dfa0f..c2d4d6e09364 100644 --- a/trunk/drivers/acpi/processor_core.c +++ 
b/trunk/drivers/acpi/processor_core.c @@ -863,6 +863,13 @@ static int acpi_processor_add(struct acpi_device *device) goto err_remove_sysfs; } + if (pr->flags.throttling) { + printk(KERN_INFO PREFIX "%s [%s] (supports", + acpi_device_name(device), acpi_device_bid(device)); + printk(" %d throttling states", pr->throttling.state_count); + printk(")\n"); + } + return 0; err_remove_sysfs: diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index 14a7481c97d7..468921bed22f 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -1052,8 +1052,6 @@ static void acpi_device_set_id(struct acpi_device *device) device->flags.bus_address = 1; } - kfree(info); - /* * Some devices don't reliably have _HIDs & _CIDs, so add * synthetic HIDs to make sure drivers can find them. @@ -1327,8 +1325,13 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops, struct acpi_device **child) { acpi_status status; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; void *device = NULL; + acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); + printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n", + (char *) buffer.pointer); + status = acpi_bus_check_add(handle, 0, ops, &device); if (ACPI_SUCCESS(status)) acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX, diff --git a/trunk/drivers/block/DAC960.c b/trunk/drivers/block/DAC960.c index eb4fa1943944..6fa7b0fdbdfd 100644 --- a/trunk/drivers/block/DAC960.c +++ b/trunk/drivers/block/DAC960.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include @@ -6423,10 +6422,16 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller, return true; } -static int dac960_proc_show(struct seq_file *m, void *v) + +/* + DAC960_ProcReadStatus implements reading /proc/rd/status. +*/ + +static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, + int Count, int *EOF, void *Data) { unsigned char *StatusMessage = "OK\n"; - int ControllerNumber; + int ControllerNumber, BytesAvailable; for (ControllerNumber = 0; ControllerNumber < DAC960_ControllerCount; ControllerNumber++) @@ -6439,49 +6444,52 @@ static int dac960_proc_show(struct seq_file *m, void *v) break; } } - seq_puts(m, StatusMessage); - return 0; + BytesAvailable = strlen(StatusMessage) - Offset; + if (Count >= BytesAvailable) + { + Count = BytesAvailable; + *EOF = true; + } + if (Count <= 0) return 0; + *Start = Page; + memcpy(Page, &StatusMessage[Offset], Count); + return Count; } -static int dac960_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, dac960_proc_show, NULL); -} -static const struct file_operations dac960_proc_fops = { - .owner = THIS_MODULE, - .open = dac960_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +/* + DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. 
+*/ -static int dac960_initial_status_proc_show(struct seq_file *m, void *v) +static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, + int Count, int *EOF, void *Data) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; - seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer); - return 0; + DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; + int BytesAvailable = Controller->InitialStatusLength - Offset; + if (Count >= BytesAvailable) + { + Count = BytesAvailable; + *EOF = true; + } + if (Count <= 0) return 0; + *Start = Page; + memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count); + return Count; } -static int dac960_initial_status_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data); -} -static const struct file_operations dac960_initial_status_proc_fops = { - .owner = THIS_MODULE, - .open = dac960_initial_status_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +/* + DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. +*/ -static int dac960_current_status_proc_show(struct seq_file *m, void *v) +static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, + int Count, int *EOF, void *Data) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private; + DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; unsigned char *StatusMessage = "No Rebuild or Consistency Check in Progress\n"; int ProgressMessageLength = strlen(StatusMessage); + int BytesAvailable; if (jiffies != Controller->LastCurrentStatusTime) { Controller->CurrentStatusLength = 0; @@ -6505,41 +6513,49 @@ static int dac960_current_status_proc_show(struct seq_file *m, void *v) } Controller->LastCurrentStatusTime = jiffies; } - seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer); - return 0; + BytesAvailable = Controller->CurrentStatusLength - Offset; + if (Count >= BytesAvailable) + { + Count = BytesAvailable; + *EOF = true; + } + if (Count <= 0) return 0; + *Start = Page; + memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count); + return Count; } -static int dac960_current_status_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, dac960_current_status_proc_show, PDE(inode)->data); -} -static const struct file_operations dac960_current_status_proc_fops = { - .owner = THIS_MODULE, - .open = dac960_current_status_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +/* + DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. 
+*/ -static int dac960_user_command_proc_show(struct seq_file *m, void *v) +static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, + int Count, int *EOF, void *Data) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; - - seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer); - return 0; + DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; + int BytesAvailable = Controller->UserStatusLength - Offset; + if (Count >= BytesAvailable) + { + Count = BytesAvailable; + *EOF = true; + } + if (Count <= 0) return 0; + *Start = Page; + memcpy(Page, &Controller->UserStatusBuffer[Offset], Count); + return Count; } -static int dac960_user_command_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, dac960_user_command_proc_show, PDE(inode)->data); -} -static ssize_t dac960_user_command_proc_write(struct file *file, +/* + DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. +*/ + +static int DAC960_ProcWriteUserCommand(struct file *file, const char __user *Buffer, - size_t Count, loff_t *pos) + unsigned long Count, void *Data) { - DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data; + DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; unsigned char CommandBuffer[80]; int Length; if (Count > sizeof(CommandBuffer)-1) return -EINVAL; @@ -6556,14 +6572,6 @@ static ssize_t dac960_user_command_proc_write(struct file *file, ? Count : -EBUSY); } -static const struct file_operations dac960_user_command_proc_fops = { - .owner = THIS_MODULE, - .open = dac960_user_command_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .write = dac960_user_command_proc_write, -}; /* DAC960_CreateProcEntries creates the /proc/rd/... 
entries for the @@ -6578,17 +6586,23 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) if (DAC960_ProcDirectoryEntry == NULL) { DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); - StatusProcEntry = proc_create("status", 0, + StatusProcEntry = create_proc_read_entry("status", 0, DAC960_ProcDirectoryEntry, - &dac960_proc_fops); + DAC960_ProcReadStatus, NULL); } sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); ControllerProcEntry = proc_mkdir(Controller->ControllerName, DAC960_ProcDirectoryEntry); - proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); - proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); - UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); + create_proc_read_entry("initial_status", 0, ControllerProcEntry, + DAC960_ProcReadInitialStatus, Controller); + create_proc_read_entry("current_status", 0, ControllerProcEntry, + DAC960_ProcReadCurrentStatus, Controller); + UserCommandProcEntry = + create_proc_read_entry("user_command", S_IWUSR | S_IRUSR, + ControllerProcEntry, DAC960_ProcReadUserCommand, + Controller); + UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand; Controller->ControllerProcEntry = ControllerProcEntry; } diff --git a/trunk/drivers/block/cciss.c b/trunk/drivers/block/cciss.c index fb5be2d95d52..1ece0b47b581 100644 --- a/trunk/drivers/block/cciss.c +++ b/trunk/drivers/block/cciss.c @@ -36,11 +36,9 @@ #include #include #include -#include #include #include #include -#include #include #include @@ -157,10 +155,6 @@ static struct board_type products[] = { static ctlr_info_t *hba[MAX_CTLR]; -static struct task_struct *cciss_scan_thread; -static DEFINE_MUTEX(scan_mutex); -static LIST_HEAD(scan_q); - static void do_cciss_request(struct request_queue *q); static irqreturn_t do_cciss_intr(int irq, void *dev_id); static int cciss_open(struct block_device *bdev, fmode_t mode); @@ -170,9 +164,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); static int cciss_revalidate(struct gendisk *disk); -static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); +static int rebuild_lun_table(ctlr_info_t *h, int first_time); static int deregister_disk(ctlr_info_t *h, int drv_index, - int clear_all, int via_ioctl); + int clear_all); static void cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, unsigned int *block_size); @@ -195,13 +189,8 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); static void fail_all_cmds(unsigned long ctlr); -static int add_to_scan_list(struct ctlr_info *h); static int scan_thread(void *data); static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); -static void cciss_hba_release(struct device *dev); -static void cciss_device_release(struct device *dev); -static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); -static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); #ifdef CONFIG_PROC_FS static void cciss_procinit(int i); @@ -256,10 +245,7 @@ static inline void removeQ(CommandList_struct *c) #include "cciss_scsi.c" /* For SCSI tape support */ -static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", - "UNKNOWN" -}; -#define 
RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1) +#define RAID_UNKNOWN 6 #ifdef CONFIG_PROC_FS @@ -269,6 +255,9 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", #define ENG_GIG 1000000000 #define ENG_GIG_FACTOR (ENG_GIG/512) #define ENGAGE_SCSI "engage scsi" +static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", + "UNKNOWN" +}; static struct proc_dir_entry *proc_cciss; @@ -329,7 +318,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) ctlr_info_t *h = seq->private; unsigned ctlr = h->ctlr; loff_t *pos = v; - drive_info_struct *drv = h->drv[*pos]; + drive_info_struct *drv = &h->drv[*pos]; if (*pos > h->highest_lun) return 0; @@ -342,7 +331,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) vol_sz_frac *= 100; sector_div(vol_sz_frac, ENG_GIG_FACTOR); - if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) + if (drv->raid_level > 5) drv->raid_level = RAID_UNKNOWN; seq_printf(seq, "cciss/c%dd%d:" "\t%4u.%02uGB\tRAID %s\n", @@ -465,19 +454,9 @@ static void __devinit cciss_procinit(int i) #define to_hba(n) container_of(n, struct ctlr_info, dev) #define to_drv(n) container_of(n, drive_info_struct, dev) -static ssize_t host_store_rescan(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct ctlr_info *h = to_hba(dev); - - add_to_scan_list(h); - wake_up_process(cciss_scan_thread); - wait_for_completion_interruptible(&h->scan_wait); - - return count; -} -DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static struct device_type cciss_host_type = { + .name = "cciss_host", +}; static ssize_t dev_show_unique_id(struct device *dev, struct device_attribute *attr, @@ -581,101 +560,11 @@ static ssize_t dev_show_rev(struct device *dev, } DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); -static ssize_t cciss_show_lunid(struct device *dev, - struct device_attribute *attr, char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - unsigned long flags; - unsigned char lunid[8]; - - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return -EBUSY; - } - if (!drv->heads) { - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return -ENOTTY; - } - memcpy(lunid, drv->LunID, sizeof(lunid)); - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - lunid[0], lunid[1], lunid[2], lunid[3], - lunid[4], lunid[5], lunid[6], lunid[7]); -} -DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); - -static ssize_t cciss_show_raid_level(struct device *dev, - struct device_attribute *attr, char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - int raid; - unsigned long flags; - - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return -EBUSY; - } - raid = drv->raid_level; - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - if (raid < 0 || raid > RAID_UNKNOWN) - raid = RAID_UNKNOWN; - - return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", - raid_label[raid]); -} -DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); - -static ssize_t cciss_show_usage_count(struct device *dev, - struct device_attribute *attr, char *buf) -{ - drive_info_struct *drv = to_drv(dev); - struct ctlr_info *h = to_hba(drv->dev.parent); - unsigned long flags; - int count; - - 
spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - if (h->busy_configuring) { - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return -EBUSY; - } - count = drv->usage_count; - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return snprintf(buf, 20, "%d\n", count); -} -DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); - -static struct attribute *cciss_host_attrs[] = { - &dev_attr_rescan.attr, - NULL -}; - -static struct attribute_group cciss_host_attr_group = { - .attrs = cciss_host_attrs, -}; - -static const struct attribute_group *cciss_host_attr_groups[] = { - &cciss_host_attr_group, - NULL -}; - -static struct device_type cciss_host_type = { - .name = "cciss_host", - .groups = cciss_host_attr_groups, - .release = cciss_hba_release, -}; - static struct attribute *cciss_dev_attrs[] = { &dev_attr_unique_id.attr, &dev_attr_model.attr, &dev_attr_vendor.attr, &dev_attr_rev.attr, - &dev_attr_lunid.attr, - &dev_attr_raid_level.attr, - &dev_attr_usage_count.attr, NULL }; @@ -691,24 +580,12 @@ static const struct attribute_group *cciss_dev_attr_groups[] = { static struct device_type cciss_dev_type = { .name = "cciss_device", .groups = cciss_dev_attr_groups, - .release = cciss_device_release, }; static struct bus_type cciss_bus_type = { .name = "cciss", }; -/* - * cciss_hba_release is called when the reference count - * of h->dev goes to zero. - */ -static void cciss_hba_release(struct device *dev) -{ - /* - * nothing to do, but need this to avoid a warning - * about not having a release handler from lib/kref.c. - */ -} /* * Initialize sysfs entry for each controller. This sets up and registers @@ -732,16 +609,6 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) { device_del(&h->dev); - put_device(&h->dev); /* final put. */ -} - -/* cciss_device_release is called when the reference count - * of h->drv[x]dev goes to zero. - */ -static void cciss_device_release(struct device *dev) -{ - drive_info_struct *drv = to_drv(dev); - kfree(drv); } /* @@ -750,39 +617,24 @@ static void cciss_device_release(struct device *dev) * /sys/bus/pci/devices/drv[drv_index]->device_initialized) - return 0; - - dev = &h->drv[drv_index]->dev; - device_initialize(dev); - dev->type = &cciss_dev_type; - dev->bus = &cciss_bus_type; - dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); - dev->parent = &h->dev; - h->drv[drv_index]->device_initialized = 1; - return device_add(dev); + device_initialize(&drv->dev); + drv->dev.type = &cciss_dev_type; + drv->dev.bus = &cciss_bus_type; + dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); + drv->dev.parent = &h->dev; + return device_add(&drv->dev); } /* * Remove sysfs entries for a logical drive. */ -static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, - int ctlr_exiting) +static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) { - struct device *dev = &h->drv[drv_index]->dev; - - /* special case for c*d0, we only destroy it on controller exit */ - if (drv_index == 0 && !ctlr_exiting) - return; - - device_del(dev); - put_device(dev); /* the "final" put. 
*/ - h->drv[drv_index] = NULL; + device_del(&drv->dev); } /* @@ -899,7 +751,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); #endif /* CCISS_DEBUG */ - if (drv->busy_configuring) + if (host->busy_initializing || drv->busy_configuring) return -EBUSY; /* * Root is allowed to open raw volume zero even if it's not configured @@ -915,8 +767,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) if (MINOR(bdev->bd_dev) & 0x0f) { return -ENXIO; /* if it is, make sure we have a LUN ID */ - } else if (memcmp(drv->LunID, CTLR_LUNID, - sizeof(drv->LunID))) { + } else if (drv->LunID == 0) { return -ENXIO; } } @@ -1281,13 +1132,12 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, case CCISS_DEREGDISK: case CCISS_REGNEWD: case CCISS_REVALIDVOLS: - return rebuild_lun_table(host, 0, 1); + return rebuild_lun_table(host, 0); case CCISS_GETLUNINFO:{ LogvolInfo_struct luninfo; - memcpy(&luninfo.LunID, drv->LunID, - sizeof(luninfo.LunID)); + luninfo.LunID = drv->LunID; luninfo.num_opens = drv->usage_count; luninfo.num_parts = 0; if (copy_to_user(argp, &luninfo, @@ -1625,10 +1475,7 @@ static void cciss_check_queues(ctlr_info_t *h) /* make sure the disk has been added and the drive is real * because this can be called from the middle of init_one. */ - if (!h->drv[curr_queue]) - continue; - if (!(h->drv[curr_queue]->queue) || - !(h->drv[curr_queue]->heads)) + if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) continue; blk_start_queue(h->gendisk[curr_queue]->queue); @@ -1685,11 +1532,13 @@ static void cciss_softirq_done(struct request *rq) spin_unlock_irqrestore(&h->lock, flags); } -static inline void log_unit_to_scsi3addr(ctlr_info_t *h, - unsigned char scsi3addr[], uint32_t log_unit) +static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], + uint32_t log_unit) { - memcpy(scsi3addr, h->drv[log_unit]->LunID, - sizeof(h->drv[log_unit]->LunID)); + log_unit = h->drv[log_unit].LunID & 0x03fff; + memset(&scsi3addr[4], 0, 4); + memcpy(&scsi3addr[0], &log_unit, 4); + scsi3addr[3] |= 0x40; } /* This function gets the SCSI vendor, model, and revision of a logical drive @@ -1766,23 +1615,16 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq, return; } -/* - * cciss_add_disk sets up the block device queue for a logical drive - */ -static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, +static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, int drv_index) { disk->queue = blk_init_queue(do_cciss_request, &h->lock); - if (!disk->queue) - goto init_queue_failure; sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); disk->major = h->major; disk->first_minor = drv_index << NWD_SHIFT; disk->fops = &cciss_fops; - if (cciss_create_ld_sysfs_entry(h, drv_index)) - goto cleanup_queue; - disk->private_data = h->drv[drv_index]; - disk->driverfs_dev = &h->drv[drv_index]->dev; + disk->private_data = &h->drv[drv_index]; + disk->driverfs_dev = &h->drv[drv_index].dev; /* Set up queue information */ blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); @@ -1800,21 +1642,14 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, disk->queue->queuedata = h; blk_queue_logical_block_size(disk->queue, - h->drv[drv_index]->block_size); + h->drv[drv_index].block_size); /* Make sure all queue data is written out before */ - /* setting h->drv[drv_index]->queue, as setting this */ + /* setting h->drv[drv_index].queue, as setting this */ /* allows the 
interrupt handler to start the queue */ wmb(); - h->drv[drv_index]->queue = disk->queue; + h->drv[drv_index].queue = disk->queue; add_disk(disk); - return 0; - -cleanup_queue: - blk_cleanup_queue(disk->queue); - disk->queue = NULL; -init_queue_failure: - return -1; } /* This function will check the usage_count of the drive to be updated/added. @@ -1827,8 +1662,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, * is also the controller node. Any changes to disk 0 will show up on * the next reboot. */ -static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, - int via_ioctl) +static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) { ctlr_info_t *h = hba[ctlr]; struct gendisk *disk; @@ -1838,13 +1672,21 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, unsigned long flags = 0; int ret = 0; drive_info_struct *drvinfo; + int was_only_controller_node; /* Get information about the disk and modify the driver structure */ inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); - drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); + drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); if (inq_buff == NULL || drvinfo == NULL) goto mem_msg; + /* See if we're trying to update the "controller node" + * this will happen the when the first logical drive gets + * created by ACU. + */ + was_only_controller_node = (drv_index == 0 && + h->drv[0].raid_level == -1); + /* testing to see if 16-byte CDBs are already being used */ if (h->cciss_read == CCISS_READ_16) { cciss_read_capacity_16(h->ctlr, drv_index, 1, @@ -1877,19 +1719,16 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, drvinfo->model, drvinfo->rev); cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, sizeof(drvinfo->serial_no)); - /* Save the lunid in case we deregister the disk, below. */ - memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, - sizeof(drvinfo->LunID)); /* Is it the same disk we already know, and nothing's changed? */ - if (h->drv[drv_index]->raid_level != -1 && + if (h->drv[drv_index].raid_level != -1 && ((memcmp(drvinfo->serial_no, - h->drv[drv_index]->serial_no, 16) == 0) && - drvinfo->block_size == h->drv[drv_index]->block_size && - drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && - drvinfo->heads == h->drv[drv_index]->heads && - drvinfo->sectors == h->drv[drv_index]->sectors && - drvinfo->cylinders == h->drv[drv_index]->cylinders)) + h->drv[drv_index].serial_no, 16) == 0) && + drvinfo->block_size == h->drv[drv_index].block_size && + drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && + drvinfo->heads == h->drv[drv_index].heads && + drvinfo->sectors == h->drv[drv_index].sectors && + drvinfo->cylinders == h->drv[drv_index].cylinders)) /* The disk is unchanged, nothing to update */ goto freeret; @@ -1899,17 +1738,18 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, * If the disk already exists then deregister it before proceeding * (unless it's the first disk (for the controller node). 
*/ - if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { + if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { printk(KERN_WARNING "disk %d has changed.\n", drv_index); spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - h->drv[drv_index]->busy_configuring = 1; + h->drv[drv_index].busy_configuring = 1; spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - /* deregister_disk sets h->drv[drv_index]->queue = NULL + /* deregister_disk sets h->drv[drv_index].queue = NULL * which keeps the interrupt handler from starting * the queue. */ - ret = deregister_disk(h, drv_index, 0, via_ioctl); + ret = deregister_disk(h, drv_index, 0); + h->drv[drv_index].busy_configuring = 0; } /* If the disk is in use return */ @@ -1917,31 +1757,22 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, goto freeret; /* Save the new information from cciss_geometry_inquiry - * and serial number inquiry. If the disk was deregistered - * above, then h->drv[drv_index] will be NULL. + * and serial number inquiry. */ - if (h->drv[drv_index] == NULL) { - drvinfo->device_initialized = 0; - h->drv[drv_index] = drvinfo; - drvinfo = NULL; /* so it won't be freed below. */ - } else { - /* special case for cxd0 */ - h->drv[drv_index]->block_size = drvinfo->block_size; - h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; - h->drv[drv_index]->heads = drvinfo->heads; - h->drv[drv_index]->sectors = drvinfo->sectors; - h->drv[drv_index]->cylinders = drvinfo->cylinders; - h->drv[drv_index]->raid_level = drvinfo->raid_level; - memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); - memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, - VENDOR_LEN + 1); - memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); - memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); - } + h->drv[drv_index].block_size = drvinfo->block_size; + h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; + h->drv[drv_index].heads = drvinfo->heads; + h->drv[drv_index].sectors = drvinfo->sectors; + h->drv[drv_index].cylinders = drvinfo->cylinders; + h->drv[drv_index].raid_level = drvinfo->raid_level; + memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); + memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); + memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); + memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); ++h->num_luns; disk = h->gendisk[drv_index]; - set_capacity(disk, h->drv[drv_index]->nr_blocks); + set_capacity(disk, h->drv[drv_index].nr_blocks); /* If it's not disk 0 (drv_index != 0) * or if it was disk 0, but there was previously @@ -1949,15 +1780,8 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, * (raid_leve == -1) then we want to update the * logical drive's information. */ - if (drv_index || first_time) { - if (cciss_add_disk(h, disk, drv_index) != 0) { - cciss_free_gendisk(h, drv_index); - cciss_free_drive_info(h, drv_index); - printk(KERN_WARNING "cciss:%d could not update " - "disk %d\n", h->ctlr, drv_index); - --h->num_luns; - } - } + if (drv_index || first_time) + cciss_add_disk(h, disk, drv_index); freeret: kfree(inq_buff); @@ -1969,70 +1793,28 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, } /* This function will find the first index of the controllers drive array - * that has a null drv pointer and allocate the drive info struct and - * will return that index This is where new drives will be added. 
- * If the index to be returned is greater than the highest_lun index for - * the controller then highest_lun is set * to this new index. - * If there are no available indexes or if tha allocation fails, then -1 - * is returned. * "controller_node" is used to know if this is a real - * logical drive, or just the controller node, which determines if this - * counts towards highest_lun. + * that has a -1 for the raid_level and will return that index. This is + * where new drives will be added. If the index to be returned is greater + * than the highest_lun index for the controller then highest_lun is set + * to this new index. If there are no available indexes then -1 is returned. + * "controller_node" is used to know if this is a real logical drive, or just + * the controller node, which determines if this counts towards highest_lun. */ -static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) +static int cciss_find_free_drive_index(int ctlr, int controller_node) { int i; - drive_info_struct *drv; - /* Search for an empty slot for our drive info */ for (i = 0; i < CISS_MAX_LUN; i++) { - - /* if not cxd0 case, and it's occupied, skip it. */ - if (h->drv[i] && i != 0) - continue; - /* - * If it's cxd0 case, and drv is alloc'ed already, and a - * disk is configured there, skip it. - */ - if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) - continue; - - /* - * We've found an empty slot. Update highest_lun - * provided this isn't just the fake cxd0 controller node. - */ - if (i > h->highest_lun && !controller_node) - h->highest_lun = i; - - /* If adding a real disk at cxd0, and it's already alloc'ed */ - if (i == 0 && h->drv[i] != NULL) + if (hba[ctlr]->drv[i].raid_level == -1) { + if (i > hba[ctlr]->highest_lun) + if (!controller_node) + hba[ctlr]->highest_lun = i; return i; - - /* - * Found an empty slot, not already alloc'ed. Allocate it. - * Mark it with raid_level == -1, so we know it's new later on. - */ - drv = kzalloc(sizeof(*drv), GFP_KERNEL); - if (!drv) - return -1; - drv->raid_level = -1; /* so we know it's new */ - h->drv[i] = drv; - return i; + } } return -1; } -static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) -{ - kfree(h->drv[drv_index]); - h->drv[drv_index] = NULL; -} - -static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) -{ - put_disk(h->gendisk[drv_index]); - h->gendisk[drv_index] = NULL; -} - /* cciss_add_gendisk finds a free hba[]->drv structure * and allocates a gendisk if needed, and sets the lunid * in the drvinfo structure. It returns the index into @@ -2042,15 +1824,13 @@ static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) * a means to talk to the controller in case no logical * drives have yet been configured. 
*/ -static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], - int controller_node) +static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) { int drv_index; - drv_index = cciss_alloc_drive_info(h, controller_node); + drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); if (drv_index == -1) return -1; - /*Check if the gendisk needs to be allocated */ if (!h->gendisk[drv_index]) { h->gendisk[drv_index] = @@ -2059,24 +1839,23 @@ static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], printk(KERN_ERR "cciss%d: could not " "allocate a new disk %d\n", h->ctlr, drv_index); - goto err_free_drive_info; + return -1; } } - memcpy(h->drv[drv_index]->LunID, lunid, - sizeof(h->drv[drv_index]->LunID)); - if (cciss_create_ld_sysfs_entry(h, drv_index)) + h->drv[drv_index].LunID = lunid; + if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) goto err_free_disk; + /* Don't need to mark this busy because nobody */ /* else knows about this disk yet to contend */ /* for access to it. */ - h->drv[drv_index]->busy_configuring = 0; + h->drv[drv_index].busy_configuring = 0; wmb(); return drv_index; err_free_disk: - cciss_free_gendisk(h, drv_index); -err_free_drive_info: - cciss_free_drive_info(h, drv_index); + put_disk(h->gendisk[drv_index]); + h->gendisk[drv_index] = NULL; return -1; } @@ -2093,25 +1872,21 @@ static void cciss_add_controller_node(ctlr_info_t *h) if (h->gendisk[0] != NULL) /* already did this? Then bail. */ return; - drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); - if (drv_index == -1) - goto error; - h->drv[drv_index]->block_size = 512; - h->drv[drv_index]->nr_blocks = 0; - h->drv[drv_index]->heads = 0; - h->drv[drv_index]->sectors = 0; - h->drv[drv_index]->cylinders = 0; - h->drv[drv_index]->raid_level = -1; - memset(h->drv[drv_index]->serial_no, 0, 16); - disk = h->gendisk[drv_index]; - if (cciss_add_disk(h, disk, drv_index) == 0) + drv_index = cciss_add_gendisk(h, 0, 1); + if (drv_index == -1) { + printk(KERN_WARNING "cciss%d: could not " + "add disk 0.\n", h->ctlr); return; - cciss_free_gendisk(h, drv_index); - cciss_free_drive_info(h, drv_index); -error: - printk(KERN_WARNING "cciss%d: could not " - "add disk 0.\n", h->ctlr); - return; + } + h->drv[drv_index].block_size = 512; + h->drv[drv_index].nr_blocks = 0; + h->drv[drv_index].heads = 0; + h->drv[drv_index].sectors = 0; + h->drv[drv_index].cylinders = 0; + h->drv[drv_index].raid_level = -1; + memset(h->drv[drv_index].serial_no, 0, 16); + disk = h->gendisk[drv_index]; + cciss_add_disk(h, disk, drv_index); } /* This function will add and remove logical drives from the Logical @@ -2122,8 +1897,7 @@ static void cciss_add_controller_node(ctlr_info_t *h) * INPUT * h = The controller to perform the operations on */ -static int rebuild_lun_table(ctlr_info_t *h, int first_time, - int via_ioctl) +static int rebuild_lun_table(ctlr_info_t *h, int first_time) { int ctlr = h->ctlr; int num_luns; @@ -2133,7 +1907,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, int i; int drv_found; int drv_index = 0; - unsigned char lunid[8] = CTLR_LUNID; + __u32 lunid = 0; unsigned long flags; if (!capable(CAP_SYS_RAWIO)) @@ -2186,13 +1960,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, drv_found = 0; /* skip holes in the array from already deleted drives */ - if (h->drv[i] == NULL) + if (h->drv[i].raid_level == -1) continue; for (j = 0; j < num_luns; j++) { - memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); - if (memcmp(h->drv[i]->LunID, lunid, - 
sizeof(lunid)) == 0) { + memcpy(&lunid, &ld_buff->LUN[j][0], 4); + lunid = le32_to_cpu(lunid); + if (h->drv[i].LunID == lunid) { drv_found = 1; break; } @@ -2200,11 +1974,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, if (!drv_found) { /* Deregister it from the OS, it's gone. */ spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); - h->drv[i]->busy_configuring = 1; + h->drv[i].busy_configuring = 1; spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); - return_code = deregister_disk(h, i, 1, via_ioctl); - if (h->drv[i] != NULL) - h->drv[i]->busy_configuring = 0; + return_code = deregister_disk(h, i, 1); + cciss_destroy_ld_sysfs_entry(&h->drv[i]); + h->drv[i].busy_configuring = 0; } } @@ -2218,16 +1992,17 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, drv_found = 0; - memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); + memcpy(&lunid, &ld_buff->LUN[i][0], 4); + lunid = le32_to_cpu(lunid); + /* Find if the LUN is already in the drive array * of the driver. If so then update its info * if not in use. If it does not exist then find * the first free index and add it. */ for (j = 0; j <= h->highest_lun; j++) { - if (h->drv[j] != NULL && - memcmp(h->drv[j]->LunID, lunid, - sizeof(h->drv[j]->LunID)) == 0) { + if (h->drv[j].raid_level != -1 && + h->drv[j].LunID == lunid) { drv_index = j; drv_found = 1; break; @@ -2240,8 +2015,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, if (drv_index == -1) goto freeret; } - cciss_update_drive_info(ctlr, drv_index, first_time, - via_ioctl); + cciss_update_drive_info(ctlr, drv_index, first_time); } /* end for */ freeret: @@ -2258,25 +2032,6 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time, goto freeret; } -static void cciss_clear_drive_info(drive_info_struct *drive_info) -{ - /* zero out the disk size info */ - drive_info->nr_blocks = 0; - drive_info->block_size = 0; - drive_info->heads = 0; - drive_info->sectors = 0; - drive_info->cylinders = 0; - drive_info->raid_level = -1; - memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); - memset(drive_info->model, 0, sizeof(drive_info->model)); - memset(drive_info->rev, 0, sizeof(drive_info->rev)); - memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); - /* - * don't clear the LUNID though, we need to remember which - * one this one is. - */ -} - /* This function will deregister the disk and it's queue from the * kernel. It must be called with the controller lock held and the * drv structures busy_configuring flag set. It's parameters are: @@ -2291,48 +2046,43 @@ static void cciss_clear_drive_info(drive_info_struct *drive_info) * the disk in preparation for re-adding it. In this case * the highest_lun should be left unchanged and the LunID * should not be cleared. - * via_ioctl - * This indicates whether we've reached this path via ioctl. - * This affects the maximum usage count allowed for c0d0 to be messed with. - * If this path is reached via ioctl(), then the max_usage_count will - * be 1, as the process calling ioctl() has got to have the device open. - * If we get here via sysfs, then the max usage count will be zero. 
*/ static int deregister_disk(ctlr_info_t *h, int drv_index, - int clear_all, int via_ioctl) + int clear_all) { int i; struct gendisk *disk; drive_info_struct *drv; - int recalculate_highest_lun; if (!capable(CAP_SYS_RAWIO)) return -EPERM; - drv = h->drv[drv_index]; + drv = &h->drv[drv_index]; disk = h->gendisk[drv_index]; /* make sure logical volume is NOT is use */ if (clear_all || (h->gendisk[0] == disk)) { - if (drv->usage_count > via_ioctl) + if (drv->usage_count > 1) return -EBUSY; } else if (drv->usage_count > 0) return -EBUSY; - recalculate_highest_lun = (drv == h->drv[h->highest_lun]); - /* invalidate the devices and deregister the disk. If it is disk * zero do not deregister it but just zero out it's values. This * allows us to delete disk zero but keep the controller registered. */ if (h->gendisk[0] != disk) { struct request_queue *q = disk->queue; - if (disk->flags & GENHD_FL_UP) { - cciss_destroy_ld_sysfs_entry(h, drv_index, 0); + if (disk->flags & GENHD_FL_UP) del_gendisk(disk); - } - if (q) + if (q) { blk_cleanup_queue(q); + /* Set drv->queue to NULL so that we do not try + * to call blk_start_queue on this queue in the + * interrupt handler + */ + drv->queue = NULL; + } /* If clear_all is set then we are deleting the logical * drive, not just refreshing its info. For drives * other than disk 0 we will call put_disk. We do not @@ -2355,20 +2105,34 @@ static int deregister_disk(ctlr_info_t *h, int drv_index, } } else { set_capacity(disk, 0); - cciss_clear_drive_info(drv); } --h->num_luns; - - /* if it was the last disk, find the new hightest lun */ - if (clear_all && recalculate_highest_lun) { - int i, newhighest = -1; - for (i = 0; i <= h->highest_lun; i++) { - /* if the disk has size > 0, it is available */ - if (h->drv[i] && h->drv[i]->heads) - newhighest = i; + /* zero out the disk size info */ + drv->nr_blocks = 0; + drv->block_size = 0; + drv->heads = 0; + drv->sectors = 0; + drv->cylinders = 0; + drv->raid_level = -1; /* This can be used as a flag variable to + * indicate that this element of the drive + * array is free. + */ + + if (clear_all) { + /* check to see if it was the last disk */ + if (drv == h->drv + h->highest_lun) { + /* if so, find the new hightest lun */ + int i, newhighest = -1; + for (i = 0; i <= h->highest_lun; i++) { + /* if the disk has size > 0, it is available */ + if (h->drv[i].heads) + newhighest = i; + } + h->highest_lun = newhighest; } - h->highest_lun = newhighest; + + drv->LunID = 0; } return 0; } @@ -2715,6 +2479,8 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, } else { /* Get geometry failed */ printk(KERN_WARNING "cciss: reading geometry failed\n"); } + printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n", + drv->heads, drv->sectors, drv->cylinders); } static void @@ -2748,6 +2514,9 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, *total_size = 0; *block_size = BLOCK_SIZE; } + if (*total_size != 0) + printk(KERN_INFO " blocks= %llu block_size= %d\n", + (unsigned long long)*total_size+1, *block_size); kfree(buf); } @@ -2799,8 +2568,7 @@ static int cciss_revalidate(struct gendisk *disk) InquiryData_struct *inq_buff = NULL; for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { - if (memcmp(h->drv[logvol]->LunID, drv->LunID, - sizeof(drv->LunID)) == 0) { + if (h->drv[logvol].LunID == drv->LunID) { FOUND = 1; break; } @@ -3285,7 +3053,8 @@ static void do_cciss_request(struct request_queue *q) /* The first 2 bits are reserved for controller error reporting. 
*/ c->Header.Tag.lower = (c->cmdindex << 3); c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ - memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); + c->Header.LUN.LogDev.VolId = drv->LunID; + c->Header.LUN.LogDev.Mode = 1; c->Request.CDBLen = 10; // 12 byte commands not in FW yet; c->Request.Type.Type = TYPE_CMD; // It is a command. c->Request.Type.Attribute = ATTR_SIMPLE; @@ -3463,121 +3232,20 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) return IRQ_HANDLED; } -/** - * add_to_scan_list() - add controller to rescan queue - * @h: Pointer to the controller. - * - * Adds the controller to the rescan queue if not already on the queue. - * - * returns 1 if added to the queue, 0 if skipped (could be on the - * queue already, or the controller could be initializing or shutting - * down). - **/ -static int add_to_scan_list(struct ctlr_info *h) -{ - struct ctlr_info *test_h; - int found = 0; - int ret = 0; - - if (h->busy_initializing) - return 0; - - if (!mutex_trylock(&h->busy_shutting_down)) - return 0; - - mutex_lock(&scan_mutex); - list_for_each_entry(test_h, &scan_q, scan_list) { - if (test_h == h) { - found = 1; - break; - } - } - if (!found && !h->busy_scanning) { - INIT_COMPLETION(h->scan_wait); - list_add_tail(&h->scan_list, &scan_q); - ret = 1; - } - mutex_unlock(&scan_mutex); - mutex_unlock(&h->busy_shutting_down); - - return ret; -} - -/** - * remove_from_scan_list() - remove controller from rescan queue - * @h: Pointer to the controller. - * - * Removes the controller from the rescan queue if present. Blocks if - * the controller is currently conducting a rescan. - **/ -static void remove_from_scan_list(struct ctlr_info *h) -{ - struct ctlr_info *test_h, *tmp_h; - int scanning = 0; - - mutex_lock(&scan_mutex); - list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { - if (test_h == h) { - list_del(&h->scan_list); - complete_all(&h->scan_wait); - mutex_unlock(&scan_mutex); - return; - } - } - if (&h->busy_scanning) - scanning = 0; - mutex_unlock(&scan_mutex); - - if (scanning) - wait_for_completion(&h->scan_wait); -} - -/** - * scan_thread() - kernel thread used to rescan controllers - * @data: Ignored. - * - * A kernel thread used scan for drive topology changes on - * controllers. The thread processes only one controller at a time - * using a queue. Controllers are added to the queue using - * add_to_scan_list() and removed from the queue either after done - * processing or using remove_from_scan_list(). - * - * returns 0. 
- **/ static int scan_thread(void *data) { - struct ctlr_info *h; + ctlr_info_t *h = data; + int rc; + DECLARE_COMPLETION_ONSTACK(wait); + h->rescan_wait = &wait; - while (1) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); + for (;;) { + rc = wait_for_completion_interruptible(&wait); if (kthread_should_stop()) break; - - while (1) { - mutex_lock(&scan_mutex); - if (list_empty(&scan_q)) { - mutex_unlock(&scan_mutex); - break; - } - - h = list_entry(scan_q.next, - struct ctlr_info, - scan_list); - list_del(&h->scan_list); - h->busy_scanning = 1; - mutex_unlock(&scan_mutex); - - if (h) { - rebuild_lun_table(h, 0, 0); - complete_all(&h->scan_wait); - mutex_lock(&scan_mutex); - h->busy_scanning = 0; - mutex_unlock(&scan_mutex); - } - } + if (!rc) + rebuild_lun_table(h, 0); } - return 0; } @@ -3600,8 +3268,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) case REPORT_LUNS_CHANGED: printk(KERN_WARNING "cciss%d: report LUN data " "changed\n", h->ctlr); - add_to_scan_list(h); - wake_up_process(cciss_scan_thread); + if (h->rescan_wait) + complete(h->rescan_wait); return 1; break; case POWER_OR_RESET: @@ -3821,7 +3489,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) if (scratchpad == CCISS_FIRMWARE_READY) break; set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ + schedule_timeout(HZ / 10); /* wait 100ms */ } if (scratchpad != CCISS_FIRMWARE_READY) { printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); @@ -3947,7 +3615,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) break; /* delay and try again */ set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(msecs_to_jiffies(1)); + schedule_timeout(10); } #ifdef CCISS_DEBUG @@ -4001,16 +3669,15 @@ static int alloc_cciss_hba(void) return -1; } -static void free_hba(int n) +static void free_hba(int i) { - ctlr_info_t *h = hba[n]; - int i; + ctlr_info_t *p = hba[i]; + int n; - hba[n] = NULL; - for (i = 0; i < h->highest_lun + 1; i++) - if (h->gendisk[i] != NULL) - put_disk(h->gendisk[i]); - kfree(h); + hba[i] = NULL; + for (n = 0; n < CISS_MAX_LUN; n++) + put_disk(p->gendisk[n]); + kfree(p); } /* Send a message CDB to the firmware. 
*/ @@ -4251,7 +3918,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->busy_initializing = 1; INIT_HLIST_HEAD(&hba[i]->cmpQ); INIT_HLIST_HEAD(&hba[i]->reqQ); - mutex_init(&hba[i]->busy_shutting_down); if (cciss_pci_init(hba[i], pdev) != 0) goto clean0; @@ -4260,8 +3926,6 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->ctlr = i; hba[i]->pdev = pdev; - init_completion(&hba[i]->scan_wait); - if (cciss_create_hba_sysfs_entry(hba[i])) goto clean0; @@ -4337,7 +4001,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->num_luns = 0; hba[i]->highest_lun = -1; for (j = 0; j < CISS_MAX_LUN; j++) { - hba[i]->drv[j] = NULL; + hba[i]->drv[j].raid_level = -1; + hba[i]->drv[j].queue = NULL; hba[i]->gendisk[j] = NULL; } @@ -4370,8 +4035,14 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, hba[i]->cciss_max_sectors = 2048; - rebuild_lun_table(hba[i], 1, 0); hba[i]->busy_initializing = 0; + + rebuild_lun_table(hba[i], 1); + hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i], + "cciss_scan%02d", i); + if (IS_ERR(hba[i]->cciss_scan_thread)) + return PTR_ERR(hba[i]->cciss_scan_thread); + return 1; clean4: @@ -4392,7 +4063,12 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, cciss_destroy_hba_sysfs_entry(hba[i]); clean0: hba[i]->busy_initializing = 0; - + /* cleanup any queues that may have been initialized */ + for (j=0; j <= hba[i]->highest_lun; j++){ + drive_info_struct *drv = &(hba[i]->drv[j]); + if (drv->queue) + blk_cleanup_queue(drv->queue); + } /* * Deliberately omit pci_disable_device(): it does something nasty to * Smart Array controllers that pci_enable_device does not undo @@ -4449,9 +4125,8 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) return; } - mutex_lock(&hba[i]->busy_shutting_down); + kthread_stop(hba[i]->cciss_scan_thread); - remove_from_scan_list(hba[i]); remove_proc_entry(hba[i]->devname, proc_cciss); unregister_blkdev(hba[i]->major, hba[i]->devname); @@ -4461,10 +4136,8 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) if (disk) { struct request_queue *q = disk->queue; - if (disk->flags & GENHD_FL_UP) { - cciss_destroy_ld_sysfs_entry(hba[i], j, 1); + if (disk->flags & GENHD_FL_UP) del_gendisk(disk); - } if (q) blk_cleanup_queue(q); } @@ -4497,7 +4170,6 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); pci_set_drvdata(pdev, NULL); cciss_destroy_hba_sysfs_entry(hba[i]); - mutex_unlock(&hba[i]->busy_shutting_down); free_hba(i); } @@ -4530,25 +4202,15 @@ static int __init cciss_init(void) if (err) return err; - /* Start the scan thread */ - cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); - if (IS_ERR(cciss_scan_thread)) { - err = PTR_ERR(cciss_scan_thread); - goto err_bus_unregister; - } - /* Register for our PCI devices */ err = pci_register_driver(&cciss_pci_driver); if (err) - goto err_thread_stop; + goto err_bus_register; - return err; + return 0; -err_thread_stop: - kthread_stop(cciss_scan_thread); -err_bus_unregister: +err_bus_register: bus_unregister(&cciss_bus_type); - return err; } @@ -4565,7 +4227,6 @@ static void __exit cciss_cleanup(void) cciss_remove_one(hba[i]->pdev); } } - kthread_stop(cciss_scan_thread); remove_proc_entry("driver/cciss", NULL); bus_unregister(&cciss_bus_type); } diff --git a/trunk/drivers/block/cciss.h b/trunk/drivers/block/cciss.h index 31524cf42c77..06a5db25b298 100644 --- a/trunk/drivers/block/cciss.h +++ b/trunk/drivers/block/cciss.h @@ -2,7 +2,6 @@ #define CCISS_H #include 
-#include #include "cciss_cmd.h" @@ -30,7 +29,7 @@ struct access_method { }; typedef struct _drive_info_struct { - unsigned char LunID[8]; + __u32 LunID; int usage_count; struct request_queue *queue; sector_t nr_blocks; @@ -52,7 +51,6 @@ typedef struct _drive_info_struct char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ char model[MODEL_LEN + 1]; /* SCSI model string */ char rev[REV_LEN + 1]; /* SCSI revision string */ - char device_initialized; /* indicates whether dev is initialized */ } drive_info_struct; struct ctlr_info @@ -88,7 +86,7 @@ struct ctlr_info BYTE cciss_read_capacity; // information about each logical volume - drive_info_struct *drv[CISS_MAX_LUN]; + drive_info_struct drv[CISS_MAX_LUN]; struct access_method access; @@ -110,8 +108,6 @@ struct ctlr_info int nr_frees; int busy_configuring; int busy_initializing; - int busy_scanning; - struct mutex busy_shutting_down; /* This element holds the zero based queue number of the last * queue to be started. It is used for fairness. @@ -126,8 +122,8 @@ struct ctlr_info /* and saved for later processing */ #endif unsigned char alive; - struct list_head scan_list; - struct completion scan_wait; + struct completion *rescan_wait; + struct task_struct *cciss_scan_thread; struct device dev; }; diff --git a/trunk/drivers/block/cpqarray.c b/trunk/drivers/block/cpqarray.c index 6422651ec364..b82d438e2607 100644 --- a/trunk/drivers/block/cpqarray.c +++ b/trunk/drivers/block/cpqarray.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -178,6 +177,7 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev); #ifdef CONFIG_PROC_FS static void ida_procinit(int i); +static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data); #else static void ida_procinit(int i) {} #endif @@ -206,7 +206,6 @@ static const struct block_device_operations ida_fops = { #ifdef CONFIG_PROC_FS static struct proc_dir_entry *proc_array; -static const struct file_operations ida_proc_fops; /* * Get us a file in /proc/array that says something about each controller. @@ -219,16 +218,19 @@ static void __init ida_procinit(int i) if (!proc_array) return; } - proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]); + create_proc_read_entry(hba[i]->devname, 0, proc_array, + ida_proc_get_info, hba[i]); } /* * Report information about this controller. 
*/ -static int ida_proc_show(struct seq_file *m, void *v) +static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) { - int i, ctlr; - ctlr_info_t *h = (ctlr_info_t*)m->private; + off_t pos = 0; + off_t len = 0; + int size, i, ctlr; + ctlr_info_t *h = (ctlr_info_t*)data; drv_info_t *drv; #ifdef CPQ_PROC_PRINT_QUEUES cmdlist_t *c; @@ -236,7 +238,7 @@ static int ida_proc_show(struct seq_file *m, void *v) #endif ctlr = h->ctlr; - seq_printf(m, "%s: Compaq %s Controller\n" + size = sprintf(buffer, "%s: Compaq %s Controller\n" " Board ID: 0x%08lx\n" " Firmware Revision: %c%c%c%c\n" " Controller Sig: 0x%08lx\n" @@ -256,54 +258,55 @@ static int ida_proc_show(struct seq_file *m, void *v) h->log_drives, h->phys_drives, h->Qdepth, h->maxQsinceinit); - seq_puts(m, "Logical Drive Info:\n"); + pos += size; len += size; + + size = sprintf(buffer+len, "Logical Drive Info:\n"); + pos += size; len += size; for(i=0; ilog_drives; i++) { drv = &h->drv[i]; - seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n", + size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", ctlr, i, drv->blk_size, drv->nr_blks); + pos += size; len += size; } #ifdef CPQ_PROC_PRINT_QUEUES spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); - seq_puts(m, "\nCurrent Queues:\n"); + size = sprintf(buffer+len, "\nCurrent Queues:\n"); + pos += size; len += size; c = h->reqQ; - seq_printf(m, "reqQ = %p", c); + size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; if (c) c=c->next; while(c && c != h->reqQ) { - seq_printf(m, "->%p", c); + size = sprintf(buffer+len, "->%p", c); + pos += size; len += size; c=c->next; } c = h->cmpQ; - seq_printf(m, "\ncmpQ = %p", c); + size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; if (c) c=c->next; while(c && c != h->cmpQ) { - seq_printf(m, "->%p", c); + size = sprintf(buffer+len, "->%p", c); + pos += size; len += size; c=c->next; } - seq_putc(m, '\n'); + size = sprintf(buffer+len, "\n"); pos += size; len += size; spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); #endif - seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n", + size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", h->nr_allocs, h->nr_frees); - return 0; -} - -static int ida_proc_open(struct inode *inode, struct file *file) -{ - return single_open(file, ida_proc_show, PDE(inode)->data); + pos += size; len += size; + + *eof = 1; + *start = buffer+offset; + len -= offset; + if (len>length) + len = length; + return len; } - -static const struct file_operations ida_proc_fops = { - .owner = THIS_MODULE, - .open = ida_proc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; #endif /* CONFIG_PROC_FS */ module_param_array(eisa, int, NULL, 0); diff --git a/trunk/drivers/char/agp/parisc-agp.c b/trunk/drivers/char/agp/parisc-agp.c index 1c129211302d..60ab75104da9 100644 --- a/trunk/drivers/char/agp/parisc-agp.c +++ b/trunk/drivers/char/agp/parisc-agp.c @@ -217,7 +217,7 @@ static const struct agp_bridge_driver parisc_agp_driver = { .configure = parisc_agp_configure, .fetch_size = parisc_agp_fetch_size, .tlb_flush = parisc_agp_tlbflush, - .mask_memory = parisc_agp_mask_memory, + .mask_memory = parisc_agp_page_mask_memory, .masks = parisc_agp_masks, .agp_enable = parisc_agp_enable, .cache_flush = global_cache_flush, diff --git a/trunk/drivers/char/dtlk.c b/trunk/drivers/char/dtlk.c index 045c930e6320..52e06589821d 100644 --- a/trunk/drivers/char/dtlk.c +++ b/trunk/drivers/char/dtlk.c @@ -56,7 +56,6 @@ #include /* for -EBUSY */ #include /* for 
request_region */ #include /* for loops_per_jiffy */ -#include #include /* cycle_kernel_lock() */ #include /* for inb_p, outb_p, inb, outb, etc. */ #include /* for get_user, etc. */ diff --git a/trunk/drivers/char/ipmi/ipmi_devintf.c b/trunk/drivers/char/ipmi/ipmi_devintf.c index 65545de3dbf4..41fc11dc921c 100644 --- a/trunk/drivers/char/ipmi/ipmi_devintf.c +++ b/trunk/drivers/char/ipmi/ipmi_devintf.c @@ -36,7 +36,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/char/ipmi/ipmi_msghandler.c b/trunk/drivers/char/ipmi/ipmi_msghandler.c index ec5e3f8df648..09050797c76a 100644 --- a/trunk/drivers/char/ipmi/ipmi_msghandler.c +++ b/trunk/drivers/char/ipmi/ipmi_msghandler.c @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/connector/cn_proc.c b/trunk/drivers/connector/cn_proc.c index 60697909ebdb..abf4a2529f80 100644 --- a/trunk/drivers/connector/cn_proc.c +++ b/trunk/drivers/connector/cn_proc.c @@ -227,8 +227,7 @@ static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack) * cn_proc_mcast_ctl * @data: message sent from userspace via the connector */ -static void cn_proc_mcast_ctl(struct cn_msg *msg, - struct netlink_skb_parms *nsp) +static void cn_proc_mcast_ctl(struct cn_msg *msg) { enum proc_cn_mcast_op *mc_op = NULL; int err = 0; diff --git a/trunk/drivers/edac/amd64_edac.c b/trunk/drivers/edac/amd64_edac.c index 4f4ac82382f7..4e551e63b6dc 100644 --- a/trunk/drivers/edac/amd64_edac.c +++ b/trunk/drivers/edac/amd64_edac.c @@ -15,8 +15,8 @@ module_param(ecc_enable_override, int, 0644); /* Lookup table for all possible MC control instances */ struct amd64_pvt; -static struct mem_ctl_info *mci_lookup[EDAC_MAX_NUMNODES]; -static struct amd64_pvt *pvt_lookup[EDAC_MAX_NUMNODES]; +static struct mem_ctl_info *mci_lookup[MAX_NUMNODES]; +static struct amd64_pvt *pvt_lookup[MAX_NUMNODES]; /* * See F2x80 for K8 and F2x[1,0]80 for Fam10 and later. 
The table below is only @@ -189,10 +189,7 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci, u32 *bw) /* Map from a CSROW entry to the mask entry that operates on it */ static inline u32 amd64_map_to_dcs_mask(struct amd64_pvt *pvt, int csrow) { - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) - return csrow; - else - return csrow >> 1; + return csrow >> (pvt->num_dcsm >> 3); } /* return the 'base' address the i'th CS entry of the 'dct' DRAM controller */ @@ -282,26 +279,29 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, intlv_en = pvt->dram_IntlvEn[0]; if (intlv_en == 0) { - for (node_id = 0; node_id < DRAM_REG_COUNT; node_id++) { + for (node_id = 0; ; ) { if (amd64_base_limit_match(pvt, sys_addr, node_id)) - goto found; + break; + + if (++node_id >= DRAM_REG_COUNT) + goto err_no_match; } - goto err_no_match; + goto found; } - if (unlikely((intlv_en != 0x01) && - (intlv_en != 0x03) && - (intlv_en != 0x07))) { + if (unlikely((intlv_en != (0x01 << 8)) && + (intlv_en != (0x03 << 8)) && + (intlv_en != (0x07 << 8)))) { amd64_printk(KERN_WARNING, "junk value of 0x%x extracted from " "IntlvEn field of DRAM Base Register for node 0: " - "this probably indicates a BIOS bug.\n", intlv_en); + "This probably indicates a BIOS bug.\n", intlv_en); return NULL; } bits = (((u32) sys_addr) >> 12) & intlv_en; for (node_id = 0; ; ) { - if ((pvt->dram_IntlvSel[node_id] & intlv_en) == bits) + if ((pvt->dram_limit[node_id] & intlv_en) == bits) break; /* intlv_sel field matches */ if (++node_id >= DRAM_REG_COUNT) @@ -311,10 +311,10 @@ static struct mem_ctl_info *find_mc_by_sys_addr(struct mem_ctl_info *mci, /* sanity test for sys_addr */ if (unlikely(!amd64_base_limit_match(pvt, sys_addr, node_id))) { amd64_printk(KERN_WARNING, - "%s(): sys_addr 0x%llx falls outside base/limit " - "address range for node %d with node interleaving " - "enabled.\n", - __func__, sys_addr, node_id); + "%s(): sys_addr 0x%lx falls outside base/limit " + "address range for node %d with node interleaving " + "enabled.\n", __func__, (unsigned long)sys_addr, + node_id); return NULL; } @@ -377,7 +377,7 @@ static int input_addr_to_csrow(struct mem_ctl_info *mci, u64 input_addr) * base/mask register pair, test the condition shown near the start of * section 3.5.4 (p. 84, BKDG #26094, K8, revA-E). 
*/ - for (csrow = 0; csrow < pvt->cs_count; csrow++) { + for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { /* This DRAM chip select is disabled on this node */ if ((pvt->dcsb0[csrow] & K8_DCSB_CS_ENABLE) == 0) @@ -734,7 +734,7 @@ static void find_csrow_limits(struct mem_ctl_info *mci, int csrow, u64 base, mask; pvt = mci->pvt_info; - BUG_ON((csrow < 0) || (csrow >= pvt->cs_count)); + BUG_ON((csrow < 0) || (csrow >= CHIPSELECT_COUNT)); base = base_from_dct_base(pvt, csrow); mask = mask_from_dct_mask(pvt, csrow); @@ -962,27 +962,35 @@ static void amd64_read_dbam_reg(struct amd64_pvt *pvt) */ static void amd64_set_dct_base_and_mask(struct amd64_pvt *pvt) { - - if (boot_cpu_data.x86 == 0xf && pvt->ext_model < OPTERON_CPU_REV_F) { - pvt->dcsb_base = REV_E_DCSB_BASE_BITS; - pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; - pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; - pvt->dcs_shift = REV_E_DCS_SHIFT; - pvt->cs_count = 8; - pvt->num_dcsm = 8; - } else { + if (pvt->ext_model >= OPTERON_CPU_REV_F) { pvt->dcsb_base = REV_F_F1Xh_DCSB_BASE_BITS; pvt->dcsm_mask = REV_F_F1Xh_DCSM_MASK_BITS; pvt->dcs_mask_notused = REV_F_F1Xh_DCS_NOTUSED_BITS; pvt->dcs_shift = REV_F_F1Xh_DCS_SHIFT; - if (boot_cpu_data.x86 == 0x11) { - pvt->cs_count = 4; - pvt->num_dcsm = 2; - } else { - pvt->cs_count = 8; - pvt->num_dcsm = 4; + switch (boot_cpu_data.x86) { + case 0xf: + pvt->num_dcsm = REV_F_DCSM_COUNT; + break; + + case 0x10: + pvt->num_dcsm = F10_DCSM_COUNT; + break; + + case 0x11: + pvt->num_dcsm = F11_DCSM_COUNT; + break; + + default: + amd64_printk(KERN_ERR, "Unsupported family!\n"); + break; } + } else { + pvt->dcsb_base = REV_E_DCSB_BASE_BITS; + pvt->dcsm_mask = REV_E_DCSM_MASK_BITS; + pvt->dcs_mask_notused = REV_E_DCS_NOTUSED_BITS; + pvt->dcs_shift = REV_E_DCS_SHIFT; + pvt->num_dcsm = REV_E_DCSM_COUNT; } } @@ -995,7 +1003,7 @@ static void amd64_read_dct_base_mask(struct amd64_pvt *pvt) amd64_set_dct_base_and_mask(pvt); - for (cs = 0; cs < pvt->cs_count; cs++) { + for (cs = 0; cs < CHIPSELECT_COUNT; cs++) { reg = K8_DCSB0 + (cs * 4); err = pci_read_config_dword(pvt->dram_f2_ctl, reg, &pvt->dcsb0[cs]); @@ -1122,7 +1130,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) debugf0("Reading K8_DRAM_BASE_LOW failed\n"); /* Extract parts into separate data entries */ - pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 24; + pvt->dram_base[dram] = ((u64) low & 0xFFFF0000) << 8; pvt->dram_IntlvEn[dram] = (low >> 8) & 0x7; pvt->dram_rw_en[dram] = (low & 0x3); @@ -1135,7 +1143,7 @@ static void k8_read_dram_base_limit(struct amd64_pvt *pvt, int dram) * Extract parts into separate data entries. Limit is the HIGHEST memory * location of the region, so lower 24 bits need to be all ones */ - pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 24) | 0x00FFFFFF; + pvt->dram_limit[dram] = (((u64) low & 0xFFFF0000) << 8) | 0x00FFFFFF; pvt->dram_IntlvSel[dram] = (low >> 8) & 0x7; pvt->dram_DstNode[dram] = (low & 0x7); } @@ -1185,7 +1193,7 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, * different from the node that detected the error. 
*/ src_mci = find_mc_by_sys_addr(mci, SystemAddress); - if (!src_mci) { + if (src_mci) { amd64_mc_printk(mci, KERN_ERR, "failed to map error address 0x%lx to a node\n", (unsigned long)SystemAddress); @@ -1368,8 +1376,8 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) pvt->dram_IntlvEn[dram] = (low_base >> 8) & 0x7; - pvt->dram_base[dram] = (((u64)high_base & 0x000000FF) << 40) | - (((u64)low_base & 0xFFFF0000) << 24); + pvt->dram_base[dram] = (((((u64) high_base & 0x000000FF) << 32) | + ((u64) low_base & 0xFFFF0000))) << 8; low_offset = K8_DRAM_LIMIT_LOW + (dram << 3); high_offset = F10_DRAM_LIMIT_HIGH + (dram << 3); @@ -1390,9 +1398,9 @@ static void f10_read_dram_base_limit(struct amd64_pvt *pvt, int dram) * Extract address values and form a LIMIT address. Limit is the HIGHEST * memory location of the region, so low 24 bits need to be all ones. */ - pvt->dram_limit[dram] = (((u64)high_limit & 0x000000FF) << 40) | - (((u64) low_limit & 0xFFFF0000) << 24) | - 0x00FFFFFF; + low_limit |= 0x0000FFFF; + pvt->dram_limit[dram] = + ((((u64) high_limit << 32) + (u64) low_limit) << 8) | (0xFF); } static void f10_read_dram_ctl_register(struct amd64_pvt *pvt) @@ -1558,7 +1566,7 @@ static int f10_lookup_addr_in_dct(u32 in_addr, u32 nid, u32 cs) debugf1("InputAddr=0x%x channelselect=%d\n", in_addr, cs); - for (csrow = 0; csrow < pvt->cs_count; csrow++) { + for (csrow = 0; csrow < CHIPSELECT_COUNT; csrow++) { cs_base = amd64_get_dct_base(pvt, cs, csrow); if (!(cs_base & K8_DCSB_CS_ENABLE)) @@ -2489,7 +2497,7 @@ static void amd64_read_mc_registers(struct amd64_pvt *pvt) * NOTE: CPU Revision Dependent code * * Input: - * @csrow_nr ChipSelect Row Number (0..pvt->cs_count-1) + * @csrow_nr ChipSelect Row Number (0..CHIPSELECT_COUNT-1) * k8 private pointer to --> * DRAM Bank Address mapping register * node_id @@ -2569,7 +2577,7 @@ static int amd64_init_csrows(struct mem_ctl_info *mci) (pvt->nbcfg & K8_NBCFG_ECC_ENABLE) ? 
"Enabled" : "Disabled" ); - for (i = 0; i < pvt->cs_count; i++) { + for (i = 0; i < CHIPSELECT_COUNT; i++) { csrow = &mci->csrows[i]; if ((pvt->dcsb0[i] & K8_DCSB_CS_ENABLE) == 0) { @@ -2980,7 +2988,7 @@ static int amd64_init_2nd_stage(struct amd64_pvt *pvt) goto err_exit; ret = -ENOMEM; - mci = edac_mc_alloc(0, pvt->cs_count, pvt->channel_count, node_id); + mci = edac_mc_alloc(0, CHIPSELECT_COUNT, pvt->channel_count, node_id); if (!mci) goto err_exit; diff --git a/trunk/drivers/edac/amd64_edac.h b/trunk/drivers/edac/amd64_edac.h index c6f359a85207..8ea07e2715dc 100644 --- a/trunk/drivers/edac/amd64_edac.h +++ b/trunk/drivers/edac/amd64_edac.h @@ -132,8 +132,6 @@ #define EDAC_AMD64_VERSION " Ver: 3.2.0 " __DATE__ #define EDAC_MOD_STR "amd64_edac" -#define EDAC_MAX_NUMNODES 8 - /* Extended Model from CPUID, for CPU Revision numbers */ #define OPTERON_CPU_LE_REV_C 0 #define OPTERON_CPU_REV_D 1 @@ -144,7 +142,7 @@ #define OPTERON_CPU_REV_FA 5 /* Hardware limit on ChipSelect rows per MC and processors per system */ -#define MAX_CS_COUNT 8 +#define CHIPSELECT_COUNT 8 #define DRAM_REG_COUNT 8 @@ -195,6 +193,7 @@ */ #define REV_E_DCSB_BASE_BITS (0xFFE0FE00ULL) #define REV_E_DCS_SHIFT 4 +#define REV_E_DCSM_COUNT 8 #define REV_F_F1Xh_DCSB_BASE_BITS (0x1FF83FE0ULL) #define REV_F_F1Xh_DCS_SHIFT 8 @@ -205,6 +204,9 @@ */ #define REV_F_DCSB_BASE_BITS (0x1FF83FE0ULL) #define REV_F_DCS_SHIFT 8 +#define REV_F_DCSM_COUNT 4 +#define F10_DCSM_COUNT 4 +#define F11_DCSM_COUNT 2 /* DRAM CS Mask Registers */ #define K8_DCSM0 0x60 @@ -372,11 +374,13 @@ enum { #define SET_NB_DRAM_INJECTION_WRITE(word, bits) \ (BIT(((word) & 0xF) + 20) | \ - BIT(17) | bits) + BIT(17) | \ + ((bits) & 0xF)) #define SET_NB_DRAM_INJECTION_READ(word, bits) \ (BIT(((word) & 0xF) + 20) | \ - BIT(16) | bits) + BIT(16) | \ + ((bits) & 0xF)) #define K8_NBCAP 0xE8 #define K8_NBCAP_CORES (BIT(12)|BIT(13)) @@ -441,12 +445,12 @@ struct amd64_pvt { u32 dbam1; /* DRAM Base Address Mapping reg for DCT1 */ /* DRAM CS Base Address Registers F2x[1,0][5C:40] */ - u32 dcsb0[MAX_CS_COUNT]; - u32 dcsb1[MAX_CS_COUNT]; + u32 dcsb0[CHIPSELECT_COUNT]; + u32 dcsb1[CHIPSELECT_COUNT]; /* DRAM CS Mask Registers F2x[1,0][6C:60] */ - u32 dcsm0[MAX_CS_COUNT]; - u32 dcsm1[MAX_CS_COUNT]; + u32 dcsm0[CHIPSELECT_COUNT]; + u32 dcsm1[CHIPSELECT_COUNT]; /* * Decoded parts of DRAM BASE and LIMIT Registers @@ -466,7 +470,6 @@ struct amd64_pvt { */ u32 dcsb_base; /* DCSB base bits */ u32 dcsm_mask; /* DCSM mask bits */ - u32 cs_count; /* num chip selects (== num DCSB registers) */ u32 num_dcsm; /* Number of DCSM registers */ u32 dcs_mask_notused; /* DCSM notused mask bits */ u32 dcs_shift; /* DCSB and DCSM shift value */ diff --git a/trunk/drivers/edac/amd64_edac_inj.c b/trunk/drivers/edac/amd64_edac_inj.c index 29f1f7a612d9..d3675b76b3a7 100644 --- a/trunk/drivers/edac/amd64_edac_inj.c +++ b/trunk/drivers/edac/amd64_edac_inj.c @@ -1,11 +1,5 @@ #include "amd64_edac.h" -static ssize_t amd64_inject_section_show(struct mem_ctl_info *mci, char *buf) -{ - struct amd64_pvt *pvt = mci->pvt_info; - return sprintf(buf, "0x%x\n", pvt->injection.section); -} - /* * store error injection section value which refers to one of 4 16-byte sections * within a 64-byte cacheline @@ -21,26 +15,12 @@ static ssize_t amd64_inject_section_store(struct mem_ctl_info *mci, ret = strict_strtoul(data, 10, &value); if (ret != -EINVAL) { - - if (value > 3) { - amd64_printk(KERN_WARNING, - "%s: invalid section 0x%lx\n", - __func__, value); - return -EINVAL; - } - pvt->injection.section = (u32) value; return 
count; } return ret; } -static ssize_t amd64_inject_word_show(struct mem_ctl_info *mci, char *buf) -{ - struct amd64_pvt *pvt = mci->pvt_info; - return sprintf(buf, "0x%x\n", pvt->injection.word); -} - /* * store error injection word value which refers to one of 9 16-bit word of the * 16-byte (128-bit + ECC bits) section @@ -57,25 +37,14 @@ static ssize_t amd64_inject_word_store(struct mem_ctl_info *mci, ret = strict_strtoul(data, 10, &value); if (ret != -EINVAL) { - if (value > 8) { - amd64_printk(KERN_WARNING, - "%s: invalid word 0x%lx\n", - __func__, value); - return -EINVAL; - } - + value = (value <= 8) ? value : 0; pvt->injection.word = (u32) value; + return count; } return ret; } -static ssize_t amd64_inject_ecc_vector_show(struct mem_ctl_info *mci, char *buf) -{ - struct amd64_pvt *pvt = mci->pvt_info; - return sprintf(buf, "0x%x\n", pvt->injection.bit_map); -} - /* * store 16 bit error injection vector which enables injecting errors to the * corresponding bit within the error injection word above. When used during a @@ -91,14 +60,8 @@ static ssize_t amd64_inject_ecc_vector_store(struct mem_ctl_info *mci, ret = strict_strtoul(data, 16, &value); if (ret != -EINVAL) { - if (value & 0xFFFF0000) { - amd64_printk(KERN_WARNING, - "%s: invalid EccVector: 0x%lx\n", - __func__, value); - return -EINVAL; - } + pvt->injection.bit_map = (u32) value & 0xFFFF; - pvt->injection.bit_map = (u32) value; return count; } return ret; @@ -184,7 +147,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { .name = "inject_section", .mode = (S_IRUGO | S_IWUSR) }, - .show = amd64_inject_section_show, + .show = NULL, .store = amd64_inject_section_store, }, { @@ -192,7 +155,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { .name = "inject_word", .mode = (S_IRUGO | S_IWUSR) }, - .show = amd64_inject_word_show, + .show = NULL, .store = amd64_inject_word_store, }, { @@ -200,7 +163,7 @@ struct mcidev_sysfs_attribute amd64_inj_attrs[] = { .name = "inject_ecc_vector", .mode = (S_IRUGO | S_IWUSR) }, - .show = amd64_inject_ecc_vector_show, + .show = NULL, .store = amd64_inject_ecc_vector_store, }, { diff --git a/trunk/drivers/firewire/core-cdev.c b/trunk/drivers/firewire/core-cdev.c index 5089331544ed..ced186d7e9a9 100644 --- a/trunk/drivers/firewire/core-cdev.c +++ b/trunk/drivers/firewire/core-cdev.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/firmware/iscsi_ibft.c b/trunk/drivers/firmware/iscsi_ibft.c index 051d1ebbd287..420a96e7f2db 100644 --- a/trunk/drivers/firmware/iscsi_ibft.c +++ b/trunk/drivers/firmware/iscsi_ibft.c @@ -939,7 +939,7 @@ static int __init ibft_init(void) if (ibft_addr) { printk(KERN_INFO "iBFT detected at 0x%llx.\n", - (u64)isa_virt_to_bus(ibft_addr)); + (u64)virt_to_phys((void *)ibft_addr)); rc = ibft_check_device(); if (rc) diff --git a/trunk/drivers/firmware/iscsi_ibft_find.c b/trunk/drivers/firmware/iscsi_ibft_find.c index dfb15c06c88f..d53fbbfefa3e 100644 --- a/trunk/drivers/firmware/iscsi_ibft_find.c +++ b/trunk/drivers/firmware/iscsi_ibft_find.c @@ -65,10 +65,10 @@ void __init reserve_ibft_region(void) * so skip that area */ if (pos == VGA_MEM) pos += VGA_SIZE; - virt = isa_bus_to_virt(pos); + virt = phys_to_virt(pos); if (memcmp(virt, IBFT_SIGN, IBFT_SIGN_LEN) == 0) { unsigned long *addr = - (unsigned long *)isa_bus_to_virt(pos + 4); + (unsigned long *)phys_to_virt(pos + 4); len = *addr; /* if the length of the table extends past 1M, * the table cannot be valid. 
*/ diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 5cae0b3eee9b..8e7b0ebece0c 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -1556,6 +1556,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, struct drm_crtc *crtc; int ret = 0; + DRM_DEBUG_KMS("\n"); + if (!req->flags) { DRM_ERROR("no operation set\n"); return -EINVAL; diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c index 23dc9c115fd9..819ddcbfcce5 100644 --- a/trunk/drivers/gpu/drm/drm_fb_helper.c +++ b/trunk/drivers/gpu/drm/drm_fb_helper.c @@ -454,96 +454,6 @@ int drm_fb_helper_init_crtc_count(struct drm_fb_helper *helper, int crtc_count, } EXPORT_SYMBOL(drm_fb_helper_init_crtc_count); -static void setcolreg(struct drm_crtc *crtc, u16 red, u16 green, - u16 blue, u16 regno, struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_framebuffer *fb = fb_helper->fb; - int pindex; - - pindex = regno; - - if (fb->bits_per_pixel == 16) { - pindex = regno << 3; - - if (fb->depth == 16 && regno > 63) - return; - if (fb->depth == 15 && regno > 31) - return; - - if (fb->depth == 16) { - u16 r, g, b; - int i; - if (regno < 32) { - for (i = 0; i < 8; i++) - fb_helper->funcs->gamma_set(crtc, red, - green, blue, pindex + i); - } - - fb_helper->funcs->gamma_get(crtc, &r, - &g, &b, - pindex >> 1); - - for (i = 0; i < 4; i++) - fb_helper->funcs->gamma_set(crtc, r, - green, b, - (pindex >> 1) + i); - } - } - - if (fb->depth != 16) - fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex); - - if (regno < 16 && info->fix.visual == FB_VISUAL_DIRECTCOLOR) { - ((u32 *) fb->pseudo_palette)[regno] = - (regno << info->var.red.offset) | - (regno << info->var.green.offset) | - (regno << info->var.blue.offset); - } -} - -int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) -{ - struct drm_fb_helper *fb_helper = info->par; - struct drm_device *dev = fb_helper->dev; - u16 *red, *green, *blue, *transp; - struct drm_crtc *crtc; - int i, rc = 0; - int start; - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; - for (i = 0; i < fb_helper->crtc_count; i++) { - if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) - break; - } - if (i == fb_helper->crtc_count) - continue; - - red = cmap->red; - green = cmap->green; - blue = cmap->blue; - transp = cmap->transp; - start = cmap->start; - - for (i = 0; i < cmap->len; i++) { - u16 hred, hgreen, hblue, htransp = 0xffff; - - hred = *red++; - hgreen = *green++; - hblue = *blue++; - - if (transp) - htransp = *transp++; - - setcolreg(crtc, hred, hgreen, hblue, start++, info); - } - crtc_funcs->load_lut(crtc); - } - return rc; -} -EXPORT_SYMBOL(drm_fb_helper_setcmap); - int drm_fb_helper_setcolreg(unsigned regno, unsigned red, unsigned green, @@ -556,11 +466,9 @@ int drm_fb_helper_setcolreg(unsigned regno, struct drm_crtc *crtc; int i; - if (regno > 255) - return 1; - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; + struct drm_framebuffer *fb = fb_helper->fb; + for (i = 0; i < fb_helper->crtc_count; i++) { if (crtc->base.id == fb_helper->crtc_info[i].crtc_id) break; @@ -568,9 +476,35 @@ int drm_fb_helper_setcolreg(unsigned regno, if (i == fb_helper->crtc_count) continue; + if (regno > 255) + return 1; + + if (fb->depth == 8) { + fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); + return 0; + } 
- setcolreg(crtc, red, green, blue, regno, info); - crtc_funcs->load_lut(crtc); + if (regno < 16) { + switch (fb->depth) { + case 15: + fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | + ((green & 0xf800) >> 6) | + ((blue & 0xf800) >> 11); + break; + case 16: + fb->pseudo_palette[regno] = (red & 0xf800) | + ((green & 0xfc00) >> 5) | + ((blue & 0xf800) >> 11); + break; + case 24: + case 32: + fb->pseudo_palette[regno] = + (((red >> 8) & 0xff) << info->var.red.offset) | + (((green >> 8) & 0xff) << info->var.green.offset) | + (((blue >> 8) & 0xff) << info->var.blue.offset); + break; + } + } } return 0; } @@ -740,7 +674,6 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, EXPORT_SYMBOL(drm_fb_helper_pan_display); int drm_fb_helper_single_fb_probe(struct drm_device *dev, - int preferred_bpp, int (*fb_create)(struct drm_device *dev, uint32_t fb_width, uint32_t fb_height, @@ -763,11 +696,6 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev, struct drm_fb_helper *fb_helper; uint32_t surface_depth = 24, surface_bpp = 32; - /* if driver picks 8 or 16 by default use that - for both depth/bpp */ - if (preferred_bpp != surface_bpp) { - surface_depth = surface_bpp = preferred_bpp; - } /* first up get a count of crtcs now in use and new min/maxes width/heights */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private; @@ -923,12 +851,10 @@ void drm_fb_helper_free(struct drm_fb_helper *helper) } EXPORT_SYMBOL(drm_fb_helper_free); -void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, - uint32_t depth) +void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch) { info->fix.type = FB_TYPE_PACKED_PIXELS; - info->fix.visual = depth == 8 ? 
FB_VISUAL_PSEUDOCOLOR : - FB_VISUAL_DIRECTCOLOR; + info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.type_aux = 0; info->fix.xpanstep = 1; /* doing it in hw */ info->fix.ypanstep = 1; /* doing it in hw */ diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index ffa39671751f..93ff6c03733e 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -3244,16 +3244,6 @@ void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, intel_crtc->lut_b[regno] = blue >> 8; } -void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno) -{ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - - *red = intel_crtc->lut_r[regno] << 8; - *green = intel_crtc->lut_g[regno] << 8; - *blue = intel_crtc->lut_b[regno] << 8; -} - static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size) { @@ -3845,7 +3835,6 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = { .mode_set_base = intel_pipe_set_base, .prepare = intel_crtc_prepare, .commit = intel_crtc_commit, - .load_lut = intel_crtc_load_lut, }; static const struct drm_crtc_funcs intel_crtc_funcs = { diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index ef61fe9507e2..8aa4b7f30daa 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -175,8 +175,6 @@ extern int intelfb_resize(struct drm_device *dev, struct drm_crtc *crtc); extern void intelfb_restore(void); extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); -extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno); extern int intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, diff --git a/trunk/drivers/gpu/drm/i915/intel_fb.c b/trunk/drivers/gpu/drm/i915/intel_fb.c index 2b0fe54cd92c..e85d7e9eed7d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_fb.c +++ b/trunk/drivers/gpu/drm/i915/intel_fb.c @@ -60,12 +60,10 @@ static struct fb_ops intelfb_ops = { .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, - .fb_setcmap = drm_fb_helper_setcmap, }; static struct drm_fb_helper_funcs intel_fb_helper_funcs = { .gamma_set = intel_crtc_fb_gamma_set, - .gamma_get = intel_crtc_fb_gamma_get, }; @@ -125,10 +123,6 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, struct device *device = &dev->pdev->dev; int size, ret, mmio_bar = IS_I9XX(dev) ? 
0 : 1; - /* we don't do packed 24bpp */ - if (surface_bpp == 24) - surface_bpp = 32; - mode_cmd.width = surface_width; mode_cmd.height = surface_height; @@ -212,7 +206,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width, // memset(info->screen_base, 0, size); - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); + drm_fb_helper_fill_fix(info, fb->pitch); drm_fb_helper_fill_var(info, fb, fb_width, fb_height); /* FIXME: we really shouldn't expose mmio space at all */ @@ -250,7 +244,7 @@ int intelfb_probe(struct drm_device *dev) int ret; DRM_DEBUG("\n"); - ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create); + ret = drm_fb_helper_single_fb_probe(dev, intelfb_create); return ret; } EXPORT_SYMBOL(intelfb_probe); diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 14fa9701aeb3..6a015929deee 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -733,7 +733,6 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = { .mode_set_base = atombios_crtc_set_base, .prepare = atombios_crtc_prepare, .commit = atombios_crtc_commit, - .load_lut = radeon_crtc_load_lut, }; void radeon_atombios_init_crtc(struct drm_device *dev, diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index 161094c07d94..e6cce24de802 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -32,9 +32,6 @@ #include "radeon_reg.h" #include "radeon.h" #include "r100d.h" -#include "rs100d.h" -#include "rv200d.h" -#include "rv250d.h" #include #include @@ -63,7 +60,18 @@ MODULE_FIRMWARE(FIRMWARE_R520); /* This files gather functions specifics to: * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 + * + * Some of these functions might be used by newer ASICs. */ +int r200_init(struct radeon_device *rdev); +void r100_hdp_reset(struct radeon_device *rdev); +void r100_gpu_init(struct radeon_device *rdev); +int r100_gui_wait_for_idle(struct radeon_device *rdev); +int r100_mc_wait_for_idle(struct radeon_device *rdev); +void r100_gpu_wait_for_vsync(struct radeon_device *rdev); +void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); +int r100_debugfs_mc_info_init(struct radeon_device *rdev); + /* * PCI GART @@ -144,6 +152,136 @@ void r100_pci_gart_fini(struct radeon_device *rdev) radeon_gart_fini(rdev); } + +/* + * MC + */ +void r100_mc_disable_clients(struct radeon_device *rdev) +{ + uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl; + + /* FIXME: is this function correct for rs100,rs200,rs300 ? */ + if (r100_gui_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait GUI idle while " + "programming pipes. 
Bad things might happen.\n"); + } + + /* stop display and memory access */ + ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL); + WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE); + crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL); + WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS); + crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL); + + r100_gpu_wait_for_vsync(rdev); + + WREG32(RADEON_CRTC_GEN_CNTL, + (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) | + RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN); + + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); + + r100_gpu_wait_for_vsync2(rdev); + WREG32(RADEON_CRTC2_GEN_CNTL, + (crtc2_gen_cntl & + ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) | + RADEON_CRTC2_DISP_REQ_EN_B); + } + + udelay(500); +} + +void r100_mc_setup(struct radeon_device *rdev) +{ + uint32_t tmp; + int r; + + r = r100_debugfs_mc_info_init(rdev); + if (r) { + DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); + } + /* Write VRAM size in case we are limiting it */ + WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); + /* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, + * if the aperture is 64MB but we have 32MB VRAM + * we report only 32MB VRAM but we have to set MC_FB_LOCATION + * to 64MB, otherwise the gpu accidentially dies */ + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); + tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); + WREG32(RADEON_MC_FB_LOCATION, tmp); + + /* Enable bus mastering */ + tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; + WREG32(RADEON_BUS_CNTL, tmp); + + if (rdev->flags & RADEON_IS_AGP) { + tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16); + tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16); + WREG32(RADEON_MC_AGP_LOCATION, tmp); + WREG32(RADEON_AGP_BASE, rdev->mc.agp_base); + } else { + WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF); + WREG32(RADEON_AGP_BASE, 0); + } + + tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; + tmp |= (7 << 28); + WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); + (void)RREG32(RADEON_HOST_PATH_CNTL); + WREG32(RADEON_HOST_PATH_CNTL, tmp); + (void)RREG32(RADEON_HOST_PATH_CNTL); +} + +int r100_mc_init(struct radeon_device *rdev) +{ + int r; + + if (r100_debugfs_rbbm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + } + + r100_gpu_init(rdev); + /* Disable gart which also disable out of gart access */ + r100_pci_gart_disable(rdev); + + /* Setup GPU memory space */ + rdev->mc.gtt_location = 0xFFFFFFFFUL; + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + printk(KERN_WARNING "[drm] Disabling AGP\n"); + rdev->flags &= ~RADEON_IS_AGP; + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + } else { + rdev->mc.gtt_location = rdev->mc.agp_base; + } + } + r = radeon_mc_setup(rdev); + if (r) { + return r; + } + + r100_mc_disable_clients(rdev); + if (r100_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. 
Bad things might happen.\n"); + } + + r100_mc_setup(rdev); + return 0; +} + +void r100_mc_fini(struct radeon_device *rdev) +{ +} + + +/* + * Interrupts + */ int r100_irq_set(struct radeon_device *rdev) { uint32_t tmp = 0; @@ -220,6 +358,10 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) return RREG32(RADEON_CRTC2_CRNT_FRAME); } + +/* + * Fence emission + */ void r100_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -235,6 +377,10 @@ void r100_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } + +/* + * Writeback + */ int r100_wb_init(struct radeon_device *rdev) { int r; @@ -358,6 +504,10 @@ int r100_copy_blit(struct radeon_device *rdev, return r; } + +/* + * CP + */ static int r100_cp_wait_for_idle(struct radeon_device *rdev) { unsigned i; @@ -462,7 +612,6 @@ static int r100_cp_init_microcode(struct radeon_device *rdev) } return err; } - static void r100_cp_load_microcode(struct radeon_device *rdev) { const __be32 *fw_data; @@ -829,7 +978,7 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 5); - reg = CP_PACKET0_GET_REG(header); + reg = header >> 2; mutex_lock(&p->rdev->ddev->mode_config.mutex); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { @@ -1841,7 +1990,7 @@ void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) r100_pll_errata_after_data(rdev); } -void r100_set_safe_registers(struct radeon_device *rdev) +int r100_init(struct radeon_device *rdev) { if (ASIC_IS_RN50(rdev)) { rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; @@ -1850,8 +1999,9 @@ void r100_set_safe_registers(struct radeon_device *rdev) rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); } else { - r200_set_safe_registers(rdev); + return r200_init(rdev); } + return 0; } /* @@ -2149,11 +2299,9 @@ void r100_bandwidth_update(struct radeon_device *rdev) mode1 = &rdev->mode_info.crtcs[0]->base.mode; pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8; } - if (!(rdev->flags & RADEON_SINGLE_CRTC)) { - if (rdev->mode_info.crtcs[1]->base.enabled) { - mode2 = &rdev->mode_info.crtcs[1]->base.mode; - pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; - } + if (rdev->mode_info.crtcs[1]->base.enabled) { + mode2 = &rdev->mode_info.crtcs[1]->base.mode; + pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8; } min_mem_eff.full = rfixed_const_8(0); @@ -2966,7 +3114,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) WREG32(R_000740_CP_CSQ_CNTL, 0); /* Save few CRTC registers */ - save->GENMO_WT = RREG8(R_0003C2_GENMO_WT); + save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); @@ -2976,7 +3124,7 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) } /* Disable VGA aperture access */ - WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & save->GENMO_WT); + WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); /* Disable cursor, overlay, crtc */ WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | @@ -3008,264 +3156,10 @@ void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) rdev->mc.vram_location); } /* 
Restore CRTC registers */ - WREG8(R_0003C2_GENMO_WT, save->GENMO_WT); + WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); if (!(rdev->flags & RADEON_SINGLE_CRTC)) { WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); } } - -void r100_vga_render_disable(struct radeon_device *rdev) -{ - u32 tmp; - - tmp = RREG8(R_0003C2_GENMO_WT); - WREG8(R_0003C2_GENMO_WT, C_0003C2_VGA_RAM_EN & tmp); -} - -static void r100_debugfs(struct radeon_device *rdev) -{ - int r; - - r = r100_debugfs_mc_info_init(rdev); - if (r) - dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n"); -} - -static void r100_mc_program(struct radeon_device *rdev) -{ - struct r100_mc_save save; - - /* Stops all mc clients */ - r100_mc_stop(rdev, &save); - if (rdev->flags & RADEON_IS_AGP) { - WREG32(R_00014C_MC_AGP_LOCATION, - S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | - S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); - WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); - if (rdev->family > CHIP_RV200) - WREG32(R_00015C_AGP_BASE_2, - upper_32_bits(rdev->mc.agp_base) & 0xff); - } else { - WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); - WREG32(R_000170_AGP_BASE, 0); - if (rdev->family > CHIP_RV200) - WREG32(R_00015C_AGP_BASE_2, 0); - } - /* Wait for mc idle */ - if (r100_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait for MC idle timeout.\n"); - /* Program MC, should be a 32bits limited address space */ - WREG32(R_000148_MC_FB_LOCATION, - S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | - S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); - r100_mc_resume(rdev, &save); -} - -void r100_clock_startup(struct radeon_device *rdev) -{ - u32 tmp; - - if (radeon_dynclks != -1 && radeon_dynclks) - radeon_legacy_set_clock_gating(rdev, 1); - /* We need to force on some of the block */ - tmp = RREG32_PLL(R_00000D_SCLK_CNTL); - tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); - if ((rdev->family == CHIP_RV250) || (rdev->family == CHIP_RV280)) - tmp |= S_00000D_FORCE_DISP1(1) | S_00000D_FORCE_DISP2(1); - WREG32_PLL(R_00000D_SCLK_CNTL, tmp); -} - -static int r100_startup(struct radeon_device *rdev) -{ - int r; - - r100_mc_program(rdev); - /* Resume clock */ - r100_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) */ - r100_gpu_init(rdev); - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - if (rdev->flags & RADEON_IS_PCI) { - r = r100_pci_gart_enable(rdev); - if (r) - return r; - } - /* Enable IRQ */ - rdev->irq.sw_int = true; - r100_irq_set(rdev); - /* 1M ring buffer */ - r = r100_cp_init(rdev, 1024 * 1024); - if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); - return r; - } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - r = r100_ib_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); - return r; - } - return 0; -} - -int r100_resume(struct radeon_device *rdev) -{ - /* Make sur GART are not working */ - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_disable(rdev); - /* Resume clock before doing reset */ - r100_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* post */ - radeon_combios_asic_init(rdev->ddev); - /* Resume clock after posting */ - r100_clock_startup(rdev); - return r100_startup(rdev); -} - -int r100_suspend(struct radeon_device *rdev) -{ - r100_cp_disable(rdev); - r100_wb_disable(rdev); - r100_irq_disable(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_disable(rdev); - return 0; -} - -void r100_fini(struct radeon_device *rdev) -{ - r100_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - radeon_gem_fini(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); - radeon_atombios_fini(rdev); - kfree(rdev->bios); - rdev->bios = NULL; -} - -int r100_mc_init(struct radeon_device *rdev) -{ - int r; - u32 tmp; - - /* Setup GPU memory space */ - rdev->mc.vram_location = 0xFFFFFFFFUL; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - if (rdev->flags & RADEON_IS_IGP) { - tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); - rdev->mc.vram_location = tmp << 16; - } - if (rdev->flags & RADEON_IS_AGP) { - r = radeon_agp_init(rdev); - if (r) { - printk(KERN_WARNING "[drm] Disabling AGP\n"); - rdev->flags &= ~RADEON_IS_AGP; - rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; - } else { - rdev->mc.gtt_location = rdev->mc.agp_base; - } - } - r = radeon_mc_setup(rdev); - if (r) - return r; - return 0; -} - -int r100_init(struct radeon_device *rdev) -{ - int r; - - /* Register debugfs file specific to this group of asics */ - r100_debugfs(rdev); - /* Disable VGA */ - r100_vga_render_disable(rdev); - /* Initialize scratch registers */ - radeon_scratch_init(rdev); - /* Initialize surface registers */ - radeon_surface_init(rdev); - /* TODO: disable VGA need to use VGA request */ - /* BIOS*/ - if (!radeon_get_bios(rdev)) { - if (ASIC_IS_AVIVO(rdev)) - return -EINVAL; - } - if (rdev->is_atom_bios) { - dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); - return -EINVAL; - } else { - r = radeon_combios_init(rdev); - if (r) - return r; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - radeon_combios_asic_init(rdev->ddev); - } - /* Set asic errata */ - r100_errata(rdev); - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); - /* Get vram informations */ - r100_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r100_mc_init(rdev); - if (r) - return r; - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) - return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - /* Memory manager */ - r = radeon_object_init(rdev); - if (r) - return r; - if (rdev->flags & RADEON_IS_PCI) { - r = r100_pci_gart_init(rdev); - if (r) - return r; - } - r100_set_safe_registers(rdev); - rdev->accel_working = true; - r = r100_startup(rdev); - if (r) { - /* Somethings want wront with the accel init stop accel */ - dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r100_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - rdev->accel_working = false; - } - return 0; -} diff --git a/trunk/drivers/gpu/drm/radeon/r100d.h b/trunk/drivers/gpu/drm/radeon/r100d.h index df29a630c466..c4b257ec920e 100644 --- a/trunk/drivers/gpu/drm/radeon/r100d.h +++ b/trunk/drivers/gpu/drm/radeon/r100d.h @@ -381,24 +381,6 @@ #define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) #define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) #define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF -#define R_000148_MC_FB_LOCATION 0x000148 -#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_000148_MC_FB_START 0xFFFF0000 -#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) -#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_000148_MC_FB_TOP 0x0000FFFF -#define R_00014C_MC_AGP_LOCATION 0x00014C -#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0) -#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) -#define C_00014C_MC_AGP_START 0xFFFF0000 -#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) -#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_00014C_MC_AGP_TOP 0x0000FFFF -#define R_000170_AGP_BASE 0x000170 -#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) -#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) -#define C_000170_AGP_BASE_ADDR 0x00000000 #define R_00023C_DISPLAY_BASE_ADDR 0x00023C #define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) #define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) @@ -421,25 +403,25 @@ #define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) #define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) #define C_000360_CUR2_LOCK 0x7FFFFFFF -#define R_0003C2_GENMO_WT 0x0003C0 -#define S_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) -#define G_0003C2_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) -#define C_0003C2_GENMO_MONO_ADDRESS_B 0xFE -#define S_0003C2_VGA_RAM_EN(x) (((x) & 0x1) << 1) -#define G_0003C2_VGA_RAM_EN(x) (((x) >> 1) & 0x1) -#define C_0003C2_VGA_RAM_EN 0xFD -#define S_0003C2_VGA_CKSEL(x) (((x) & 0x3) << 2) -#define G_0003C2_VGA_CKSEL(x) (((x) >> 2) & 0x3) -#define C_0003C2_VGA_CKSEL 0xF3 -#define S_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) -#define G_0003C2_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) -#define C_0003C2_ODD_EVEN_MD_PGSEL 0xDF -#define S_0003C2_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) -#define G_0003C2_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) -#define C_0003C2_VGA_HSYNC_POL 0xBF -#define S_0003C2_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) -#define G_0003C2_VGA_VSYNC_POL(x) 
(((x) >> 7) & 0x1) -#define C_0003C2_VGA_VSYNC_POL 0x7F +#define R_0003C0_GENMO_WT 0x0003C0 +#define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) +#define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) +#define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE +#define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) +#define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) +#define C_0003C0_VGA_RAM_EN 0xFFFFFFFD +#define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) +#define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) +#define C_0003C0_VGA_CKSEL 0xFFFFFFF3 +#define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) +#define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) +#define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF +#define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) +#define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) +#define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF +#define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) +#define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) +#define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F #define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 #define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) #define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) @@ -563,46 +545,6 @@ #define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) #define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) #define C_000774_SCRATCH_ADDR 0x0000001F -#define R_0007C0_CP_STAT 0x0007C0 -#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) -#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) -#define C_0007C0_MRU_BUSY 0xFFFFFFFE -#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) -#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) -#define C_0007C0_MWU_BUSY 0xFFFFFFFD -#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) -#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) -#define C_0007C0_RSIU_BUSY 0xFFFFFFFB -#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) -#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) -#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 -#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) -#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) -#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF -#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) -#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) -#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF -#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) -#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) -#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF -#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) -#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) -#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF -#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) -#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) -#define C_0007C0_CSI_BUSY 0xFFFFDFFF -#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) -#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) -#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF -#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) -#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) -#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF -#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) -#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) -#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF -#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) -#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) -#define C_0007C0_CP_BUSY 0x7FFFFFFF #define R_000E40_RBBM_STATUS 0x000E40 #define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) #define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) @@ -662,53 +604,4 @@ #define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) #define C_000E40_GUI_ACTIVE 0x7FFFFFFF 
- -#define R_00000D_SCLK_CNTL 0x00000D -#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) -#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) -#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 -#define S_00000D_TCLK_SRC_SEL(x) (((x) & 0x7) << 8) -#define G_00000D_TCLK_SRC_SEL(x) (((x) >> 8) & 0x7) -#define C_00000D_TCLK_SRC_SEL 0xFFFFF8FF -#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) -#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) -#define C_00000D_FORCE_CP 0xFFFEFFFF -#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) -#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) -#define C_00000D_FORCE_HDP 0xFFFDFFFF -#define S_00000D_FORCE_DISP(x) (((x) & 0x1) << 18) -#define G_00000D_FORCE_DISP(x) (((x) >> 18) & 0x1) -#define C_00000D_FORCE_DISP 0xFFFBFFFF -#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) -#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) -#define C_00000D_FORCE_TOP 0xFFF7FFFF -#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) -#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) -#define C_00000D_FORCE_E2 0xFFEFFFFF -#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) -#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) -#define C_00000D_FORCE_SE 0xFFDFFFFF -#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) -#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) -#define C_00000D_FORCE_IDCT 0xFFBFFFFF -#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) -#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) -#define C_00000D_FORCE_VIP 0xFF7FFFFF -#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) -#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) -#define C_00000D_FORCE_RE 0xFEFFFFFF -#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) -#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) -#define C_00000D_FORCE_PB 0xFDFFFFFF -#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) -#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) -#define C_00000D_FORCE_TAM 0xFBFFFFFF -#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) -#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) -#define C_00000D_FORCE_TDM 0xF7FFFFFF -#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) -#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) -#define C_00000D_FORCE_RB 0xEFFFFFFF - - #endif diff --git a/trunk/drivers/gpu/drm/radeon/r200.c b/trunk/drivers/gpu/drm/radeon/r200.c index eb740fc3549f..cf7fea5ff2e5 100644 --- a/trunk/drivers/gpu/drm/radeon/r200.c +++ b/trunk/drivers/gpu/drm/radeon/r200.c @@ -447,8 +447,9 @@ int r200_packet0_check(struct radeon_cs_parser *p, return 0; } -void r200_set_safe_registers(struct radeon_device *rdev) +int r200_init(struct radeon_device *rdev) { rdev->config.r100.reg_safe_bm = r200_reg_safe_bm; rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r200_reg_safe_bm); + return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/r300.c b/trunk/drivers/gpu/drm/radeon/r300.c index e08c4a8974ca..1ebea8cc8c93 100644 --- a/trunk/drivers/gpu/drm/radeon/r300.c +++ b/trunk/drivers/gpu/drm/radeon/r300.c @@ -33,16 +33,43 @@ #include "radeon_drm.h" #include "r100_track.h" #include "r300d.h" -#include "rv350d.h" + #include "r300_reg_safe.h" -/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */ +/* r300,r350,rv350,rv370,rv380 depends on : */ +void r100_hdp_reset(struct radeon_device *rdev); +int r100_cp_reset(struct radeon_device *rdev); +int r100_rb2d_reset(struct radeon_device *rdev); +int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); +int r100_pci_gart_enable(struct radeon_device *rdev); +void r100_mc_setup(struct radeon_device *rdev); +void r100_mc_disable_clients(struct radeon_device *rdev); +int 
r100_gui_wait_for_idle(struct radeon_device *rdev); +int r100_cs_packet_parse(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + unsigned idx); +int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); +int r100_cs_parse_packet0(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + const unsigned *auth, unsigned n, + radeon_packet0_check_t check); +int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, + struct radeon_cs_packet *pkt, + struct radeon_object *robj); + +/* This files gather functions specifics to: + * r300,r350,rv350,rv370,rv380 + * + * Some of these functions might be used by newer ASICs. + */ +void r300_gpu_init(struct radeon_device *rdev); +int r300_mc_wait_for_idle(struct radeon_device *rdev); +int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); + /* * rv370,rv380 PCIE GART */ -static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); - void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) { uint32_t tmp; @@ -155,6 +182,59 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev) radeon_gart_fini(rdev); } +/* + * MC + */ +int r300_mc_init(struct radeon_device *rdev) +{ + int r; + + if (r100_debugfs_rbbm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + } + + r300_gpu_init(rdev); + r100_pci_gart_disable(rdev); + if (rdev->flags & RADEON_IS_PCIE) { + rv370_pcie_gart_disable(rdev); + } + + /* Setup GPU memory space */ + rdev->mc.vram_location = 0xFFFFFFFFUL; + rdev->mc.gtt_location = 0xFFFFFFFFUL; + if (rdev->flags & RADEON_IS_AGP) { + r = radeon_agp_init(rdev); + if (r) { + printk(KERN_WARNING "[drm] Disabling AGP\n"); + rdev->flags &= ~RADEON_IS_AGP; + rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; + } else { + rdev->mc.gtt_location = rdev->mc.agp_base; + } + } + r = radeon_mc_setup(rdev); + if (r) { + return r; + } + + /* Program GPU memory space */ + r100_mc_disable_clients(rdev); + if (r300_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. 
Bad things might happen.\n"); + } + r100_mc_setup(rdev); + return 0; +} + +void r300_mc_fini(struct radeon_device *rdev) +{ +} + + +/* + * Fence emission + */ void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -180,6 +260,10 @@ void r300_fence_ring_emit(struct radeon_device *rdev, radeon_ring_write(rdev, RADEON_SW_INT_FIRE); } + +/* + * Global GPU functions + */ int r300_copy_dma(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, @@ -498,6 +582,11 @@ void r300_vram_info(struct radeon_device *rdev) r100_vram_init_sizes(rdev); } + +/* + * PCIE Lanes + */ + void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) { uint32_t link_width_cntl, mask; @@ -557,6 +646,10 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) } + +/* + * Debugfs info + */ #if defined(CONFIG_DEBUG_FS) static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data) { @@ -587,7 +680,7 @@ static struct drm_info_list rv370_pcie_gart_info_list[] = { }; #endif -static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) +int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1); @@ -596,6 +689,10 @@ static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) #endif } + +/* + * CS functions + */ static int r300_packet0_check(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt, unsigned idx, unsigned reg) @@ -1129,6 +1226,12 @@ void r300_set_reg_safe(struct radeon_device *rdev) rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); } +int r300_init(struct radeon_device *rdev) +{ + r300_set_reg_safe(rdev); + return 0; +} + void r300_mc_program(struct radeon_device *rdev) { struct r100_mc_save save; @@ -1162,198 +1265,3 @@ void r300_mc_program(struct radeon_device *rdev) S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); r100_mc_resume(rdev, &save); } - -void r300_clock_startup(struct radeon_device *rdev) -{ - u32 tmp; - - if (radeon_dynclks != -1 && radeon_dynclks) - radeon_legacy_set_clock_gating(rdev, 1); - /* We need to force on some of the block */ - tmp = RREG32_PLL(R_00000D_SCLK_CNTL); - tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); - if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) - tmp |= S_00000D_FORCE_VAP(1); - WREG32_PLL(R_00000D_SCLK_CNTL, tmp); -} - -static int r300_startup(struct radeon_device *rdev) -{ - int r; - - r300_mc_program(rdev); - /* Resume clock */ - r300_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) 
*/ - r300_gpu_init(rdev); - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - if (rdev->flags & RADEON_IS_PCIE) { - r = rv370_pcie_gart_enable(rdev); - if (r) - return r; - } - if (rdev->flags & RADEON_IS_PCI) { - r = r100_pci_gart_enable(rdev); - if (r) - return r; - } - /* Enable IRQ */ - rdev->irq.sw_int = true; - r100_irq_set(rdev); - /* 1M ring buffer */ - r = r100_cp_init(rdev, 1024 * 1024); - if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); - return r; - } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - r = r100_ib_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); - return r; - } - return 0; -} - -int r300_resume(struct radeon_device *rdev) -{ - /* Make sur GART are not working */ - if (rdev->flags & RADEON_IS_PCIE) - rv370_pcie_gart_disable(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_disable(rdev); - /* Resume clock before doing reset */ - r300_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* post */ - radeon_combios_asic_init(rdev->ddev); - /* Resume clock after posting */ - r300_clock_startup(rdev); - return r300_startup(rdev); -} - -int r300_suspend(struct radeon_device *rdev) -{ - r100_cp_disable(rdev); - r100_wb_disable(rdev); - r100_irq_disable(rdev); - if (rdev->flags & RADEON_IS_PCIE) - rv370_pcie_gart_disable(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_disable(rdev); - return 0; -} - -void r300_fini(struct radeon_device *rdev) -{ - r300_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - radeon_gem_fini(rdev); - if (rdev->flags & RADEON_IS_PCIE) - rv370_pcie_gart_fini(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); - radeon_atombios_fini(rdev); - kfree(rdev->bios); - rdev->bios = NULL; -} - -int r300_init(struct radeon_device *rdev) -{ - int r; - - /* Disable VGA */ - r100_vga_render_disable(rdev); - /* Initialize scratch registers */ - radeon_scratch_init(rdev); - /* Initialize surface registers */ - radeon_surface_init(rdev); - /* TODO: disable VGA need to use VGA request */ - /* BIOS*/ - if (!radeon_get_bios(rdev)) { - if (ASIC_IS_AVIVO(rdev)) - return -EINVAL; - } - if (rdev->is_atom_bios) { - dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); - return -EINVAL; - } else { - r = radeon_combios_init(rdev); - if (r) - return r; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - radeon_combios_asic_init(rdev->ddev); - } - /* Set asic errata */ - r300_errata(rdev); - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); - /* Get vram informations */ - r300_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) - return r; - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) - return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - /* Memory manager */ - r = radeon_object_init(rdev); - if (r) - return r; - if (rdev->flags & RADEON_IS_PCIE) { - r = rv370_pcie_gart_init(rdev); - if (r) - return r; - } - if (rdev->flags & RADEON_IS_PCI) { - r = r100_pci_gart_init(rdev); - if (r) - return r; - } - r300_set_reg_safe(rdev); - rdev->accel_working = true; - r = r300_startup(rdev); - if (r) { - /* Somethings want wront with the accel init stop accel */ - dev_err(rdev->dev, "Disabling GPU acceleration\n"); - r300_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - if (rdev->flags & RADEON_IS_PCIE) - rv370_pcie_gart_fini(rdev); - if (rdev->flags & RADEON_IS_PCI) - r100_pci_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - rdev->accel_working = false; - } - return 0; -} diff --git a/trunk/drivers/gpu/drm/radeon/r300d.h b/trunk/drivers/gpu/drm/radeon/r300d.h index 4c73114f0de9..d4fa3eb1074f 100644 --- a/trunk/drivers/gpu/drm/radeon/r300d.h +++ b/trunk/drivers/gpu/drm/radeon/r300d.h @@ -96,211 +96,6 @@ #define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) #define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) #define C_000170_AGP_BASE_ADDR 0x00000000 -#define R_0007C0_CP_STAT 0x0007C0 -#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) -#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) -#define C_0007C0_MRU_BUSY 0xFFFFFFFE -#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) -#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) -#define C_0007C0_MWU_BUSY 0xFFFFFFFD -#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) -#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) -#define C_0007C0_RSIU_BUSY 0xFFFFFFFB -#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) -#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) -#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 -#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) -#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) -#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF -#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) -#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) -#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF -#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) -#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) -#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF -#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) -#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) -#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF -#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) -#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) -#define C_0007C0_CSI_BUSY 0xFFFFDFFF -#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) -#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) -#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF -#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) -#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) -#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF -#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) -#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) -#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF -#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) -#define 
G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) -#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF -#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) -#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) -#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF -#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) -#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) -#define C_0007C0_CP_BUSY 0x7FFFFFFF -#define R_000E40_RBBM_STATUS 0x000E40 -#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) -#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) -#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 -#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) -#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) -#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF -#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) -#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) -#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF -#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) -#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) -#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF -#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) -#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) -#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF -#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) -#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) -#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF -#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) -#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) -#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF -#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) -#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) -#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF -#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) -#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) -#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF -#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) -#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) -#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF -#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) -#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) -#define C_000E40_E2_BUSY 0xFFFDFFFF -#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) -#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) -#define C_000E40_RB2D_BUSY 0xFFFBFFFF -#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) -#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) -#define C_000E40_RB3D_BUSY 0xFFF7FFFF -#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) -#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) -#define C_000E40_VAP_BUSY 0xFFEFFFFF -#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) -#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) -#define C_000E40_RE_BUSY 0xFFDFFFFF -#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) -#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) -#define C_000E40_TAM_BUSY 0xFFBFFFFF -#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) -#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) -#define C_000E40_TDM_BUSY 0xFF7FFFFF -#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) -#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) -#define C_000E40_PB_BUSY 0xFEFFFFFF -#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) -#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) -#define C_000E40_TIM_BUSY 0xFDFFFFFF -#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) -#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) -#define C_000E40_GA_BUSY 0xFBFFFFFF -#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) -#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) -#define C_000E40_CBA2D_BUSY 0xF7FFFFFF -#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) -#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) -#define 
C_000E40_GUI_ACTIVE 0x7FFFFFFF -#define R_00000D_SCLK_CNTL 0x00000D -#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) -#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) -#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 -#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) -#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) -#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 -#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) -#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) -#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF -#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) -#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) -#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF -#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) -#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) -#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF -#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) -#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) -#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F -#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) -#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) -#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF -#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) -#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) -#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF -#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) -#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) -#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF -#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) -#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) -#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF -#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) -#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) -#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF -#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) -#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) -#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF -#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) -#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) -#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF -#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) -#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) -#define C_00000D_FORCE_DISP2 0xFFFF7FFF -#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) -#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) -#define C_00000D_FORCE_CP 0xFFFEFFFF -#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) -#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) -#define C_00000D_FORCE_HDP 0xFFFDFFFF -#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) -#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) -#define C_00000D_FORCE_DISP1 0xFFFBFFFF -#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) -#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) -#define C_00000D_FORCE_TOP 0xFFF7FFFF -#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) -#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) -#define C_00000D_FORCE_E2 0xFFEFFFFF -#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) -#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) -#define C_00000D_FORCE_SE 0xFFDFFFFF -#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) -#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) -#define C_00000D_FORCE_IDCT 0xFFBFFFFF -#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) -#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) -#define C_00000D_FORCE_VIP 0xFF7FFFFF 
-#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) -#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) -#define C_00000D_FORCE_RE 0xFEFFFFFF -#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) -#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) -#define C_00000D_FORCE_PB 0xFDFFFFFF -#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) -#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) -#define C_00000D_FORCE_TAM 0xFBFFFFFF -#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) -#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) -#define C_00000D_FORCE_TDM 0xF7FFFFFF -#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) -#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) -#define C_00000D_FORCE_RB 0xEFFFFFFF -#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) -#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) -#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF -#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) -#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) -#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF -#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) -#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) -#define C_00000D_FORCE_OV0 0x7FFFFFFF - #endif diff --git a/trunk/drivers/gpu/drm/radeon/r420.c b/trunk/drivers/gpu/drm/radeon/r420.c index 5c7fe52de30e..49a2fdc57d27 100644 --- a/trunk/drivers/gpu/drm/radeon/r420.c +++ b/trunk/drivers/gpu/drm/radeon/r420.c @@ -155,9 +155,6 @@ static void r420_debugfs(struct radeon_device *rdev) static void r420_clock_resume(struct radeon_device *rdev) { u32 sclk_cntl; - - if (radeon_dynclks != -1 && radeon_dynclks) - radeon_atom_set_clock_gating(rdev, 1); sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); if (rdev->family == CHIP_R420) @@ -170,8 +167,6 @@ static int r420_startup(struct radeon_device *rdev) int r; r300_mc_program(rdev); - /* Resume clock */ - r420_clock_resume(rdev); /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ if (rdev->flags & RADEON_IS_PCIE) { @@ -272,6 +267,7 @@ int r420_init(struct radeon_device *rdev) { int r; + rdev->new_init_path = true; /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ diff --git a/trunk/drivers/gpu/drm/radeon/r420d.h b/trunk/drivers/gpu/drm/radeon/r420d.h index fc78d31a0b4a..a48a7db1e2aa 100644 --- a/trunk/drivers/gpu/drm/radeon/r420d.h +++ b/trunk/drivers/gpu/drm/radeon/r420d.h @@ -212,9 +212,9 @@ #define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) #define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) #define C_00000D_FORCE_E2 0xFFEFFFFF -#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) -#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) -#define C_00000D_FORCE_VAP 0xFFDFFFFF +#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) +#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_SE 0xFFDFFFFF #define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) #define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) #define C_00000D_FORCE_IDCT 0xFFBFFFFF @@ -224,24 +224,24 @@ #define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) #define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) #define C_00000D_FORCE_RE 0xFEFFFFFF -#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) -#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) -#define C_00000D_FORCE_SR 0xFDFFFFFF +#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_PB 0xFDFFFFFF #define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) #define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) #define 
C_00000D_FORCE_PX 0xFBFFFFFF #define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) #define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) #define C_00000D_FORCE_TX 0xF7FFFFFF -#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) -#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) -#define C_00000D_FORCE_US 0xEFFFFFFF +#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_RB 0xEFFFFFFF #define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) #define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) #define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF -#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) -#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) -#define C_00000D_FORCE_SU 0xBFFFFFFF +#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) +#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) +#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF #define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) #define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) #define C_00000D_FORCE_OV0 0x7FFFFFFF diff --git a/trunk/drivers/gpu/drm/radeon/r520.c b/trunk/drivers/gpu/drm/radeon/r520.c index a555b7b19b48..0bf13fccdaf2 100644 --- a/trunk/drivers/gpu/drm/radeon/r520.c +++ b/trunk/drivers/gpu/drm/radeon/r520.c @@ -186,7 +186,7 @@ static int r520_startup(struct radeon_device *rdev) } /* Enable IRQ */ rdev->irq.sw_int = true; - rs600_irq_set(rdev); + r100_irq_set(rdev); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -228,6 +228,7 @@ int r520_init(struct radeon_device *rdev) { int r; + rdev->new_init_path = true; /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 609719490ec2..2e4e60edbff4 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -65,11 +65,16 @@ MODULE_FIRMWARE("radeon/RV710_me.bin"); int r600_debugfs_mc_info_init(struct radeon_device *rdev); -/* r600,rv610,rv630,rv620,rv635,rv670 */ +/* This files gather functions specifics to: + * r600,rv610,rv630,rv620,rv635,rv670 + * + * Some of these functions might be used by newer ASICs. 
+ */ int r600_mc_wait_for_idle(struct radeon_device *rdev); void r600_gpu_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); + /* * R600 PCIE GART */ @@ -163,7 +168,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); @@ -220,40 +225,6 @@ void r600_pcie_gart_fini(struct radeon_device *rdev) radeon_gart_fini(rdev); } -void r600_agp_enable(struct radeon_device *rdev) -{ - u32 tmp; - int i; - - /* Setup L2 cache */ - WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | - ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | - EFFECTIVE_L2_QUEUE_SIZE(7)); - WREG32(VM_L2_CNTL2, 0); - WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1)); - /* Setup TLB control */ - tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | - SYSTEM_ACCESS_MODE_NOT_IN_SYS | - EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) | - ENABLE_WAIT_L2_QUERY; - WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING); - WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp); - WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); - WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE); - for (i = 0; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); -} - int r600_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; @@ -269,9 +240,14 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev) return -1; } -static void r600_mc_program(struct radeon_device *rdev) +static void r600_mc_resume(struct radeon_device *rdev) { - struct rv515_mc_save save; + u32 d1vga_control, d2vga_control; + u32 vga_render_control, vga_hdp_control; + u32 d1crtc_control, d2crtc_control; + u32 new_d1grph_primary, new_d1grph_secondary; + u32 new_d2grph_primary, new_d2grph_secondary; + u64 old_vram_start; u32 tmp; int i, j; @@ -285,51 +261,85 @@ static void r600_mc_program(struct radeon_device *rdev) } WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); - rv515_mc_stop(rdev, &save); + d1vga_control = RREG32(D1VGA_CONTROL); + d2vga_control = RREG32(D2VGA_CONTROL); + vga_render_control = RREG32(VGA_RENDER_CONTROL); + vga_hdp_control = RREG32(VGA_HDP_CONTROL); + d1crtc_control = RREG32(D1CRTC_CONTROL); + d2crtc_control = RREG32(D2CRTC_CONTROL); + old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); + new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); + new_d1grph_primary += rdev->mc.vram_start - old_vram_start; + new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; + new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); + new_d2grph_secondary = 
RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); + new_d2grph_primary += rdev->mc.vram_start - old_vram_start; + new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; + + /* Stop all video */ + WREG32(D1VGA_CONTROL, 0); + WREG32(D2VGA_CONTROL, 0); + WREG32(VGA_RENDER_CONTROL, 0); + WREG32(D1CRTC_UPDATE_LOCK, 1); + WREG32(D2CRTC_UPDATE_LOCK, 1); + WREG32(D1CRTC_CONTROL, 0); + WREG32(D2CRTC_CONTROL, 0); + WREG32(D1CRTC_UPDATE_LOCK, 0); + WREG32(D2CRTC_UPDATE_LOCK, 0); + + mdelay(1); if (r600_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + printk(KERN_WARNING "[drm] MC not idle !\n"); } - /* Lockout access through VGA aperture (doesn't exist before R600) */ + + /* Lockout access through VGA aperture*/ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + /* Update configuration */ - if (rdev->flags & RADEON_IS_AGP) { - if (rdev->mc.vram_start < rdev->mc.gtt_start) { - /* VRAM before AGP */ - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, - rdev->mc.vram_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - rdev->mc.gtt_end >> 12); - } else { - /* VRAM after AGP */ - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, - rdev->mc.gtt_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - rdev->mc.vram_end >> 12); - } - } else { - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12); - } + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); - tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); - WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF); + WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); if (rdev->flags & RADEON_IS_AGP) { - WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22); - WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22); + WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); + WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); } else { WREG32(MC_VM_AGP_BASE, 0); WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); } + WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); + WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); + WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); + WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); + WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); + + /* Unlock host access */ + WREG32(VGA_HDP_CONTROL, vga_hdp_control); + + mdelay(1); if (r600_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + printk(KERN_WARNING "[drm] MC not idle !\n"); } - rv515_mc_resume(rdev, &save); + + /* Restore video state */ + WREG32(D1CRTC_UPDATE_LOCK, 1); + WREG32(D2CRTC_UPDATE_LOCK, 1); + WREG32(D1CRTC_CONTROL, d1crtc_control); + WREG32(D2CRTC_CONTROL, d2crtc_control); + WREG32(D1CRTC_UPDATE_LOCK, 0); + WREG32(D2CRTC_UPDATE_LOCK, 0); + WREG32(D1VGA_CONTROL, d1vga_control); + WREG32(D2VGA_CONTROL, d2vga_control); + WREG32(VGA_RENDER_CONTROL, vga_render_control); + /* we need to own VRAM, so turn off the VGA renderer here * to stop it overwriting our objects */ rv515_vga_render_disable(rdev); @@ -435,9 +445,9 @@ int r600_mc_init(struct radeon_device *rdev) } } 
rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -453,7 +463,6 @@ int r600_mc_init(struct radeon_device *rdev) */ int r600_gpu_soft_reset(struct radeon_device *rdev) { - struct rv515_mc_save save; u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) | S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) | S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) | @@ -471,25 +480,13 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) | S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1); u32 srbm_reset = 0; - u32 tmp; - dev_info(rdev->dev, "GPU softreset \n"); - dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", - RREG32(R_008010_GRBM_STATUS)); - dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", - RREG32(R_008014_GRBM_STATUS2)); - dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", - RREG32(R_000E50_SRBM_STATUS)); - rv515_mc_stop(rdev, &save); - if (r600_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); - } /* Disable CP parsing/prefetching */ WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff)); /* Check if any of the rendering block is busy and reset it */ if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) || (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) { - tmp = S_008020_SOFT_RESET_CR(1) | + WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CR(1) | S_008020_SOFT_RESET_DB(1) | S_008020_SOFT_RESET_CB(1) | S_008020_SOFT_RESET_PA(1) | @@ -501,18 +498,14 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) S_008020_SOFT_RESET_TC(1) | S_008020_SOFT_RESET_TA(1) | S_008020_SOFT_RESET_VC(1) | - S_008020_SOFT_RESET_VGT(1); - dev_info(rdev->dev, " R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(R_008020_GRBM_SOFT_RESET, tmp); + S_008020_SOFT_RESET_VGT(1)); (void)RREG32(R_008020_GRBM_SOFT_RESET); udelay(50); WREG32(R_008020_GRBM_SOFT_RESET, 0); (void)RREG32(R_008020_GRBM_SOFT_RESET); } /* Reset CP (we always reset CP) */ - tmp = S_008020_SOFT_RESET_CP(1); - dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp); - WREG32(R_008020_GRBM_SOFT_RESET, tmp); + WREG32(R_008020_GRBM_SOFT_RESET, S_008020_SOFT_RESET_CP(1)); (void)RREG32(R_008020_GRBM_SOFT_RESET); udelay(50); WREG32(R_008020_GRBM_SOFT_RESET, 0); @@ -540,14 +533,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) srbm_reset |= S_000E60_SOFT_RESET_RLC(1); if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS))) srbm_reset |= S_000E60_SOFT_RESET_SEM(1); - if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS))) - srbm_reset |= S_000E60_SOFT_RESET_BIF(1); - dev_info(rdev->dev, " R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset); - WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); - (void)RREG32(R_000E60_SRBM_SOFT_RESET); - udelay(50); - WREG32(R_000E60_SRBM_SOFT_RESET, 0); - (void)RREG32(R_000E60_SRBM_SOFT_RESET); WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset); (void)RREG32(R_000E60_SRBM_SOFT_RESET); udelay(50); @@ -555,17 +540,6 @@ int r600_gpu_soft_reset(struct radeon_device *rdev) (void)RREG32(R_000E60_SRBM_SOFT_RESET); /* Wait a little for things to settle down */ udelay(50); - dev_info(rdev->dev, " R_008010_GRBM_STATUS=0x%08X\n", - 
RREG32(R_008010_GRBM_STATUS)); - dev_info(rdev->dev, " R_008014_GRBM_STATUS2=0x%08X\n", - RREG32(R_008014_GRBM_STATUS2)); - dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n", - RREG32(R_000E50_SRBM_STATUS)); - /* After reset we need to reinit the asic as GPU often endup in an - * incoherent state. - */ - atom_asic_init(rdev->mode_info.atom_context); - rv515_mc_resume(rdev, &save); return 0; } @@ -1376,47 +1350,32 @@ int r600_ring_test(struct radeon_device *rdev) return r; } -void r600_wb_disable(struct radeon_device *rdev) -{ - WREG32(SCRATCH_UMSK, 0); - if (rdev->wb.wb_obj) { - radeon_object_kunmap(rdev->wb.wb_obj); - radeon_object_unpin(rdev->wb.wb_obj); - } -} - -void r600_wb_fini(struct radeon_device *rdev) -{ - r600_wb_disable(rdev); - if (rdev->wb.wb_obj) { - radeon_object_unref(&rdev->wb.wb_obj); - rdev->wb.wb = NULL; - rdev->wb.wb_obj = NULL; - } -} - -int r600_wb_enable(struct radeon_device *rdev) +/* + * Writeback + */ +int r600_wb_init(struct radeon_device *rdev) { int r; if (rdev->wb.wb_obj == NULL) { - r = radeon_object_create(rdev, NULL, 4096, true, - RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj); + r = radeon_object_create(rdev, NULL, 4096, + true, + RADEON_GEM_DOMAIN_GTT, + false, &rdev->wb.wb_obj); if (r) { - dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r); + DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r); return r; } - r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT, - &rdev->wb.gpu_addr); + r = radeon_object_pin(rdev->wb.wb_obj, + RADEON_GEM_DOMAIN_GTT, + &rdev->wb.gpu_addr); if (r) { - dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r); - r600_wb_fini(rdev); + DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r); return r; } r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb); if (r) { - dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r); - r600_wb_fini(rdev); + DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r); return r; } } @@ -1427,6 +1386,21 @@ int r600_wb_enable(struct radeon_device *rdev) return 0; } +void r600_wb_fini(struct radeon_device *rdev) +{ + if (rdev->wb.wb_obj) { + radeon_object_kunmap(rdev->wb.wb_obj); + radeon_object_unpin(rdev->wb.wb_obj); + radeon_object_unref(&rdev->wb.wb_obj); + rdev->wb.wb = NULL; + rdev->wb.wb_obj = NULL; + } +} + + +/* + * CS + */ void r600_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence) { @@ -1503,14 +1477,11 @@ int r600_startup(struct radeon_device *rdev) { int r; - r600_mc_program(rdev); - if (rdev->flags & RADEON_IS_AGP) { - r600_agp_enable(rdev); - } else { - r = r600_pcie_gart_enable(rdev); - if (r) - return r; - } + r600_gpu_reset(rdev); + r600_mc_resume(rdev); + r = r600_pcie_gart_enable(rdev); + if (r) + return r; r600_gpu_init(rdev); r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, @@ -1529,8 +1500,9 @@ int r600_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); + r = r600_wb_init(rdev); + if (r) + return r; return 0; } @@ -1552,12 +1524,15 @@ int r600_resume(struct radeon_device *rdev) { int r; - /* Do not reset GPU before posting, on r600 hw unlike on r500 hw, - * posting will perform necessary task to bring back GPU into good - * shape. - */ + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? 
*/ + } /* post card */ - atom_asic_init(rdev->mode_info.atom_context); + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } /* Initialize clocks */ r = radeon_clocks_init(rdev); if (r) { @@ -1570,7 +1545,7 @@ int r600_resume(struct radeon_device *rdev) return r; } - r = r600_ib_test(rdev); + r = radeon_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; @@ -1578,12 +1553,13 @@ int r600_resume(struct radeon_device *rdev) return r; } + int r600_suspend(struct radeon_device *rdev) { /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); rdev->cp.ready = false; - r600_wb_disable(rdev); + r600_pcie_gart_disable(rdev); /* unpin shaders bo */ radeon_object_unpin(rdev->r600_blit.shader_obj); @@ -1600,6 +1576,7 @@ int r600_init(struct radeon_device *rdev) { int r; + rdev->new_init_path = true; r = radeon_dummy_page_init(rdev); if (r) return r; @@ -1616,10 +1593,8 @@ int r600_init(struct radeon_device *rdev) return -EINVAL; } /* Must be an ATOMBIOS */ - if (!rdev->is_atom_bios) { - dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); + if (!rdev->is_atom_bios) return -EINVAL; - } r = radeon_atombios_init(rdev); if (r) return r; @@ -1641,8 +1616,15 @@ int r600_init(struct radeon_device *rdev) if (r) return r; r = r600_mc_init(rdev); - if (r) + if (r) { + if (rdev->flags & RADEON_IS_AGP) { + /* Retry with disabling AGP */ + r600_fini(rdev); + rdev->flags &= ~RADEON_IS_AGP; + return r600_init(rdev); + } return r; + } /* Memory manager */ r = radeon_object_init(rdev); if (r) @@ -1671,10 +1653,12 @@ int r600_init(struct radeon_device *rdev) r = r600_startup(rdev); if (r) { - r600_suspend(rdev); - r600_wb_fini(rdev); - radeon_ring_fini(rdev); - r600_pcie_gart_fini(rdev); + if (rdev->flags & RADEON_IS_AGP) { + /* Retry with disabling AGP */ + r600_fini(rdev); + rdev->flags &= ~RADEON_IS_AGP; + return r600_init(rdev); + } rdev->accel_working = false; } if (rdev->accel_working) { @@ -1683,7 +1667,7 @@ int r600_init(struct radeon_device *rdev) DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); rdev->accel_working = false; } - r = r600_ib_test(rdev); + r = radeon_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); rdev->accel_working = false; @@ -1699,15 +1683,19 @@ void r600_fini(struct radeon_device *rdev) r600_blit_fini(rdev); radeon_ring_fini(rdev); - r600_wb_fini(rdev); r600_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); +#if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) radeon_agp_fini(rdev); +#endif radeon_object_fini(rdev); - radeon_atombios_fini(rdev); + if (rdev->is_atom_bios) + radeon_atombios_fini(rdev); + else + radeon_combios_fini(rdev); kfree(rdev->bios); rdev->bios = NULL; radeon_dummy_page_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit.c b/trunk/drivers/gpu/drm/radeon/r600_blit.c index dec501081608..d988eece0187 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_blit.c +++ b/trunk/drivers/gpu/drm/radeon/r600_blit.c @@ -582,6 +582,8 @@ r600_blit_copy(struct drm_device *dev, u64 vb_addr; u32 *vb; + vb = r600_nomm_get_vb_ptr(dev); + if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; @@ -617,8 +619,8 @@ r600_blit_copy(struct drm_device *dev, if (!dev_priv->blit_vb) return; set_shaders(dev); + vb = r600_nomm_get_vb_ptr(dev); } - vb = r600_nomm_get_vb_ptr(dev); vb[0] = i2f(dst_x); vb[1] = 0; @@ -706,8 +708,8 @@ r600_blit_copy(struct 
drm_device *dev, return; set_shaders(dev); + vb = r600_nomm_get_vb_ptr(dev); } - vb = r600_nomm_get_vb_ptr(dev); vb[0] = i2f(dst_x / 4); vb[1] = 0; @@ -775,6 +777,8 @@ r600_blit_swap(struct drm_device *dev, u64 vb_addr; u32 *vb; + vb = r600_nomm_get_vb_ptr(dev); + if ((dev_priv->blit_vb->used + 48) > dev_priv->blit_vb->total) { r600_nomm_put_vb(dev); @@ -783,8 +787,8 @@ r600_blit_swap(struct drm_device *dev, return; set_shaders(dev); + vb = r600_nomm_get_vb_ptr(dev); } - vb = r600_nomm_get_vb_ptr(dev); if (cpp == 4) { cb_format = COLOR_8_8_8_8; diff --git a/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c b/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c index 93108bb31d1d..acae33e2ad51 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/trunk/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -610,6 +610,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr, size_bytes, rdev->r600_blit.vb_used); + vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) { max_bytes = 8192; @@ -652,7 +653,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, vb = r600_nomm_get_vb_ptr(dev); #endif } - vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); vb[0] = i2f(dst_x); vb[1] = 0; @@ -747,7 +747,6 @@ void r600_kms_blit_copy(struct radeon_device *rdev, vb = r600_nomm_get_vb_ptr(dev); } #endif - vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used); vb[0] = i2f(dst_x / 4); vb[1] = 0; diff --git a/trunk/drivers/gpu/drm/radeon/r600_cs.c b/trunk/drivers/gpu/drm/radeon/r600_cs.c index 17e42195c632..d28970db6a2d 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cs.c +++ b/trunk/drivers/gpu/drm/radeon/r600_cs.c @@ -252,7 +252,7 @@ static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p) header = radeon_get_ib_value(p, h_idx); crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1); - reg = CP_PACKET0_GET_REG(header); + reg = header >> 2; mutex_lock(&p->rdev->ddev->mode_config.mutex); obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); if (!obj) { diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h index 9b64d47f1f82..4a9028a85c9b 100644 --- a/trunk/drivers/gpu/drm/radeon/r600d.h +++ b/trunk/drivers/gpu/drm/radeon/r600d.h @@ -643,7 +643,6 @@ #define G_000E50_MCDW_BUSY(x) (((x) >> 13) & 1) #define G_000E50_SEM_BUSY(x) (((x) >> 14) & 1) #define G_000E50_RLC_BUSY(x) (((x) >> 15) & 1) -#define G_000E50_BIF_BUSY(x) (((x) >> 29) & 1) #define R_000E60_SRBM_SOFT_RESET 0x0E60 #define S_000E60_SOFT_RESET_BIF(x) (((x) & 1) << 1) #define S_000E60_SOFT_RESET_CG(x) (((x) & 1) << 2) diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 5ab35b81c86b..950b346e343f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -590,8 +590,18 @@ struct radeon_asic { void (*fini)(struct radeon_device *rdev); int (*resume)(struct radeon_device *rdev); int (*suspend)(struct radeon_device *rdev); + void (*errata)(struct radeon_device *rdev); + void (*vram_info)(struct radeon_device *rdev); void (*vga_set_state)(struct radeon_device *rdev, bool state); int (*gpu_reset)(struct radeon_device *rdev); + int (*mc_init)(struct radeon_device *rdev); + void (*mc_fini)(struct radeon_device *rdev); + int (*wb_init)(struct radeon_device *rdev); + void (*wb_fini)(struct radeon_device *rdev); + int (*gart_init)(struct radeon_device 
*rdev); + void (*gart_fini)(struct radeon_device *rdev); + int (*gart_enable)(struct radeon_device *rdev); + void (*gart_disable)(struct radeon_device *rdev); void (*gart_tlb_flush)(struct radeon_device *rdev); int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); @@ -601,6 +611,7 @@ struct radeon_asic { void (*ring_start)(struct radeon_device *rdev); int (*ring_test)(struct radeon_device *rdev); void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); + int (*ib_test)(struct radeon_device *rdev); int (*irq_set)(struct radeon_device *rdev); int (*irq_process)(struct radeon_device *rdev); u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); @@ -778,6 +789,7 @@ struct radeon_device { bool shutdown; bool suspend; bool need_dma32; + bool new_init_path; bool accel_working; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; const struct firmware *me_fw; /* all family ME firmware */ @@ -937,14 +949,28 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_resume(rdev) (rdev)->asic->resume((rdev)) #define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) +#define radeon_errata(rdev) (rdev)->asic->errata((rdev)) +#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) #define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) +#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) +#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) +#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) +#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) +#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) +#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) +#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) +#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) #define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) +#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) +#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) +#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) #define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) #define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) #define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) #define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) +#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev)) #define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) #define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) #define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) @@ -970,7 +996,6 @@ extern void radeon_clocks_fini(struct radeon_device *rdev); extern void radeon_scratch_init(struct radeon_device *rdev); extern void radeon_surface_init(struct radeon_device *rdev); extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); -extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ @@ -1006,27 +1031,11 @@ extern int r100_wb_init(struct 
radeon_device *rdev); extern void r100_hdp_reset(struct radeon_device *rdev); extern int r100_rb2d_reset(struct radeon_device *rdev); extern int r100_cp_reset(struct radeon_device *rdev); -extern void r100_vga_render_disable(struct radeon_device *rdev); -extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - struct radeon_object *robj); -extern int r100_cs_parse_packet0(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - const unsigned *auth, unsigned n, - radeon_packet0_check_t check); -extern int r100_cs_packet_parse(struct radeon_cs_parser *p, - struct radeon_cs_packet *pkt, - unsigned idx); - -/* rv200,rv250,rv280 */ -extern void r200_set_safe_registers(struct radeon_device *rdev); /* r300,r350,rv350,rv370,rv380 */ extern void r300_set_reg_safe(struct radeon_device *rdev); extern void r300_mc_program(struct radeon_device *rdev); extern void r300_vram_info(struct radeon_device *rdev); -extern void r300_clock_startup(struct radeon_device *rdev); -extern int r300_mc_wait_for_idle(struct radeon_device *rdev); extern int rv370_pcie_gart_init(struct radeon_device *rdev); extern void rv370_pcie_gart_fini(struct radeon_device *rdev); extern int rv370_pcie_gart_enable(struct radeon_device *rdev); @@ -1057,18 +1066,6 @@ extern void rv515_clock_startup(struct radeon_device *rdev); extern void rv515_debugfs(struct radeon_device *rdev); extern int rv515_suspend(struct radeon_device *rdev); -/* rs400 */ -extern int rs400_gart_init(struct radeon_device *rdev); -extern int rs400_gart_enable(struct radeon_device *rdev); -extern void rs400_gart_adjust_size(struct radeon_device *rdev); -extern void rs400_gart_disable(struct radeon_device *rdev); -extern void rs400_gart_fini(struct radeon_device *rdev); - -/* rs600 */ -extern void rs600_set_safe_registers(struct radeon_device *rdev); -extern int rs600_irq_set(struct radeon_device *rdev); -extern void rs600_irq_disable(struct radeon_device *rdev); - /* rs690, rs740 */ extern void rs690_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode1, @@ -1086,9 +1083,8 @@ extern int r600_pcie_gart_init(struct radeon_device *rdev); extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); extern int r600_ib_test(struct radeon_device *rdev); extern int r600_ring_test(struct radeon_device *rdev); +extern int r600_wb_init(struct radeon_device *rdev); extern void r600_wb_fini(struct radeon_device *rdev); -extern int r600_wb_enable(struct radeon_device *rdev); -extern void r600_wb_disable(struct radeon_device *rdev); extern void r600_scratch_init(struct radeon_device *rdev); extern int r600_blit_init(struct radeon_device *rdev); extern void r600_blit_fini(struct radeon_device *rdev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index c3532c7a6f3f..c8a4e7b5663d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -41,17 +41,28 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable); /* * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ -extern int r100_init(struct radeon_device *rdev); -extern void r100_fini(struct radeon_device *rdev); -extern int r100_suspend(struct radeon_device *rdev); -extern int r100_resume(struct radeon_device *rdev); +int r100_init(struct radeon_device *rdev); +int r200_init(struct radeon_device *rdev); uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); void r100_mm_wreg(struct radeon_device *rdev, 
uint32_t reg, uint32_t v); +void r100_errata(struct radeon_device *rdev); +void r100_vram_info(struct radeon_device *rdev); void r100_vga_set_state(struct radeon_device *rdev, bool state); int r100_gpu_reset(struct radeon_device *rdev); +int r100_mc_init(struct radeon_device *rdev); +void r100_mc_fini(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); +int r100_wb_init(struct radeon_device *rdev); +void r100_wb_fini(struct radeon_device *rdev); +int r100_pci_gart_init(struct radeon_device *rdev); +void r100_pci_gart_fini(struct radeon_device *rdev); +int r100_pci_gart_enable(struct radeon_device *rdev); +void r100_pci_gart_disable(struct radeon_device *rdev); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); +void r100_cp_fini(struct radeon_device *rdev); +void r100_cp_disable(struct radeon_device *rdev); void r100_cp_commit(struct radeon_device *rdev); void r100_ring_start(struct radeon_device *rdev); int r100_irq_set(struct radeon_device *rdev); @@ -72,21 +83,33 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, int r100_clear_surface_reg(struct radeon_device *rdev, int reg); void r100_bandwidth_update(struct radeon_device *rdev); void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); +int r100_ib_test(struct radeon_device *rdev); int r100_ring_test(struct radeon_device *rdev); static struct radeon_asic r100_asic = { .init = &r100_init, - .fini = &r100_fini, - .suspend = &r100_suspend, - .resume = &r100_resume, + .errata = &r100_errata, + .vram_info = &r100_vram_info, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r100_gpu_reset, + .mc_init = &r100_mc_init, + .mc_fini = &r100_mc_fini, + .wb_init = &r100_wb_init, + .wb_fini = &r100_wb_fini, + .gart_init = &r100_pci_gart_init, + .gart_fini = &r100_pci_gart_fini, + .gart_enable = &r100_pci_gart_enable, + .gart_disable = &r100_pci_gart_disable, .gart_tlb_flush = &r100_pci_gart_tlb_flush, .gart_set_page = &r100_pci_gart_set_page, + .cp_init = &r100_cp_init, + .cp_fini = &r100_cp_fini, + .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r100_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = &r100_ib_test, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -108,38 +131,55 @@ static struct radeon_asic r100_asic = { /* * r300,r350,rv350,rv380 */ -extern int r300_init(struct radeon_device *rdev); -extern void r300_fini(struct radeon_device *rdev); -extern int r300_suspend(struct radeon_device *rdev); -extern int r300_resume(struct radeon_device *rdev); -extern int r300_gpu_reset(struct radeon_device *rdev); -extern void r300_ring_start(struct radeon_device *rdev); -extern void r300_fence_ring_emit(struct radeon_device *rdev, - struct radeon_fence *fence); -extern int r300_cs_parse(struct radeon_cs_parser *p); -extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); -extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); -extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); -extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); -extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); -extern int r300_copy_dma(struct radeon_device *rdev, - uint64_t src_offset, - uint64_t 
dst_offset, - unsigned num_pages, - struct radeon_fence *fence); +int r300_init(struct radeon_device *rdev); +void r300_errata(struct radeon_device *rdev); +void r300_vram_info(struct radeon_device *rdev); +int r300_gpu_reset(struct radeon_device *rdev); +int r300_mc_init(struct radeon_device *rdev); +void r300_mc_fini(struct radeon_device *rdev); +void r300_ring_start(struct radeon_device *rdev); +void r300_fence_ring_emit(struct radeon_device *rdev, + struct radeon_fence *fence); +int r300_cs_parse(struct radeon_cs_parser *p); +int rv370_pcie_gart_init(struct radeon_device *rdev); +void rv370_pcie_gart_fini(struct radeon_device *rdev); +int rv370_pcie_gart_enable(struct radeon_device *rdev); +void rv370_pcie_gart_disable(struct radeon_device *rdev); +void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); +int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); +void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); +void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes); +int r300_copy_dma(struct radeon_device *rdev, + uint64_t src_offset, + uint64_t dst_offset, + unsigned num_pages, + struct radeon_fence *fence); + static struct radeon_asic r300_asic = { .init = &r300_init, - .fini = &r300_fini, - .suspend = &r300_suspend, - .resume = &r300_resume, + .errata = &r300_errata, + .vram_info = &r300_vram_info, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, + .mc_init = &r300_mc_init, + .mc_fini = &r300_mc_fini, + .wb_init = &r100_wb_init, + .wb_fini = &r100_wb_fini, + .gart_init = &r100_pci_gart_init, + .gart_fini = &r100_pci_gart_fini, + .gart_enable = &r100_pci_gart_enable, + .gart_disable = &r100_pci_gart_disable, .gart_tlb_flush = &r100_pci_gart_tlb_flush, .gart_set_page = &r100_pci_gart_set_page, + .cp_init = &r100_cp_init, + .cp_fini = &r100_cp_fini, + .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = &r100_ib_test, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -169,14 +209,26 @@ static struct radeon_asic r420_asic = { .fini = &r420_fini, .suspend = &r420_suspend, .resume = &r420_resume, + .errata = NULL, + .vram_info = NULL, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, + .mc_init = NULL, + .mc_fini = NULL, + .wb_init = NULL, + .wb_fini = NULL, + .gart_enable = NULL, + .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, + .cp_init = NULL, + .cp_fini = NULL, + .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = NULL, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -198,27 +250,42 @@ static struct radeon_asic r420_asic = { /* * rs400,rs480 */ -extern int rs400_init(struct radeon_device *rdev); -extern void rs400_fini(struct radeon_device *rdev); -extern int rs400_suspend(struct radeon_device *rdev); -extern int rs400_resume(struct radeon_device *rdev); +void rs400_errata(struct radeon_device *rdev); +void rs400_vram_info(struct radeon_device *rdev); +int rs400_mc_init(struct radeon_device *rdev); +void rs400_mc_fini(struct radeon_device *rdev); +int rs400_gart_init(struct radeon_device *rdev); 
+void rs400_gart_fini(struct radeon_device *rdev); +int rs400_gart_enable(struct radeon_device *rdev); +void rs400_gart_disable(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); static struct radeon_asic rs400_asic = { - .init = &rs400_init, - .fini = &rs400_fini, - .suspend = &rs400_suspend, - .resume = &rs400_resume, + .init = &r300_init, + .errata = &rs400_errata, + .vram_info = &rs400_vram_info, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, + .mc_init = &rs400_mc_init, + .mc_fini = &rs400_mc_fini, + .wb_init = &r100_wb_init, + .wb_fini = &r100_wb_fini, + .gart_init = &rs400_gart_init, + .gart_fini = &rs400_gart_fini, + .gart_enable = &rs400_gart_enable, + .gart_disable = &rs400_gart_disable, .gart_tlb_flush = &rs400_gart_tlb_flush, .gart_set_page = &rs400_gart_set_page, + .cp_init = &r100_cp_init, + .cp_fini = &r100_cp_fini, + .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = &r100_ib_test, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -240,13 +307,18 @@ static struct radeon_asic rs400_asic = { /* * rs600. */ -extern int rs600_init(struct radeon_device *rdev); -extern void rs600_fini(struct radeon_device *rdev); -extern int rs600_suspend(struct radeon_device *rdev); -extern int rs600_resume(struct radeon_device *rdev); +int rs600_init(struct radeon_device *rdev); +void rs600_errata(struct radeon_device *rdev); +void rs600_vram_info(struct radeon_device *rdev); +int rs600_mc_init(struct radeon_device *rdev); +void rs600_mc_fini(struct radeon_device *rdev); int rs600_irq_set(struct radeon_device *rdev); int rs600_irq_process(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); +int rs600_gart_init(struct radeon_device *rdev); +void rs600_gart_fini(struct radeon_device *rdev); +int rs600_gart_enable(struct radeon_device *rdev); +void rs600_gart_disable(struct radeon_device *rdev); void rs600_gart_tlb_flush(struct radeon_device *rdev); int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -254,17 +326,28 @@ void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs600_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs600_asic = { .init = &rs600_init, - .fini = &rs600_fini, - .suspend = &rs600_suspend, - .resume = &rs600_resume, + .errata = &rs600_errata, + .vram_info = &rs600_vram_info, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, + .mc_init = &rs600_mc_init, + .mc_fini = &rs600_mc_fini, + .wb_init = &r100_wb_init, + .wb_fini = &r100_wb_fini, + .gart_init = &rs600_gart_init, + .gart_fini = &rs600_gart_fini, + .gart_enable = &rs600_gart_enable, + .gart_disable = &rs600_gart_disable, .gart_tlb_flush = &rs600_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, + .cp_init = &r100_cp_init, + .cp_fini = &r100_cp_fini, + .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = &r100_ib_test, .irq_set = &rs600_irq_set, 
.irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -284,26 +367,37 @@ static struct radeon_asic rs600_asic = { /* * rs690,rs740 */ -int rs690_init(struct radeon_device *rdev); -void rs690_fini(struct radeon_device *rdev); -int rs690_resume(struct radeon_device *rdev); -int rs690_suspend(struct radeon_device *rdev); +void rs690_errata(struct radeon_device *rdev); +void rs690_vram_info(struct radeon_device *rdev); +int rs690_mc_init(struct radeon_device *rdev); +void rs690_mc_fini(struct radeon_device *rdev); uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void rs690_bandwidth_update(struct radeon_device *rdev); static struct radeon_asic rs690_asic = { - .init = &rs690_init, - .fini = &rs690_fini, - .suspend = &rs690_suspend, - .resume = &rs690_resume, + .init = &rs600_init, + .errata = &rs690_errata, + .vram_info = &rs690_vram_info, .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, + .mc_init = &rs690_mc_init, + .mc_fini = &rs690_mc_fini, + .wb_init = &r100_wb_init, + .wb_fini = &r100_wb_fini, + .gart_init = &rs400_gart_init, + .gart_fini = &rs400_gart_fini, + .gart_enable = &rs400_gart_enable, + .gart_disable = &rs400_gart_disable, .gart_tlb_flush = &rs400_gart_tlb_flush, .gart_set_page = &rs400_gart_set_page, + .cp_init = &r100_cp_init, + .cp_fini = &r100_cp_fini, + .cp_disable = &r100_cp_disable, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = &r100_ib_test, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -341,14 +435,28 @@ static struct radeon_asic rv515_asic = { .fini = &rv515_fini, .suspend = &rv515_suspend, .resume = &rv515_resume, + .errata = NULL, + .vram_info = NULL, .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, + .mc_init = NULL, + .mc_fini = NULL, + .wb_init = NULL, + .wb_fini = NULL, + .gart_init = &rv370_pcie_gart_init, + .gart_fini = &rv370_pcie_gart_fini, + .gart_enable = NULL, + .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, + .cp_init = NULL, + .cp_fini = NULL, + .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &rv515_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = NULL, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -377,14 +485,28 @@ static struct radeon_asic r520_asic = { .fini = &rv515_fini, .suspend = &rv515_suspend, .resume = &r520_resume, + .errata = NULL, + .vram_info = NULL, .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, + .mc_init = NULL, + .mc_fini = NULL, + .wb_init = NULL, + .wb_fini = NULL, + .gart_init = NULL, + .gart_fini = NULL, + .gart_enable = NULL, + .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, + .cp_init = NULL, + .cp_fini = NULL, + .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &rv515_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, + .ib_test = NULL, .irq_set = &rs600_irq_set, .irq_process = &rs600_irq_process, .get_vblank_counter = &rs600_get_vblank_counter, @@ -432,23 +554,37 @@ int r600_set_surface_reg(struct radeon_device *rdev, int reg, uint32_t offset, uint32_t obj_size); int 
r600_clear_surface_reg(struct radeon_device *rdev, int reg); void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); +int r600_ib_test(struct radeon_device *rdev); int r600_ring_test(struct radeon_device *rdev); int r600_copy_blit(struct radeon_device *rdev, uint64_t src_offset, uint64_t dst_offset, unsigned num_pages, struct radeon_fence *fence); static struct radeon_asic r600_asic = { + .errata = NULL, .init = &r600_init, .fini = &r600_fini, .suspend = &r600_suspend, .resume = &r600_resume, .cp_commit = &r600_cp_commit, + .vram_info = NULL, .vga_set_state = &r600_vga_set_state, .gpu_reset = &r600_gpu_reset, + .mc_init = NULL, + .mc_fini = NULL, + .wb_init = &r600_wb_init, + .wb_fini = &r600_wb_fini, + .gart_enable = NULL, + .gart_disable = NULL, .gart_tlb_flush = &r600_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, + .cp_init = NULL, + .cp_fini = NULL, + .cp_disable = NULL, + .ring_start = NULL, .ring_test = &r600_ring_test, .ring_ib_execute = &r600_ring_ib_execute, + .ib_test = &r600_ib_test, .irq_set = &r600_irq_set, .irq_process = &r600_irq_process, .fence_ring_emit = &r600_fence_ring_emit, @@ -475,17 +611,30 @@ int rv770_resume(struct radeon_device *rdev); int rv770_gpu_reset(struct radeon_device *rdev); static struct radeon_asic rv770_asic = { + .errata = NULL, .init = &rv770_init, .fini = &rv770_fini, .suspend = &rv770_suspend, .resume = &rv770_resume, .cp_commit = &r600_cp_commit, + .vram_info = NULL, .gpu_reset = &rv770_gpu_reset, .vga_set_state = &r600_vga_set_state, + .mc_init = NULL, + .mc_fini = NULL, + .wb_init = &r600_wb_init, + .wb_fini = &r600_wb_fini, + .gart_enable = NULL, + .gart_disable = NULL, .gart_tlb_flush = &r600_pcie_gart_tlb_flush, .gart_set_page = &rs600_gart_set_page, + .cp_init = NULL, + .cp_fini = NULL, + .cp_disable = NULL, + .ring_start = NULL, .ring_test = &r600_ring_test, .ring_ib_execute = &r600_ring_ib_execute, + .ib_test = &r600_ib_test, .irq_set = &r600_irq_set, .irq_process = &r600_irq_process, .fence_ring_emit = &r600_fence_ring_emit, diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c index 34a9b9119518..96e37a6e7ce4 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c @@ -33,50 +33,12 @@ /* * BIOS. */ - -/* If you boot an IGP board with a discrete card as the primary, - * the IGP rom is not accessible via the rom bar as the IGP rom is - * part of the system bios. On boot, the system bios puts a - * copy of the igp rom at the start of vram if a discrete card is - * present. - */ -static bool igp_read_bios_from_vram(struct radeon_device *rdev) -{ - uint8_t __iomem *bios; - resource_size_t vram_base; - resource_size_t size = 256 * 1024; /* ??? */ - - rdev->bios = NULL; - vram_base = drm_get_resource_start(rdev->ddev, 0); - bios = ioremap(vram_base, size); - if (!bios) { - DRM_ERROR("Unable to mmap vram\n"); - return false; - } - - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { - iounmap(bios); - DRM_ERROR("bad rom signature\n"); - return false; - } - rdev->bios = kmalloc(size, GFP_KERNEL); - if (rdev->bios == NULL) { - iounmap(bios); - DRM_ERROR("kmalloc failed\n"); - return false; - } - memcpy(rdev->bios, bios, size); - iounmap(bios); - return true; -} - static bool radeon_read_bios(struct radeon_device *rdev) { uint8_t __iomem *bios; size_t size; rdev->bios = NULL; - /* XXX: some cards may return 0 for rom size? 
ddx has a workaround */ bios = pci_map_rom(rdev->pdev, &size); if (!bios) { return false; @@ -379,9 +341,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) static bool radeon_read_disabled_bios(struct radeon_device *rdev) { - if (rdev->flags & RADEON_IS_IGP) - return igp_read_bios_from_vram(rdev); - else if (rdev->family >= CHIP_RV770) + if (rdev->family >= CHIP_RV770) return r700_read_disabled_bios(rdev); else if (rdev->family >= CHIP_R600) return r600_read_disabled_bios(rdev); @@ -396,12 +356,7 @@ bool radeon_get_bios(struct radeon_device *rdev) bool r; uint16_t tmp; - if (rdev->flags & RADEON_IS_IGP) { - r = igp_read_bios_from_vram(rdev); - if (r == false) - r = radeon_read_bios(rdev); - } else - r = radeon_read_bios(rdev); + r = radeon_read_bios(rdev); if (r == false) { r = radeon_read_disabled_bios(rdev); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_clocks.c b/trunk/drivers/gpu/drm/radeon/radeon_clocks.c index f5c32a766b10..152eef13197a 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_clocks.c @@ -411,7 +411,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_DAC_ALWAYS_ONb); + R300_P2G2CLK_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); } else if (rdev->family >= CHIP_RV350) { tmp = RREG32_PLL(R300_SCLK_CNTL2); @@ -464,7 +464,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_DAC_ALWAYS_ONb); + R300_P2G2CLK_ALWAYS_ONb); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); tmp = RREG32_PLL(RADEON_MCLK_MISC); @@ -654,7 +654,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_DAC_ALWAYS_ONb | + R300_P2G2CLK_ALWAYS_ONb | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); } else if (rdev->family >= CHIP_RV350) { @@ -705,7 +705,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable) R300_PIXCLK_TRANS_ALWAYS_ONb | R300_PIXCLK_TVO_ALWAYS_ONb | R300_P2G2CLK_ALWAYS_ONb | - R300_P2G2CLK_DAC_ALWAYS_ONb | + R300_P2G2CLK_ALWAYS_ONb | R300_DISP_DAC_PIXCLK_DAC2_BLANK_OFF); WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp); } else { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index 3d667031de6e..ec835d56d30a 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -322,6 +322,10 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV380: rdev->asic = &r300_asic; if (rdev->flags & RADEON_IS_PCIE) { + rdev->asic->gart_init = &rv370_pcie_gart_init; + rdev->asic->gart_fini = &rv370_pcie_gart_fini; + rdev->asic->gart_enable = &rv370_pcie_gart_enable; + rdev->asic->gart_disable = &rv370_pcie_gart_disable; rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; } @@ -481,6 +485,7 @@ void radeon_combios_fini(struct radeon_device *rdev) static unsigned int radeon_vga_set_decode(void *cookie, bool state) { struct radeon_device *rdev = cookie; + radeon_vga_set_state(rdev, state); if (state) return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | @@ -488,29 +493,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state) else return 
VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } - -void radeon_agp_disable(struct radeon_device *rdev) -{ - rdev->flags &= ~RADEON_IS_AGP; - if (rdev->family >= CHIP_R600) { - DRM_INFO("Forcing AGP to PCIE mode\n"); - rdev->flags |= RADEON_IS_PCIE; - } else if (rdev->family >= CHIP_RV515 || - rdev->family == CHIP_RV380 || - rdev->family == CHIP_RV410 || - rdev->family == CHIP_R423) { - DRM_INFO("Forcing AGP to PCIE mode\n"); - rdev->flags |= RADEON_IS_PCIE; - rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; - rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; - } else { - DRM_INFO("Forcing AGP to PCI mode\n"); - rdev->flags |= RADEON_IS_PCI; - rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; - rdev->asic->gart_set_page = &r100_pci_gart_set_page; - } -} - /* * Radeon device. */ @@ -549,7 +531,32 @@ int radeon_device_init(struct radeon_device *rdev, } if (radeon_agpmode == -1) { - radeon_agp_disable(rdev); + rdev->flags &= ~RADEON_IS_AGP; + if (rdev->family >= CHIP_R600) { + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + } else if (rdev->family >= CHIP_RV515 || + rdev->family == CHIP_RV380 || + rdev->family == CHIP_RV410 || + rdev->family == CHIP_R423) { + DRM_INFO("Forcing AGP to PCIE mode\n"); + rdev->flags |= RADEON_IS_PCIE; + rdev->asic->gart_init = &rv370_pcie_gart_init; + rdev->asic->gart_fini = &rv370_pcie_gart_fini; + rdev->asic->gart_enable = &rv370_pcie_gart_enable; + rdev->asic->gart_disable = &rv370_pcie_gart_disable; + rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; + } else { + DRM_INFO("Forcing AGP to PCI mode\n"); + rdev->flags |= RADEON_IS_PCI; + rdev->asic->gart_init = &r100_pci_gart_init; + rdev->asic->gart_fini = &r100_pci_gart_fini; + rdev->asic->gart_enable = &r100_pci_gart_enable; + rdev->asic->gart_disable = &r100_pci_gart_disable; + rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart_set_page = &r100_pci_gart_set_page; + } } /* set DMA mask + need_dma32 flags. @@ -581,27 +588,111 @@ int radeon_device_init(struct radeon_device *rdev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); + rdev->new_init_path = false; + r = radeon_init(rdev); + if (r) { + return r; + } + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); if (r) { return -EINVAL; } - r = radeon_init(rdev); - if (r) - return r; + if (!rdev->new_init_path) { + /* Setup errata flags */ + radeon_errata(rdev); + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); - if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) { - /* Acceleration not working on AGP card try again - * with fallback to PCI or PCIE GART - */ - radeon_gpu_reset(rdev); - radeon_fini(rdev); - radeon_agp_disable(rdev); - r = radeon_init(rdev); + /* BIOS*/ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + r = radeon_atombios_init(rdev); + if (r) { + return r; + } + } else { + r = radeon_combios_init(rdev); + if (r) { + return r; + } + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? */ + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. 
posting now...\n"); + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } + } + /* Get clock & vram information */ + radeon_get_clock_info(rdev->ddev); + radeon_vram_info(rdev); + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + return r; + } + + /* Initialize memory controller (also test AGP) */ + r = radeon_mc_init(rdev); + if (r) { + return r; + } + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) { + return r; + } + r = radeon_irq_kms_init(rdev); + if (r) { + return r; + } + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) { + return r; + } + r = radeon_gpu_gart_init(rdev); if (r) return r; + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + r = radeon_gart_enable(rdev); + if (r) + return 0; + r = radeon_gem_init(rdev); + if (r) + return 0; + + /* 1M ring buffer */ + r = radeon_cp_init(rdev, 1024 * 1024); + if (r) + return 0; + r = radeon_wb_init(rdev); + if (r) + DRM_ERROR("radeon: failled initializing WB (%d).\n", r); + r = radeon_ib_pool_init(rdev); + if (r) + return 0; + r = radeon_ib_test(rdev); + if (r) + return 0; + rdev->accel_working = true; } + DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); if (radeon_testing) { radeon_test_moves(rdev); } @@ -615,8 +706,32 @@ void radeon_device_fini(struct radeon_device *rdev) { DRM_INFO("radeon: finishing device.\n"); rdev->shutdown = true; - radeon_fini(rdev); - vga_client_register(rdev->pdev, NULL, NULL, NULL); + /* Order matter so becarefull if you rearrange anythings */ + if (!rdev->new_init_path) { + radeon_ib_pool_fini(rdev); + radeon_cp_fini(rdev); + radeon_wb_fini(rdev); + radeon_gpu_gart_fini(rdev); + radeon_gem_fini(rdev); + radeon_mc_fini(rdev); +#if __OS_HAS_AGP + radeon_agp_fini(rdev); +#endif + radeon_irq_kms_fini(rdev); + vga_client_register(rdev->pdev, NULL, NULL, NULL); + radeon_fence_driver_fini(rdev); + radeon_clocks_fini(rdev); + radeon_object_fini(rdev); + if (rdev->is_atom_bios) { + radeon_atombios_fini(rdev); + } else { + radeon_combios_fini(rdev); + } + kfree(rdev->bios); + rdev->bios = NULL; + } else { + radeon_fini(rdev); + } iounmap(rdev->rmmio); rdev->rmmio = NULL; } @@ -656,7 +771,14 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) radeon_save_bios_scratch_regs(rdev); - radeon_suspend(rdev); + if (!rdev->new_init_path) { + radeon_cp_disable(rdev); + radeon_gart_disable(rdev); + rdev->irq.sw_int = false; + radeon_irq_set(rdev); + } else { + radeon_suspend(rdev); + } /* evict remaining vram memory */ radeon_object_evict_vram(rdev); @@ -675,6 +797,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) int radeon_resume_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; + int r; acquire_console_sem(); pci_set_power_state(dev->pdev, PCI_D0); @@ -684,7 +807,43 @@ int radeon_resume_kms(struct drm_device *dev) return -1; } pci_set_master(dev->pdev); - radeon_resume(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (!rdev->new_init_path) { + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? 
*/ + } + /* post card */ + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + release_console_sem(); + return r; + } + /* Enable IRQ */ + rdev->irq.sw_int = true; + radeon_irq_set(rdev); + /* Initialize GPU Memory Controller */ + r = radeon_mc_init(rdev); + if (r) { + goto out; + } + r = radeon_gart_enable(rdev); + if (r) { + goto out; + } + r = radeon_cp_init(rdev, rdev->cp.ring_size); + if (r) { + goto out; + } + } else { + radeon_resume(rdev); + } +out: radeon_restore_bios_scratch_regs(rdev); fb_set_suspend(rdev->fbdev_info, 0); release_console_sem(); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_display.c b/trunk/drivers/gpu/drm/radeon/radeon_display.c index 3655d91993a6..5d8141b13765 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_display.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_display.c @@ -106,33 +106,24 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc) legacy_crtc_load_lut(crtc); } -/** Sets the color ramps on behalf of fbcon */ +/** Sets the color ramps on behalf of RandR */ void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + if (regno == 0) + DRM_DEBUG("gamma set %d\n", radeon_crtc->crtc_id); radeon_crtc->lut_r[regno] = red >> 6; radeon_crtc->lut_g[regno] = green >> 6; radeon_crtc->lut_b[regno] = blue >> 6; } -/** Gets the color ramps on behalf of fbcon */ -void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno) -{ - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - - *red = radeon_crtc->lut_r[regno] << 6; - *green = radeon_crtc->lut_g[regno] << 6; - *blue = radeon_crtc->lut_b[regno] << 6; -} - static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, uint32_t size) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - int i; + int i, j; if (size != 256) { return; @@ -141,11 +132,23 @@ static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, return; } - /* userspace palettes are always correct as is */ - for (i = 0; i < 256; i++) { - radeon_crtc->lut_r[i] = red[i] >> 6; - radeon_crtc->lut_g[i] = green[i] >> 6; - radeon_crtc->lut_b[i] = blue[i] >> 6; + if (crtc->fb->depth == 16) { + for (i = 0; i < 64; i++) { + if (i <= 31) { + for (j = 0; j < 8; j++) { + radeon_crtc->lut_r[i * 8 + j] = red[i] >> 6; + radeon_crtc->lut_b[i * 8 + j] = blue[i] >> 6; + } + } + for (j = 0; j < 4; j++) + radeon_crtc->lut_g[i * 4 + j] = green[i] >> 6; + } + } else { + for (i = 0; i < 256; i++) { + radeon_crtc->lut_r[i] = red[i] >> 6; + radeon_crtc->lut_g[i] = green[i] >> 6; + radeon_crtc->lut_b[i] = blue[i] >> 6; + } } radeon_crtc_load_lut(crtc); @@ -721,11 +724,7 @@ int radeon_modeset_init(struct radeon_device *rdev) if (ret) { return ret; } - - if (rdev->flags & RADEON_SINGLE_CRTC) - num_crtc = 1; - - /* allocate crtcs */ + /* allocate crtcs - TODO single crtc */ for (i = 0; i < num_crtc; i++) { radeon_crtc_init(rdev->ddev, i); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c index a65ab1a0dad2..621646752cd2 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_encoders.c @@ -1345,7 +1345,6 @@ radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) void radeon_add_atom_encoder(struct drm_device *dev, uint32_t 
encoder_id, uint32_t supported_device) { - struct radeon_device *rdev = dev->dev_private; struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; @@ -1365,10 +1364,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su return; encoder = &radeon_encoder->base; - if (rdev->flags & RADEON_SINGLE_CRTC) - encoder->possible_crtcs = 0x1; - else - encoder->possible_crtcs = 0x3; + encoder->possible_crtcs = 0x3; encoder->possible_clones = 0; radeon_encoder->enc_priv = NULL; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fb.c b/trunk/drivers/gpu/drm/radeon/radeon_fb.c index b38c4c8e2c61..1ba704eedefb 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fb.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fb.c @@ -55,7 +55,6 @@ static struct fb_ops radeonfb_ops = { .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, - .fb_setcmap = drm_fb_helper_setcmap, }; /** @@ -124,7 +123,6 @@ static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bo static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, - .gamma_get = radeon_crtc_fb_gamma_get, }; int radeonfb_create(struct drm_device *dev, @@ -148,15 +146,9 @@ int radeonfb_create(struct drm_device *dev, unsigned long tmp; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; - int crtc_count; mode_cmd.width = surface_width; mode_cmd.height = surface_height; - - /* avivo can't scanout real 24bpp */ - if ((surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) - surface_bpp = 32; - mode_cmd.bpp = surface_bpp; /* need to align pitch with crtc limits */ mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); @@ -225,11 +217,7 @@ int radeonfb_create(struct drm_device *dev, rfbdev = info->par; rfbdev->helper.funcs = &radeon_fb_helper_funcs; rfbdev->helper.dev = dev; - if (rdev->flags & RADEON_SINGLE_CRTC) - crtc_count = 1; - else - crtc_count = 2; - ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count, + ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, RADEONFB_CONN_LIMIT); if (ret) goto out_unref; @@ -246,7 +234,7 @@ int radeonfb_create(struct drm_device *dev, strcpy(info->fix.id, "radeondrmfb"); - drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); + drm_fb_helper_fill_fix(info, fb->pitch); info->flags = FBINFO_DEFAULT; info->fbops = &radeonfb_ops; @@ -321,7 +309,7 @@ int radeon_parse_options(char *options) int radeonfb_probe(struct drm_device *dev) { - return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create); + return drm_fb_helper_single_fb_probe(dev, &radeonfb_create); } int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c index 8e0a8759e428..1841145a7c4f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -83,12 +83,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev) int radeon_irq_kms_init(struct radeon_device *rdev) { int r = 0; - int num_crtc = 2; - if (rdev->flags & RADEON_SINGLE_CRTC) - num_crtc = 1; - - r = drm_vblank_init(rdev->ddev, num_crtc); + r = drm_vblank_init(rdev->ddev, 2); if (r) { return r; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 36410f85d705..2b997a15fb1f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ 
b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1053,7 +1053,6 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = { .mode_set_base = radeon_crtc_set_base, .prepare = radeon_crtc_prepare, .commit = radeon_crtc_commit, - .load_lut = radeon_crtc_load_lut, }; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 6ceb958fd194..b1547f700d73 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -881,7 +881,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, R420_TV_DAC_DACADJ_MASK | R420_TV_DAC_RDACPD | R420_TV_DAC_GDACPD | - R420_TV_DAC_BDACPD | + R420_TV_DAC_GDACPD | R420_TV_DAC_TVENABLE); } else { tv_dac_cntl &= ~(RADEON_TV_DAC_STD_MASK | @@ -889,7 +889,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder, RADEON_TV_DAC_DACADJ_MASK | RADEON_TV_DAC_RDACPD | RADEON_TV_DAC_GDACPD | - RADEON_TV_DAC_BDACPD); + RADEON_TV_DAC_GDACPD); } /* FIXME TV */ @@ -1318,10 +1318,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t return; encoder = &radeon_encoder->base; - if (rdev->flags & RADEON_SINGLE_CRTC) - encoder->possible_crtcs = 0x1; - else - encoder->possible_crtcs = 0x3; + encoder->possible_crtcs = 0x3; encoder->possible_clones = 0; radeon_encoder->enc_priv = NULL; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index e61226817ccf..570a58729daf 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -407,8 +407,6 @@ extern void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, u16 blue, int regno); -extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, - u16 *blue, int regno); struct drm_framebuffer *radeon_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object *obj); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c index 1f056dadc5c2..73af463b7a59 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c @@ -400,9 +400,11 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj, int radeon_object_list_reserve(struct list_head *head) { struct radeon_object_list *lobj; + struct list_head *i; int r; - list_for_each_entry(lobj, head, list){ + list_for_each(i, head) { + lobj = list_entry(i, struct radeon_object_list, list); if (!lobj->robj->pin_count) { r = radeon_object_reserve(lobj->robj, true); if (unlikely(r != 0)) { @@ -418,10 +420,13 @@ int radeon_object_list_reserve(struct list_head *head) void radeon_object_list_unreserve(struct list_head *head) { struct radeon_object_list *lobj; + struct list_head *i; - list_for_each_entry(lobj, head, list) { + list_for_each(i, head) { + lobj = list_entry(i, struct radeon_object_list, list); if (!lobj->robj->pin_count) { radeon_object_unreserve(lobj->robj); + } else { } } } @@ -431,6 +436,7 @@ int radeon_object_list_validate(struct list_head *head, void *fence) struct radeon_object_list *lobj; struct radeon_object *robj; struct radeon_fence *old_fence = NULL; + struct list_head *i; int r; r = radeon_object_list_reserve(head); @@ -438,7 +444,8 @@ int radeon_object_list_validate(struct list_head *head, void *fence) 
radeon_object_list_unreserve(head); return r; } - list_for_each_entry(lobj, head, list) { + list_for_each(i, head) { + lobj = list_entry(i, struct radeon_object_list, list); robj = lobj->robj; if (!robj->pin_count) { if (lobj->wdomain) { @@ -475,8 +482,10 @@ void radeon_object_list_unvalidate(struct list_head *head) { struct radeon_object_list *lobj; struct radeon_fence *old_fence = NULL; + struct list_head *i; - list_for_each_entry(lobj, head, list) { + list_for_each(i, head) { + lobj = list_entry(i, struct radeon_object_list, list); old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj; lobj->robj->tobj.sync_obj = NULL; if (old_fence) { diff --git a/trunk/drivers/gpu/drm/radeon/rs100d.h b/trunk/drivers/gpu/drm/radeon/rs100d.h deleted file mode 100644 index 48a913a06cfd..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rs100d.h +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RS100D_H__ -#define __RS100D_H__ - -/* Registers */ -#define R_00015C_NB_TOM 0x00015C -#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_00015C_MC_FB_START 0xFFFF0000 -#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) -#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_00015C_MC_FB_TOP 0x0000FFFF - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rs400.c b/trunk/drivers/gpu/drm/radeon/rs400.c index a769c296f6a6..a3fbdad938c7 100644 --- a/trunk/drivers/gpu/drm/radeon/rs400.c +++ b/trunk/drivers/gpu/drm/radeon/rs400.c @@ -27,12 +27,27 @@ */ #include #include +#include "radeon_reg.h" #include "radeon.h" -#include "rs400d.h" -/* This files gather functions specifics to : rs400,rs480 */ -static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); +/* rs400,rs480 depends on : */ +void r100_hdp_reset(struct radeon_device *rdev); +void r100_mc_disable_clients(struct radeon_device *rdev); +int r300_mc_wait_for_idle(struct radeon_device *rdev); +void r420_pipes_init(struct radeon_device *rdev); +/* This files gather functions specifics to : + * rs400,rs480 + * + * Some of these functions might be used by newer ASICs. + */ +void rs400_gpu_init(struct radeon_device *rdev); +int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev); + + +/* + * GART functions. 
+ */ void rs400_gart_adjust_size(struct radeon_device *rdev) { /* Check gart size */ @@ -223,6 +238,61 @@ int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } + +/* + * MC functions. + */ +int rs400_mc_init(struct radeon_device *rdev) +{ + uint32_t tmp; + int r; + + if (r100_debugfs_rbbm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + } + + rs400_gpu_init(rdev); + rs400_gart_disable(rdev); + rdev->mc.gtt_location = rdev->mc.mc_vram_size; + rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); + rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); + r = radeon_mc_setup(rdev); + if (r) { + return r; + } + + r100_mc_disable_clients(rdev); + if (r300_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. Bad things might happen.\n"); + } + + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); + tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); + WREG32(RADEON_MC_FB_LOCATION, tmp); + tmp = RREG32(RADEON_HOST_PATH_CNTL) | RADEON_HP_LIN_RD_CACHE_DIS; + WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE); + (void)RREG32(RADEON_HOST_PATH_CNTL); + WREG32(RADEON_HOST_PATH_CNTL, tmp); + (void)RREG32(RADEON_HOST_PATH_CNTL); + + return 0; +} + +void rs400_mc_fini(struct radeon_device *rdev) +{ +} + + +/* + * Global GPU functions + */ +void rs400_errata(struct radeon_device *rdev) +{ + rdev->pll_errata = 0; +} + void rs400_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs400 ? */ @@ -235,6 +305,10 @@ void rs400_gpu_init(struct radeon_device *rdev) } } + +/* + * VRAM info. + */ void rs400_vram_info(struct radeon_device *rdev) { rs400_gart_adjust_size(rdev); @@ -245,6 +319,10 @@ void rs400_vram_info(struct radeon_device *rdev) r100_vram_init_sizes(rdev); } + +/* + * Indirect registers accessor + */ uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; @@ -262,6 +340,10 @@ void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) WREG32(RS480_NB_MC_INDEX, 0xff); } + +/* + * Debugfs info + */ #if defined(CONFIG_DEBUG_FS) static int rs400_debugfs_gart_info(struct seq_file *m, void *data) { @@ -337,7 +419,7 @@ static struct drm_info_list rs400_gart_info_list[] = { }; #endif -static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) +int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) { #if defined(CONFIG_DEBUG_FS) return radeon_debugfs_add_files(rdev, rs400_gart_info_list, 1); @@ -345,188 +427,3 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev) return 0; #endif } - -static int rs400_mc_init(struct radeon_device *rdev) -{ - int r; - u32 tmp; - - /* Setup GPU memory space */ - tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM)); - rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16; - rdev->mc.gtt_location = 0xFFFFFFFFUL; - r = radeon_mc_setup(rdev); - if (r) - return r; - return 0; -} - -void rs400_mc_program(struct radeon_device *rdev) -{ - struct r100_mc_save save; - - /* Stops all mc clients */ - r100_mc_stop(rdev, &save); - - /* Wait for mc idle */ - if (r300_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); - WREG32(R_000148_MC_FB_LOCATION, - S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | - S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); - - r100_mc_resume(rdev, &save); -} - -static int rs400_startup(struct radeon_device *rdev) -{ - 
int r; - - rs400_mc_program(rdev); - /* Resume clock */ - r300_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) */ - rs400_gpu_init(rdev); - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - r = rs400_gart_enable(rdev); - if (r) - return r; - /* Enable IRQ */ - rdev->irq.sw_int = true; - r100_irq_set(rdev); - /* 1M ring buffer */ - r = r100_cp_init(rdev, 1024 * 1024); - if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); - return r; - } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - r = r100_ib_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); - return r; - } - return 0; -} - -int rs400_resume(struct radeon_device *rdev) -{ - /* Make sur GART are not working */ - rs400_gart_disable(rdev); - /* Resume clock before doing reset */ - r300_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* post */ - radeon_combios_asic_init(rdev->ddev); - /* Resume clock after posting */ - r300_clock_startup(rdev); - return rs400_startup(rdev); -} - -int rs400_suspend(struct radeon_device *rdev) -{ - r100_cp_disable(rdev); - r100_wb_disable(rdev); - r100_irq_disable(rdev); - rs400_gart_disable(rdev); - return 0; -} - -void rs400_fini(struct radeon_device *rdev) -{ - rs400_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - radeon_gem_fini(rdev); - rs400_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); - radeon_atombios_fini(rdev); - kfree(rdev->bios); - rdev->bios = NULL; -} - -int rs400_init(struct radeon_device *rdev) -{ - int r; - - /* Disable VGA */ - r100_vga_render_disable(rdev); - /* Initialize scratch registers */ - radeon_scratch_init(rdev); - /* Initialize surface registers */ - radeon_surface_init(rdev); - /* TODO: disable VGA need to use VGA request */ - /* BIOS*/ - if (!radeon_get_bios(rdev)) { - if (ASIC_IS_AVIVO(rdev)) - return -EINVAL; - } - if (rdev->is_atom_bios) { - dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n"); - return -EINVAL; - } else { - r = radeon_combios_init(rdev); - if (r) - return r; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - radeon_combios_asic_init(rdev->ddev); - } - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); - /* Get vram informations */ - rs400_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = rs400_mc_init(rdev); - if (r) - return r; - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) - return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - /* Memory manager */ - r = radeon_object_init(rdev); - if (r) - return r; - r = rs400_gart_init(rdev); - if (r) - return r; - r300_set_reg_safe(rdev); - rdev->accel_working = true; - r = rs400_startup(rdev); - if (r) { - /* Somethings want wront with the accel init stop accel */ - dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs400_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - rs400_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - rdev->accel_working = false; - } - return 0; -} diff --git a/trunk/drivers/gpu/drm/radeon/rs400d.h b/trunk/drivers/gpu/drm/radeon/rs400d.h deleted file mode 100644 index 6d8bac58ced9..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rs400d.h +++ /dev/null @@ -1,160 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RS400D_H__ -#define __RS400D_H__ - -/* Registers */ -#define R_000148_MC_FB_LOCATION 0x000148 -#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_000148_MC_FB_START 0xFFFF0000 -#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) -#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_000148_MC_FB_TOP 0x0000FFFF -#define R_00015C_NB_TOM 0x00015C -#define S_00015C_MC_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_00015C_MC_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_00015C_MC_FB_START 0xFFFF0000 -#define S_00015C_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) -#define G_00015C_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_00015C_MC_FB_TOP 0x0000FFFF -#define R_0007C0_CP_STAT 0x0007C0 -#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) -#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) -#define C_0007C0_MRU_BUSY 0xFFFFFFFE -#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) -#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) -#define C_0007C0_MWU_BUSY 0xFFFFFFFD -#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) -#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) -#define C_0007C0_RSIU_BUSY 0xFFFFFFFB -#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) -#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) -#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 -#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) -#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) -#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF -#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) -#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) -#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF -#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) -#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) -#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF -#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) -#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) -#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF -#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) -#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) -#define C_0007C0_CSI_BUSY 0xFFFFDFFF -#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) -#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) -#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF -#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) -#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) -#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF -#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) -#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) -#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF -#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) -#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) -#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF -#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) -#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) -#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF -#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) -#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) -#define C_0007C0_CP_BUSY 0x7FFFFFFF -#define R_000E40_RBBM_STATUS 0x000E40 -#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) -#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) -#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 -#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) -#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) -#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF -#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) -#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) -#define 
C_000E40_CPRQ_ON_RBB 0xFFFFFDFF -#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) -#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) -#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF -#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) -#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) -#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF -#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) -#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) -#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF -#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) -#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) -#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF -#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) -#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) -#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF -#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) -#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) -#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF -#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) -#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) -#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF -#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) -#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) -#define C_000E40_E2_BUSY 0xFFFDFFFF -#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) -#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) -#define C_000E40_RB2D_BUSY 0xFFFBFFFF -#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) -#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) -#define C_000E40_RB3D_BUSY 0xFFF7FFFF -#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) -#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) -#define C_000E40_VAP_BUSY 0xFFEFFFFF -#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) -#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) -#define C_000E40_RE_BUSY 0xFFDFFFFF -#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) -#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) -#define C_000E40_TAM_BUSY 0xFFBFFFFF -#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) -#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) -#define C_000E40_TDM_BUSY 0xFF7FFFFF -#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) -#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) -#define C_000E40_PB_BUSY 0xFEFFFFFF -#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) -#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) -#define C_000E40_TIM_BUSY 0xFDFFFFFF -#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) -#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) -#define C_000E40_GA_BUSY 0xFBFFFFFF -#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) -#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) -#define C_000E40_CBA2D_BUSY 0xF7FFFFFF -#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) -#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) -#define C_000E40_GUI_ACTIVE 0x7FFFFFFF - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rs600.c b/trunk/drivers/gpu/drm/radeon/rs600.c index 10dfa78762da..4a4fe1cb131c 100644 --- a/trunk/drivers/gpu/drm/radeon/rs600.c +++ b/trunk/drivers/gpu/drm/radeon/rs600.c @@ -25,26 +25,27 @@ * Alex Deucher * Jerome Glisse */ -/* RS600 / Radeon X1250/X1270 integrated GPU - * - * This file gather function specific to RS600 which is the IGP of - * the X1250/X1270 family supporting intel CPU (while RS690/RS740 - * is the X1250/X1270 supporting AMD CPU). The display engine are - * the avivo one, bios is an atombios, 3D block are the one of the - * R4XX family. The GART is different from the RS400 one and is very - * close to the one of the R600 family (R600 likely being an evolution - * of the RS600 GART block). 
- */ #include "drmP.h" +#include "radeon_reg.h" #include "radeon.h" -#include "atom.h" -#include "rs600d.h" #include "rs600_reg_safe.h" +/* rs600 depends on : */ +void r100_hdp_reset(struct radeon_device *rdev); +int r100_gui_wait_for_idle(struct radeon_device *rdev); +int r300_mc_wait_for_idle(struct radeon_device *rdev); +void r420_pipes_init(struct radeon_device *rdev); + +/* This files gather functions specifics to : + * rs600 + * + * Some of these functions might be used by newer ASICs. + */ void rs600_gpu_init(struct radeon_device *rdev); int rs600_mc_wait_for_idle(struct radeon_device *rdev); + /* * GART. */ @@ -52,18 +53,18 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) { uint32_t tmp; - tmp = RREG32_MC(R_000100_MC_PT0_CNTL); - tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; - WREG32_MC(R_000100_MC_PT0_CNTL, tmp); + tmp = RREG32_MC(RS600_MC_PT0_CNTL); + tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); + WREG32_MC(RS600_MC_PT0_CNTL, tmp); - tmp = RREG32_MC(R_000100_MC_PT0_CNTL); - tmp |= S_000100_INVALIDATE_ALL_L1_TLBS(1) & S_000100_INVALIDATE_L2_CACHE(1); - WREG32_MC(R_000100_MC_PT0_CNTL, tmp); + tmp = RREG32_MC(RS600_MC_PT0_CNTL); + tmp |= RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE; + WREG32_MC(RS600_MC_PT0_CNTL, tmp); - tmp = RREG32_MC(R_000100_MC_PT0_CNTL); - tmp &= C_000100_INVALIDATE_ALL_L1_TLBS & C_000100_INVALIDATE_L2_CACHE; - WREG32_MC(R_000100_MC_PT0_CNTL, tmp); - tmp = RREG32_MC(R_000100_MC_PT0_CNTL); + tmp = RREG32_MC(RS600_MC_PT0_CNTL); + tmp &= ~(RS600_INVALIDATE_ALL_L1_TLBS | RS600_INVALIDATE_L2_CACHE); + WREG32_MC(RS600_MC_PT0_CNTL, tmp); + tmp = RREG32_MC(RS600_MC_PT0_CNTL); } int rs600_gart_init(struct radeon_device *rdev) @@ -85,7 +86,7 @@ int rs600_gart_init(struct radeon_device *rdev) int rs600_gart_enable(struct radeon_device *rdev) { - u32 tmp; + uint32_t tmp; int r, i; if (rdev->gart.table.vram.robj == NULL) { @@ -95,50 +96,46 @@ int rs600_gart_enable(struct radeon_device *rdev) r = radeon_gart_table_vram_pin(rdev); if (r) return r; - /* Enable bus master */ - tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS; - WREG32(R_00004C_BUS_CNTL, tmp); /* FIXME: setup default page */ - WREG32_MC(R_000100_MC_PT0_CNTL, - (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) | - S_000100_EFFECTIVE_L2_QUEUE_SIZE(6))); + WREG32_MC(RS600_MC_PT0_CNTL, + (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | + RS600_EFFECTIVE_L2_QUEUE_SIZE(6))); for (i = 0; i < 19; i++) { - WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i, - S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) | - S_00016C_SYSTEM_ACCESS_MODE_MASK( - V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) | - S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS( - V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) | - S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) | - S_00016C_ENABLE_FRAGMENT_PROCESSING(1) | - S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1)); + WREG32_MC(RS600_MC_PT0_CLIENT0_CNTL + i, + (RS600_ENABLE_TRANSLATION_MODE_OVERRIDE | + RS600_SYSTEM_ACCESS_MODE_IN_SYS | + RS600_SYSTEM_APERTURE_UNMAPPED_ACCESS_DEFAULT_PAGE | + RS600_EFFECTIVE_L1_CACHE_SIZE(3) | + RS600_ENABLE_FRAGMENT_PROCESSING | + RS600_EFFECTIVE_L1_QUEUE_SIZE(3))); } /* System context map to GART space */ - WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start); - WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end); + WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_location); + tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + WREG32_MC(RS600_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, tmp); /* enable first context */ - 
WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start); - WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end); - WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL, - S_000102_ENABLE_PAGE_TABLE(1) | - S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT)); + WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_location); + tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_END_ADDR, tmp); + WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL, + (RS600_ENABLE_PAGE_TABLE | RS600_PAGE_TABLE_TYPE_FLAT)); /* disable all other contexts */ for (i = 1; i < 8; i++) { - WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0); + WREG32_MC(RS600_MC_PT0_CONTEXT0_CNTL + i, 0); } /* setup the page table */ - WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, - rdev->gart.table_addr); - WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); + WREG32_MC(RS600_MC_PT0_CONTEXT0_FLAT_BASE_ADDR, + rdev->gart.table_addr); + WREG32_MC(RS600_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0); /* enable page tables */ - tmp = RREG32_MC(R_000100_MC_PT0_CNTL); - WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1))); - tmp = RREG32_MC(R_000009_MC_CNTL1); - WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1))); + tmp = RREG32_MC(RS600_MC_PT0_CNTL); + WREG32_MC(RS600_MC_PT0_CNTL, (tmp | RS600_ENABLE_PT)); + tmp = RREG32_MC(RS600_MC_CNTL1); + WREG32_MC(RS600_MC_CNTL1, (tmp | RS600_ENABLE_PAGE_TABLES)); rs600_gart_tlb_flush(rdev); rdev->gart.ready = true; return 0; @@ -149,9 +146,10 @@ void rs600_gart_disable(struct radeon_device *rdev) uint32_t tmp; /* FIXME: disable out of gart access */ - WREG32_MC(R_000100_MC_PT0_CNTL, 0); - tmp = RREG32_MC(R_000009_MC_CNTL1); - WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES); + WREG32_MC(RS600_MC_PT0_CNTL, 0); + tmp = RREG32_MC(RS600_MC_CNTL1); + tmp &= ~RS600_ENABLE_PAGE_TABLES; + WREG32_MC(RS600_MC_CNTL1, tmp); if (rdev->gart.table.vram.robj) { radeon_object_kunmap(rdev->gart.table.vram.robj); radeon_object_unpin(rdev->gart.table.vram.robj); @@ -185,61 +183,129 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } + +/* + * MC. + */ +void rs600_mc_disable_clients(struct radeon_device *rdev) +{ + unsigned tmp; + + if (r100_gui_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait GUI idle while " + "programming pipes. 
Bad things might happen.\n"); + } + + rv515_vga_render_disable(rdev); + + tmp = RREG32(AVIVO_D1VGA_CONTROL); + WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); + tmp = RREG32(AVIVO_D2VGA_CONTROL); + WREG32(AVIVO_D2VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); + + tmp = RREG32(AVIVO_D1CRTC_CONTROL); + WREG32(AVIVO_D1CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); + tmp = RREG32(AVIVO_D2CRTC_CONTROL); + WREG32(AVIVO_D2CRTC_CONTROL, tmp & ~AVIVO_CRTC_EN); + + /* make sure all previous write got through */ + tmp = RREG32(AVIVO_D2CRTC_CONTROL); + + mdelay(1); +} + +int rs600_mc_init(struct radeon_device *rdev) +{ + uint32_t tmp; + int r; + + if (r100_debugfs_rbbm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + } + + rs600_gpu_init(rdev); + rs600_gart_disable(rdev); + + /* Setup GPU memory space */ + rdev->mc.vram_location = 0xFFFFFFFFUL; + rdev->mc.gtt_location = 0xFFFFFFFFUL; + r = radeon_mc_setup(rdev); + if (r) { + return r; + } + + /* Program GPU memory space */ + /* Enable bus master */ + tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS; + WREG32(RADEON_BUS_CNTL, tmp); + /* FIXME: What does AGP means for such chipset ? */ + WREG32_MC(RS600_MC_AGP_LOCATION, 0x0FFFFFFF); + /* FIXME: are this AGP reg in indirect MC range ? */ + WREG32_MC(RS600_MC_AGP_BASE, 0); + WREG32_MC(RS600_MC_AGP_BASE_2, 0); + rs600_mc_disable_clients(rdev); + if (rs600_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. Bad things might happen.\n"); + } + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); + tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); + WREG32_MC(RS600_MC_FB_LOCATION, tmp); + WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); + return 0; +} + +void rs600_mc_fini(struct radeon_device *rdev) +{ +} + + +/* + * Interrupts + */ int rs600_irq_set(struct radeon_device *rdev) { uint32_t tmp = 0; uint32_t mode_int = 0; if (rdev->irq.sw_int) { - tmp |= S_000040_SW_INT_EN(1); + tmp |= RADEON_SW_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0]) { - mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1); + mode_int |= AVIVO_D1MODE_INT_MASK; } if (rdev->irq.crtc_vblank_int[1]) { - mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1); + mode_int |= AVIVO_D2MODE_INT_MASK; } - WREG32(R_000040_GEN_INT_CNTL, tmp); - WREG32(R_006540_DxMODE_INT_MASK, mode_int); + WREG32(RADEON_GEN_INT_CNTL, tmp); + WREG32(AVIVO_DxMODE_INT_MASK, mode_int); return 0; } static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_int) { - uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS); - uint32_t irq_mask = ~C_000044_SW_INT; - - if (G_000044_DISPLAY_INT_STAT(irqs)) { - *r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS); - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(*r500_disp_int)) { - WREG32(R_006534_D1MODE_VBLANK_STATUS, - S_006534_D1MODE_VBLANK_ACK(1)); + uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); + uint32_t irq_mask = RADEON_SW_INT_TEST; + + if (irqs & AVIVO_DISPLAY_INT_STATUS) { + *r500_disp_int = RREG32(AVIVO_DISP_INTERRUPT_STATUS); + if (*r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { + WREG32(AVIVO_D1MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); } - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(*r500_disp_int)) { - WREG32(R_006D34_D2MODE_VBLANK_STATUS, - S_006D34_D2MODE_VBLANK_ACK(1)); + if (*r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { + WREG32(AVIVO_D2MODE_VBLANK_STATUS, AVIVO_VBLANK_ACK); } } else { *r500_disp_int = 0; } if (irqs) { - WREG32(R_000044_GEN_INT_STATUS, 
irqs); + WREG32(RADEON_GEN_INT_STATUS, irqs); } return irqs & irq_mask; } -void rs600_irq_disable(struct radeon_device *rdev) -{ - u32 tmp; - - WREG32(R_000040_GEN_INT_CNTL, 0); - WREG32(R_006540_DxMODE_INT_MASK, 0); - /* Wait and acknowledge irq */ - mdelay(1); - rs600_irq_ack(rdev, &tmp); -} - int rs600_irq_process(struct radeon_device *rdev) { uint32_t status; @@ -251,13 +317,16 @@ int rs600_irq_process(struct radeon_device *rdev) } while (status || r500_disp_int) { /* SW interrupt */ - if (G_000040_SW_INT_EN(status)) + if (status & RADEON_SW_INT_TEST) { radeon_fence_process(rdev); + } /* Vertical blank interrupts */ - if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) + if (r500_disp_int & AVIVO_D1_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 0); - if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) + } + if (r500_disp_int & AVIVO_D2_VBLANK_INTERRUPT) { drm_handle_vblank(rdev->ddev, 1); + } status = rs600_irq_ack(rdev, &r500_disp_int); } return IRQ_HANDLED; @@ -266,34 +335,53 @@ int rs600_irq_process(struct radeon_device *rdev) u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc) { if (crtc == 0) - return RREG32(R_0060A4_D1CRTC_STATUS_FRAME_COUNT); + return RREG32(AVIVO_D1CRTC_FRAME_COUNT); else - return RREG32(R_0068A4_D2CRTC_STATUS_FRAME_COUNT); + return RREG32(AVIVO_D2CRTC_FRAME_COUNT); } + +/* + * Global GPU functions + */ int rs600_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; + uint32_t tmp; for (i = 0; i < rdev->usec_timeout; i++) { - if (G_000000_MC_IDLE(RREG32_MC(R_000000_MC_STATUS))) + /* read MC_STATUS */ + tmp = RREG32_MC(RS600_MC_STATUS); + if (tmp & RS600_MC_STATUS_IDLE) { return 0; - udelay(1); + } + DRM_UDELAY(1); } return -1; } +void rs600_errata(struct radeon_device *rdev) +{ + rdev->pll_errata = 0; +} + void rs600_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs600 ? */ r100_hdp_reset(rdev); + rv515_vga_render_disable(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); - /* Wait for mc idle */ - if (rs600_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); + if (rs600_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. Bad things might happen.\n"); + } } + +/* + * VRAM info. + */ void rs600_vram_info(struct radeon_device *rdev) { /* FIXME: to do or is these values sane ? */ @@ -306,206 +394,31 @@ void rs600_bandwidth_update(struct radeon_device *rdev) /* FIXME: implement, should this be like rs690 ? 
*/ } + +/* + * Indirect registers accessor + */ uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg) { - WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | - S_000070_MC_IND_CITF_ARB0(1)); - return RREG32(R_000074_MC_IND_DATA); -} + uint32_t r; -void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) -{ - WREG32(R_000070_MC_IND_INDEX, S_000070_MC_IND_ADDR(reg) | - S_000070_MC_IND_CITF_ARB0(1) | S_000070_MC_IND_WR_EN(1)); - WREG32(R_000074_MC_IND_DATA, v); + WREG32(RS600_MC_INDEX, + ((reg & RS600_MC_ADDR_MASK) | RS600_MC_IND_CITF_ARB0)); + r = RREG32(RS600_MC_DATA); + return r; } -void rs600_debugfs(struct radeon_device *rdev) +void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - if (r100_debugfs_rbbm_init(rdev)) - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + WREG32(RS600_MC_INDEX, + RS600_MC_IND_WR_EN | RS600_MC_IND_CITF_ARB0 | + ((reg) & RS600_MC_ADDR_MASK)); + WREG32(RS600_MC_DATA, v); } -void rs600_set_safe_registers(struct radeon_device *rdev) +int rs600_init(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = rs600_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rs600_reg_safe_bm); -} - -static void rs600_mc_program(struct radeon_device *rdev) -{ - struct rv515_mc_save save; - - /* Stops all mc clients */ - rv515_mc_stop(rdev, &save); - - /* Wait for mc idle */ - if (rs600_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); - - /* FIXME: What does AGP means for such chipset ? */ - WREG32_MC(R_000005_MC_AGP_LOCATION, 0x0FFFFFFF); - WREG32_MC(R_000006_AGP_BASE, 0); - WREG32_MC(R_000007_AGP_BASE_2, 0); - /* Program MC */ - WREG32_MC(R_000004_MC_FB_LOCATION, - S_000004_MC_FB_START(rdev->mc.vram_start >> 16) | - S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16)); - WREG32(R_000134_HDP_FB_LOCATION, - S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); - - rv515_mc_resume(rdev, &save); -} - -static int rs600_startup(struct radeon_device *rdev) -{ - int r; - - rs600_mc_program(rdev); - /* Resume clock */ - rv515_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) */ - rs600_gpu_init(rdev); - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - r = rs600_gart_enable(rdev); - if (r) - return r; - /* Enable IRQ */ - rdev->irq.sw_int = true; - rs600_irq_set(rdev); - /* 1M ring buffer */ - r = r100_cp_init(rdev, 1024 * 1024); - if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); - return r; - } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - r = r100_ib_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); - return r; - } - return 0; -} - -int rs600_resume(struct radeon_device *rdev) -{ - /* Make sur GART are not working */ - rs600_gart_disable(rdev); - /* Resume clock before doing reset */ - rv515_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* post */ - atom_asic_init(rdev->mode_info.atom_context); - /* Resume clock after posting */ - rv515_clock_startup(rdev); - return rs600_startup(rdev); -} - -int rs600_suspend(struct radeon_device *rdev) -{ - r100_cp_disable(rdev); - r100_wb_disable(rdev); - rs600_irq_disable(rdev); - rs600_gart_disable(rdev); - return 0; -} - -void rs600_fini(struct radeon_device *rdev) -{ - rs600_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - radeon_gem_fini(rdev); - rs600_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); - radeon_atombios_fini(rdev); - kfree(rdev->bios); - rdev->bios = NULL; -} - -int rs600_init(struct radeon_device *rdev) -{ - int r; - - /* Disable VGA */ - rv515_vga_render_disable(rdev); - /* Initialize scratch registers */ - radeon_scratch_init(rdev); - /* Initialize surface registers */ - radeon_surface_init(rdev); - /* BIOS */ - if (!radeon_get_bios(rdev)) { - if (ASIC_IS_AVIVO(rdev)) - return -EINVAL; - } - if (rdev->is_atom_bios) { - r = radeon_atombios_init(rdev); - if (r) - return r; - } else { - dev_err(rdev->dev, "Expecting atombios for RS600 GPU\n"); - return -EINVAL; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. posting now...\n"); - atom_asic_init(rdev->mode_info.atom_context); - } - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); - /* Get vram informations */ - rs600_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) - return r; - rs600_debugfs(rdev); - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) - return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - /* Memory manager */ - r = radeon_object_init(rdev); - if (r) - return r; - r = rs600_gart_init(rdev); - if (r) - return r; - rs600_set_safe_registers(rdev); - rdev->accel_working = true; - r = rs600_startup(rdev); - if (r) { - /* Somethings want wront with the accel init stop accel */ - dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs600_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - rs600_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - rdev->accel_working = false; - } return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/rs600d.h b/trunk/drivers/gpu/drm/radeon/rs600d.h deleted file mode 100644 index 81308924859a..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rs600d.h +++ /dev/null @@ -1,470 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RS600D_H__ -#define __RS600D_H__ - -/* Registers */ -#define R_000040_GEN_INT_CNTL 0x000040 -#define S_000040_DISPLAY_INT_STATUS(x) (((x) & 0x1) << 0) -#define G_000040_DISPLAY_INT_STATUS(x) (((x) >> 0) & 0x1) -#define C_000040_DISPLAY_INT_STATUS 0xFFFFFFFE -#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) -#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) -#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF -#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) -#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) -#define C_000040_CRTC2_VSYNC 0xFFFFFFBF -#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) -#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) -#define C_000040_SNAPSHOT2 0xFFFFFF7F -#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) -#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) -#define C_000040_CRTC2_VBLANK 0xFFFFFDFF -#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) -#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) -#define C_000040_FP2_DETECT 0xFFFFFBFF -#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) -#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) -#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF -#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) -#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) -#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF -#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14) -#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1) -#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF -#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15) -#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1) -#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF -#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17) -#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1) -#define C_000040_I2C_INT_EN 0xFFFDFFFF -#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19) -#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1) -#define C_000040_GUI_IDLE 0xFFF7FFFF -#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24) -#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1) -#define C_000040_VIPH_INT_EN 0xFEFFFFFF -#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25) -#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1) -#define C_000040_SW_INT_EN 0xFDFFFFFF -#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27) -#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1) -#define C_000040_GEYSERVILLE 0xF7FFFFFF 
-#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28) -#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1) -#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF -#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29) -#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1) -#define C_000040_DVI_I2C_INT 0xDFFFFFFF -#define S_000040_GUIDMA(x) (((x) & 0x1) << 30) -#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1) -#define C_000040_GUIDMA 0xBFFFFFFF -#define S_000040_VIDDMA(x) (((x) & 0x1) << 31) -#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1) -#define C_000040_VIDDMA 0x7FFFFFFF -#define R_000044_GEN_INT_STATUS 0x000044 -#define S_000044_DISPLAY_INT_STAT(x) (((x) & 0x1) << 0) -#define G_000044_DISPLAY_INT_STAT(x) (((x) >> 0) & 0x1) -#define C_000044_DISPLAY_INT_STAT 0xFFFFFFFE -#define S_000044_VGA_INT_STAT(x) (((x) & 0x1) << 1) -#define G_000044_VGA_INT_STAT(x) (((x) >> 1) & 0x1) -#define C_000044_VGA_INT_STAT 0xFFFFFFFD -#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8) -#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1) -#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF -#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12) -#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1) -#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF -#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13) -#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1) -#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF -#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14) -#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1) -#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF -#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15) -#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1) -#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF -#define S_000044_MC_PROBE_FAULT_STAT(x) (((x) & 0x1) << 16) -#define G_000044_MC_PROBE_FAULT_STAT(x) (((x) >> 16) & 0x1) -#define C_000044_MC_PROBE_FAULT_STAT 0xFFFEFFFF -#define S_000044_I2C_INT(x) (((x) & 0x1) << 17) -#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1) -#define C_000044_I2C_INT 0xFFFDFFFF -#define S_000044_SCRATCH_INT_STAT(x) (((x) & 0x1) << 18) -#define G_000044_SCRATCH_INT_STAT(x) (((x) >> 18) & 0x1) -#define C_000044_SCRATCH_INT_STAT 0xFFFBFFFF -#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19) -#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1) -#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF -#define S_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) & 0x1) << 20) -#define G_000044_ATI_OVERDRIVE_INT_STAT(x) (((x) >> 20) & 0x1) -#define C_000044_ATI_OVERDRIVE_INT_STAT 0xFFEFFFFF -#define S_000044_MC_PROTECTION_FAULT_STAT(x) (((x) & 0x1) << 21) -#define G_000044_MC_PROTECTION_FAULT_STAT(x) (((x) >> 21) & 0x1) -#define C_000044_MC_PROTECTION_FAULT_STAT 0xFFDFFFFF -#define S_000044_RBBM_READ_INT_STAT(x) (((x) & 0x1) << 22) -#define G_000044_RBBM_READ_INT_STAT(x) (((x) >> 22) & 0x1) -#define C_000044_RBBM_READ_INT_STAT 0xFFBFFFFF -#define S_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) & 0x1) << 23) -#define G_000044_CB_CONTEXT_SWITCH_STAT(x) (((x) >> 23) & 0x1) -#define C_000044_CB_CONTEXT_SWITCH_STAT 0xFF7FFFFF -#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24) -#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1) -#define C_000044_VIPH_INT 0xFEFFFFFF -#define S_000044_SW_INT(x) (((x) & 0x1) << 25) -#define G_000044_SW_INT(x) (((x) >> 25) & 0x1) -#define C_000044_SW_INT 0xFDFFFFFF -#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26) -#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1) -#define C_000044_SW_INT_SET 0xFBFFFFFF -#define S_000044_IDCT_INT_STAT(x) (((x) & 0x1) << 27) -#define G_000044_IDCT_INT_STAT(x) (((x) >> 27) & 
0x1) -#define C_000044_IDCT_INT_STAT 0xF7FFFFFF -#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30) -#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1) -#define C_000044_GUIDMA_STAT 0xBFFFFFFF -#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31) -#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1) -#define C_000044_VIDDMA_STAT 0x7FFFFFFF -#define R_00004C_BUS_CNTL 0x00004C -#define S_00004C_BUS_MASTER_DIS(x) (((x) & 0x1) << 14) -#define G_00004C_BUS_MASTER_DIS(x) (((x) >> 14) & 0x1) -#define C_00004C_BUS_MASTER_DIS 0xFFFFBFFF -#define S_00004C_BUS_MSI_REARM(x) (((x) & 0x1) << 20) -#define G_00004C_BUS_MSI_REARM(x) (((x) >> 20) & 0x1) -#define C_00004C_BUS_MSI_REARM 0xFFEFFFFF -#define R_000070_MC_IND_INDEX 0x000070 -#define S_000070_MC_IND_ADDR(x) (((x) & 0xFFFF) << 0) -#define G_000070_MC_IND_ADDR(x) (((x) >> 0) & 0xFFFF) -#define C_000070_MC_IND_ADDR 0xFFFF0000 -#define S_000070_MC_IND_SEQ_RBS_0(x) (((x) & 0x1) << 16) -#define G_000070_MC_IND_SEQ_RBS_0(x) (((x) >> 16) & 0x1) -#define C_000070_MC_IND_SEQ_RBS_0 0xFFFEFFFF -#define S_000070_MC_IND_SEQ_RBS_1(x) (((x) & 0x1) << 17) -#define G_000070_MC_IND_SEQ_RBS_1(x) (((x) >> 17) & 0x1) -#define C_000070_MC_IND_SEQ_RBS_1 0xFFFDFFFF -#define S_000070_MC_IND_SEQ_RBS_2(x) (((x) & 0x1) << 18) -#define G_000070_MC_IND_SEQ_RBS_2(x) (((x) >> 18) & 0x1) -#define C_000070_MC_IND_SEQ_RBS_2 0xFFFBFFFF -#define S_000070_MC_IND_SEQ_RBS_3(x) (((x) & 0x1) << 19) -#define G_000070_MC_IND_SEQ_RBS_3(x) (((x) >> 19) & 0x1) -#define C_000070_MC_IND_SEQ_RBS_3 0xFFF7FFFF -#define S_000070_MC_IND_AIC_RBS(x) (((x) & 0x1) << 20) -#define G_000070_MC_IND_AIC_RBS(x) (((x) >> 20) & 0x1) -#define C_000070_MC_IND_AIC_RBS 0xFFEFFFFF -#define S_000070_MC_IND_CITF_ARB0(x) (((x) & 0x1) << 21) -#define G_000070_MC_IND_CITF_ARB0(x) (((x) >> 21) & 0x1) -#define C_000070_MC_IND_CITF_ARB0 0xFFDFFFFF -#define S_000070_MC_IND_CITF_ARB1(x) (((x) & 0x1) << 22) -#define G_000070_MC_IND_CITF_ARB1(x) (((x) >> 22) & 0x1) -#define C_000070_MC_IND_CITF_ARB1 0xFFBFFFFF -#define S_000070_MC_IND_WR_EN(x) (((x) & 0x1) << 23) -#define G_000070_MC_IND_WR_EN(x) (((x) >> 23) & 0x1) -#define C_000070_MC_IND_WR_EN 0xFF7FFFFF -#define S_000070_MC_IND_RD_INV(x) (((x) & 0x1) << 24) -#define G_000070_MC_IND_RD_INV(x) (((x) >> 24) & 0x1) -#define C_000070_MC_IND_RD_INV 0xFEFFFFFF -#define R_000074_MC_IND_DATA 0x000074 -#define S_000074_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) -#define G_000074_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) -#define C_000074_MC_IND_DATA 0x00000000 -#define R_000134_HDP_FB_LOCATION 0x000134 -#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_000134_HDP_FB_START 0xFFFF0000 -#define R_0007C0_CP_STAT 0x0007C0 -#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) -#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) -#define C_0007C0_MRU_BUSY 0xFFFFFFFE -#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) -#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) -#define C_0007C0_MWU_BUSY 0xFFFFFFFD -#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) -#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) -#define C_0007C0_RSIU_BUSY 0xFFFFFFFB -#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) -#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) -#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 -#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) -#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) -#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF -#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) -#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) 
>> 10) & 0x1) -#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF -#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) -#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) -#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF -#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) -#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) -#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF -#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) -#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) -#define C_0007C0_CSI_BUSY 0xFFFFDFFF -#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) -#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) -#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF -#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) -#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) -#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF -#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) -#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) -#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF -#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) -#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) -#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF -#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) -#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) -#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF -#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) -#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) -#define C_0007C0_CP_BUSY 0x7FFFFFFF -#define R_000E40_RBBM_STATUS 0x000E40 -#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) -#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) -#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 -#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) -#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) -#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF -#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) -#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) -#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF -#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) -#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) -#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF -#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) -#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) -#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF -#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) -#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) -#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF -#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) -#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) -#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF -#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) -#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) -#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF -#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) -#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) -#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF -#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) -#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) -#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF -#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) -#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) -#define C_000E40_E2_BUSY 0xFFFDFFFF -#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) -#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) -#define C_000E40_RB2D_BUSY 0xFFFBFFFF -#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) -#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) -#define C_000E40_RB3D_BUSY 0xFFF7FFFF -#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) -#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) -#define C_000E40_VAP_BUSY 0xFFEFFFFF -#define 
S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) -#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) -#define C_000E40_RE_BUSY 0xFFDFFFFF -#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) -#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) -#define C_000E40_TAM_BUSY 0xFFBFFFFF -#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) -#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) -#define C_000E40_TDM_BUSY 0xFF7FFFFF -#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) -#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) -#define C_000E40_PB_BUSY 0xFEFFFFFF -#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) -#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) -#define C_000E40_TIM_BUSY 0xFDFFFFFF -#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) -#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) -#define C_000E40_GA_BUSY 0xFBFFFFFF -#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) -#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) -#define C_000E40_CBA2D_BUSY 0xF7FFFFFF -#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) -#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) -#define C_000E40_GUI_ACTIVE 0x7FFFFFFF -#define R_0060A4_D1CRTC_STATUS_FRAME_COUNT 0x0060A4 -#define S_0060A4_D1CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) -#define G_0060A4_D1CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) -#define C_0060A4_D1CRTC_FRAME_COUNT 0xFF000000 -#define R_006534_D1MODE_VBLANK_STATUS 0x006534 -#define S_006534_D1MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) -#define G_006534_D1MODE_VBLANK_OCCURRED(x) (((x) >> 0) & 0x1) -#define C_006534_D1MODE_VBLANK_OCCURRED 0xFFFFFFFE -#define S_006534_D1MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) -#define G_006534_D1MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) -#define C_006534_D1MODE_VBLANK_ACK 0xFFFFFFEF -#define S_006534_D1MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) -#define G_006534_D1MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) -#define C_006534_D1MODE_VBLANK_STAT 0xFFFFEFFF -#define S_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) -#define G_006534_D1MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) -#define C_006534_D1MODE_VBLANK_INTERRUPT 0xFFFEFFFF -#define R_006540_DxMODE_INT_MASK 0x006540 -#define S_006540_D1MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 0) -#define G_006540_D1MODE_VBLANK_INT_MASK(x) (((x) >> 0) & 0x1) -#define C_006540_D1MODE_VBLANK_INT_MASK 0xFFFFFFFE -#define S_006540_D1MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 4) -#define G_006540_D1MODE_VLINE_INT_MASK(x) (((x) >> 4) & 0x1) -#define C_006540_D1MODE_VLINE_INT_MASK 0xFFFFFFEF -#define S_006540_D2MODE_VBLANK_INT_MASK(x) (((x) & 0x1) << 8) -#define G_006540_D2MODE_VBLANK_INT_MASK(x) (((x) >> 8) & 0x1) -#define C_006540_D2MODE_VBLANK_INT_MASK 0xFFFFFEFF -#define S_006540_D2MODE_VLINE_INT_MASK(x) (((x) & 0x1) << 12) -#define G_006540_D2MODE_VLINE_INT_MASK(x) (((x) >> 12) & 0x1) -#define C_006540_D2MODE_VLINE_INT_MASK 0xFFFFEFFF -#define S_006540_D1MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 30) -#define G_006540_D1MODE_VBLANK_CP_SEL(x) (((x) >> 30) & 0x1) -#define C_006540_D1MODE_VBLANK_CP_SEL 0xBFFFFFFF -#define S_006540_D2MODE_VBLANK_CP_SEL(x) (((x) & 0x1) << 31) -#define G_006540_D2MODE_VBLANK_CP_SEL(x) (((x) >> 31) & 0x1) -#define C_006540_D2MODE_VBLANK_CP_SEL 0x7FFFFFFF -#define R_0068A4_D2CRTC_STATUS_FRAME_COUNT 0x0068A4 -#define S_0068A4_D2CRTC_FRAME_COUNT(x) (((x) & 0xFFFFFF) << 0) -#define G_0068A4_D2CRTC_FRAME_COUNT(x) (((x) >> 0) & 0xFFFFFF) -#define C_0068A4_D2CRTC_FRAME_COUNT 0xFF000000 -#define R_006D34_D2MODE_VBLANK_STATUS 0x006D34 -#define S_006D34_D2MODE_VBLANK_OCCURRED(x) (((x) & 0x1) << 0) -#define G_006D34_D2MODE_VBLANK_OCCURRED(x) 
(((x) >> 0) & 0x1) -#define C_006D34_D2MODE_VBLANK_OCCURRED 0xFFFFFFFE -#define S_006D34_D2MODE_VBLANK_ACK(x) (((x) & 0x1) << 4) -#define G_006D34_D2MODE_VBLANK_ACK(x) (((x) >> 4) & 0x1) -#define C_006D34_D2MODE_VBLANK_ACK 0xFFFFFFEF -#define S_006D34_D2MODE_VBLANK_STAT(x) (((x) & 0x1) << 12) -#define G_006D34_D2MODE_VBLANK_STAT(x) (((x) >> 12) & 0x1) -#define C_006D34_D2MODE_VBLANK_STAT 0xFFFFEFFF -#define S_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) & 0x1) << 16) -#define G_006D34_D2MODE_VBLANK_INTERRUPT(x) (((x) >> 16) & 0x1) -#define C_006D34_D2MODE_VBLANK_INTERRUPT 0xFFFEFFFF -#define R_007EDC_DISP_INTERRUPT_STATUS 0x007EDC -#define S_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) & 0x1) << 4) -#define G_007EDC_LB_D1_VBLANK_INTERRUPT(x) (((x) >> 4) & 0x1) -#define C_007EDC_LB_D1_VBLANK_INTERRUPT 0xFFFFFFEF -#define S_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) & 0x1) << 5) -#define G_007EDC_LB_D2_VBLANK_INTERRUPT(x) (((x) >> 5) & 0x1) -#define C_007EDC_LB_D2_VBLANK_INTERRUPT 0xFFFFFFDF - - -/* MC registers */ -#define R_000000_MC_STATUS 0x000000 -#define S_000000_MC_IDLE(x) (((x) & 0x1) << 0) -#define G_000000_MC_IDLE(x) (((x) >> 0) & 0x1) -#define C_000000_MC_IDLE 0xFFFFFFFE -#define R_000004_MC_FB_LOCATION 0x000004 -#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_000004_MC_FB_START 0xFFFF0000 -#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) -#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_000004_MC_FB_TOP 0x0000FFFF -#define R_000005_MC_AGP_LOCATION 0x000005 -#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0) -#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) -#define C_000005_MC_AGP_START 0xFFFF0000 -#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) -#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_000005_MC_AGP_TOP 0x0000FFFF -#define R_000006_AGP_BASE 0x000006 -#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) -#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) -#define C_000006_AGP_BASE_ADDR 0x00000000 -#define R_000007_AGP_BASE_2 0x000007 -#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) -#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) -#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0 -#define R_000009_MC_CNTL1 0x000009 -#define S_000009_ENABLE_PAGE_TABLES(x) (((x) & 0x1) << 26) -#define G_000009_ENABLE_PAGE_TABLES(x) (((x) >> 26) & 0x1) -#define C_000009_ENABLE_PAGE_TABLES 0xFBFFFFFF -/* FIXME don't know the various field size need feedback from AMD */ -#define R_000100_MC_PT0_CNTL 0x000100 -#define S_000100_ENABLE_PT(x) (((x) & 0x1) << 0) -#define G_000100_ENABLE_PT(x) (((x) >> 0) & 0x1) -#define C_000100_ENABLE_PT 0xFFFFFFFE -#define S_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) & 0x7) << 15) -#define G_000100_EFFECTIVE_L2_CACHE_SIZE(x) (((x) >> 15) & 0x7) -#define C_000100_EFFECTIVE_L2_CACHE_SIZE 0xFFFC7FFF -#define S_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 0x7) << 21) -#define G_000100_EFFECTIVE_L2_QUEUE_SIZE(x) (((x) >> 21) & 0x7) -#define C_000100_EFFECTIVE_L2_QUEUE_SIZE 0xFF1FFFFF -#define S_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) & 0x1) << 28) -#define G_000100_INVALIDATE_ALL_L1_TLBS(x) (((x) >> 28) & 0x1) -#define C_000100_INVALIDATE_ALL_L1_TLBS 0xEFFFFFFF -#define S_000100_INVALIDATE_L2_CACHE(x) (((x) & 0x1) << 29) -#define G_000100_INVALIDATE_L2_CACHE(x) (((x) >> 29) & 0x1) -#define C_000100_INVALIDATE_L2_CACHE 0xDFFFFFFF -#define R_000102_MC_PT0_CONTEXT0_CNTL 0x000102 -#define S_000102_ENABLE_PAGE_TABLE(x) (((x) & 0x1) << 0) -#define 
G_000102_ENABLE_PAGE_TABLE(x) (((x) >> 0) & 0x1) -#define C_000102_ENABLE_PAGE_TABLE 0xFFFFFFFE -#define S_000102_PAGE_TABLE_DEPTH(x) (((x) & 0x3) << 1) -#define G_000102_PAGE_TABLE_DEPTH(x) (((x) >> 1) & 0x3) -#define C_000102_PAGE_TABLE_DEPTH 0xFFFFFFF9 -#define V_000102_PAGE_TABLE_FLAT 0 -/* R600 documentation suggest that this should be a number of pages */ -#define R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR 0x000112 -#define R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR 0x000114 -#define R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR 0x00011C -#define R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR 0x00012C -#define R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR 0x00013C -#define R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR 0x00014C -#define R_00016C_MC_PT0_CLIENT0_CNTL 0x00016C -#define S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 0) -#define G_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 0) & 0x1) -#define C_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFE -#define S_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) & 0x1) << 1) -#define G_00016C_TRANSLATION_MODE_OVERRIDE(x) (((x) >> 1) & 0x1) -#define C_00016C_TRANSLATION_MODE_OVERRIDE 0xFFFFFFFD -#define S_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) & 0x3) << 8) -#define G_00016C_SYSTEM_ACCESS_MODE_MASK(x) (((x) >> 8) & 0x3) -#define C_00016C_SYSTEM_ACCESS_MODE_MASK 0xFFFFFCFF -#define V_00016C_SYSTEM_ACCESS_MODE_PA_ONLY 0 -#define V_00016C_SYSTEM_ACCESS_MODE_USE_SYS_MAP 1 -#define V_00016C_SYSTEM_ACCESS_MODE_IN_SYS 2 -#define V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS 3 -#define S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) & 0x1) << 10) -#define G_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(x) (((x) >> 10) & 0x1) -#define C_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS 0xFFFFFBFF -#define V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH 0 -#define V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE 1 -#define S_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) & 0x7) << 11) -#define G_00016C_EFFECTIVE_L1_CACHE_SIZE(x) (((x) >> 11) & 0x7) -#define C_00016C_EFFECTIVE_L1_CACHE_SIZE 0xFFFFC7FF -#define S_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) & 0x1) << 14) -#define G_00016C_ENABLE_FRAGMENT_PROCESSING(x) (((x) >> 14) & 0x1) -#define C_00016C_ENABLE_FRAGMENT_PROCESSING 0xFFFFBFFF -#define S_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) & 0x7) << 15) -#define G_00016C_EFFECTIVE_L1_QUEUE_SIZE(x) (((x) >> 15) & 0x7) -#define C_00016C_EFFECTIVE_L1_QUEUE_SIZE 0xFFFC7FFF -#define S_00016C_INVALIDATE_L1_TLB(x) (((x) & 0x1) << 20) -#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1) -#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rs690.c b/trunk/drivers/gpu/drm/radeon/rs690.c index 025e3225346c..7a0098ddf977 100644 --- a/trunk/drivers/gpu/drm/radeon/rs690.c +++ b/trunk/drivers/gpu/drm/radeon/rs690.c @@ -26,29 +26,105 @@ * Jerome Glisse */ #include "drmP.h" +#include "radeon_reg.h" #include "radeon.h" +#include "rs690r.h" #include "atom.h" -#include "rs690d.h" +#include "atom-bits.h" + +/* rs690,rs740 depends on : */ +void r100_hdp_reset(struct radeon_device *rdev); +int r300_mc_wait_for_idle(struct radeon_device *rdev); +void r420_pipes_init(struct radeon_device *rdev); +void rs400_gart_disable(struct radeon_device *rdev); +int rs400_gart_enable(struct radeon_device *rdev); +void rs400_gart_adjust_size(struct radeon_device *rdev); +void rs600_mc_disable_clients(struct radeon_device *rdev); + +/* This files gather functions specifics to : + * rs690,rs740 + * + * Some of these functions might be used by newer ASICs. 
+ */ +void rs690_gpu_init(struct radeon_device *rdev); +int rs690_mc_wait_for_idle(struct radeon_device *rdev); + + +/* + * MC functions. + */ +int rs690_mc_init(struct radeon_device *rdev) +{ + uint32_t tmp; + int r; + + if (r100_debugfs_rbbm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + } + + rs690_gpu_init(rdev); + rs400_gart_disable(rdev); + + /* Setup GPU memory space */ + rdev->mc.gtt_location = rdev->mc.mc_vram_size; + rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); + rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); + rdev->mc.vram_location = 0xFFFFFFFFUL; + r = radeon_mc_setup(rdev); + if (r) { + return r; + } + + /* Program GPU memory space */ + rs600_mc_disable_clients(rdev); + if (rs690_mc_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait MC idle while " + "programming pipes. Bad things might happen.\n"); + } + tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); + tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); + WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); + /* FIXME: Does this reg exist on RS480,RS740 ? */ + WREG32(0x310, rdev->mc.vram_location); + WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); + return 0; +} + +void rs690_mc_fini(struct radeon_device *rdev) +{ +} + -static int rs690_mc_wait_for_idle(struct radeon_device *rdev) +/* + * Global GPU functions + */ +int rs690_mc_wait_for_idle(struct radeon_device *rdev) { unsigned i; uint32_t tmp; for (i = 0; i < rdev->usec_timeout; i++) { /* read MC_STATUS */ - tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS); - if (G_000090_MC_SYSTEM_IDLE(tmp)) + tmp = RREG32_MC(RS690_MC_STATUS); + if (tmp & RS690_MC_STATUS_IDLE) { return 0; - udelay(1); + } + DRM_UDELAY(1); } return -1; } -static void rs690_gpu_init(struct radeon_device *rdev) +void rs690_errata(struct radeon_device *rdev) +{ + rdev->pll_errata = 0; +} + +void rs690_gpu_init(struct radeon_device *rdev) { /* FIXME: HDP same place on rs690 ? */ r100_hdp_reset(rdev); + rv515_vga_render_disable(rdev); /* FIXME: is this correct ? */ r420_pipes_init(rdev); if (rs690_mc_wait_for_idle(rdev)) { @@ -57,6 +133,10 @@ static void rs690_gpu_init(struct radeon_device *rdev) } } + +/* + * VRAM info. + */ void rs690_pm_info(struct radeon_device *rdev) { int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); @@ -170,39 +250,39 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev, /* * Line Buffer Setup * There is a single line buffer shared by both display controllers. - * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between + * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between * the display controllers. The paritioning can either be done * manually or via one of four preset allocations specified in bits 1:0: * 0 - line buffer is divided in half and shared between crtc * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 * 2 - D1 gets the whole buffer * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 - * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT controls switches to manual + * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual * allocation mode. In manual allocation mode, D1 always starts at 0, * D1 end/2 is specified in bits 14:4; D2 allocation follows D1. 
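 * For example (an illustrative sketch only, not what the code below does):
 * with a hypothetical total line buffer size lb_size, a manual 50/50 split
 * could be programmed as
 *   WREG32(DC_LB_MEMORY_SPLIT, DC_LB_MEMORY_SPLIT_SHIFT_MODE |
 *          (((lb_size / 4) << DC_LB_DISP1_END_ADR_SHIFT) &
 *           DC_LB_DISP1_END_ADR_MASK));
 * since the bits 14:4 field holds "D1 end / 2". The driver itself only
 * uses the preset allocations handled below.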
*/ - tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT; - tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE; + tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; + tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; /* auto */ if (mode1 && mode2) { if (mode1->hdisplay > mode2->hdisplay) { if (mode1->hdisplay > 2560) - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; + tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; else - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else if (mode2->hdisplay > mode1->hdisplay) { if (mode2->hdisplay > 2560) - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; else - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; + tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; } else if (mode1) { - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY; + tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; } else if (mode2) { - tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; + tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; } - WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp); + WREG32(DC_LB_MEMORY_SPLIT, tmp); } struct rs690_watermark { @@ -407,28 +487,28 @@ void rs690_bandwidth_update(struct radeon_device *rdev) * option. */ if (rdev->disp_priority == 2) { - tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER); - tmp &= C_000104_MC_DISP0R_INIT_LAT; - tmp &= C_000104_MC_DISP1R_INIT_LAT; - if (mode0) - tmp |= S_000104_MC_DISP0R_INIT_LAT(1); + tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); + tmp &= ~MC_DISP1R_INIT_LAT_MASK; + tmp &= ~MC_DISP0R_INIT_LAT_MASK; if (mode1) - tmp |= S_000104_MC_DISP1R_INIT_LAT(1); - WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp); + tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); + if (mode0) + tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); + WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); } rs690_line_buffer_adjust(rdev, mode0, mode1); if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) - WREG32(R_006C9C_DCP_CONTROL, 0); + WREG32(DCP_CONTROL, 0); if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) - WREG32(R_006C9C_DCP_CONTROL, 2); + WREG32(DCP_CONTROL, 2); rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); tmp = (wm0.lb_request_fifo_depth - 1); tmp |= (wm1.lb_request_fifo_depth - 1) << 16; - WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp); + WREG32(LB_MAX_REQ_OUTSTANDING, tmp); if (mode0 && mode1) { if (rfixed_trunc(wm0.dbpp) > 64) @@ -481,10 +561,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); } else if (mode0) { if (rfixed_trunc(wm0.dbpp) > 64) a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); @@ -511,12 +591,10 @@ void rs690_bandwidth_update(struct radeon_device *rdev) priority_mark02.full = 0; if (wm0.priority_mark_max.full > 
priority_mark02.full) priority_mark02.full = wm0.priority_mark_max.full; - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, - S_006D48_D2MODE_PRIORITY_A_OFF(1)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, - S_006D4C_D2MODE_PRIORITY_B_OFF(1)); + WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); + WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); + WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); } else { if (rfixed_trunc(wm1.dbpp) > 64) a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); @@ -543,203 +621,30 @@ void rs690_bandwidth_update(struct radeon_device *rdev) priority_mark12.full = 0; if (wm1.priority_mark_max.full > priority_mark12.full) priority_mark12.full = wm1.priority_mark_max.full; - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, - S_006548_D1MODE_PRIORITY_A_OFF(1)); - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, - S_00654C_D1MODE_PRIORITY_B_OFF(1)); - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); + WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); + WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); + WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); + WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); } } +/* + * Indirect registers accessor + */ uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) { uint32_t r; - WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg)); - r = RREG32(R_00007C_MC_DATA); - WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR); + WREG32(RS690_MC_INDEX, (reg & RS690_MC_INDEX_MASK)); + r = RREG32(RS690_MC_DATA); + WREG32(RS690_MC_INDEX, RS690_MC_INDEX_MASK); return r; } void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { - WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) | - S_000078_MC_IND_WR_EN(1)); - WREG32(R_00007C_MC_DATA, v); - WREG32(R_000078_MC_INDEX, 0x7F); -} - -void rs690_mc_program(struct radeon_device *rdev) -{ - struct rv515_mc_save save; - - /* Stops all mc clients */ - rv515_mc_stop(rdev, &save); - - /* Wait for mc idle */ - if (rs690_mc_wait_for_idle(rdev)) - dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n"); - /* Program MC, should be a 32bits limited address space */ - WREG32_MC(R_000100_MCCFG_FB_LOCATION, - S_000100_MC_FB_START(rdev->mc.vram_start >> 16) | - S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16)); - WREG32(R_000134_HDP_FB_LOCATION, - S_000134_HDP_FB_START(rdev->mc.vram_start >> 16)); - - rv515_mc_resume(rdev, &save); -} - -static int rs690_startup(struct radeon_device *rdev) -{ - int r; - - rs690_mc_program(rdev); - /* Resume clock */ - rv515_clock_startup(rdev); - /* Initialize GPU configuration (# pipes, ...) 
*/ - rs690_gpu_init(rdev); - /* Initialize GART (initialize after TTM so we can allocate - * memory through TTM but finalize after TTM) */ - r = rs400_gart_enable(rdev); - if (r) - return r; - /* Enable IRQ */ - rdev->irq.sw_int = true; - rs600_irq_set(rdev); - /* 1M ring buffer */ - r = r100_cp_init(rdev, 1024 * 1024); - if (r) { - dev_err(rdev->dev, "failled initializing CP (%d).\n", r); - return r; - } - r = r100_wb_init(rdev); - if (r) - dev_err(rdev->dev, "failled initializing WB (%d).\n", r); - r = r100_ib_init(rdev); - if (r) { - dev_err(rdev->dev, "failled initializing IB (%d).\n", r); - return r; - } - return 0; -} - -int rs690_resume(struct radeon_device *rdev) -{ - /* Make sur GART are not working */ - rs400_gart_disable(rdev); - /* Resume clock before doing reset */ - rv515_clock_startup(rdev); - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* post */ - atom_asic_init(rdev->mode_info.atom_context); - /* Resume clock after posting */ - rv515_clock_startup(rdev); - return rs690_startup(rdev); -} - -int rs690_suspend(struct radeon_device *rdev) -{ - r100_cp_disable(rdev); - r100_wb_disable(rdev); - rs600_irq_disable(rdev); - rs400_gart_disable(rdev); - return 0; -} - -void rs690_fini(struct radeon_device *rdev) -{ - rs690_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - radeon_gem_fini(rdev); - rs400_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - radeon_fence_driver_fini(rdev); - radeon_object_fini(rdev); - radeon_atombios_fini(rdev); - kfree(rdev->bios); - rdev->bios = NULL; -} - -int rs690_init(struct radeon_device *rdev) -{ - int r; - - /* Disable VGA */ - rv515_vga_render_disable(rdev); - /* Initialize scratch registers */ - radeon_scratch_init(rdev); - /* Initialize surface registers */ - radeon_surface_init(rdev); - /* TODO: disable VGA need to use VGA request */ - /* BIOS*/ - if (!radeon_get_bios(rdev)) { - if (ASIC_IS_AVIVO(rdev)) - return -EINVAL; - } - if (rdev->is_atom_bios) { - r = radeon_atombios_init(rdev); - if (r) - return r; - } else { - dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n"); - return -EINVAL; - } - /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - dev_warn(rdev->dev, - "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", - RREG32(R_000E40_RBBM_STATUS), - RREG32(R_0007C0_CP_STAT)); - } - /* check if cards are posted or not */ - if (!radeon_card_posted(rdev) && rdev->bios) { - DRM_INFO("GPU not posted. 
posting now...\n"); - atom_asic_init(rdev->mode_info.atom_context); - } - /* Initialize clocks */ - radeon_get_clock_info(rdev->ddev); - /* Get vram informations */ - rs690_vram_info(rdev); - /* Initialize memory controller (also test AGP) */ - r = r420_mc_init(rdev); - if (r) - return r; - rv515_debugfs(rdev); - /* Fence driver */ - r = radeon_fence_driver_init(rdev); - if (r) - return r; - r = radeon_irq_kms_init(rdev); - if (r) - return r; - /* Memory manager */ - r = radeon_object_init(rdev); - if (r) - return r; - r = rs400_gart_init(rdev); - if (r) - return r; - rs600_set_safe_registers(rdev); - rdev->accel_working = true; - r = rs690_startup(rdev); - if (r) { - /* Somethings want wront with the accel init stop accel */ - dev_err(rdev->dev, "Disabling GPU acceleration\n"); - rs690_suspend(rdev); - r100_cp_fini(rdev); - r100_wb_fini(rdev); - r100_ib_fini(rdev); - rs400_gart_fini(rdev); - radeon_irq_kms_fini(rdev); - rdev->accel_working = false; - } - return 0; + WREG32(RS690_MC_INDEX, + RS690_MC_INDEX_WR_EN | ((reg) & RS690_MC_INDEX_MASK)); + WREG32(RS690_MC_DATA, v); + WREG32(RS690_MC_INDEX, RS690_MC_INDEX_WR_ACK); } diff --git a/trunk/drivers/gpu/drm/radeon/rs690d.h b/trunk/drivers/gpu/drm/radeon/rs690d.h deleted file mode 100644 index 62d31e7a897f..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rs690d.h +++ /dev/null @@ -1,307 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RS690D_H__ -#define __RS690D_H__ - -/* Registers */ -#define R_000078_MC_INDEX 0x000078 -#define S_000078_MC_IND_ADDR(x) (((x) & 0x1FF) << 0) -#define G_000078_MC_IND_ADDR(x) (((x) >> 0) & 0x1FF) -#define C_000078_MC_IND_ADDR 0xFFFFFE00 -#define S_000078_MC_IND_WR_EN(x) (((x) & 0x1) << 9) -#define G_000078_MC_IND_WR_EN(x) (((x) >> 9) & 0x1) -#define C_000078_MC_IND_WR_EN 0xFFFFFDFF -#define R_00007C_MC_DATA 0x00007C -#define S_00007C_MC_DATA(x) (((x) & 0xFFFFFFFF) << 0) -#define G_00007C_MC_DATA(x) (((x) >> 0) & 0xFFFFFFFF) -#define C_00007C_MC_DATA 0x00000000 -#define R_0000F8_CONFIG_MEMSIZE 0x0000F8 -#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0) -#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF) -#define C_0000F8_CONFIG_MEMSIZE 0x00000000 -#define R_000134_HDP_FB_LOCATION 0x000134 -#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_000134_HDP_FB_START 0xFFFF0000 -#define R_0007C0_CP_STAT 0x0007C0 -#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) -#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) -#define C_0007C0_MRU_BUSY 0xFFFFFFFE -#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) -#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) -#define C_0007C0_MWU_BUSY 0xFFFFFFFD -#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) -#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) -#define C_0007C0_RSIU_BUSY 0xFFFFFFFB -#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) -#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) -#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 -#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) -#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) -#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF -#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) -#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) -#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF -#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) -#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) -#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF -#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) -#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) -#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF -#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) -#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) -#define C_0007C0_CSI_BUSY 0xFFFFDFFF -#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) -#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) -#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF -#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) -#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) -#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF -#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) -#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) -#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF -#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) -#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) -#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF -#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) -#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) -#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF -#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) -#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) -#define C_0007C0_CP_BUSY 0x7FFFFFFF -#define R_000E40_RBBM_STATUS 0x000E40 -#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) -#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) -#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 -#define 
S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) -#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) -#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF -#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) -#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) -#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF -#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) -#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) -#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF -#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) -#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) -#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF -#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) -#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) -#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF -#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) -#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) -#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF -#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) -#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) -#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF -#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) -#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) -#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF -#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) -#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) -#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF -#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) -#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) -#define C_000E40_E2_BUSY 0xFFFDFFFF -#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) -#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) -#define C_000E40_RB2D_BUSY 0xFFFBFFFF -#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) -#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) -#define C_000E40_RB3D_BUSY 0xFFF7FFFF -#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) -#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) -#define C_000E40_VAP_BUSY 0xFFEFFFFF -#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) -#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) -#define C_000E40_RE_BUSY 0xFFDFFFFF -#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) -#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) -#define C_000E40_TAM_BUSY 0xFFBFFFFF -#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) -#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) -#define C_000E40_TDM_BUSY 0xFF7FFFFF -#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) -#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) -#define C_000E40_PB_BUSY 0xFEFFFFFF -#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) -#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) -#define C_000E40_TIM_BUSY 0xFDFFFFFF -#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) -#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) -#define C_000E40_GA_BUSY 0xFBFFFFFF -#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) -#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) -#define C_000E40_CBA2D_BUSY 0xF7FFFFFF -#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) -#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) -#define C_000E40_GUI_ACTIVE 0x7FFFFFFF -#define R_006520_DC_LB_MEMORY_SPLIT 0x006520 -#define S_006520_DC_LB_MEMORY_SPLIT(x) (((x) & 0x3) << 0) -#define G_006520_DC_LB_MEMORY_SPLIT(x) (((x) >> 0) & 0x3) -#define C_006520_DC_LB_MEMORY_SPLIT 0xFFFFFFFC -#define S_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) & 0x1) << 2) -#define G_006520_DC_LB_MEMORY_SPLIT_MODE(x) (((x) >> 2) & 0x1) -#define C_006520_DC_LB_MEMORY_SPLIT_MODE 0xFFFFFFFB -#define V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 -#define V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 -#define V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY 2 
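/*
 * Naming convention used throughout these register headers: for a field FOO
 * of register R_xxxxxx, S_xxxxxx_FOO(v) shifts a value into the field,
 * G_xxxxxx_FOO(reg) extracts it, C_xxxxxx_FOO is the AND mask that clears
 * it, and V_xxxxxx_* are named field values. A typical read-modify-write
 * (illustrative sketch, using the defines above):
 *
 *   tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT);
 *   tmp &= C_006520_DC_LB_MEMORY_SPLIT;
 *   tmp |= S_006520_DC_LB_MEMORY_SPLIT(V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY);
 *   WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
 */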
-#define V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 -#define S_006520_DC_LB_DISP1_END_ADR(x) (((x) & 0x7FF) << 4) -#define G_006520_DC_LB_DISP1_END_ADR(x) (((x) >> 4) & 0x7FF) -#define C_006520_DC_LB_DISP1_END_ADR 0xFFFF800F -#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548 -#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) -#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) -#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000 -#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) -#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) -#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF -#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) -#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) -#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF -#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C -#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) -#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) -#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000 -#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) -#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) -#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF -#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) -#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) -#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF -#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) -#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) -#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF -#define R_006C9C_DCP_CONTROL 0x006C9C -#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48 -#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0) -#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF) -#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000 -#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16) -#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1) -#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF -#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20) -#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1) -#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF -#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24) -#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1) -#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF -#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C -#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0) -#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF) -#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000 -#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16) -#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1) -#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF -#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20) -#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1) -#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF -#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24) -#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1) -#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF -#define R_006D58_LB_MAX_REQ_OUTSTANDING 0x006D58 -#define S_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 0) -#define G_006D58_LB_D1_MAX_REQ_OUTSTANDING(x) (((x) >> 0) & 0xF) -#define C_006D58_LB_D1_MAX_REQ_OUTSTANDING 0xFFFFFFF0 -#define 
S_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) & 0xF) << 16) -#define G_006D58_LB_D2_MAX_REQ_OUTSTANDING(x) (((x) >> 16) & 0xF) -#define C_006D58_LB_D2_MAX_REQ_OUTSTANDING 0xFFF0FFFF - - -#define R_000090_MC_SYSTEM_STATUS 0x000090 -#define S_000090_MC_SYSTEM_IDLE(x) (((x) & 0x1) << 0) -#define G_000090_MC_SYSTEM_IDLE(x) (((x) >> 0) & 0x1) -#define C_000090_MC_SYSTEM_IDLE 0xFFFFFFFE -#define S_000090_MC_SEQUENCER_IDLE(x) (((x) & 0x1) << 1) -#define G_000090_MC_SEQUENCER_IDLE(x) (((x) >> 1) & 0x1) -#define C_000090_MC_SEQUENCER_IDLE 0xFFFFFFFD -#define S_000090_MC_ARBITER_IDLE(x) (((x) & 0x1) << 2) -#define G_000090_MC_ARBITER_IDLE(x) (((x) >> 2) & 0x1) -#define C_000090_MC_ARBITER_IDLE 0xFFFFFFFB -#define S_000090_MC_SELECT_PM(x) (((x) & 0x1) << 3) -#define G_000090_MC_SELECT_PM(x) (((x) >> 3) & 0x1) -#define C_000090_MC_SELECT_PM 0xFFFFFFF7 -#define S_000090_RESERVED4(x) (((x) & 0xF) << 4) -#define G_000090_RESERVED4(x) (((x) >> 4) & 0xF) -#define C_000090_RESERVED4 0xFFFFFF0F -#define S_000090_RESERVED8(x) (((x) & 0xF) << 8) -#define G_000090_RESERVED8(x) (((x) >> 8) & 0xF) -#define C_000090_RESERVED8 0xFFFFF0FF -#define S_000090_RESERVED12(x) (((x) & 0xF) << 12) -#define G_000090_RESERVED12(x) (((x) >> 12) & 0xF) -#define C_000090_RESERVED12 0xFFFF0FFF -#define S_000090_MCA_INIT_EXECUTED(x) (((x) & 0x1) << 16) -#define G_000090_MCA_INIT_EXECUTED(x) (((x) >> 16) & 0x1) -#define C_000090_MCA_INIT_EXECUTED 0xFFFEFFFF -#define S_000090_MCA_IDLE(x) (((x) & 0x1) << 17) -#define G_000090_MCA_IDLE(x) (((x) >> 17) & 0x1) -#define C_000090_MCA_IDLE 0xFFFDFFFF -#define S_000090_MCA_SEQ_IDLE(x) (((x) & 0x1) << 18) -#define G_000090_MCA_SEQ_IDLE(x) (((x) >> 18) & 0x1) -#define C_000090_MCA_SEQ_IDLE 0xFFFBFFFF -#define S_000090_MCA_ARB_IDLE(x) (((x) & 0x1) << 19) -#define G_000090_MCA_ARB_IDLE(x) (((x) >> 19) & 0x1) -#define C_000090_MCA_ARB_IDLE 0xFFF7FFFF -#define S_000090_RESERVED20(x) (((x) & 0xFFF) << 20) -#define G_000090_RESERVED20(x) (((x) >> 20) & 0xFFF) -#define C_000090_RESERVED20 0x000FFFFF -#define R_000100_MCCFG_FB_LOCATION 0x000100 -#define S_000100_MC_FB_START(x) (((x) & 0xFFFF) << 0) -#define G_000100_MC_FB_START(x) (((x) >> 0) & 0xFFFF) -#define C_000100_MC_FB_START 0xFFFF0000 -#define S_000100_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) -#define G_000100_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) -#define C_000100_MC_FB_TOP 0x0000FFFF -#define R_000104_MC_INIT_MISC_LAT_TIMER 0x000104 -#define S_000104_MC_CPR_INIT_LAT(x) (((x) & 0xF) << 0) -#define G_000104_MC_CPR_INIT_LAT(x) (((x) >> 0) & 0xF) -#define C_000104_MC_CPR_INIT_LAT 0xFFFFFFF0 -#define S_000104_MC_VF_INIT_LAT(x) (((x) & 0xF) << 4) -#define G_000104_MC_VF_INIT_LAT(x) (((x) >> 4) & 0xF) -#define C_000104_MC_VF_INIT_LAT 0xFFFFFF0F -#define S_000104_MC_DISP0R_INIT_LAT(x) (((x) & 0xF) << 8) -#define G_000104_MC_DISP0R_INIT_LAT(x) (((x) >> 8) & 0xF) -#define C_000104_MC_DISP0R_INIT_LAT 0xFFFFF0FF -#define S_000104_MC_DISP1R_INIT_LAT(x) (((x) & 0xF) << 12) -#define G_000104_MC_DISP1R_INIT_LAT(x) (((x) >> 12) & 0xF) -#define C_000104_MC_DISP1R_INIT_LAT 0xFFFF0FFF -#define S_000104_MC_FIXED_INIT_LAT(x) (((x) & 0xF) << 16) -#define G_000104_MC_FIXED_INIT_LAT(x) (((x) >> 16) & 0xF) -#define C_000104_MC_FIXED_INIT_LAT 0xFFF0FFFF -#define S_000104_MC_E2R_INIT_LAT(x) (((x) & 0xF) << 20) -#define G_000104_MC_E2R_INIT_LAT(x) (((x) >> 20) & 0xF) -#define C_000104_MC_E2R_INIT_LAT 0xFF0FFFFF -#define S_000104_SAME_PAGE_PRIO(x) (((x) & 0xF) << 24) -#define G_000104_SAME_PAGE_PRIO(x) (((x) >> 24) & 0xF) -#define C_000104_SAME_PAGE_PRIO 0xF0FFFFFF -#define 
S_000104_MC_GLOBW_INIT_LAT(x) (((x) & 0xF) << 28) -#define G_000104_MC_GLOBW_INIT_LAT(x) (((x) >> 28) & 0xF) -#define C_000104_MC_GLOBW_INIT_LAT 0x0FFFFFFF - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rs690r.h b/trunk/drivers/gpu/drm/radeon/rs690r.h new file mode 100644 index 000000000000..c0d9faa2175b --- /dev/null +++ b/trunk/drivers/gpu/drm/radeon/rs690r.h @@ -0,0 +1,99 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef RS690R_H +#define RS690R_H + +/* RS690/RS740 registers */ +#define MC_INDEX 0x0078 +# define MC_INDEX_MASK 0x1FF +# define MC_INDEX_WR_EN (1 << 9) +# define MC_INDEX_WR_ACK 0x7F +#define MC_DATA 0x007C +#define HDP_FB_LOCATION 0x0134 +#define DC_LB_MEMORY_SPLIT 0x6520 +#define DC_LB_MEMORY_SPLIT_MASK 0x00000003 +#define DC_LB_MEMORY_SPLIT_SHIFT 0 +#define DC_LB_MEMORY_SPLIT_D1HALF_D2HALF 0 +#define DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q 1 +#define DC_LB_MEMORY_SPLIT_D1_ONLY 2 +#define DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q 3 +#define DC_LB_MEMORY_SPLIT_SHIFT_MODE (1 << 2) +#define DC_LB_DISP1_END_ADR_SHIFT 4 +#define DC_LB_DISP1_END_ADR_MASK 0x00007FF0 +#define D1MODE_PRIORITY_A_CNT 0x6548 +#define MODE_PRIORITY_MARK_MASK 0x00007FFF +#define MODE_PRIORITY_OFF (1 << 16) +#define MODE_PRIORITY_ALWAYS_ON (1 << 20) +#define MODE_PRIORITY_FORCE_MASK (1 << 24) +#define D1MODE_PRIORITY_B_CNT 0x654C +#define LB_MAX_REQ_OUTSTANDING 0x6D58 +#define LB_D1_MAX_REQ_OUTSTANDING_MASK 0x0000000F +#define LB_D1_MAX_REQ_OUTSTANDING_SHIFT 0 +#define LB_D2_MAX_REQ_OUTSTANDING_MASK 0x000F0000 +#define LB_D2_MAX_REQ_OUTSTANDING_SHIFT 16 +#define DCP_CONTROL 0x6C9C +#define D2MODE_PRIORITY_A_CNT 0x6D48 +#define D2MODE_PRIORITY_B_CNT 0x6D4C + +/* MC indirect registers */ +#define MC_STATUS_IDLE (1 << 0) +#define MC_MISC_CNTL 0x18 +#define DISABLE_GTW (1 << 1) +#define GART_INDEX_REG_EN (1 << 12) +#define BLOCK_GFX_D3_EN (1 << 14) +#define GART_FEATURE_ID 0x2B +#define HANG_EN (1 << 11) +#define TLB_ENABLE (1 << 18) +#define P2P_ENABLE (1 << 19) +#define GTW_LAC_EN (1 << 25) +#define LEVEL2_GART (0 << 30) +#define LEVEL1_GART (1 << 30) +#define PDC_EN (1 << 31) +#define GART_BASE 0x2C +#define GART_CACHE_CNTRL 0x2E +# define GART_CACHE_INVALIDATE (1 << 0) +#define MC_STATUS 0x90 +#define MCCFG_FB_LOCATION 0x100 +#define MC_FB_START_MASK 0x0000FFFF +#define MC_FB_START_SHIFT 0 
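/*
 * The registers in the "MC indirect registers" block above are reached
 * indirectly through the MC_INDEX/MC_DATA pair, following the same pattern
 * as rs690_mc_wreg() in rs690.c (sketch using the names from this header;
 * reg and v are placeholders):
 *
 *   WREG32(MC_INDEX, MC_INDEX_WR_EN | (reg & MC_INDEX_MASK));
 *   WREG32(MC_DATA, v);
 *   WREG32(MC_INDEX, MC_INDEX_WR_ACK);
 */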
+#define MC_FB_TOP_MASK 0xFFFF0000 +#define MC_FB_TOP_SHIFT 16 +#define MCCFG_AGP_LOCATION 0x101 +#define MC_AGP_START_MASK 0x0000FFFF +#define MC_AGP_START_SHIFT 0 +#define MC_AGP_TOP_MASK 0xFFFF0000 +#define MC_AGP_TOP_SHIFT 16 +#define MCCFG_AGP_BASE 0x102 +#define MCCFG_AGP_BASE_2 0x103 +#define MC_INIT_MISC_LAT_TIMER 0x104 +#define MC_DISP0R_INIT_LAT_SHIFT 8 +#define MC_DISP0R_INIT_LAT_MASK 0x00000F00 +#define MC_DISP1R_INIT_LAT_SHIFT 12 +#define MC_DISP1R_INIT_LAT_MASK 0x0000F000 + +#endif diff --git a/trunk/drivers/gpu/drm/radeon/rv200d.h b/trunk/drivers/gpu/drm/radeon/rv200d.h deleted file mode 100644 index c5b398330c26..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rv200d.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RV200D_H__ -#define __RV200D_H__ - -#define R_00015C_AGP_BASE_2 0x00015C -#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) -#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) -#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0 - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rv250d.h b/trunk/drivers/gpu/drm/radeon/rv250d.h deleted file mode 100644 index e5a70b06fe1f..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rv250d.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RV250D_H__ -#define __RV250D_H__ - -#define R_00000D_SCLK_CNTL_M6 0x00000D -#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) -#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) -#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 -#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) -#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) -#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 -#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) -#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) -#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF -#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) -#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) -#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF -#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) -#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) -#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF -#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) -#define G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) -#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F -#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) -#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) -#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF -#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) -#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) -#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF -#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) -#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) -#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF -#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) -#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) -#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF -#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) -#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) -#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF -#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) -#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) -#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF -#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) -#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) -#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF -#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) -#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) -#define C_00000D_FORCE_DISP2 0xFFFF7FFF -#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) -#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) -#define C_00000D_FORCE_CP 0xFFFEFFFF -#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) -#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) -#define C_00000D_FORCE_HDP 0xFFFDFFFF -#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) -#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) -#define C_00000D_FORCE_DISP1 0xFFFBFFFF -#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) -#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) -#define C_00000D_FORCE_TOP 0xFFF7FFFF -#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) -#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) -#define C_00000D_FORCE_E2 0xFFEFFFFF -#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) -#define 
G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) -#define C_00000D_FORCE_SE 0xFFDFFFFF -#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) -#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) -#define C_00000D_FORCE_IDCT 0xFFBFFFFF -#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) -#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) -#define C_00000D_FORCE_VIP 0xFF7FFFFF -#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) -#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) -#define C_00000D_FORCE_RE 0xFEFFFFFF -#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) -#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) -#define C_00000D_FORCE_PB 0xFDFFFFFF -#define S_00000D_FORCE_TAM(x) (((x) & 0x1) << 26) -#define G_00000D_FORCE_TAM(x) (((x) >> 26) & 0x1) -#define C_00000D_FORCE_TAM 0xFBFFFFFF -#define S_00000D_FORCE_TDM(x) (((x) & 0x1) << 27) -#define G_00000D_FORCE_TDM(x) (((x) >> 27) & 0x1) -#define C_00000D_FORCE_TDM 0xF7FFFFFF -#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) -#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) -#define C_00000D_FORCE_RB 0xEFFFFFFF -#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) -#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) -#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF -#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) -#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) -#define C_00000D_FORCE_SUBPIC 0xBFFFFFFF -#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) -#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) -#define C_00000D_FORCE_OV0 0x7FFFFFFF - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rv350d.h b/trunk/drivers/gpu/drm/radeon/rv350d.h deleted file mode 100644 index c75c5ed9e654..000000000000 --- a/trunk/drivers/gpu/drm/radeon/rv350d.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RV350D_H__ -#define __RV350D_H__ - -/* RV350, RV380 registers */ -/* #define R_00000D_SCLK_CNTL 0x00000D */ -#define S_00000D_FORCE_VAP(x) (((x) & 0x1) << 21) -#define G_00000D_FORCE_VAP(x) (((x) >> 21) & 0x1) -#define C_00000D_FORCE_VAP 0xFFDFFFFF -#define S_00000D_FORCE_SR(x) (((x) & 0x1) << 25) -#define G_00000D_FORCE_SR(x) (((x) >> 25) & 0x1) -#define C_00000D_FORCE_SR 0xFDFFFFFF -#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) -#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) -#define C_00000D_FORCE_PX 0xFBFFFFFF -#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) -#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) -#define C_00000D_FORCE_TX 0xF7FFFFFF -#define S_00000D_FORCE_US(x) (((x) & 0x1) << 28) -#define G_00000D_FORCE_US(x) (((x) >> 28) & 0x1) -#define C_00000D_FORCE_US 0xEFFFFFFF -#define S_00000D_FORCE_SU(x) (((x) & 0x1) << 30) -#define G_00000D_FORCE_SU(x) (((x) >> 30) & 0x1) -#define C_00000D_FORCE_SU 0xBFFFFFFF - -#endif diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index 41a34c23e6d8..e53b5ca7a253 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -478,7 +478,7 @@ static int rv515_startup(struct radeon_device *rdev) } /* Enable IRQ */ rdev->irq.sw_int = true; - rs600_irq_set(rdev); + r100_irq_set(rdev); /* 1M ring buffer */ r = r100_cp_init(rdev, 1024 * 1024); if (r) { @@ -520,7 +520,7 @@ int rv515_suspend(struct radeon_device *rdev) { r100_cp_disable(rdev); r100_wb_disable(rdev); - rs600_irq_disable(rdev); + r100_irq_disable(rdev); if (rdev->flags & RADEON_IS_PCIE) rv370_pcie_gart_disable(rdev); return 0; @@ -553,6 +553,7 @@ int rv515_init(struct radeon_device *rdev) { int r; + rdev->new_init_path = true; /* Initialize scratch registers */ radeon_scratch_init(rdev); /* Initialize surface registers */ diff --git a/trunk/drivers/gpu/drm/radeon/rv770.c b/trunk/drivers/gpu/drm/radeon/rv770.c index 595ac638039d..e0b97d161397 100644 --- a/trunk/drivers/gpu/drm/radeon/rv770.c +++ b/trunk/drivers/gpu/drm/radeon/rv770.c @@ -75,7 +75,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12); - WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12); + WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, (rdev->mc.gtt_end - 1) >> 12); WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12); WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) | RANGE_PROTECTION_FAULT_ENABLE_DEFAULT); @@ -126,36 +126,17 @@ void rv770_pcie_gart_fini(struct radeon_device *rdev) } -void rv770_agp_enable(struct radeon_device *rdev) -{ - u32 tmp; - int i; - - /* Setup L2 cache */ - WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | - ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | - EFFECTIVE_L2_QUEUE_SIZE(7)); - WREG32(VM_L2_CNTL2, 0); - WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2)); - /* Setup TLB control */ - tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING | - SYSTEM_ACCESS_MODE_NOT_IN_SYS | - SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU | - EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5); - WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); - WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); - WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); - WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); - WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); - WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); 
- for (i = 0; i < 7; i++) - WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); -} - -static void rv770_mc_program(struct radeon_device *rdev) +/* + * MC + */ +static void rv770_mc_resume(struct radeon_device *rdev) { - struct rv515_mc_save save; + u32 d1vga_control, d2vga_control; + u32 vga_render_control, vga_hdp_control; + u32 d1crtc_control, d2crtc_control; + u32 new_d1grph_primary, new_d1grph_secondary; + u32 new_d2grph_primary, new_d2grph_secondary; + u64 old_vram_start; u32 tmp; int i, j; @@ -169,42 +150,53 @@ static void rv770_mc_program(struct radeon_device *rdev) } WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0); - rv515_mc_stop(rdev, &save); + d1vga_control = RREG32(D1VGA_CONTROL); + d2vga_control = RREG32(D2VGA_CONTROL); + vga_render_control = RREG32(VGA_RENDER_CONTROL); + vga_hdp_control = RREG32(VGA_HDP_CONTROL); + d1crtc_control = RREG32(D1CRTC_CONTROL); + d2crtc_control = RREG32(D2CRTC_CONTROL); + old_vram_start = (u64)(RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; + new_d1grph_primary = RREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS); + new_d1grph_secondary = RREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS); + new_d1grph_primary += rdev->mc.vram_start - old_vram_start; + new_d1grph_secondary += rdev->mc.vram_start - old_vram_start; + new_d2grph_primary = RREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS); + new_d2grph_secondary = RREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS); + new_d2grph_primary += rdev->mc.vram_start - old_vram_start; + new_d2grph_secondary += rdev->mc.vram_start - old_vram_start; + + /* Stop all video */ + WREG32(D1VGA_CONTROL, 0); + WREG32(D2VGA_CONTROL, 0); + WREG32(VGA_RENDER_CONTROL, 0); + WREG32(D1CRTC_UPDATE_LOCK, 1); + WREG32(D2CRTC_UPDATE_LOCK, 1); + WREG32(D1CRTC_CONTROL, 0); + WREG32(D2CRTC_CONTROL, 0); + WREG32(D1CRTC_UPDATE_LOCK, 0); + WREG32(D2CRTC_UPDATE_LOCK, 0); + + mdelay(1); if (r600_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + printk(KERN_WARNING "[drm] MC not idle !\n"); } + /* Lockout access through VGA aperture*/ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + /* Update configuration */ - if (rdev->flags & RADEON_IS_AGP) { - if (rdev->mc.vram_start < rdev->mc.gtt_start) { - /* VRAM before AGP */ - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, - rdev->mc.vram_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - rdev->mc.gtt_end >> 12); - } else { - /* VRAM after AGP */ - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, - rdev->mc.gtt_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - rdev->mc.vram_end >> 12); - } - } else { - WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, - rdev->mc.vram_start >> 12); - WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, - rdev->mc.vram_end >> 12); - } + WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); + WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, (rdev->mc.vram_end - 1) >> 12); WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); - tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16; + tmp = (((rdev->mc.vram_end - 1) >> 24) & 0xFFFF) << 16; tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF); WREG32(MC_VM_FB_LOCATION, tmp); WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8)); WREG32(HDP_NONSURFACE_INFO, (2 << 7)); WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF); if (rdev->flags & RADEON_IS_AGP) { - WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16); + WREG32(MC_VM_AGP_TOP, (rdev->mc.gtt_end - 1) >> 16); WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16); WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22); } else { @@ -212,10 +204,31 @@ static void rv770_mc_program(struct radeon_device *rdev) WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF); 
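/*
 * Worked example for the MC_VM_FB_LOCATION packing above, assuming a
 * hypothetical 512MB of VRAM mapped at GPU address 0: vram_start >> 24
 * gives 0x0000 for the low 16 bits and (vram_end - 1) >> 24 gives 0x001F
 * for the high 16 bits, so the register is written with 0x001F0000; both
 * bounds are expressed in 16MB units.
 */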
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF); } + WREG32(D1GRPH_PRIMARY_SURFACE_ADDRESS, new_d1grph_primary); + WREG32(D1GRPH_SECONDARY_SURFACE_ADDRESS, new_d1grph_secondary); + WREG32(D2GRPH_PRIMARY_SURFACE_ADDRESS, new_d2grph_primary); + WREG32(D2GRPH_SECONDARY_SURFACE_ADDRESS, new_d2grph_secondary); + WREG32(VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start); + + /* Unlock host access */ + WREG32(VGA_HDP_CONTROL, vga_hdp_control); + + mdelay(1); if (r600_mc_wait_for_idle(rdev)) { - dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); + printk(KERN_WARNING "[drm] MC not idle !\n"); } - rv515_mc_resume(rdev, &save); + + /* Restore video state */ + WREG32(D1CRTC_UPDATE_LOCK, 1); + WREG32(D2CRTC_UPDATE_LOCK, 1); + WREG32(D1CRTC_CONTROL, d1crtc_control); + WREG32(D2CRTC_CONTROL, d2crtc_control); + WREG32(D1CRTC_UPDATE_LOCK, 0); + WREG32(D2CRTC_UPDATE_LOCK, 0); + WREG32(D1VGA_CONTROL, d1vga_control); + WREG32(D2VGA_CONTROL, d2vga_control); + WREG32(VGA_RENDER_CONTROL, vga_render_control); + /* we need to own VRAM, so turn off the VGA renderer here * to stop it overwriting our objects */ rv515_vga_render_disable(rdev); @@ -827,9 +840,9 @@ int rv770_mc_init(struct radeon_device *rdev) rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; } rdev->mc.vram_start = rdev->mc.vram_location; - rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size; rdev->mc.gtt_start = rdev->mc.gtt_location; - rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; + rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size; /* FIXME: we should enforce default clock in case GPU is not in * default setup */ @@ -848,14 +861,11 @@ static int rv770_startup(struct radeon_device *rdev) { int r; - rv770_mc_program(rdev); - if (rdev->flags & RADEON_IS_AGP) { - rv770_agp_enable(rdev); - } else { - r = rv770_pcie_gart_enable(rdev); - if (r) - return r; - } + radeon_gpu_reset(rdev); + rv770_mc_resume(rdev); + r = rv770_pcie_gart_enable(rdev); + if (r) + return r; rv770_gpu_init(rdev); r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, @@ -874,8 +884,9 @@ static int rv770_startup(struct radeon_device *rdev) r = r600_cp_resume(rdev); if (r) return r; - /* write back buffer are not vital so don't worry about failure */ - r600_wb_enable(rdev); + r = r600_wb_init(rdev); + if (r) + return r; return 0; } @@ -883,12 +894,15 @@ int rv770_resume(struct radeon_device *rdev) { int r; - /* Do not reset GPU before posting, on rv770 hw unlike on r500 hw, - * posting will perform necessary task to bring back GPU into good - * shape. - */ + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? 
*/ + } /* post card */ - atom_asic_init(rdev->mode_info.atom_context); + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } /* Initialize clocks */ r = radeon_clocks_init(rdev); if (r) { @@ -901,7 +915,7 @@ int rv770_resume(struct radeon_device *rdev) return r; } - r = r600_ib_test(rdev); + r = radeon_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); return r; @@ -915,8 +929,8 @@ int rv770_suspend(struct radeon_device *rdev) /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); rdev->cp.ready = false; - r600_wb_disable(rdev); rv770_pcie_gart_disable(rdev); + /* unpin shaders bo */ radeon_object_unpin(rdev->r600_blit.shader_obj); return 0; @@ -932,6 +946,7 @@ int rv770_init(struct radeon_device *rdev) { int r; + rdev->new_init_path = true; r = radeon_dummy_page_init(rdev); if (r) return r; @@ -945,10 +960,8 @@ int rv770_init(struct radeon_device *rdev) return -EINVAL; } /* Must be an ATOMBIOS */ - if (!rdev->is_atom_bios) { - dev_err(rdev->dev, "Expecting atombios for R600 GPU\n"); + if (!rdev->is_atom_bios) return -EINVAL; - } r = radeon_atombios_init(rdev); if (r) return r; @@ -970,8 +983,15 @@ int rv770_init(struct radeon_device *rdev) if (r) return r; r = rv770_mc_init(rdev); - if (r) + if (r) { + if (rdev->flags & RADEON_IS_AGP) { + /* Retry with disabling AGP */ + rv770_fini(rdev); + rdev->flags &= ~RADEON_IS_AGP; + return rv770_init(rdev); + } return r; + } /* Memory manager */ r = radeon_object_init(rdev); if (r) @@ -1000,10 +1020,12 @@ int rv770_init(struct radeon_device *rdev) r = rv770_startup(rdev); if (r) { - rv770_suspend(rdev); - r600_wb_fini(rdev); - radeon_ring_fini(rdev); - rv770_pcie_gart_fini(rdev); + if (rdev->flags & RADEON_IS_AGP) { + /* Retry with disabling AGP */ + rv770_fini(rdev); + rdev->flags &= ~RADEON_IS_AGP; + return rv770_init(rdev); + } rdev->accel_working = false; } if (rdev->accel_working) { @@ -1012,7 +1034,7 @@ int rv770_init(struct radeon_device *rdev) DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); rdev->accel_working = false; } - r = r600_ib_test(rdev); + r = radeon_ib_test(rdev); if (r) { DRM_ERROR("radeon: failled testing IB (%d).\n", r); rdev->accel_working = false; @@ -1027,15 +1049,20 @@ void rv770_fini(struct radeon_device *rdev) r600_blit_fini(rdev); radeon_ring_fini(rdev); - r600_wb_fini(rdev); rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); +#if __OS_HAS_AGP if (rdev->flags & RADEON_IS_AGP) radeon_agp_fini(rdev); +#endif radeon_object_fini(rdev); - radeon_atombios_fini(rdev); + if (rdev->is_atom_bios) { + radeon_atombios_fini(rdev); + } else { + radeon_combios_fini(rdev); + } kfree(rdev->bios); rdev->bios = NULL; radeon_dummy_page_fini(rdev); diff --git a/trunk/drivers/gpu/drm/ttm/ttm_global.c b/trunk/drivers/gpu/drm/ttm/ttm_global.c index b17007178a36..541744d00d3e 100644 --- a/trunk/drivers/gpu/drm/ttm/ttm_global.c +++ b/trunk/drivers/gpu/drm/ttm/ttm_global.c @@ -82,8 +82,8 @@ int ttm_global_item_ref(struct ttm_global_reference *ref) if (unlikely(ret != 0)) goto out_err; + ++item->refcount; } - ++item->refcount; ref->object = item->object; object = item->object; mutex_unlock(&item->mutex); diff --git a/trunk/drivers/hid/hidraw.c b/trunk/drivers/hid/hidraw.c index ba05275e5104..0c6639ea03dd 100644 --- a/trunk/drivers/hid/hidraw.c +++ b/trunk/drivers/hid/hidraw.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include diff --git 
a/trunk/drivers/hwmon/lis3lv02d_spi.c b/trunk/drivers/hwmon/lis3lv02d_spi.c index 82b16808a274..ecd739534f6a 100644 --- a/trunk/drivers/hwmon/lis3lv02d_spi.c +++ b/trunk/drivers/hwmon/lis3lv02d_spi.c @@ -83,8 +83,7 @@ static int __devexit lis302dl_spi_remove(struct spi_device *spi) struct lis3lv02d *lis3 = spi_get_drvdata(spi); lis3lv02d_joystick_disable(); lis3lv02d_poweroff(lis3); - - return lis3lv02d_remove_fs(&lis3_dev); + return 0; } #ifdef CONFIG_PM diff --git a/trunk/drivers/hwmon/ltc4215.c b/trunk/drivers/hwmon/ltc4215.c index 00d975eb5b83..6c9a04136e0a 100644 --- a/trunk/drivers/hwmon/ltc4215.c +++ b/trunk/drivers/hwmon/ltc4215.c @@ -20,6 +20,11 @@ #include #include +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; + +/* Insmod parameters */ +I2C_CLIENT_INSMOD_1(ltc4215); + /* Here are names of the chip's registers (a.k.a. commands) */ enum ltc4215_cmd { LTC4215_CONTROL = 0x00, /* rw */ @@ -241,13 +246,9 @@ static const struct attribute_group ltc4215_group = { static int ltc4215_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = client->adapter; struct ltc4215_data *data; int ret; - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; - data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; @@ -293,20 +294,56 @@ static int ltc4215_remove(struct i2c_client *client) return 0; } +static int ltc4215_detect(struct i2c_client *client, + int kind, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + if (kind < 0) { /* probed detection - check the chip type */ + s32 v; /* 8 bits from the chip, or -ERRNO */ + + /* + * Register 0x01 bit b7 is reserved, expect 0 + * Register 0x03 bit b6 and b7 are reserved, expect 0 + */ + v = i2c_smbus_read_byte_data(client, LTC4215_ALERT); + if (v < 0 || (v & (1 << 7)) != 0) + return -ENODEV; + + v = i2c_smbus_read_byte_data(client, LTC4215_FAULT); + if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0) + return -ENODEV; + } + + strlcpy(info->type, "ltc4215", I2C_NAME_SIZE); + dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n", + kind < 0 ? "probed" : "forced", + client->addr); + + return 0; +} + static const struct i2c_device_id ltc4215_id[] = { - { "ltc4215", 0 }, + { "ltc4215", ltc4215 }, { } }; MODULE_DEVICE_TABLE(i2c, ltc4215_id); /* This is the driver that will be inserted */ static struct i2c_driver ltc4215_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "ltc4215", }, .probe = ltc4215_probe, .remove = ltc4215_remove, .id_table = ltc4215_id, + .detect = ltc4215_detect, + .address_data = &addr_data, }; static int __init ltc4215_init(void) diff --git a/trunk/drivers/hwmon/ltc4245.c b/trunk/drivers/hwmon/ltc4245.c index 65c232a9d0c5..e38964333612 100644 --- a/trunk/drivers/hwmon/ltc4245.c +++ b/trunk/drivers/hwmon/ltc4245.c @@ -22,6 +22,15 @@ #include #include +/* Valid addresses are 0x20 - 0x3f + * + * For now, we do not probe, since some of these addresses + * are known to be unfriendly to probing */ +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; + +/* Insmod parameters */ +I2C_CLIENT_INSMOD_1(ltc4245); + /* Here are names of the chip's registers (a.k.a. 
commands) */ enum ltc4245_cmd { LTC4245_STATUS = 0x00, /* readonly */ @@ -360,13 +369,9 @@ static const struct attribute_group ltc4245_group = { static int ltc4245_probe(struct i2c_client *client, const struct i2c_device_id *id) { - struct i2c_adapter *adapter = client->adapter; struct ltc4245_data *data; int ret; - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) - return -ENODEV; - data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) { ret = -ENOMEM; @@ -413,20 +418,136 @@ static int ltc4245_remove(struct i2c_client *client) return 0; } +/* Check that some bits in a control register appear at all possible + * locations without changing value + * + * @client: the i2c client to use + * @reg: the register to read + * @bits: the bits to check (0xff checks all bits, + * 0x03 checks only the last two bits) + * + * return -ERRNO if the register read failed + * return -ENODEV if the register value doesn't stay constant at all + * possible addresses + * + * return 0 for success + */ +static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits) +{ + int i; + s32 v, voff1, voff2; + + /* Read register and check for error */ + v = i2c_smbus_read_byte_data(client, reg); + if (v < 0) + return v; + + v &= bits; + + for (i = 0x00; i < 0xff; i += 0x20) { + + voff1 = i2c_smbus_read_byte_data(client, reg + i); + if (voff1 < 0) + return voff1; + + voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08); + if (voff2 < 0) + return voff2; + + voff1 &= bits; + voff2 &= bits; + + if (v != voff1 || v != voff2) + return -ENODEV; + } + + return 0; +} + +static int ltc4245_detect(struct i2c_client *client, + int kind, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -ENODEV; + + if (kind < 0) { /* probed detection - check the chip type */ + s32 v; /* 8 bits from the chip, or -ERRNO */ + + /* Chip registers 0x00-0x07 are control registers + * Chip registers 0x10-0x1f are data registers + * + * Address bits b7-b5 are ignored. This makes the chip "repeat" + * in steps of 0x20. Any control registers should appear with + * the same values across all duplicated addresses. + * + * Register 0x02 bit b2 is reserved, expect 0 + * Register 0x07 bits b7 to b4 are reserved, expect 0 + * + * Registers 0x01, 0x02 are control registers and should not + * change on their own. + * + * Register 0x06 bits b6 and b7 are control bits, and should + * not change on their own. + * + * Register 0x07 bits b3 to b0 are control bits, and should + * not change on their own. 
+ */ + + /* read register 0x02 reserved bit, expect 0 */ + v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL); + if (v < 0 || (v & 0x04) != 0) + return -ENODEV; + + /* read register 0x07 reserved bits, expect 0 */ + v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR); + if (v < 0 || (v & 0xf0) != 0) + return -ENODEV; + + /* check that the alert register appears at all locations */ + if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff)) + return -ENODEV; + + /* check that the control register appears at all locations */ + if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff)) + return -ENODEV; + + /* check that register 0x06 bits b6 and b7 stay constant */ + if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0)) + return -ENODEV; + + /* check that register 0x07 bits b3-b0 stay constant */ + if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f)) + return -ENODEV; + } + + strlcpy(info->type, "ltc4245", I2C_NAME_SIZE); + dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n", + kind < 0 ? "probed" : "forced", + client->addr); + + return 0; +} + static const struct i2c_device_id ltc4245_id[] = { - { "ltc4245", 0 }, + { "ltc4245", ltc4245 }, { } }; MODULE_DEVICE_TABLE(i2c, ltc4245_id); /* This is the driver that will be inserted */ static struct i2c_driver ltc4245_driver = { + .class = I2C_CLASS_HWMON, .driver = { .name = "ltc4245", }, .probe = ltc4245_probe, .remove = ltc4245_remove, .id_table = ltc4245_id, + .detect = ltc4245_detect, + .address_data = &addr_data, }; static int __init ltc4245_init(void) diff --git a/trunk/drivers/i2c/busses/i2c-amd756.c b/trunk/drivers/i2c/busses/i2c-amd756.c index 8f0b90ef8c76..f7d6fe9c49ba 100644 --- a/trunk/drivers/i2c/busses/i2c-amd756.c +++ b/trunk/drivers/i2c/busses/i2c-amd756.c @@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev, error = acpi_check_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name); if (error) - return -ENODEV; + return error; if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", diff --git a/trunk/drivers/i2c/busses/i2c-amd8111.c b/trunk/drivers/i2c/busses/i2c-amd8111.c index 5b4ad86ca166..a7c59908c457 100644 --- a/trunk/drivers/i2c/busses/i2c-amd8111.c +++ b/trunk/drivers/i2c/busses/i2c-amd8111.c @@ -376,10 +376,8 @@ static int __devinit amd8111_probe(struct pci_dev *dev, smbus->size = pci_resource_len(dev, 0); error = acpi_check_resource_conflict(&dev->resource[0]); - if (error) { - error = -ENODEV; + if (error) goto out_kfree; - } if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { error = -EBUSY; diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index 55edcfe5b851..9d2c5adf5d4f 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -732,10 +732,8 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id } err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); - if (err) { - err = -ENODEV; + if (err) goto exit; - } err = pci_request_region(dev, SMBBAR, i801_driver.name); if (err) { diff --git a/trunk/drivers/i2c/busses/i2c-isch.c b/trunk/drivers/i2c/busses/i2c-isch.c index dba6eb053e2f..9f6b8e0f8632 100644 --- a/trunk/drivers/i2c/busses/i2c-isch.c +++ b/trunk/drivers/i2c/busses/i2c-isch.c @@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev, return -ENODEV; } if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) - return -ENODEV; + return -EBUSY; if 
(!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", sch_smba); diff --git a/trunk/drivers/i2c/busses/i2c-piix4.c b/trunk/drivers/i2c/busses/i2c-piix4.c index d26a972aacaa..a782c7a08f9e 100644 --- a/trunk/drivers/i2c/busses/i2c-piix4.c +++ b/trunk/drivers/i2c/busses/i2c-piix4.c @@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, } if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) - return -ENODEV; + return -EBUSY; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", @@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev, piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) - return -ENODEV; + return -EBUSY; if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", diff --git a/trunk/drivers/i2c/busses/i2c-sis96x.c b/trunk/drivers/i2c/busses/i2c-sis96x.c index 1649963b00dc..8295885b2fdb 100644 --- a/trunk/drivers/i2c/busses/i2c-sis96x.c +++ b/trunk/drivers/i2c/busses/i2c-sis96x.c @@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev, retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); if (retval) - return -ENODEV; + return retval; /* Everything is happy, let's grab the memory and set things up. */ if (!request_region(sis96x_smbus_base, SMB_IOSIZE, diff --git a/trunk/drivers/i2c/busses/i2c-viapro.c b/trunk/drivers/i2c/busses/i2c-viapro.c index e4b1543015af..54d810a4d00f 100644 --- a/trunk/drivers/i2c/busses/i2c-viapro.c +++ b/trunk/drivers/i2c/busses/i2c-viapro.c @@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev, found: error = acpi_check_region(vt596_smba, 8, vt596_driver.name); if (error) - return -ENODEV; + return error; if (!request_region(vt596_smba, 8, vt596_driver.name)) { dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", diff --git a/trunk/drivers/ide/ide-proc.c b/trunk/drivers/ide/ide-proc.c index 017c09540c2f..28d09a5d8450 100644 --- a/trunk/drivers/ide/ide-proc.c +++ b/trunk/drivers/ide/ide-proc.c @@ -273,8 +273,14 @@ static const struct ide_proc_devset ide_generic_settings[] = { static void proc_ide_settings_warn(void) { - printk_once(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " + static int warned; + + if (warned) + return; + + printk(KERN_WARNING "Warning: /proc/ide/hd?/settings interface is " "obsolete, and will be removed soon!\n"); + warned = 1; } static int ide_settings_proc_show(struct seq_file *m, void *v) diff --git a/trunk/drivers/ide/sis5513.c b/trunk/drivers/ide/sis5513.c index 3b88eba04c9c..afca22beaadf 100644 --- a/trunk/drivers/ide/sis5513.c +++ b/trunk/drivers/ide/sis5513.c @@ -2,7 +2,7 @@ * Copyright (C) 1999-2000 Andre Hedrick * Copyright (C) 2002 Lionel Bouton , Maintainer * Copyright (C) 2003 Vojtech Pavlik - * Copyright (C) 2007-2009 Bartlomiej Zolnierkiewicz + * Copyright (C) 2007 Bartlomiej Zolnierkiewicz * * May be copied or modified under the terms of the GNU General Public License * @@ -281,13 +281,11 @@ static void config_drive_art_rwp(ide_drive_t *drive) pci_read_config_byte(dev, 0x4b, ®4bh); - rw_prefetch = reg4bh & ~(0x11 << drive->dn); - if (drive->media == ide_disk) - rw_prefetch |= 0x11 << drive->dn; + rw_prefetch = 0x11 << drive->dn; - if (reg4bh != rw_prefetch) - pci_write_config_byte(dev, 0x4b, rw_prefetch); + if 
((reg4bh & (0x11 << drive->dn)) != rw_prefetch) + pci_write_config_byte(dev, 0x4b, reg4bh|rw_prefetch); } static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio) diff --git a/trunk/drivers/infiniband/core/ucm.c b/trunk/drivers/infiniband/core/ucm.c index f504c9b00c1b..51bd9669cb1f 100644 --- a/trunk/drivers/infiniband/core/ucm.c +++ b/trunk/drivers/infiniband/core/ucm.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/infiniband/core/user_mad.c b/trunk/drivers/infiniband/core/user_mad.c index 7de02969ed7d..8c46f2257098 100644 --- a/trunk/drivers/infiniband/core/user_mad.c +++ b/trunk/drivers/infiniband/core/user_mad.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/infiniband/core/uverbs_main.c b/trunk/drivers/infiniband/core/uverbs_main.c index aec0fbdfe7f0..d3fff9e008a3 100644 --- a/trunk/drivers/infiniband/core/uverbs_main.c +++ b/trunk/drivers/infiniband/core/uverbs_main.c @@ -40,7 +40,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/input/evdev.c b/trunk/drivers/input/evdev.c index dee6706038aa..1148140d08a1 100644 --- a/trunk/drivers/input/evdev.c +++ b/trunk/drivers/input/evdev.c @@ -13,7 +13,6 @@ #define EVDEV_BUFFER_SIZE 64 #include -#include #include #include #include diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c index c6f88ebb40c7..16ec33f27c5d 100644 --- a/trunk/drivers/input/input.c +++ b/trunk/drivers/input/input.c @@ -17,7 +17,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/input/joydev.c b/trunk/drivers/input/joydev.c index b1bd6dd32286..901b2525993e 100644 --- a/trunk/drivers/input/joydev.c +++ b/trunk/drivers/input/joydev.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/input/misc/uinput.c b/trunk/drivers/input/misc/uinput.c index d3f57245420a..c5a49aba418f 100644 --- a/trunk/drivers/input/misc/uinput.c +++ b/trunk/drivers/input/misc/uinput.c @@ -30,7 +30,6 @@ * - first public version */ #include -#include #include #include #include diff --git a/trunk/drivers/input/mousedev.c b/trunk/drivers/input/mousedev.c index a13d80f7da17..966b8868f792 100644 --- a/trunk/drivers/input/mousedev.c +++ b/trunk/drivers/input/mousedev.c @@ -13,7 +13,6 @@ #define MOUSEDEV_MINORS 32 #define MOUSEDEV_MIX 31 -#include #include #include #include diff --git a/trunk/drivers/isdn/capi/capi.c b/trunk/drivers/isdn/capi/capi.c index 65bf91e16a42..2d8352419c0d 100644 --- a/trunk/drivers/isdn/capi/capi.c +++ b/trunk/drivers/isdn/capi/capi.c @@ -603,7 +603,7 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb) if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) { u16 info = CAPIMSG_U16(skb->data, 12); // Info field - if ((info & 0xff00) == 0) { + if (info == 0) { mutex_lock(&cdev->ncci_list_mtx); capincci_alloc(cdev, CAPIMSG_NCCI(skb->data)); mutex_unlock(&cdev->ncci_list_mtx); diff --git a/trunk/drivers/isdn/capi/capidrv.c b/trunk/drivers/isdn/capi/capidrv.c index 3e6d17f42a98..650120261abf 100644 --- a/trunk/drivers/isdn/capi/capidrv.c +++ b/trunk/drivers/isdn/capi/capidrv.c @@ -40,7 +40,7 @@ static int debugmode = 0; MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux"); MODULE_AUTHOR("Carsten Paeth"); MODULE_LICENSE("GPL"); -module_param(debugmode, uint, S_IRUGO|S_IWUSR); +module_param(debugmode, uint, 0); /* -------- type definitions 
----------------------------------------- */ @@ -671,8 +671,8 @@ static void n0(capidrv_contr * card, capidrv_ncci * ncci) NULL, /* Useruserdata */ /* $$$$ */ NULL /* Facilitydataarray */ ); - plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); send_message(card, &cmsg); + plci_change_state(card, ncci->plcip, EV_PLCI_DISCONNECT_REQ); cmd.command = ISDN_STAT_BHUP; cmd.driver = card->myid; @@ -924,8 +924,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) */ capi_cmsg_answer(cmsg); cmsg->Reject = 1; /* ignore */ - plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); send_message(card, cmsg); + plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); printk(KERN_INFO "capidrv-%d: incoming call %s,%d,%d,%s ignored\n", card->contrnr, cmd.parm.setup.phone, @@ -974,8 +974,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) case 2: /* Call will be rejected. */ capi_cmsg_answer(cmsg); cmsg->Reject = 2; /* reject call, normal call clearing */ - plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); send_message(card, cmsg); + plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); break; default: @@ -983,8 +983,8 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg) capi_cmsg_answer(cmsg); cmsg->Reject = 8; /* reject call, destination out of order */ - plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); send_message(card, cmsg); + plci_change_state(card, plcip, EV_PLCI_CONNECT_REJECT); break; } return; @@ -1020,8 +1020,8 @@ static void handle_plci(_cmsg * cmsg) card->bchans[plcip->chan].disconnecting = 1; plci_change_state(card, plcip, EV_PLCI_DISCONNECT_IND); capi_cmsg_answer(cmsg); - plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); send_message(card, cmsg); + plci_change_state(card, plcip, EV_PLCI_DISCONNECT_RESP); break; case CAPI_DISCONNECT_CONF: /* plci */ @@ -1078,8 +1078,8 @@ static void handle_plci(_cmsg * cmsg) if (card->bchans[plcip->chan].incoming) { capi_cmsg_answer(cmsg); - plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); send_message(card, cmsg); + plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); } else { capidrv_ncci *nccip; capi_cmsg_answer(cmsg); @@ -1098,14 +1098,13 @@ static void handle_plci(_cmsg * cmsg) NULL /* NCPI */ ); nccip->msgid = cmsg->Messagenumber; - plci_change_state(card, plcip, - EV_PLCI_CONNECT_ACTIVE_IND); - ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); send_message(card, cmsg); cmd.command = ISDN_STAT_DCONN; cmd.driver = card->myid; cmd.arg = plcip->chan; card->interface.statcallb(&cmd); + plci_change_state(card, plcip, EV_PLCI_CONNECT_ACTIVE_IND); + ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_REQ); } break; @@ -1194,8 +1193,8 @@ static void handle_ncci(_cmsg * cmsg) goto notfound; capi_cmsg_answer(cmsg); - ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); send_message(card, cmsg); + ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_ACTIVE_IND); cmd.command = ISDN_STAT_BCONN; cmd.driver = card->myid; @@ -1223,8 +1222,8 @@ static void handle_ncci(_cmsg * cmsg) 0, /* Reject */ NULL /* NCPI */ ); - ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); send_message(card, cmsg); + ncci_change_state(card, nccip, EV_NCCI_CONNECT_B3_RESP); break; } printk(KERN_ERR "capidrv-%d: no mem for ncci, sorry\n", card->contrnr); @@ -1300,8 +1299,8 @@ static void handle_ncci(_cmsg * cmsg) card->bchans[nccip->chan].disconnecting = 1; ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_IND); capi_cmsg_answer(cmsg); - ncci_change_state(card, 
nccip, EV_NCCI_DISCONNECT_B3_RESP); send_message(card, cmsg); + ncci_change_state(card, nccip, EV_NCCI_DISCONNECT_B3_RESP); break; case CAPI_DISCONNECT_B3_CONF: /* ncci */ @@ -2015,8 +2014,8 @@ static void send_listen(capidrv_contr *card) card->cipmask, card->cipmask2, NULL, NULL); - listen_change_state(card, EV_LISTEN_REQ); send_message(card, &cmdcmsg); + listen_change_state(card, EV_LISTEN_REQ); } static void listentimerfunc(unsigned long x) diff --git a/trunk/drivers/isdn/divert/divert_procfs.c b/trunk/drivers/isdn/divert/divert_procfs.c index 3697c409bec6..8b256a617c8a 100644 --- a/trunk/drivers/isdn/divert/divert_procfs.c +++ b/trunk/drivers/isdn/divert/divert_procfs.c @@ -16,7 +16,6 @@ #else #include #endif -#include #include #include #include "isdn_divert.h" diff --git a/trunk/drivers/isdn/gigaset/asyncdata.c b/trunk/drivers/isdn/gigaset/asyncdata.c index 44a58e6f8f65..234cc5d53312 100644 --- a/trunk/drivers/isdn/gigaset/asyncdata.c +++ b/trunk/drivers/isdn/gigaset/asyncdata.c @@ -334,14 +334,7 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, return startbytes - numbytes; } -/** - * gigaset_m10x_input() - process a block of data received from the device - * @inbuf: received data and device descriptor structure. - * - * Called by hardware module {ser,usb}_gigaset with a block of received - * bytes. Separates the bytes received over the serial data channel into - * user data and command replies (locked/unlocked) according to the - * current state of the interface. +/* process a block of data received from the device */ void gigaset_m10x_input(struct inbuf_t *inbuf) { @@ -550,17 +543,16 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) return iraw_skb; } -/** - * gigaset_m10x_send_skb() - queue an skb for sending - * @bcs: B channel descriptor structure. - * @skb: data to send. - * - * Called by i4l.c to encode and queue an skb for sending, and start - * transmission if necessary. - * +/* gigaset_send_skb + * called by common.c to queue an skb for sending + * and start transmission if necessary + * parameters: + * B Channel control structure + * skb * Return value: - * number of bytes accepted for sending (skb->len) if ok, - * error code < 0 (eg. -ENOMEM) on error + * number of bytes accepted for sending + * (skb->len if ok, 0 if out of buffer space) + * or error code (< 0, eg. 
-EINVAL) */ int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) { diff --git a/trunk/drivers/isdn/gigaset/bas-gigaset.c b/trunk/drivers/isdn/gigaset/bas-gigaset.c index 5ed1d99eb9f3..781c4041f7b0 100644 --- a/trunk/drivers/isdn/gigaset/bas-gigaset.c +++ b/trunk/drivers/isdn/gigaset/bas-gigaset.c @@ -134,7 +134,6 @@ struct bas_cardstate { #define BS_ATRDPEND 0x040 /* urb_cmd_in in use */ #define BS_ATWRPEND 0x080 /* urb_cmd_out in use */ #define BS_SUSPEND 0x100 /* USB port suspended */ -#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ static struct gigaset_driver *driver = NULL; @@ -320,21 +319,6 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) return -EINVAL; } -/* set/clear bits in base connection state, return previous state - */ -static inline int update_basstate(struct bas_cardstate *ucs, - int set, int clear) -{ - unsigned long flags; - int state; - - spin_lock_irqsave(&ucs->lock, flags); - state = ucs->basstate; - ucs->basstate = (state & ~clear) | set; - spin_unlock_irqrestore(&ucs->lock, flags); - return state; -} - /* error_hangup * hang up any existing connection because of an unrecoverable error * This function may be called from any context and takes care of scheduling @@ -366,9 +350,12 @@ static inline void error_hangup(struct bc_state *bcs) */ static inline void error_reset(struct cardstate *cs) { - /* reset interrupt pipe to recover (ignore errors) */ - update_basstate(cs->hw.bas, BS_RESETTING, 0); - req_submit(cs->bcs, HD_RESET_INTERRUPT_PIPE, 0, BAS_TIMEOUT); + /* close AT command channel to recover (ignore errors) */ + req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); + + //FIXME try to recover without bothering the user + dev_err(cs->dev, + "unrecoverable error - please disconnect Gigaset base to reset\n"); } /* check_pending @@ -411,13 +398,8 @@ static void check_pending(struct bas_cardstate *ucs) case HD_DEVICE_INIT_ACK: /* no reply expected */ ucs->pending = 0; break; - case HD_RESET_INTERRUPT_PIPE: - if (!(ucs->basstate & BS_RESETTING)) - ucs->pending = 0; - break; - /* - * HD_READ_ATMESSAGE and HD_WRITE_ATMESSAGE are handled separately - * and should never end up here + /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE + * are handled separately and should never end up here */ default: dev_warn(&ucs->interface->dev, @@ -467,6 +449,21 @@ static void cmd_in_timeout(unsigned long data) error_reset(cs); } +/* set/clear bits in base connection state, return previous state + */ +inline static int update_basstate(struct bas_cardstate *ucs, + int set, int clear) +{ + unsigned long flags; + int state; + + spin_lock_irqsave(&ucs->lock, flags); + state = ucs->basstate; + ucs->basstate = (state & ~clear) | set; + spin_unlock_irqrestore(&ucs->lock, flags); + return state; +} + /* read_ctrl_callback * USB completion handler for control pipe input * called by the USB subsystem in interrupt context @@ -765,8 +762,7 @@ static void read_int_callback(struct urb *urb) break; case HD_RESET_INTERRUPT_PIPE_ACK: - update_basstate(ucs, 0, BS_RESETTING); - dev_notice(cs->dev, "interrupt pipe reset\n"); + gig_dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); break; case HD_SUSPEND_END: @@ -1335,24 +1331,28 @@ static void read_iso_tasklet(unsigned long data) rcvbuf = urb->transfer_buffer; totleft = urb->actual_length; for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { - numbytes = urb->iso_frame_desc[frame].actual_length; - if (unlikely(urb->iso_frame_desc[frame].status)) + if 
(unlikely(urb->iso_frame_desc[frame].status)) { dev_warn(cs->dev, - "isochronous read: frame %d[%d]: %s\n", - frame, numbytes, + "isochronous read: frame %d: %s\n", + frame, get_usb_statmsg( urb->iso_frame_desc[frame].status)); - if (unlikely(numbytes > BAS_MAXFRAME)) + break; + } + numbytes = urb->iso_frame_desc[frame].actual_length; + if (unlikely(numbytes > BAS_MAXFRAME)) { dev_warn(cs->dev, "isochronous read: frame %d: " "numbytes (%d) > BAS_MAXFRAME\n", frame, numbytes); + break; + } if (unlikely(numbytes > totleft)) { dev_warn(cs->dev, "isochronous read: frame %d: " "numbytes (%d) > totleft (%d)\n", frame, numbytes, totleft); - numbytes = totleft; + break; } offset = urb->iso_frame_desc[frame].offset; if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { @@ -1361,7 +1361,7 @@ static void read_iso_tasklet(unsigned long data) "offset (%d) + numbytes (%d) " "> BAS_INBUFSIZE\n", frame, offset, numbytes); - numbytes = BAS_INBUFSIZE - offset; + break; } gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); totleft -= numbytes; @@ -1433,7 +1433,6 @@ static void req_timeout(unsigned long data) case HD_CLOSE_ATCHANNEL: dev_err(bcs->cs->dev, "timeout closing AT channel\n"); - error_reset(bcs->cs); break; case HD_CLOSE_B2CHANNEL: @@ -1443,13 +1442,6 @@ static void req_timeout(unsigned long data) error_reset(bcs->cs); break; - case HD_RESET_INTERRUPT_PIPE: - /* error recovery escalation */ - dev_err(bcs->cs->dev, - "reset interrupt pipe timeout, attempting USB reset\n"); - usb_queue_reset_device(bcs->cs->hw.bas->interface); - break; - default: dev_warn(bcs->cs->dev, "request 0x%02x timed out, clearing\n", pending); @@ -1942,15 +1934,6 @@ static int gigaset_write_cmd(struct cardstate *cs, goto notqueued; } - /* translate "+++" escape sequence sent as a single separate command - * into "close AT channel" command for error recovery - * The next command will reopen the AT channel automatically. - */ - if (len == 3 && !memcmp(buf, "+++", 3)) { - rc = req_submit(cs->bcs, HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT); - goto notqueued; - } - if (len > IF_WRITEBUF) len = IF_WRITEBUF; if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { diff --git a/trunk/drivers/isdn/gigaset/common.c b/trunk/drivers/isdn/gigaset/common.c index 33dcd8d72b7c..e4141bf8b2f3 100644 --- a/trunk/drivers/isdn/gigaset/common.c +++ b/trunk/drivers/isdn/gigaset/common.c @@ -22,12 +22,6 @@ #define DRIVER_AUTHOR "Hansjoerg Lipp , Tilman Schmidt , Stefan Eilers" #define DRIVER_DESC "Driver for Gigaset 307x" -#ifdef CONFIG_GIGASET_DEBUG -#define DRIVER_DESC_DEBUG " (debug build)" -#else -#define DRIVER_DESC_DEBUG "" -#endif - /* Module parameters */ int gigaset_debuglevel = DEBUG_DEFAULT; EXPORT_SYMBOL_GPL(gigaset_debuglevel); @@ -38,17 +32,6 @@ MODULE_PARM_DESC(debug, "debug level"); #define VALID_MINOR 0x01 #define VALID_ID 0x02 -/** - * gigaset_dbg_buffer() - dump data in ASCII and hex for debugging - * @level: debugging level. - * @msg: message prefix. - * @len: number of bytes to dump. - * @buf: data to dump. - * - * If the current debugging level includes one of the bits set in @level, - * @len bytes starting at @buf are logged to dmesg at KERN_DEBUG prio, - * prefixed by the text @msg. - */ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, size_t len, const unsigned char *buf) { @@ -291,20 +274,6 @@ static void clear_events(struct cardstate *cs) spin_unlock_irqrestore(&cs->ev_lock, flags); } -/** - * gigaset_add_event() - add event to device event queue - * @cs: device descriptor structure. 
- * @at_state: connection state structure. - * @type: event type. - * @ptr: pointer parameter for event. - * @parameter: integer parameter for event. - * @arg: pointer parameter for event. - * - * Allocate an event queue entry from the device's event queue, and set it up - * with the parameters given. - * - * Return value: added event - */ struct event_t *gigaset_add_event(struct cardstate *cs, struct at_state_t *at_state, int type, void *ptr, int parameter, void *arg) @@ -429,15 +398,6 @@ static void make_invalid(struct cardstate *cs, unsigned mask) spin_unlock_irqrestore(&drv->lock, flags); } -/** - * gigaset_freecs() - free all associated ressources of a device - * @cs: device descriptor structure. - * - * Stops all tasklets and timers, unregisters the device from all - * subsystems it was registered to, deallocates the device structure - * @cs and all structures referenced from it. - * Operations on the device should be stopped before calling this. - */ void gigaset_freecs(struct cardstate *cs) { int i; @@ -546,12 +506,7 @@ static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, inbuf->inputstate = inputstate; } -/** - * gigaset_fill_inbuf() - append received data to input buffer - * @inbuf: buffer structure. - * @src: received data. - * @numbytes: number of bytes received. - */ +/* append received bytes to inbuf */ int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, unsigned numbytes) { @@ -651,22 +606,20 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs, return NULL; } -/** - * gigaset_initcs() - initialize device structure - * @drv: hardware driver the device belongs to - * @channels: number of B channels supported by device - * @onechannel: !=0 if B channel data and AT commands share one - * communication channel (M10x), - * ==0 if B channels have separate communication channels (base) - * @ignoreframes: number of frames to ignore after setting up B channel - * @cidmode: !=0: start in CallID mode - * @modulename: name of driver module for LL registration - * +/* gigaset_initcs * Allocate and initialize cardstate structure for Gigaset driver * Calls hardware dependent gigaset_initcshw() function * Calls B channel initialization function gigaset_initbcs() for each B channel - * - * Return value: + * parameters: + * drv hardware driver the device belongs to + * channels number of B channels supported by device + * onechannel !=0: B channel data and AT commands share one + * communication channel + * ==0: B channels have separate communication channels + * ignoreframes number of frames to ignore after setting up B channel + * cidmode !=0: start in CallID mode + * modulename name of driver module (used for I4L registration) + * return value: * pointer to cardstate structure */ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, @@ -884,17 +837,6 @@ static void cleanup_cs(struct cardstate *cs) } -/** - * gigaset_start() - start device operations - * @cs: device descriptor structure. - * - * Prepares the device for use by setting up communication parameters, - * scheduling an EV_START event to initiate device initialization, and - * waiting for completion of the initialization. - * - * Return value: - * 1 - success, 0 - error - */ int gigaset_start(struct cardstate *cs) { unsigned long flags; @@ -937,15 +879,9 @@ int gigaset_start(struct cardstate *cs) } EXPORT_SYMBOL_GPL(gigaset_start); -/** - * gigaset_shutdown() - shut down device operations - * @cs: device descriptor structure. 
- * - * Deactivates the device by scheduling an EV_SHUTDOWN event and - * waiting for completion of the shutdown. - * - * Return value: - * 0 - success, -1 - error (no device associated) +/* gigaset_shutdown + * check if a device is associated to the cardstate structure and stop it + * return value: 0 if ok, -1 if no device was associated */ int gigaset_shutdown(struct cardstate *cs) { @@ -976,13 +912,6 @@ int gigaset_shutdown(struct cardstate *cs) } EXPORT_SYMBOL_GPL(gigaset_shutdown); -/** - * gigaset_stop() - stop device operations - * @cs: device descriptor structure. - * - * Stops operations on the device by scheduling an EV_STOP event and - * waiting for completion of the shutdown. - */ void gigaset_stop(struct cardstate *cs) { mutex_lock(&cs->mutex); @@ -1091,14 +1020,6 @@ struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); } -/** - * gigaset_freedriver() - free all associated ressources of a driver - * @drv: driver descriptor structure. - * - * Unregisters the driver from the system and deallocates the driver - * structure @drv and all structures referenced from it. - * All devices should be shut down before calling this. - */ void gigaset_freedriver(struct gigaset_driver *drv) { unsigned long flags; @@ -1114,16 +1035,14 @@ void gigaset_freedriver(struct gigaset_driver *drv) } EXPORT_SYMBOL_GPL(gigaset_freedriver); -/** - * gigaset_initdriver() - initialize driver structure - * @minor: First minor number - * @minors: Number of minors this driver can handle - * @procname: Name of the driver - * @devname: Name of the device files (prefix without minor number) - * +/* gigaset_initdriver * Allocate and initialize gigaset_driver structure. Initialize interface. - * - * Return value: + * parameters: + * minor First minor number + * minors Number of minors this driver can handle + * procname Name of the driver + * devname Name of the device files (prefix without minor number) + * return value: * Pointer to the gigaset_driver structure on success, NULL on failure. */ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, @@ -1176,13 +1095,6 @@ struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, } EXPORT_SYMBOL_GPL(gigaset_initdriver); -/** - * gigaset_blockdriver() - block driver - * @drv: driver descriptor structure. - * - * Prevents the driver from attaching new devices, in preparation for - * deregistration. - */ void gigaset_blockdriver(struct gigaset_driver *drv) { drv->blocked = 1; @@ -1198,7 +1110,7 @@ static int __init gigaset_init_module(void) if (gigaset_debuglevel == 1) gigaset_debuglevel = DEBUG_DEFAULT; - pr_info(DRIVER_DESC DRIVER_DESC_DEBUG "\n"); + pr_info(DRIVER_DESC "\n"); return 0; } diff --git a/trunk/drivers/isdn/gigaset/ev-layer.c b/trunk/drivers/isdn/gigaset/ev-layer.c index cc768caa38f5..2d91049571a4 100644 --- a/trunk/drivers/isdn/gigaset/ev-layer.c +++ b/trunk/drivers/isdn/gigaset/ev-layer.c @@ -207,6 +207,7 @@ struct reply_t gigaset_tab_nocid[] = /* leave dle mode */ {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, {RSP_OK, 201,201, -1, 202,-1}, + //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, @@ -264,7 +265,6 @@ struct reply_t gigaset_tab_nocid[] = {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME /* misc. 
*/ - {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME @@ -328,9 +328,10 @@ struct reply_t gigaset_tab_cid[] = {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? {RSP_OK, 401,401, -1, 402, 5}, {RSP_ZVLS, 402,402, 0, 403, 5}, - {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, - {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, - {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, + {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ + //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? + {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? + {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, @@ -473,13 +474,8 @@ static int cid_of_response(char *s) //FIXME is ;+ at end of non-CID response really impossible? } -/** - * gigaset_handle_modem_response() - process received modem response - * @cs: device descriptor structure. - * - * Called by asyncdata/isocdata if a block of data received from the - * device must be processed as a modem command response. The data is - * already in the cs structure. +/* This function will be called via task queue from the callback handler. + * We received a modem response and have to handle it.. */ void gigaset_handle_modem_response(struct cardstate *cs) { @@ -711,11 +707,6 @@ static void disconnect(struct at_state_t **at_state_p) if (bcs) { /* B channel assigned: invoke hardware specific handler */ cs->ops->close_bchannel(bcs); - /* notify LL */ - if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { - bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); - gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); - } } else { /* no B channel assigned: just deallocate */ spin_lock_irqsave(&cs->lock, flags); @@ -1438,12 +1429,11 @@ static void do_action(int action, struct cardstate *cs, cs->gotfwver = -1; dev_err(cs->dev, "could not read firmware version.\n"); break; +#ifdef CONFIG_GIGASET_DEBUG case ACT_ERROR: - gig_dbg(DEBUG_ANY, "%s: ERROR response in ConState %d", - __func__, at_state->ConState); - cs->cur_at_seq = SEQ_NONE; + *p_genresp = 1; + *p_resp_code = RSP_ERROR; break; -#ifdef CONFIG_GIGASET_DEBUG case ACT_TEST: { static int count = 3; //2; //1; diff --git a/trunk/drivers/isdn/gigaset/i4l.c b/trunk/drivers/isdn/gigaset/i4l.c index 654489d836cd..9b22f9cf2f33 100644 --- a/trunk/drivers/isdn/gigaset/i4l.c +++ b/trunk/drivers/isdn/gigaset/i4l.c @@ -51,12 +51,6 @@ static int writebuf_from_LL(int driverID, int channel, int ack, return -ENODEV; } bcs = &cs->bcs[channel]; - - /* can only handle linear sk_buffs */ - if (skb_linearize(skb) < 0) { - dev_err(cs->dev, "%s: skb_linearize failed\n", __func__); - return -ENOMEM; - } len = skb->len; gig_dbg(DEBUG_LLDATA, @@ -85,14 +79,6 @@ static int writebuf_from_LL(int driverID, int channel, int ack, return cs->ops->send_skb(bcs, skb); } -/** - * gigaset_skb_sent() - acknowledge sending an skb - * @bcs: B channel descriptor structure. - * @skb: sent data. 
- * - * Called by hardware module {bas,ser,usb}_gigaset when the data in a - * skb has been successfully sent, for signalling completion to the LL. - */ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) { unsigned len; @@ -469,15 +455,6 @@ int gigaset_isdn_setup_accept(struct at_state_t *at_state) return 0; } -/** - * gigaset_isdn_icall() - signal incoming call - * @at_state: connection state structure. - * - * Called by main module to notify the LL that an incoming call has been - * received. @at_state contains the parameters of the call. - * - * Return value: call disposition (ICALL_*) - */ int gigaset_isdn_icall(struct at_state_t *at_state) { struct cardstate *cs = at_state->cs; diff --git a/trunk/drivers/isdn/gigaset/interface.c b/trunk/drivers/isdn/gigaset/interface.c index 6a8e1384e7bd..f33ac27de643 100644 --- a/trunk/drivers/isdn/gigaset/interface.c +++ b/trunk/drivers/isdn/gigaset/interface.c @@ -616,15 +616,6 @@ void gigaset_if_free(struct cardstate *cs) tty_unregister_device(drv->tty, cs->minor_index); } -/** - * gigaset_if_receive() - pass a received block of data to the tty device - * @cs: device descriptor structure. - * @buffer: received data. - * @len: number of bytes received. - * - * Called by asyncdata/isocdata if a block of data received from the - * device must be sent to userspace through the ttyG* device. - */ void gigaset_if_receive(struct cardstate *cs, unsigned char *buffer, size_t len) { diff --git a/trunk/drivers/isdn/gigaset/isocdata.c b/trunk/drivers/isdn/gigaset/isocdata.c index 9f3ef7b4248c..bed38fcc432b 100644 --- a/trunk/drivers/isdn/gigaset/isocdata.c +++ b/trunk/drivers/isdn/gigaset/isocdata.c @@ -429,7 +429,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, return -EAGAIN; } - dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); + dump_bytes(DEBUG_STREAM, "snd data", in, count); /* bitstuff and checksum input data */ fcs = PPP_INITFCS; @@ -448,6 +448,7 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb, /* put closing flag and repeat byte for flag idle */ isowbuf_putflag(iwb); end = isowbuf_donewrite(iwb); + dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1); return end; } @@ -481,8 +482,6 @@ static inline int trans_buildframe(struct isowbuf_t *iwb, } gig_dbg(DEBUG_STREAM, "put %d bytes", count); - dump_bytes(DEBUG_STREAM_DUMP, "snd data", in, count); - write = iwb->write; do { c = bitrev8(*in++); @@ -584,7 +583,7 @@ static inline void hdlc_done(struct bc_state *bcs) procskb->tail -= 2; gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, procskb->len); - dump_bytes(DEBUG_STREAM_DUMP, + dump_bytes(DEBUG_STREAM, "rcv data", procskb->data, procskb->len); bcs->hw.bas->goodbytes += procskb->len; gigaset_rcv_skb(procskb, bcs->cs, bcs); @@ -879,8 +878,6 @@ static inline void trans_receive(unsigned char *src, unsigned count, dobytes--; } if (dobytes == 0) { - dump_bytes(DEBUG_STREAM_DUMP, - "rcv data", skb->data, skb->len); gigaset_rcv_skb(skb, bcs->cs, bcs); bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); if (!skb) { @@ -976,17 +973,16 @@ void gigaset_isoc_input(struct inbuf_t *inbuf) /* == data output ========================================================== */ -/** - * gigaset_isoc_send_skb() - queue an skb for sending - * @bcs: B channel descriptor structure. - * @skb: data to send. - * - * Called by i4l.c to queue an skb for sending, and start transmission if - * necessary. - * - * Return value: - * number of bytes accepted for sending (skb->len) if ok, - * error code < 0 (eg. 
-ENODEV) on error +/* gigaset_send_skb + * called by common.c to queue an skb for sending + * and start transmission if necessary + * parameters: + * B Channel control structure + * skb + * return value: + * number of bytes accepted for sending + * (skb->len if ok, 0 if out of buffer space) + * or error code (< 0, eg. -EINVAL) */ int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) { diff --git a/trunk/drivers/leds/leds-pca9532.c b/trunk/drivers/leds/leds-pca9532.c index adc561eb59d2..708a8017c21d 100644 --- a/trunk/drivers/leds/leds-pca9532.c +++ b/trunk/drivers/leds/leds-pca9532.c @@ -19,6 +19,9 @@ #include #include +static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END}; +I2C_CLIENT_INSMOD_1(pca9532); + #define PCA9532_REG_PSC(i) (0x2+(i)*2) #define PCA9532_REG_PWM(i) (0x3+(i)*2) #define PCA9532_REG_LS0 0x6 diff --git a/trunk/drivers/macintosh/therm_adt746x.c b/trunk/drivers/macintosh/therm_adt746x.c index 556f0feaa4df..fde377c60cca 100644 --- a/trunk/drivers/macintosh/therm_adt746x.c +++ b/trunk/drivers/macintosh/therm_adt746x.c @@ -124,8 +124,6 @@ read_reg(struct thermostat* th, int reg) return data; } -static struct i2c_driver thermostat_driver; - static int attach_thermostat(struct i2c_adapter *adapter) { @@ -150,7 +148,7 @@ attach_thermostat(struct i2c_adapter *adapter) * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &thermostat_driver.clients); + list_add_tail(&client->detected, &client->driver->clients); return 0; } diff --git a/trunk/drivers/macintosh/therm_pm72.c b/trunk/drivers/macintosh/therm_pm72.c index ea32c7e5a9af..a028598af2d3 100644 --- a/trunk/drivers/macintosh/therm_pm72.c +++ b/trunk/drivers/macintosh/therm_pm72.c @@ -286,8 +286,6 @@ struct fcu_fan_table fcu_fans[] = { }, }; -static struct i2c_driver therm_pm72_driver; - /* * Utility function to create an i2c_client structure and * attach it to one of u3 adapters @@ -320,7 +318,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name) * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&clt->detected, &therm_pm72_driver.clients); + list_add_tail(&clt->detected, &clt->driver->clients); return clt; } diff --git a/trunk/drivers/macintosh/windfarm_lm75_sensor.c b/trunk/drivers/macintosh/windfarm_lm75_sensor.c index ed6426a10773..529886c7a826 100644 --- a/trunk/drivers/macintosh/windfarm_lm75_sensor.c +++ b/trunk/drivers/macintosh/windfarm_lm75_sensor.c @@ -115,8 +115,6 @@ static int wf_lm75_probe(struct i2c_client *client, return rc; } -static struct i2c_driver wf_lm75_driver; - static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, u8 addr, int ds1775, const char *loc) @@ -159,7 +157,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. 
*/ - list_add_tail(&client->detected, &wf_lm75_driver.clients); + list_add_tail(&client->detected, &client->driver->clients); return client; fail: return NULL; diff --git a/trunk/drivers/macintosh/windfarm_max6690_sensor.c b/trunk/drivers/macintosh/windfarm_max6690_sensor.c index a67b349319e9..e2a55ecda2b2 100644 --- a/trunk/drivers/macintosh/windfarm_max6690_sensor.c +++ b/trunk/drivers/macintosh/windfarm_max6690_sensor.c @@ -88,8 +88,6 @@ static int wf_max6690_probe(struct i2c_client *client, return rc; } -static struct i2c_driver wf_max6690_driver; - static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, u8 addr, const char *loc) { @@ -121,7 +119,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &wf_max6690_driver.clients); + list_add_tail(&client->detected, &client->driver->clients); return client; fail: diff --git a/trunk/drivers/macintosh/windfarm_smu_sat.c b/trunk/drivers/macintosh/windfarm_smu_sat.c index e20330a28959..5da729e58f99 100644 --- a/trunk/drivers/macintosh/windfarm_smu_sat.c +++ b/trunk/drivers/macintosh/windfarm_smu_sat.c @@ -194,8 +194,6 @@ static struct wf_sensor_ops wf_sat_ops = { .owner = THIS_MODULE, }; -static struct i2c_driver wf_sat_driver; - static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) { struct i2c_board_info info; @@ -224,7 +222,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) * Let i2c-core delete that device on driver removal. * This is safe because i2c-core holds the core_lock mutex for us. */ - list_add_tail(&client->detected, &wf_sat_driver.clients); + list_add_tail(&client->detected, &client->driver->clients); } static int wf_sat_probe(struct i2c_client *client, diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c index 23e76fe0d359..376f1ab48a24 100644 --- a/trunk/drivers/md/dm.c +++ b/trunk/drivers/md/dm.c @@ -130,7 +130,7 @@ struct mapped_device { /* * A list of ios that arrived while we were suspended. */ - atomic_t pending; + atomic_t pending[2]; wait_queue_head_t wait; struct work_struct work; struct bio_list deferred; @@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io) { struct mapped_device *md = io->md; int cpu; + int rw = bio_data_dir(io->bio); io->start_time = jiffies; cpu = part_stat_lock(); part_round_stats(cpu, &dm_disk(md)->part0); part_stat_unlock(); - dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); + dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); } static void end_io_acct(struct dm_io *io) @@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io) * After this is decremented the bio must not be touched if it is * a barrier. 
*/ - dm_disk(md)->part0.in_flight = pending = - atomic_dec_return(&md->pending); + dm_disk(md)->part0.in_flight[rw] = pending = + atomic_dec_return(&md->pending[rw]); + pending += atomic_read(&md->pending[rw^0x1]); /* nudge anyone waiting on suspend queue */ if (!pending) @@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor) if (!md->disk) goto bad_disk; - atomic_set(&md->pending, 0); + atomic_set(&md->pending[0], 0); + atomic_set(&md->pending[1], 0); init_waitqueue_head(&md->wait); INIT_WORK(&md->work, dm_wq_work); init_waitqueue_head(&md->eventq); @@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) break; } spin_unlock_irqrestore(q->queue_lock, flags); - } else if (!atomic_read(&md->pending)) + } else if (!atomic_read(&md->pending[0]) && + !atomic_read(&md->pending[1])) break; if (interruptible == TASK_INTERRUPTIBLE && diff --git a/trunk/drivers/media/dvb/dvb-core/dmxdev.c b/trunk/drivers/media/dvb/dvb-core/dmxdev.c index c37790ad92d0..516414983593 100644 --- a/trunk/drivers/media/dvb/dvb-core/dmxdev.c +++ b/trunk/drivers/media/dvb/dvb-core/dmxdev.c @@ -20,7 +20,6 @@ * */ -#include #include #include #include diff --git a/trunk/drivers/media/dvb/dvb-core/dvb_demux.c b/trunk/drivers/media/dvb/dvb-core/dvb_demux.c index 91c537bca8ad..eef6d3616626 100644 --- a/trunk/drivers/media/dvb/dvb-core/dvb_demux.c +++ b/trunk/drivers/media/dvb/dvb-core/dvb_demux.c @@ -21,7 +21,6 @@ * */ -#include #include #include #include diff --git a/trunk/drivers/media/radio/radio-cadet.c b/trunk/drivers/media/radio/radio-cadet.c index 482d0f3be5ff..8b1440136c45 100644 --- a/trunk/drivers/media/radio/radio-cadet.c +++ b/trunk/drivers/media/radio/radio-cadet.c @@ -38,7 +38,6 @@ #include /* V4L2 API defs */ #include #include -#include #include /* outb, outb_p */ #include #include diff --git a/trunk/drivers/media/video/cpia.c b/trunk/drivers/media/video/cpia.c index 2377313c041a..43ab0adf3b61 100644 --- a/trunk/drivers/media/video/cpia.c +++ b/trunk/drivers/media/video/cpia.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/mfd/ab3100-core.c b/trunk/drivers/mfd/ab3100-core.c index 613481028272..5447da16a170 100644 --- a/trunk/drivers/mfd/ab3100-core.c +++ b/trunk/drivers/mfd/ab3100-core.c @@ -57,6 +57,8 @@ * The AB3100 is usually assigned address 0x48 (7-bit) * The chip is defined in the platform i2c_board_data section. 
*/ +static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END }; +I2C_CLIENT_INSMOD_1(ab3100); u8 ab3100_get_chip_type(struct ab3100 *ab3100) { @@ -964,7 +966,7 @@ static int __exit ab3100_remove(struct i2c_client *client) } static const struct i2c_device_id ab3100_id[] = { - { "ab3100", 0 }, + { "ab3100", ab3100 }, { } }; MODULE_DEVICE_TABLE(i2c, ab3100_id); diff --git a/trunk/drivers/mfd/ucb1400_core.c b/trunk/drivers/mfd/ucb1400_core.c index fa294b6d600a..2afc08006e6d 100644 --- a/trunk/drivers/mfd/ucb1400_core.c +++ b/trunk/drivers/mfd/ucb1400_core.c @@ -21,7 +21,6 @@ */ #include -#include #include unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, diff --git a/trunk/drivers/misc/eeprom/max6875.c b/trunk/drivers/misc/eeprom/max6875.c index 5a6b2bce8ad5..3c0c58eed347 100644 --- a/trunk/drivers/misc/eeprom/max6875.c +++ b/trunk/drivers/misc/eeprom/max6875.c @@ -33,6 +33,12 @@ #include #include +/* Do not scan - the MAX6875 access method will write to some EEPROM chips */ +static const unsigned short normal_i2c[] = { I2C_CLIENT_END }; + +/* Insmod parameters */ +I2C_CLIENT_INSMOD_1(max6875); + /* The MAX6875 can only read/write 16 bytes at a time */ #define SLICE_SIZE 16 #define SLICE_BITS 4 @@ -140,21 +146,31 @@ static struct bin_attribute user_eeprom_attr = { .read = max6875_read, }; -static int max6875_probe(struct i2c_client *client, - const struct i2c_device_id *id) +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max6875_detect(struct i2c_client *client, int kind, + struct i2c_board_info *info) { struct i2c_adapter *adapter = client->adapter; - struct max6875_data *data; - int err; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA | I2C_FUNC_SMBUS_READ_BYTE)) return -ENODEV; - /* Only bind to even addresses */ + /* Only check even addresses */ if (client->addr & 1) return -ENODEV; + strlcpy(info->type, "max6875", I2C_NAME_SIZE); + + return 0; +} + +static int max6875_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct max6875_data *data; + int err; + if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) return -ENOMEM; @@ -206,6 +222,9 @@ static struct i2c_driver max6875_driver = { .probe = max6875_probe, .remove = max6875_remove, .id_table = max6875_id, + + .detect = max6875_detect, + .address_data = &addr_data, }; static int __init max6875_init(void) diff --git a/trunk/drivers/mmc/core/sdio_cis.c b/trunk/drivers/mmc/core/sdio_cis.c index f85dcd536508..e1035c895808 100644 --- a/trunk/drivers/mmc/core/sdio_cis.c +++ b/trunk/drivers/mmc/core/sdio_cis.c @@ -29,8 +29,6 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, unsigned i, nr_strings; char **buffer, *string; - /* Find all null-terminated (including zero length) strings in - the TPLLV1_INFO field. Trailing garbage is ignored. 
*/ buf += 2; size -= 2; @@ -41,8 +39,11 @@ static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, if (buf[i] == 0) nr_strings++; } - if (nr_strings == 0) + + if (nr_strings < 4) { + printk(KERN_WARNING "SDIO: ignoring broken CISTPL_VERS_1\n"); return 0; + } size = i; diff --git a/trunk/drivers/mtd/mtd_blkdevs.c b/trunk/drivers/mtd/mtd_blkdevs.c index 8ca17a3e96ea..0acbf4f5be50 100644 --- a/trunk/drivers/mtd/mtd_blkdevs.c +++ b/trunk/drivers/mtd/mtd_blkdevs.c @@ -32,6 +32,14 @@ struct mtd_blkcore_priv { spinlock_t queue_lock; }; +static int blktrans_discard_request(struct request_queue *q, + struct request *req) +{ + req->cmd_type = REQ_TYPE_LINUX_BLOCK; + req->cmd[0] = REQ_LB_OP_DISCARD; + return 0; +} + static int do_blktrans_request(struct mtd_blktrans_ops *tr, struct mtd_blktrans_dev *dev, struct request *req) @@ -44,6 +52,10 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, buf = req->buffer; + if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && + req->cmd[0] == REQ_LB_OP_DISCARD) + return tr->discard(dev, block, nsect); + if (!blk_fs_request(req)) return -EIO; @@ -51,9 +63,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, get_capacity(req->rq_disk)) return -EIO; - if (blk_discard_rq(req)) - return tr->discard(dev, block, nsect); - switch(rq_data_dir(req)) { case READ: for (; nsect > 0; nsect--, block++, buf += tr->blksize) @@ -371,8 +380,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) tr->blkcore_priv->rq->queuedata = tr; blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); if (tr->discard) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, - tr->blkcore_priv->rq); + blk_queue_set_discard(tr->blkcore_priv->rq, + blktrans_discard_request); tr->blkshift = ffs(tr->blksize) - 1; diff --git a/trunk/drivers/net/au1000_eth.c b/trunk/drivers/net/au1000_eth.c index 04f63c77071d..fdf5937233fc 100644 --- a/trunk/drivers/net/au1000_eth.c +++ b/trunk/drivers/net/au1000_eth.c @@ -721,7 +721,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status) ps->rx_errors++; if (status & RX_MISSED_FRAME) ps->rx_missed_errors++; - if (status & (RX_OVERLEN | RX_RUNT | RX_LEN_ERROR)) + if (status & (RX_OVERLEN | RX_OVERLEN | RX_LEN_ERROR)) ps->rx_length_errors++; if (status & RX_CRC_ERROR) ps->rx_crc_errors++; @@ -794,6 +794,8 @@ static int au1000_rx(struct net_device *dev) printk("rx len error\n"); if (status & RX_U_CNTRL_FRAME) printk("rx u control frame\n"); + if (status & RX_MISSED_FRAME) + printk("rx miss\n"); } } prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); diff --git a/trunk/drivers/net/benet/be_cmds.c b/trunk/drivers/net/benet/be_cmds.c index 89876ade5e33..79d35d122c08 100644 --- a/trunk/drivers/net/benet/be_cmds.c +++ b/trunk/drivers/net/benet/be_cmds.c @@ -1129,6 +1129,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); + req = embedded_payload(wrb); sge = nonembedded_sgl(wrb); be_wrb_hdr_prepare(wrb, cmd->size, false, 1); diff --git a/trunk/drivers/net/benet/be_cmds.h b/trunk/drivers/net/benet/be_cmds.h index a86f917f85f4..8b4c2cb9ad62 100644 --- a/trunk/drivers/net/benet/be_cmds.h +++ b/trunk/drivers/net/benet/be_cmds.h @@ -62,7 +62,7 @@ enum { MCC_STATUS_QUEUE_FLUSHING = 0x4, /* The command is completing with a DMA error */ MCC_STATUS_DMA_FAILED = 0x5, - MCC_STATUS_NOT_SUPPORTED = 66 + MCC_STATUS_NOT_SUPPORTED = 0x66 }; #define CQE_STATUS_COMPL_MASK 0xFFFF diff --git a/trunk/drivers/net/benet/be_ethtool.c 
b/trunk/drivers/net/benet/be_ethtool.c index cda5bf2fc50a..11445df3dbc0 100644 --- a/trunk/drivers/net/benet/be_ethtool.c +++ b/trunk/drivers/net/benet/be_ethtool.c @@ -358,7 +358,7 @@ const struct ethtool_ops be_ethtool_ops = { .get_rx_csum = be_get_rx_csum, .set_rx_csum = be_set_rx_csum, .get_tx_csum = ethtool_op_get_tx_csum, - .set_tx_csum = ethtool_op_set_tx_hw_csum, + .set_tx_csum = ethtool_op_set_tx_csum, .get_sg = ethtool_op_get_sg, .set_sg = ethtool_op_set_sg, .get_tso = ethtool_op_get_tso, diff --git a/trunk/drivers/net/benet/be_main.c b/trunk/drivers/net/benet/be_main.c index 6d5e81f7046f..2f9b50156e0c 100644 --- a/trunk/drivers/net/benet/be_main.c +++ b/trunk/drivers/net/benet/be_main.c @@ -197,7 +197,7 @@ void netdev_stats_update(struct be_adapter *adapter) /* no space available in linux */ dev_stats->tx_dropped = 0; - dev_stats->multicast = port_stats->rx_multicast_frames; + dev_stats->multicast = port_stats->tx_multicastframes; dev_stats->collisions = 0; /* detailed tx_errors */ @@ -1899,8 +1899,8 @@ static void be_netdev_init(struct net_device *netdev) struct be_adapter *adapter = netdev_priv(netdev); netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | - NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | - NETIF_F_GRO; + NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | NETIF_F_GRO; netdev->flags |= IFF_MULTICAST; diff --git a/trunk/drivers/net/e1000e/82571.c b/trunk/drivers/net/e1000e/82571.c index d1e0563a67df..b53b40ba88a8 100644 --- a/trunk/drivers/net/e1000e/82571.c +++ b/trunk/drivers/net/e1000e/82571.c @@ -1803,7 +1803,7 @@ struct e1000_info e1000_82574_info = { | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .pba = 20, - .max_hw_frame_size = DEFAULT_JUMBO, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_bm, @@ -1820,7 +1820,7 @@ struct e1000_info e1000_82583_info = { | FLAG_HAS_AMT | FLAG_HAS_CTRLEXT_ON_LOAD, .pba = 20, - .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .max_hw_frame_size = DEFAULT_JUMBO, .get_variants = e1000_get_variants_82571, .mac_ops = &e82571_mac_ops, .phy_ops = &e82_phy_ops_bm, diff --git a/trunk/drivers/net/ethoc.c b/trunk/drivers/net/ethoc.c index 34d0c69e67f7..b7311bc00258 100644 --- a/trunk/drivers/net/ethoc.c +++ b/trunk/drivers/net/ethoc.c @@ -19,10 +19,6 @@ #include #include -static int buffer_size = 0x8000; /* 32 KBytes */ -module_param(buffer_size, int, 0); -MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); - /* register offsets */ #define MODER 0x00 #define INT_SOURCE 0x04 @@ -171,7 +167,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); * struct ethoc - driver-private device structure * @iobase: pointer to I/O memory region * @membase: pointer to buffer memory region - * @dma_alloc: dma allocated buffer size * @num_tx: number of send buffers * @cur_tx: last send buffer written * @dty_tx: last buffer actually sent @@ -190,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size"); struct ethoc { void __iomem *iobase; void __iomem *membase; - int dma_alloc; unsigned int num_tx; unsigned int cur_tx; @@ -290,7 +284,7 @@ static int ethoc_init_ring(struct ethoc *dev) dev->cur_rx = 0; /* setup transmission buffers */ - bd.addr = virt_to_phys(dev->membase); + bd.addr = 0; bd.stat = TX_BD_IRQ | TX_BD_CRC; for (i = 0; i < dev->num_tx; i++) { @@ -301,6 +295,7 @@ static int ethoc_init_ring(struct ethoc *dev) bd.addr += ETHOC_BUFSIZ; } + bd.addr = 
dev->num_tx * ETHOC_BUFSIZ; bd.stat = RX_BD_EMPTY | RX_BD_IRQ; for (i = 0; i < dev->num_rx; i++) { @@ -405,12 +400,8 @@ static int ethoc_rx(struct net_device *dev, int limit) if (ethoc_update_rx_stats(priv, &bd) == 0) { int size = bd.stat >> 16; struct sk_buff *skb = netdev_alloc_skb(dev, size); - - size -= 4; /* strip the CRC */ - skb_reserve(skb, 2); /* align TCP/IP header */ - if (likely(skb)) { - void *src = phys_to_virt(bd.addr); + void *src = priv->membase + bd.addr; memcpy_fromio(skb_put(skb, size), src, size); skb->protocol = eth_type_trans(skb, dev); priv->stats.rx_packets++; @@ -662,9 +653,9 @@ static int ethoc_open(struct net_device *dev) if (ret) return ret; - /* calculate the number of TX/RX buffers, maximum 128 supported */ - num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ); - priv->num_tx = max(min_tx, num_bd / 4); + /* calculate the number of TX/RX buffers */ + num_bd = (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ; + priv->num_tx = min(min_tx, num_bd / 4); priv->num_rx = num_bd - priv->num_tx; ethoc_write(priv, TX_BD_NUM, priv->num_tx); @@ -832,7 +823,7 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev) else bd.stat &= ~TX_BD_PAD; - dest = phys_to_virt(bd.addr); + dest = priv->membase + bd.addr; memcpy_toio(dest, skb->data, skb->len); bd.stat &= ~(TX_BD_STATS | TX_BD_LEN_MASK); @@ -912,19 +903,22 @@ static int ethoc_probe(struct platform_device *pdev) /* obtain buffer memory space */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (res) { - mem = devm_request_mem_region(&pdev->dev, res->start, - res->end - res->start + 1, res->name); - if (!mem) { - dev_err(&pdev->dev, "cannot request memory space\n"); - ret = -ENXIO; - goto free; - } + if (!res) { + dev_err(&pdev->dev, "cannot obtain memory space\n"); + ret = -ENXIO; + goto free; + } - netdev->mem_start = mem->start; - netdev->mem_end = mem->end; + mem = devm_request_mem_region(&pdev->dev, res->start, + res->end - res->start + 1, res->name); + if (!mem) { + dev_err(&pdev->dev, "cannot request memory space\n"); + ret = -ENXIO; + goto free; } + netdev->mem_start = mem->start; + netdev->mem_end = mem->end; /* obtain device IRQ number */ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); @@ -939,7 +933,6 @@ static int ethoc_probe(struct platform_device *pdev) /* setup driver-private data */ priv = netdev_priv(netdev); priv->netdev = netdev; - priv->dma_alloc = 0; priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr, mmio->end - mmio->start + 1); @@ -949,27 +942,12 @@ static int ethoc_probe(struct platform_device *pdev) goto error; } - if (netdev->mem_end) { - priv->membase = devm_ioremap_nocache(&pdev->dev, - netdev->mem_start, mem->end - mem->start + 1); - if (!priv->membase) { - dev_err(&pdev->dev, "cannot remap memory space\n"); - ret = -ENXIO; - goto error; - } - } else { - /* Allocate buffer memory */ - priv->membase = dma_alloc_coherent(NULL, - buffer_size, (void *)&netdev->mem_start, - GFP_KERNEL); - if (!priv->membase) { - dev_err(&pdev->dev, "cannot allocate %dB buffer\n", - buffer_size); - ret = -ENOMEM; - goto error; - } - netdev->mem_end = netdev->mem_start + buffer_size; - priv->dma_alloc = buffer_size; + priv->membase = devm_ioremap_nocache(&pdev->dev, netdev->mem_start, + mem->end - mem->start + 1); + if (!priv->membase) { + dev_err(&pdev->dev, "cannot remap memory space\n"); + ret = -ENXIO; + goto error; } /* Allow the platform setup code to pass in a MAC address. 
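[Editor's note] The ethoc changes above stop translating descriptor addresses with phys_to_virt and instead treat bd.addr as an offset into the one remapped buffer window, resolved as priv->membase + bd.addr, with the dma_alloc_coherent fallback dropped. A standalone sketch of that layout; the buffer size, counts, and names (region, struct bd) are illustrative:

#include <stdio.h>

#define BUF_SLOT 0x600                 /* stand-in for ETHOC_BUFSIZ */

struct bd { unsigned int addr; };      /* descriptor stores an offset */

int main(void)
{
    unsigned char region[16 * BUF_SLOT];   /* stand-in for the remapped window */
    struct bd tx[4], rx[12];
    unsigned int off = 0;

    for (int i = 0; i < 4; i++, off += BUF_SLOT)
        tx[i].addr = off;                  /* TX buffers occupy the start */
    for (int i = 0; i < 12; i++, off += BUF_SLOT)
        rx[i].addr = off;                  /* RX buffers follow, matching
                                              bd.addr = num_tx * ETHOC_BUFSIZ */

    /* CPU access resolves the offset against the window base: */
    unsigned char *first_rx = region + rx[0].addr;
    printf("first RX buffer at offset 0x%x (%p)\n", rx[0].addr, (void *)first_rx);
    return 0;
}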
*/ @@ -1056,9 +1034,6 @@ static int ethoc_probe(struct platform_device *pdev) kfree(priv->mdio->irq); mdiobus_free(priv->mdio); free: - if (priv->dma_alloc) - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, - netdev->mem_start); free_netdev(netdev); out: return ret; @@ -1084,9 +1059,7 @@ static int ethoc_remove(struct platform_device *pdev) kfree(priv->mdio->irq); mdiobus_free(priv->mdio); } - if (priv->dma_alloc) - dma_free_coherent(NULL, priv->dma_alloc, priv->membase, - netdev->mem_start); + unregister_netdev(netdev); free_netdev(netdev); } diff --git a/trunk/drivers/net/ixgbe/ixgbe_82599.c b/trunk/drivers/net/ixgbe/ixgbe_82599.c index 34b04924c8a1..2ec58dcdb82b 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_82599.c +++ b/trunk/drivers/net/ixgbe/ixgbe_82599.c @@ -330,8 +330,6 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) switch (hw->device_id) { case IXGBE_DEV_ID_82599_KX4: - case IXGBE_DEV_ID_82599_KX4_MEZZ: - case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; diff --git a/trunk/drivers/net/ixgbe/ixgbe_main.c b/trunk/drivers/net/ixgbe/ixgbe_main.c index cbb143ca1eb8..28fbb9d281f9 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ixgbe/ixgbe_main.c @@ -97,12 +97,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = { board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), - board_82599 }, {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, - {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), - board_82599 }, /* required last entry */ {0, } diff --git a/trunk/drivers/net/ixgbe/ixgbe_type.h b/trunk/drivers/net/ixgbe/ixgbe_type.h index ef4bdd58e016..7c93e923bf2e 100644 --- a/trunk/drivers/net/ixgbe/ixgbe_type.h +++ b/trunk/drivers/net/ixgbe/ixgbe_type.h @@ -49,11 +49,9 @@ #define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 #define IXGBE_DEV_ID_82599_KX4 0x10F7 -#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 #define IXGBE_DEV_ID_82599_CX4 0x10F9 #define IXGBE_DEV_ID_82599_SFP 0x10FB #define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC -#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 /* General Registers */ #define IXGBE_CTRL 0x00000 diff --git a/trunk/drivers/net/netxen/netxen_nic_main.c b/trunk/drivers/net/netxen/netxen_nic_main.c index 9b9eab107704..b5aa974827e5 100644 --- a/trunk/drivers/net/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/netxen/netxen_nic_main.c @@ -1714,7 +1714,7 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev) /* 4 fragments per cmd des */ no_of_desc = (frag_count + 3) >> 2; - if (unlikely(no_of_desc + 2 > netxen_tx_avail(tx_ring))) { + if (unlikely(no_of_desc + 2) > netxen_tx_avail(tx_ring)) { netif_stop_queue(netdev); return NETDEV_TX_BUSY; } diff --git a/trunk/drivers/net/pasemi_mac_ethtool.c b/trunk/drivers/net/pasemi_mac_ethtool.c index 28a86224879d..064a4fe1dd90 100644 --- a/trunk/drivers/net/pasemi_mac_ethtool.c +++ b/trunk/drivers/net/pasemi_mac_ethtool.c @@ -71,9 +71,6 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev, struct pasemi_mac *mac = netdev_priv(netdev); struct phy_device *phydev = mac->phydev; - if (!phydev) - return -EOPNOTSUPP; - return phy_ethtool_gset(phydev, cmd); } diff --git a/trunk/drivers/net/pcmcia/pcnet_cs.c b/trunk/drivers/net/pcmcia/pcnet_cs.c index bd3447f04902..474876c879cb 100644 --- 
a/trunk/drivers/net/pcmcia/pcnet_cs.c +++ b/trunk/drivers/net/pcmcia/pcnet_cs.c @@ -1754,14 +1754,14 @@ static struct pcmcia_device_id pcnet_ids[] = { PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "Psion Dacom", "Gold Card V34 Ethernet GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(0, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "DP83903.cis"), PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"), PCMCIA_DEVICE_PROD_ID12("Ethernet", "CF Size PC Card", 0x00b2e941, 0x43ac239b), PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0", 0xb4be14e3, 0x43ac239b, 0x0877b627), diff --git a/trunk/drivers/net/qlge/qlge.h b/trunk/drivers/net/qlge/qlge.h index 3ec6e85587a2..30d5585beeee 100644 --- a/trunk/drivers/net/qlge/qlge.h +++ b/trunk/drivers/net/qlge/qlge.h @@ -9,7 +9,6 @@ #include #include -#include /* * General definitions... @@ -136,9 +135,9 @@ enum { RST_FO_TFO = (1 << 0), RST_FO_RR_MASK = 0x00060000, RST_FO_RR_CQ_CAM = 0x00000000, - RST_FO_RR_DROP = 0x00000002, - RST_FO_RR_DQ = 0x00000004, - RST_FO_RR_RCV_FUNC_CQ = 0x00000006, + RST_FO_RR_DROP = 0x00000001, + RST_FO_RR_DQ = 0x00000002, + RST_FO_RR_RCV_FUNC_CQ = 0x00000003, RST_FO_FRB = (1 << 12), RST_FO_MOP = (1 << 13), RST_FO_REG = (1 << 14), @@ -1478,6 +1477,7 @@ struct ql_adapter { u32 mailbox_in; u32 mailbox_out; struct mbox_params idc_mbc; + struct mutex mpi_mutex; int tx_ring_size; int rx_ring_size; diff --git a/trunk/drivers/net/qlge/qlge_ethtool.c b/trunk/drivers/net/qlge/qlge_ethtool.c index 52073946bce3..68f9bd280f86 100644 --- a/trunk/drivers/net/qlge/qlge_ethtool.c +++ b/trunk/drivers/net/qlge/qlge_ethtool.c @@ -45,6 +45,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) if (!netif_running(qdev->ndev)) return status; + spin_lock(&qdev->hw_lock); /* Skip the default queue, and update the outbound handler * queues if they changed. 
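[Editor's note] The qlge_ethtool hunk above re-wraps the ring-coalescing update in the adapter's hw_lock, so the multi-step register programming cannot interleave with other register users. A small pthread-based sketch of the same idea; the indexed reg_addr/reg_data pair is hypothetical and only stands in for a hardware register sequence:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static int reg_addr, reg_data;          /* pretend indexed register pair */

static void write_indexed(int index, int value)
{
    pthread_mutex_lock(&hw_lock);
    reg_addr = index;                   /* step 1: select register */
    reg_data = value;                   /* step 2: write value     */
    pthread_mutex_unlock(&hw_lock);     /* both steps stay atomic with
                                           respect to other writers */
}

int main(void)
{
    write_indexed(3, 0x42);
    printf("reg[%d] = 0x%x\n", reg_addr, reg_data);
    return 0;
}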
*/ @@ -91,6 +92,7 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev) } } exit: + spin_unlock(&qdev->hw_lock); return status; } diff --git a/trunk/drivers/net/qlge/qlge_main.c b/trunk/drivers/net/qlge/qlge_main.c index 61680715cde0..3d0efea32111 100644 --- a/trunk/drivers/net/qlge/qlge_main.c +++ b/trunk/drivers/net/qlge/qlge_main.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -1925,10 +1926,12 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return; + spin_lock(&qdev->hw_lock); if (ql_set_mac_addr_reg (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n"); } + spin_unlock(&qdev->hw_lock); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } @@ -1942,10 +1945,12 @@ static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) if (status) return; + spin_lock(&qdev->hw_lock); if (ql_set_mac_addr_reg (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) { QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n"); } + spin_unlock(&qdev->hw_lock); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); } @@ -1996,17 +2001,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id) /* * Check MPI processor activity. */ - if ((var & STS_PI) && - (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) { + if (var & STS_PI) { /* * We've got an async event or mailbox completion. * Handle it and clear the source of the interrupt. */ QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n"); ql_disable_completion_interrupt(qdev, intr_context->intr); - ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); - queue_delayed_work_on(smp_processor_id(), - qdev->workqueue, &qdev->mpi_work, 0); + queue_delayed_work_on(smp_processor_id(), qdev->workqueue, + &qdev->mpi_work, 0); work_done++; } @@ -3582,6 +3585,7 @@ static void qlge_set_multicast_list(struct net_device *ndev) status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); if (status) return; + spin_lock(&qdev->hw_lock); /* * Set or clear promiscuous mode if a * transition is taking place. 
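[Editor's note] In the qlge_isr hunk above, the hard interrupt path only notices the MPI event, disables the completion interrupt, and defers the real mailbox/async handling to the delayed work item. A toy sketch of that split between IRQ context and deferred work; STS_PI's value and the one-slot "queue" are illustrative:

#include <stdio.h>
#include <stdbool.h>

#define STS_PI (1u << 2)                /* illustrative status bit */

static bool mpi_work_queued;

static void queue_mpi_work(void) { mpi_work_queued = true; }

static int isr(unsigned int status)
{
    int work_done = 0;

    if (status & STS_PI) {              /* MPI processor raised an event */
        /* mask/acknowledge the source here, then defer the heavy work */
        queue_mpi_work();
        work_done++;
    }
    return work_done;
}

static void mpi_work(void)              /* runs later, outside IRQ context */
{
    if (mpi_work_queued)
        printf("handling mailbox/async event in deferred context\n");
}

int main(void)
{
    isr(STS_PI);
    mpi_work();
    return 0;
}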
@@ -3658,6 +3662,7 @@ static void qlge_set_multicast_list(struct net_device *ndev) } } exit: + spin_unlock(&qdev->hw_lock); ql_sem_unlock(qdev, SEM_RT_IDX_MASK); } @@ -3677,8 +3682,10 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p) status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK); if (status) return status; + spin_lock(&qdev->hw_lock); status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr, MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ); + spin_unlock(&qdev->hw_lock); if (status) QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n"); ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); @@ -3921,6 +3928,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev, INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work); INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work); INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work); + mutex_init(&qdev->mpi_mutex); init_completion(&qdev->ide_completion); if (!cards_found) { diff --git a/trunk/drivers/net/qlge/qlge_mpi.c b/trunk/drivers/net/qlge/qlge_mpi.c index c2e43073047e..6685bd97da91 100644 --- a/trunk/drivers/net/qlge/qlge_mpi.c +++ b/trunk/drivers/net/qlge/qlge_mpi.c @@ -472,6 +472,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) { int status, count; + mutex_lock(&qdev->mpi_mutex); /* Begin polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16)); @@ -540,6 +541,7 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp) status = -EIO; } end: + mutex_unlock(&qdev->mpi_mutex); /* End polled mode for MPI */ ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI); return status; @@ -774,9 +776,7 @@ static int ql_idc_wait(struct ql_adapter *qdev) static int ql_set_port_cfg(struct ql_adapter *qdev) { int status; - rtnl_lock(); status = ql_mb_set_port_cfg(qdev); - rtnl_unlock(); if (status) return status; status = ql_idc_wait(qdev); @@ -797,9 +797,7 @@ void ql_mpi_port_cfg_work(struct work_struct *work) container_of(work, struct ql_adapter, mpi_port_cfg_work.work); int status; - rtnl_lock(); status = ql_mb_get_port_cfg(qdev); - rtnl_unlock(); if (status) { QPRINTK(qdev, DRV, ERR, "Bug: Failed to get port config data.\n"); @@ -857,9 +855,7 @@ void ql_mpi_idc_work(struct work_struct *work) * needs to be set. 
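[Editor's note] The qlge_mpi hunks above swap the heavyweight rtnl_lock for a driver-private mpi_mutex: every mailbox command now enters and leaves ql_mailbox_command() under the same mutex, so firmware exchanges are serialized without holding a global lock. A compact pthread sketch of wrapping a whole request/reply exchange in one driver-private mutex; the mailbox "registers" and fake firmware reply are stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mpi_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int mailbox_in, mailbox_out;      /* stand-in registers */

/* One command = post request, wait, read reply; nothing else may touch
 * the mailbox registers until the whole exchange finishes. */
static unsigned int mailbox_command(unsigned int request)
{
    unsigned int reply;

    pthread_mutex_lock(&mpi_mutex);
    mailbox_in  = request;            /* post the request   */
    mailbox_out = request + 1;        /* fake firmware echo */
    reply = mailbox_out;              /* collect the reply  */
    pthread_mutex_unlock(&mpi_mutex);

    return reply;
}

int main(void)
{
    printf("reply: %u\n", mailbox_command(41));
    return 0;
}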
* */ set_bit(QL_CAM_RT_SET, &qdev->flags); - rtnl_lock(); status = ql_mb_idc_ack(qdev); - rtnl_unlock(); if (status) { QPRINTK(qdev, DRV, ERR, "Bug: No pending IDC!\n"); @@ -875,7 +871,7 @@ void ql_mpi_work(struct work_struct *work) struct mbox_params *mbcp = &mbc; int err = 0; - rtnl_lock(); + mutex_lock(&qdev->mpi_mutex); while (ql_read32(qdev, STS) & STS_PI) { memset(mbcp, 0, sizeof(struct mbox_params)); @@ -888,7 +884,7 @@ void ql_mpi_work(struct work_struct *work) break; } - rtnl_unlock(); + mutex_unlock(&qdev->mpi_mutex); ql_enable_completion_interrupt(qdev, 0); } diff --git a/trunk/drivers/net/tg3.c b/trunk/drivers/net/tg3.c index ba5d3fe753b6..f09bc5dfe8b2 100644 --- a/trunk/drivers/net/tg3.c +++ b/trunk/drivers/net/tg3.c @@ -902,12 +902,11 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) struct tg3 *tp = bp->priv; u32 val; - spin_lock_bh(&tp->lock); + if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) + return -EAGAIN; if (tg3_readphy(tp, reg, &val)) - val = -EIO; - - spin_unlock_bh(&tp->lock); + return -EIO; return val; } @@ -915,16 +914,14 @@ static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg) static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val) { struct tg3 *tp = bp->priv; - u32 ret = 0; - spin_lock_bh(&tp->lock); + if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED) + return -EAGAIN; if (tg3_writephy(tp, reg, val)) - ret = -EIO; - - spin_unlock_bh(&tp->lock); + return -EIO; - return ret; + return 0; } static int tg3_mdio_reset(struct mii_bus *bp) @@ -1014,6 +1011,12 @@ static void tg3_mdio_config_5785(struct tg3 *tp) static void tg3_mdio_start(struct tg3 *tp) { + if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { + mutex_lock(&tp->mdio_bus->mdio_lock); + tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; + mutex_unlock(&tp->mdio_bus->mdio_lock); + } + tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL; tw32_f(MAC_MI_MODE, tp->mi_mode); udelay(80); @@ -1038,6 +1041,15 @@ static void tg3_mdio_start(struct tg3 *tp) tg3_mdio_config_5785(tp); } +static void tg3_mdio_stop(struct tg3 *tp) +{ + if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) { + mutex_lock(&tp->mdio_bus->mdio_lock); + tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED; + mutex_unlock(&tp->mdio_bus->mdio_lock); + } +} + static int tg3_mdio_init(struct tg3 *tp) { int i; @@ -1129,6 +1141,7 @@ static void tg3_mdio_fini(struct tg3 *tp) tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED; mdiobus_unregister(tp->mdio_bus); mdiobus_free(tp->mdio_bus); + tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED; } } @@ -1350,7 +1363,7 @@ static void tg3_adjust_link(struct net_device *dev) struct tg3 *tp = netdev_priv(dev); struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; - spin_lock_bh(&tp->lock); + spin_lock(&tp->lock); mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX); @@ -1418,7 +1431,7 @@ static void tg3_adjust_link(struct net_device *dev) tp->link_config.active_speed = phydev->speed; tp->link_config.active_duplex = phydev->duplex; - spin_unlock_bh(&tp->lock); + spin_unlock(&tp->lock); if (linkmesg) tg3_link_report(tp); @@ -6379,6 +6392,8 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_nvram_lock(tp); + tg3_mdio_stop(tp); + tg3_ape_lock(tp, TG3_APE_LOCK_GRC); /* No matching tg3_nvram_unlock() after this because @@ -8683,8 +8698,6 @@ static int tg3_close(struct net_device *dev) del_timer_sync(&tp->timer); - tg3_phy_stop(tp); - tg3_full_lock(tp, 1); #if 0 tg3_dump_state(tp); diff --git a/trunk/drivers/net/tg3.h b/trunk/drivers/net/tg3.h index bab7940158e6..524691cd9896 100644 --- 
a/trunk/drivers/net/tg3.h +++ b/trunk/drivers/net/tg3.h @@ -2748,6 +2748,7 @@ struct tg3 { #define TG3_FLG3_5701_DMA_BUG 0x00000008 #define TG3_FLG3_USE_PHYLIB 0x00000010 #define TG3_FLG3_MDIOBUS_INITED 0x00000020 +#define TG3_FLG3_MDIOBUS_PAUSED 0x00000040 #define TG3_FLG3_PHY_CONNECTED 0x00000080 #define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100 #define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200 diff --git a/trunk/drivers/net/usb/rndis_host.c b/trunk/drivers/net/usb/rndis_host.c index 0caa8008c51c..d032bba9bc4c 100644 --- a/trunk/drivers/net/usb/rndis_host.c +++ b/trunk/drivers/net/usb/rndis_host.c @@ -418,7 +418,6 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags) goto halt_fail_and_release; } memcpy(net->dev_addr, bp, ETH_ALEN); - memcpy(net->perm_addr, bp, ETH_ALEN); /* set a nonzero filter to enable data transfers */ memset(u.set, 0, sizeof *u.set); diff --git a/trunk/drivers/platform/x86/sony-laptop.c b/trunk/drivers/platform/x86/sony-laptop.c index a2a742c8ff7e..afdbdaaf80cb 100644 --- a/trunk/drivers/platform/x86/sony-laptop.c +++ b/trunk/drivers/platform/x86/sony-laptop.c @@ -1211,6 +1211,15 @@ static int sony_nc_add(struct acpi_device *device) } } + /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1 + * should be respected as we already checked for the device presence above */ + if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) { + dprintk("Invoking _INI\n"); + if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI, + NULL, NULL))) + dprintk("_INI Method failed\n"); + } + if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", &handle))) { if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) @@ -1390,20 +1399,27 @@ struct sonypi_eventtypes { struct sonypi_event *events; }; -struct sony_pic_dev { - struct acpi_device *acpi_dev; - struct sony_pic_irq *cur_irq; - struct sony_pic_ioport *cur_ioport; - struct list_head interrupts; - struct list_head ioports; - struct mutex lock; - struct sonypi_eventtypes *event_types; - int (*handle_irq)(const u8, const u8); +struct device_ctrl { int model; + int (*handle_irq)(const u8, const u8); u16 evport_offset; - u8 camera_power; - u8 bluetooth_power; - u8 wwan_power; + u8 has_camera; + u8 has_bluetooth; + u8 has_wwan; + struct sonypi_eventtypes *event_types; +}; + +struct sony_pic_dev { + struct device_ctrl *control; + struct acpi_device *acpi_dev; + struct sony_pic_irq *cur_irq; + struct sony_pic_ioport *cur_ioport; + struct list_head interrupts; + struct list_head ioports; + struct mutex lock; + u8 camera_power; + u8 bluetooth_power; + u8 wwan_power; }; static struct sony_pic_dev spic_dev = { @@ -1411,8 +1427,6 @@ static struct sony_pic_dev spic_dev = { .ioports = LIST_HEAD_INIT(spic_dev.ioports), }; -static int spic_drv_registered; - /* Event masks */ #define SONYPI_JOGGER_MASK 0x00000001 #define SONYPI_CAPTURE_MASK 0x00000002 @@ -1710,6 +1724,27 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev) return 1; } +static struct device_ctrl spic_types[] = { + { + .model = SONYPI_DEVICE_TYPE1, + .handle_irq = NULL, + .evport_offset = SONYPI_TYPE1_OFFSET, + .event_types = type1_events, + }, + { + .model = SONYPI_DEVICE_TYPE2, + .handle_irq = NULL, + .evport_offset = SONYPI_TYPE2_OFFSET, + .event_types = type2_events, + }, + { + .model = SONYPI_DEVICE_TYPE3, + .handle_irq = type3_handle_irq, + .evport_offset = SONYPI_TYPE3_OFFSET, + .event_types = type3_events, + }, +}; + static void sony_pic_detect_device_type(struct 
sony_pic_dev *dev) { struct pci_dev *pcidev; @@ -1717,63 +1752,48 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev) pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); if (pcidev) { - dev->model = SONYPI_DEVICE_TYPE1; - dev->evport_offset = SONYPI_TYPE1_OFFSET; - dev->event_types = type1_events; + dev->control = &spic_types[0]; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, NULL); if (pcidev) { - dev->model = SONYPI_DEVICE_TYPE2; - dev->evport_offset = SONYPI_TYPE2_OFFSET; - dev->event_types = type2_events; + dev->control = &spic_types[2]; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, NULL); if (pcidev) { - dev->model = SONYPI_DEVICE_TYPE3; - dev->handle_irq = type3_handle_irq; - dev->evport_offset = SONYPI_TYPE3_OFFSET; - dev->event_types = type3_events; + dev->control = &spic_types[2]; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4, NULL); if (pcidev) { - dev->model = SONYPI_DEVICE_TYPE3; - dev->handle_irq = type3_handle_irq; - dev->evport_offset = SONYPI_TYPE3_OFFSET; - dev->event_types = type3_events; + dev->control = &spic_types[2]; goto out; } pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_1, NULL); if (pcidev) { - dev->model = SONYPI_DEVICE_TYPE3; - dev->handle_irq = type3_handle_irq; - dev->evport_offset = SONYPI_TYPE3_OFFSET; - dev->event_types = type3_events; + dev->control = &spic_types[2]; goto out; } /* default */ - dev->model = SONYPI_DEVICE_TYPE2; - dev->evport_offset = SONYPI_TYPE2_OFFSET; - dev->event_types = type2_events; + dev->control = &spic_types[1]; out: if (pcidev) pci_dev_put(pcidev); printk(KERN_INFO DRV_PFX "detected Type%d model\n", - dev->model == SONYPI_DEVICE_TYPE1 ? 1 : - dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); + dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : + dev->control->model == SONYPI_DEVICE_TYPE2 ? 
2 : 3); } /* camera tests and poweron/poweroff */ @@ -2546,7 +2566,7 @@ static int sony_pic_enable(struct acpi_device *device, buffer.pointer = resource; /* setup Type 1 resources */ - if (spic_dev.model == SONYPI_DEVICE_TYPE1) { + if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { /* setup io resources */ resource->res1.type = ACPI_RESOURCE_TYPE_IO; @@ -2629,28 +2649,29 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) data_mask = inb_p(dev->cur_ioport->io2.minimum); else data_mask = inb_p(dev->cur_ioport->io1.minimum + - dev->evport_offset); + dev->control->evport_offset); dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, - dev->evport_offset); + dev->control->evport_offset); if (ev == 0x00 || ev == 0xff) return IRQ_HANDLED; - for (i = 0; dev->event_types[i].mask; i++) { + for (i = 0; dev->control->event_types[i].mask; i++) { - if ((data_mask & dev->event_types[i].data) != - dev->event_types[i].data) + if ((data_mask & dev->control->event_types[i].data) != + dev->control->event_types[i].data) continue; - if (!(mask & dev->event_types[i].mask)) + if (!(mask & dev->control->event_types[i].mask)) continue; - for (j = 0; dev->event_types[i].events[j].event; j++) { - if (ev == dev->event_types[i].events[j].data) { + for (j = 0; dev->control->event_types[i].events[j].event; j++) { + if (ev == dev->control->event_types[i].events[j].data) { device_event = - dev->event_types[i].events[j].event; + dev->control-> + event_types[i].events[j].event; goto found; } } @@ -2658,12 +2679,13 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id) /* Still not able to decode the event try to pass * it over to the minidriver */ - if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0) + if (dev->control->handle_irq && + dev->control->handle_irq(data_mask, ev) == 0) return IRQ_HANDLED; dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", ev, data_mask, dev->cur_ioport->io1.minimum, - dev->evport_offset); + dev->control->evport_offset); return IRQ_HANDLED; found: @@ -2794,7 +2816,7 @@ static int sony_pic_add(struct acpi_device *device) /* request IRQ */ list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, - IRQF_DISABLED, "sony-laptop", &spic_dev)) { + IRQF_SHARED, "sony-laptop", &spic_dev)) { dprintk("IRQ: %d - triggering: %d - " "polarity: %d - shr: %d\n", irq->irq.interrupts[0], @@ -2927,7 +2949,6 @@ static int __init sony_laptop_init(void) "Unable to register SPIC driver."); goto out; } - spic_drv_registered = 1; } result = acpi_bus_register_driver(&sony_nc_driver); @@ -2939,7 +2960,7 @@ static int __init sony_laptop_init(void) return 0; out_unregister_pic: - if (spic_drv_registered) + if (!no_spic) acpi_bus_unregister_driver(&sony_pic_driver); out: return result; @@ -2948,7 +2969,7 @@ static int __init sony_laptop_init(void) static void __exit sony_laptop_exit(void) { acpi_bus_unregister_driver(&sony_nc_driver); - if (spic_drv_registered) + if (!no_spic) acpi_bus_unregister_driver(&sony_pic_driver); } diff --git a/trunk/drivers/serial/serial_cs.c b/trunk/drivers/serial/serial_cs.c index ff4617e21426..a3bb49031a7f 100644 --- a/trunk/drivers/serial/serial_cs.c +++ b/trunk/drivers/serial/serial_cs.c @@ -873,10 +873,10 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet", 0xf5f025c2, 0x338e8155, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "Psion Dacom", "Gold Card V34 Ethernet 
GSM", 0xf5f025c2, 0x4ae85d35, "cis/PCMLM28.cis"), PCMCIA_PFC_DEVICE_CIS_PROD_ID12(1, "LINKSYS", "PCMLM28", 0xf7cb0b07, 0x66881874, "cis/PCMLM28.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"), - PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID12(1, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_PROD_ID4(1, "NSC MF LAN/Modem", 0x58fc6056, "DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0556, "cis/3CCFEM556.cis"), - PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "cis/DP83903.cis"), + PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0175, 0x0000, "DP83903.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ @@ -884,9 +884,9 @@ static struct pcmcia_device_id serial_ids[] = { PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "cis/MT5634ZLX.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "cis/COMpad2.cis"), - PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), - PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-2", 0x96913a85, 0x27ab5437, "COMpad2.cis"), + PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"), + PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"), PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), diff --git a/trunk/drivers/sfi/sfi_core.c b/trunk/drivers/sfi/sfi_core.c index b204a0929139..d3b496800477 100644 --- a/trunk/drivers/sfi/sfi_core.c +++ b/trunk/drivers/sfi/sfi_core.c @@ -90,11 +90,7 @@ static struct sfi_table_simple *syst_va __read_mostly; */ static u32 sfi_use_ioremap __read_mostly; -/* - * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function - * and introduces section mismatch. So use __ref to make it calm. 
- */ -static void __iomem * __ref sfi_map_memory(u64 phys, u32 size) +static void __iomem *sfi_map_memory(u64 phys, u32 size) { if (!phys || !size) return NULL; @@ -105,7 +101,7 @@ static void __iomem * __ref sfi_map_memory(u64 phys, u32 size) return early_ioremap(phys, size); } -static void __ref sfi_unmap_memory(void __iomem *virt, u32 size) +static void sfi_unmap_memory(void __iomem *virt, u32 size) { if (!virt || !size) return; @@ -129,7 +125,7 @@ static void sfi_print_table_header(unsigned long long pa, * sfi_verify_table() * Sanity check table lengh, calculate checksum */ -static int sfi_verify_table(struct sfi_table_header *table) +static __init int sfi_verify_table(struct sfi_table_header *table) { u8 checksum = 0; @@ -217,17 +213,12 @@ static int sfi_table_check_key(struct sfi_table_header *th, * the mapped virt address will be returned, and the virt space * will be released by call sfi_put_table() later * - * This two cases are from two different functions with two different - * sections and causes section mismatch warning. So use __ref to tell - * modpost not to make any noise. - * * Return value: * NULL: when can't find a table matching the key * ERR_PTR(error): error value * virt table address: when a matched table is found */ -struct sfi_table_header * - __ref sfi_check_table(u64 pa, struct sfi_table_key *key) +struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key) { struct sfi_table_header *th; void *ret = NULL; diff --git a/trunk/drivers/staging/dst/dcore.c b/trunk/drivers/staging/dst/dcore.c index c24e4e0367a2..ee1601026fb0 100644 --- a/trunk/drivers/staging/dst/dcore.c +++ b/trunk/drivers/staging/dst/dcore.c @@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio) struct dst_node *n = q->queuedata; int err = -EIO; - if (bio_empty_barrier(bio) && !blk_queue_discard(q)) { + if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { /* * This is a dirty^Wnice hack, but if we complete this * operation with -EOPNOTSUPP like intended, XFS diff --git a/trunk/drivers/staging/iio/light/tsl2561.c b/trunk/drivers/staging/iio/light/tsl2561.c index fc2107f4c049..ea8a5efc19bc 100644 --- a/trunk/drivers/staging/iio/light/tsl2561.c +++ b/trunk/drivers/staging/iio/light/tsl2561.c @@ -239,6 +239,10 @@ static int __devexit tsl2561_remove(struct i2c_client *client) return tsl2561_powerdown(client); } +static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END }; + +I2C_CLIENT_INSMOD; + static const struct i2c_device_id tsl2561_id[] = { { "tsl2561", 0 }, { } diff --git a/trunk/drivers/usb/gadget/inode.c b/trunk/drivers/usb/gadget/inode.c index bf0f6520c6df..c44367fea185 100644 --- a/trunk/drivers/usb/gadget/inode.c +++ b/trunk/drivers/usb/gadget/inode.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/drivers/video/da8xx-fb.c b/trunk/drivers/video/da8xx-fb.c index d065894ce38f..42e1005e2916 100644 --- a/trunk/drivers/video/da8xx-fb.c +++ b/trunk/drivers/video/da8xx-fb.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include
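[Editor's note] The sfi_verify_table() hunk above sanity-checks a table's length and checksum; as is conventional for such firmware tables, the bytes of the table, including the checksum byte itself, should sum to zero modulo 256. A standalone sketch of that check, with a deliberately simplified table layout:

#include <stdint.h>
#include <stdio.h>

/* Sum every byte of the table; a valid table sums to 0 (mod 256). */
static int verify_table(const uint8_t *table, size_t len)
{
    uint8_t checksum = 0;

    for (size_t i = 0; i < len; i++)
        checksum += table[i];

    return checksum == 0 ? 0 : -1;
}

int main(void)
{
    uint8_t table[8] = { 'S', 'Y', 'S', 'T', 8, 0, 0, 0 };
    uint8_t sum = 0;

    for (size_t i = 0; i < sizeof(table); i++)
        sum += table[i];
    table[5] = (uint8_t)(0x100 - sum);        /* patch in the checksum byte */

    printf("table %s\n",
           verify_table(table, sizeof(table)) == 0 ? "ok" : "corrupt");
    return 0;
}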