extern void abort(void);
extern void __assert_fail(const char *, const char *, unsigned int, const char *) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
void reach_error() { __assert_fail("0", "drivers--gpu--drm--i915--i915.ko_022.e7d22bc.43_1a.cil_true-unreach-call.i", 3, "reach_error"); }
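/* Verification harness: reach_error() is the error location checked by the unreach-call property named in the file string above. */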

/* Generated by CIL v. 1.5.1 */
/* print_CIL_Input is false */

typedef __builtin_va_list __gnuc_va_list[1U];
typedef __gnuc_va_list va_list[1U];
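/* __kernel_* and fixed-width integer typedefs flattened from the original kernel headers. */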
typedef unsigned int __kernel_mode_t;
typedef unsigned long __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef long long __kernel_loff_t;
typedef __kernel_uid_t __kernel_uid32_t;
typedef __kernel_gid_t __kernel_gid32_t;
typedef signed char __s8;
typedef short __s16;
typedef unsigned short __u16;
typedef int __s32;
typedef unsigned int __u32;
typedef long long __s64;
typedef unsigned long long __u64;
typedef signed char s8;
typedef unsigned char u8;
typedef unsigned short u16;
typedef int s32;
typedef unsigned int u32;
typedef long long s64;
typedef unsigned long long u64;
typedef unsigned short umode_t;
typedef u64 dma_addr_t;
typedef __u32 __kernel_dev_t;
typedef __kernel_dev_t dev_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_nlink_t nlink_t;
typedef __kernel_off_t off_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_timer_t timer_t;
typedef __kernel_clockid_t clockid_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_loff_t loff_t;
typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
typedef __kernel_time_t time_t;
typedef __kernel_clock_t clock_t;
typedef __u32 uint32_t;
typedef unsigned long sector_t;
typedef unsigned long blkcnt_t;
typedef unsigned int gfp_t;
typedef u64 phys_addr_t;
typedef phys_addr_t resource_size_t;
struct module;
struct bug_entry {
   unsigned long bug_addr ;
   char const   *file ;
   unsigned short line ;
   unsigned short flags ;
};
struct completion;
struct pt_regs;
struct pid;
struct task_struct;
struct mm_struct;
typedef void (*ds_ovfl_callback_t)(struct task_struct * );
struct ds_context {
   unsigned char *ds ;
   struct task_struct *owner[2U] ;
   ds_ovfl_callback_t callback[2U] ;
   void *buffer[2U] ;
   unsigned int pages[2U] ;
   unsigned long count ;
   struct ds_context **this ;
   struct task_struct *task ;
};
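/* Saved x86-64 register state as passed to exception and system-call handlers. */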
struct pt_regs {
   unsigned long r15 ;
   unsigned long r14 ;
   unsigned long r13 ;
   unsigned long r12 ;
   unsigned long bp ;
   unsigned long bx ;
   unsigned long r11 ;
   unsigned long r10 ;
   unsigned long r9 ;
   unsigned long r8 ;
   unsigned long ax ;
   unsigned long cx ;
   unsigned long dx ;
   unsigned long si ;
   unsigned long di ;
   unsigned long orig_ax ;
   unsigned long ip ;
   unsigned long cs ;
   unsigned long flags ;
   unsigned long sp ;
   unsigned long ss ;
};
struct info {
   long ___orig_eip ;
   long ___ebx ;
   long ___ecx ;
   long ___edx ;
   long ___esi ;
   long ___edi ;
   long ___ebp ;
   long ___eax ;
   long ___ds ;
   long ___es ;
   long ___fs ;
   long ___orig_eax ;
   long ___eip ;
   long ___cs ;
   long ___eflags ;
   long ___esp ;
   long ___ss ;
   long ___vm86_es ;
   long ___vm86_ds ;
   long ___vm86_fs ;
   long ___vm86_gs ;
};
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
struct page;
struct __anonstruct_pgd_t_7 {
   pgdval_t pgd ;
};
typedef struct __anonstruct_pgd_t_7 pgd_t;
struct __anonstruct_pgprot_t_8 {
   pgprotval_t pgprot ;
};
typedef struct __anonstruct_pgprot_t_8 pgprot_t;
struct __anonstruct_ldv_2011_12 {
   unsigned int a ;
   unsigned int b ;
};
struct __anonstruct_ldv_2026_13 {
   u16 limit0 ;
   u16 base0 ;
   unsigned char base1 ;
   unsigned char type : 4 ;
   unsigned char s : 1 ;
   unsigned char dpl : 2 ;
   unsigned char p : 1 ;
   unsigned char limit : 4 ;
   unsigned char avl : 1 ;
   unsigned char l : 1 ;
   unsigned char d : 1 ;
   unsigned char g : 1 ;
   unsigned char base2 ;
};
union __anonunion_ldv_2027_11 {
   struct __anonstruct_ldv_2011_12 ldv_2011 ;
   struct __anonstruct_ldv_2026_13 ldv_2026 ;
};
struct desc_struct {
   union __anonunion_ldv_2027_11 ldv_2027 ;
};
struct __anonstruct_cpumask_t_14 {
   unsigned long bits[1U] ;
};
typedef struct __anonstruct_cpumask_t_14 cpumask_t;
struct thread_struct;
struct raw_spinlock;
struct exec_domain;
struct map_segment;
struct exec_domain {
   char const   *name ;
   void (*handler)(int  , struct pt_regs * ) ;
   unsigned char pers_low ;
   unsigned char pers_high ;
   unsigned long *signal_map ;
   unsigned long *signal_invmap ;
   struct map_segment *err_map ;
   struct map_segment *socktype_map ;
   struct map_segment *sockopt_map ;
   struct map_segment *af_map ;
   struct module *module ;
   struct exec_domain *next ;
};
struct i387_fsave_struct {
   u32 cwd ;
   u32 swd ;
   u32 twd ;
   u32 fip ;
   u32 fcs ;
   u32 foo ;
   u32 fos ;
   u32 st_space[20U] ;
   u32 status ;
};
struct __anonstruct_ldv_4459_16 {
   u64 rip ;
   u64 rdp ;
};
struct __anonstruct_ldv_4465_17 {
   u32 fip ;
   u32 fcs ;
   u32 foo ;
   u32 fos ;
};
union __anonunion_ldv_4466_15 {
   struct __anonstruct_ldv_4459_16 ldv_4459 ;
   struct __anonstruct_ldv_4465_17 ldv_4465 ;
};
union __anonunion_ldv_4475_18 {
   u32 padding1[12U] ;
   u32 sw_reserved[12U] ;
};
struct i387_fxsave_struct {
   u16 cwd ;
   u16 swd ;
   u16 twd ;
   u16 fop ;
   union __anonunion_ldv_4466_15 ldv_4466 ;
   u32 mxcsr ;
   u32 mxcsr_mask ;
   u32 st_space[32U] ;
   u32 xmm_space[64U] ;
   u32 padding[12U] ;
   union __anonunion_ldv_4475_18 ldv_4475 ;
};
struct i387_soft_struct {
   u32 cwd ;
   u32 swd ;
   u32 twd ;
   u32 fip ;
   u32 fcs ;
   u32 foo ;
   u32 fos ;
   u32 st_space[20U] ;
   u8 ftop ;
   u8 changed ;
   u8 lookahead ;
   u8 no_update ;
   u8 rm ;
   u8 alimit ;
   struct info *info ;
   u32 entry_eip ;
};
struct xsave_hdr_struct {
   u64 xstate_bv ;
   u64 reserved1[2U] ;
   u64 reserved2[5U] ;
};
struct xsave_struct {
   struct i387_fxsave_struct i387 ;
   struct xsave_hdr_struct xsave_hdr ;
};
union thread_xstate {
   struct i387_fsave_struct fsave ;
   struct i387_fxsave_struct fxsave ;
   struct i387_soft_struct soft ;
   struct xsave_struct xsave ;
};
struct kmem_cache;
struct thread_struct {
   struct desc_struct tls_array[3U] ;
   unsigned long sp0 ;
   unsigned long sp ;
   unsigned long usersp ;
   unsigned short es ;
   unsigned short ds ;
   unsigned short fsindex ;
   unsigned short gsindex ;
   unsigned long ip ;
   unsigned long fs ;
   unsigned long gs ;
   unsigned long debugreg0 ;
   unsigned long debugreg1 ;
   unsigned long debugreg2 ;
   unsigned long debugreg3 ;
   unsigned long debugreg6 ;
   unsigned long debugreg7 ;
   unsigned long cr2 ;
   unsigned long trap_no ;
   unsigned long error_code ;
   union thread_xstate *xstate ;
   unsigned long *io_bitmap_ptr ;
   unsigned long iopl ;
   unsigned int io_bitmap_max ;
   unsigned long debugctlmsr ;
   struct ds_context *ds_ctx ;
   unsigned int bts_ovfl_signal ;
};
struct __anonstruct_mm_segment_t_19 {
   unsigned long seg ;
};
typedef struct __anonstruct_mm_segment_t_19 mm_segment_t;
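/* Intrusive doubly-linked list and hash-list primitives used by most structures below. */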
struct list_head {
   struct list_head *next ;
   struct list_head *prev ;
};
struct hlist_node;
struct hlist_head {
   struct hlist_node *first ;
};
struct hlist_node {
   struct hlist_node *next ;
   struct hlist_node **pprev ;
};
struct timespec;
struct compat_timespec;
struct __anonstruct_ldv_4899_21 {
   unsigned long arg0 ;
   unsigned long arg1 ;
   unsigned long arg2 ;
   unsigned long arg3 ;
};
struct __anonstruct_futex_22 {
   u32 *uaddr ;
   u32 val ;
   u32 flags ;
   u32 bitset ;
   u64 time ;
};
struct __anonstruct_nanosleep_23 {
   clockid_t index ;
   struct timespec *rmtp ;
   struct compat_timespec *compat_rmtp ;
   u64 expires ;
};
union __anonunion_ldv_4913_20 {
   struct __anonstruct_ldv_4899_21 ldv_4899 ;
   struct __anonstruct_futex_22 futex ;
   struct __anonstruct_nanosleep_23 nanosleep ;
};
struct restart_block {
   long (*fn)(struct restart_block * ) ;
   union __anonunion_ldv_4913_20 ldv_4913 ;
};
struct thread_info {
   struct task_struct *task ;
   struct exec_domain *exec_domain ;
   unsigned long flags ;
   __u32 status ;
   __u32 cpu ;
   int preempt_count ;
   mm_segment_t addr_limit ;
   struct restart_block restart_block ;
   void *sysenter_return ;
};
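/* Locking primitives: raw spinlocks, rwlocks, lockdep bookkeeping, atomics, mutexes, rw-semaphores. */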
struct raw_spinlock {
   unsigned int slock ;
};
typedef struct raw_spinlock raw_spinlock_t;
struct __anonstruct_raw_rwlock_t_24 {
   unsigned int lock ;
};
typedef struct __anonstruct_raw_rwlock_t_24 raw_rwlock_t;
struct lockdep_map;
struct stack_trace {
   unsigned int nr_entries ;
   unsigned int max_entries ;
   unsigned long *entries ;
   int skip ;
};
struct lockdep_subclass_key {
   char __one_byte ;
};
struct lock_class_key {
   struct lockdep_subclass_key subkeys[8U] ;
};
struct lock_class {
   struct list_head hash_entry ;
   struct list_head lock_entry ;
   struct lockdep_subclass_key *key ;
   unsigned int subclass ;
   unsigned int dep_gen_id ;
   unsigned long usage_mask ;
   struct stack_trace usage_traces[9U] ;
   struct list_head locks_after ;
   struct list_head locks_before ;
   unsigned int version ;
   unsigned long ops ;
   char const   *name ;
   int name_version ;
   unsigned long contention_point[4U] ;
};
struct lockdep_map {
   struct lock_class_key *key ;
   struct lock_class *class_cache ;
   char const   *name ;
   int cpu ;
};
struct held_lock {
   u64 prev_chain_key ;
   unsigned long acquire_ip ;
   struct lockdep_map *instance ;
   struct lockdep_map *nest_lock ;
   u64 waittime_stamp ;
   u64 holdtime_stamp ;
   unsigned short class_idx : 13 ;
   unsigned char irq_context : 2 ;
   unsigned char trylock : 1 ;
   unsigned char read : 2 ;
   unsigned char check : 2 ;
   unsigned char hardirqs_off : 1 ;
};
struct __anonstruct_spinlock_t_25 {
   raw_spinlock_t raw_lock ;
   unsigned int magic ;
   unsigned int owner_cpu ;
   void *owner ;
   struct lockdep_map dep_map ;
};
typedef struct __anonstruct_spinlock_t_25 spinlock_t;
struct __anonstruct_rwlock_t_26 {
   raw_rwlock_t raw_lock ;
   unsigned int magic ;
   unsigned int owner_cpu ;
   void *owner ;
   struct lockdep_map dep_map ;
};
typedef struct __anonstruct_rwlock_t_26 rwlock_t;
struct __anonstruct_atomic_t_27 {
   int counter ;
};
typedef struct __anonstruct_atomic_t_27 atomic_t;
struct __anonstruct_atomic64_t_28 {
   long counter ;
};
typedef struct __anonstruct_atomic64_t_28 atomic64_t;
typedef atomic64_t atomic_long_t;
struct timespec {
   time_t tv_sec ;
   long tv_nsec ;
};
struct kstat {
   u64 ino ;
   dev_t dev ;
   umode_t mode ;
   unsigned int nlink ;
   uid_t uid ;
   gid_t gid ;
   dev_t rdev ;
   loff_t size ;
   struct timespec atime ;
   struct timespec mtime ;
   struct timespec ctime ;
   unsigned long blksize ;
   unsigned long long blocks ;
};
struct __wait_queue;
typedef struct __wait_queue wait_queue_t;
struct __wait_queue {
   unsigned int flags ;
   void *private ;
   int (*func)(wait_queue_t * , unsigned int  , int  , void * ) ;
   struct list_head task_list ;
};
struct __wait_queue_head {
   spinlock_t lock ;
   struct list_head task_list ;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct __anonstruct_nodemask_t_30 {
   unsigned long bits[1U] ;
};
typedef struct __anonstruct_nodemask_t_30 nodemask_t;
struct mutex {
   atomic_t count ;
   spinlock_t wait_lock ;
   struct list_head wait_list ;
   struct thread_info *owner ;
   char const   *name ;
   void *magic ;
   struct lockdep_map dep_map ;
};
struct mutex_waiter {
   struct list_head list ;
   struct task_struct *task ;
   struct mutex *lock ;
   void *magic ;
};
struct rw_semaphore;
struct rw_semaphore {
   __s32 activity ;
   spinlock_t wait_lock ;
   struct list_head wait_list ;
   struct lockdep_map dep_map ;
};
struct file;
struct device;
struct pm_message {
   int event ;
};
typedef struct pm_message pm_message_t;
struct pm_ops {
   int (*prepare)(struct device * ) ;
   void (*complete)(struct device * ) ;
   int (*suspend)(struct device * ) ;
   int (*resume)(struct device * ) ;
   int (*freeze)(struct device * ) ;
   int (*thaw)(struct device * ) ;
   int (*poweroff)(struct device * ) ;
   int (*restore)(struct device * ) ;
};
struct pm_ext_ops {
   struct pm_ops base ;
   int (*suspend_noirq)(struct device * ) ;
   int (*resume_noirq)(struct device * ) ;
   int (*freeze_noirq)(struct device * ) ;
   int (*thaw_noirq)(struct device * ) ;
   int (*poweroff_noirq)(struct device * ) ;
   int (*restore_noirq)(struct device * ) ;
};
enum dpm_state {
    DPM_INVALID = 0,
    DPM_ON = 1,
    DPM_PREPARING = 2,
    DPM_RESUMING = 3,
    DPM_SUSPENDING = 4,
    DPM_OFF = 5,
    DPM_OFF_IRQ = 6
} ;
struct dev_pm_info {
   pm_message_t power_state ;
   unsigned char can_wakeup : 1 ;
   unsigned char should_wakeup : 1 ;
   enum dpm_state status ;
   struct list_head entry ;
};
struct __anonstruct_mm_context_t_77 {
   void *ldt ;
   int size ;
   struct mutex lock ;
   void *vdso ;
};
typedef struct __anonstruct_mm_context_t_77 mm_context_t;
struct pci_bus;
struct vm_area_struct;
struct key;
typedef __u64 Elf64_Addr;
typedef __u16 Elf64_Half;
typedef __u32 Elf64_Word;
typedef __u64 Elf64_Xword;
struct elf64_sym {
   Elf64_Word st_name ;
   unsigned char st_info ;
   unsigned char st_other ;
   Elf64_Half st_shndx ;
   Elf64_Addr st_value ;
   Elf64_Xword st_size ;
};
typedef struct elf64_sym Elf64_Sym;
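/* Driver-model and sysfs objects: kobject, kset, attributes, and uevent handling. */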
struct kobject;
struct attribute {
   char const   *name ;
   struct module *owner ;
   mode_t mode ;
};
struct attribute_group {
   char const   *name ;
   mode_t (*is_visible)(struct kobject * , struct attribute * , int  ) ;
   struct attribute **attrs ;
};
struct bin_attribute {
   struct attribute attr ;
   size_t size ;
   void *private ;
   ssize_t (*read)(struct kobject * , struct bin_attribute * , char * , loff_t  ,
                   size_t  ) ;
   ssize_t (*write)(struct kobject * , struct bin_attribute * , char * , loff_t  ,
                    size_t  ) ;
   int (*mmap)(struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ;
};
struct sysfs_ops {
   ssize_t (*show)(struct kobject * , struct attribute * , char * ) ;
   ssize_t (*store)(struct kobject * , struct attribute * , char const   * , size_t  ) ;
};
struct sysfs_dirent;
struct kref {
   atomic_t refcount ;
};
struct kset;
struct kobj_type;
struct kobject {
   char const   *name ;
   struct list_head entry ;
   struct kobject *parent ;
   struct kset *kset ;
   struct kobj_type *ktype ;
   struct sysfs_dirent *sd ;
   struct kref kref ;
   unsigned char state_initialized : 1 ;
   unsigned char state_in_sysfs : 1 ;
   unsigned char state_add_uevent_sent : 1 ;
   unsigned char state_remove_uevent_sent : 1 ;
};
struct kobj_type {
   void (*release)(struct kobject * ) ;
   struct sysfs_ops *sysfs_ops ;
   struct attribute **default_attrs ;
};
struct kobj_uevent_env {
   char *envp[32U] ;
   int envp_idx ;
   char buf[2048U] ;
   int buflen ;
};
struct kset_uevent_ops {
   int (*filter)(struct kset * , struct kobject * ) ;
   char const   *(*name)(struct kset * , struct kobject * ) ;
   int (*uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ;
};
struct kset {
   struct list_head list ;
   spinlock_t list_lock ;
   struct kobject kobj ;
   struct kset_uevent_ops *uevent_ops ;
};
struct marker;
typedef void marker_probe_func(void * , void * , char const   * , va_list * );
struct marker_probe_closure {
   marker_probe_func *func ;
   void *probe_private ;
};
struct marker {
   char const   *name ;
   char const   *format ;
   char state ;
   char ptype ;
   void (*call)(struct marker  const  * , void *  , ...) ;
   struct marker_probe_closure single ;
   struct marker_probe_closure *multi ;
};
typedef unsigned long long cycles_t;
union ktime {
   s64 tv64 ;
};
typedef union ktime ktime_t;
struct tvec_base;
struct timer_list {
   struct list_head entry ;
   unsigned long expires ;
   void (*function)(unsigned long  ) ;
   unsigned long data ;
   struct tvec_base *base ;
   void *start_site ;
   char start_comm[16U] ;
   int start_pid ;
};
struct hrtimer;
enum hrtimer_restart;
struct work_struct;
struct work_struct {
   atomic_long_t data ;
   struct list_head entry ;
   void (*func)(struct work_struct * ) ;
   struct lockdep_map lockdep_map ;
};
struct delayed_work {
   struct work_struct work ;
   struct timer_list timer ;
};
struct kmem_cache_cpu {
   void **freelist ;
   struct page *page ;
   int node ;
   unsigned int offset ;
   unsigned int objsize ;
   unsigned int stat[18U] ;
};
struct kmem_cache_node {
   spinlock_t list_lock ;
   unsigned long nr_partial ;
   unsigned long min_partial ;
   struct list_head partial ;
   atomic_long_t nr_slabs ;
   atomic_long_t total_objects ;
   struct list_head full ;
};
struct kmem_cache_order_objects {
   unsigned long x ;
};
struct kmem_cache {
   unsigned long flags ;
   int size ;
   int objsize ;
   int offset ;
   struct kmem_cache_order_objects oo ;
   struct kmem_cache_node local_node ;
   struct kmem_cache_order_objects max ;
   struct kmem_cache_order_objects min ;
   gfp_t allocflags ;
   int refcount ;
   void (*ctor)(void * ) ;
   int inuse ;
   int align ;
   char const   *name ;
   struct list_head list ;
   struct kobject kobj ;
   int remote_node_defrag_ratio ;
   struct kmem_cache_node *node[64U] ;
   struct kmem_cache_cpu *cpu_slab[8U] ;
};
struct __anonstruct_local_t_88 {
   atomic_long_t a ;
};
typedef struct __anonstruct_local_t_88 local_t;
struct mod_arch_specific {

};
struct kernel_symbol {
   unsigned long value ;
   char const   *name ;
};
struct module_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct module_attribute * , struct module * , char * ) ;
   ssize_t (*store)(struct module_attribute * , struct module * , char const   * ,
                    size_t  ) ;
   void (*setup)(struct module * , char const   * ) ;
   int (*test)(struct module * ) ;
   void (*free)(struct module * ) ;
};
struct module_kobject {
   struct kobject kobj ;
   struct module *mod ;
   struct kobject *drivers_dir ;
};
struct exception_table_entry;
struct module_ref {
   local_t count ;
};
enum module_state {
    MODULE_STATE_LIVE = 0,
    MODULE_STATE_COMING = 1,
    MODULE_STATE_GOING = 2
} ;
struct module_param_attrs;
struct module_sect_attrs;
struct module_notes_attrs;
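/* Loadable module descriptor as tracked by the module loader. */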
struct module {
   enum module_state state ;
   struct list_head list ;
   char name[56U] ;
   struct module_kobject mkobj ;
   struct module_param_attrs *param_attrs ;
   struct module_attribute *modinfo_attrs ;
   char const   *version ;
   char const   *srcversion ;
   struct kobject *holders_dir ;
   struct kernel_symbol  const  *syms ;
   unsigned long const   *crcs ;
   unsigned int num_syms ;
   unsigned int num_gpl_syms ;
   struct kernel_symbol  const  *gpl_syms ;
   unsigned long const   *gpl_crcs ;
   struct kernel_symbol  const  *unused_syms ;
   unsigned long const   *unused_crcs ;
   unsigned int num_unused_syms ;
   unsigned int num_unused_gpl_syms ;
   struct kernel_symbol  const  *unused_gpl_syms ;
   unsigned long const   *unused_gpl_crcs ;
   struct kernel_symbol  const  *gpl_future_syms ;
   unsigned long const   *gpl_future_crcs ;
   unsigned int num_gpl_future_syms ;
   unsigned int num_exentries ;
   struct exception_table_entry  const  *extable ;
   int (*init)(void) ;
   void *module_init ;
   void *module_core ;
   unsigned int init_size ;
   unsigned int core_size ;
   unsigned int init_text_size ;
   unsigned int core_text_size ;
   void *unwind_info ;
   struct mod_arch_specific arch ;
   unsigned int taints ;
   unsigned int num_bugs ;
   struct list_head bug_list ;
   struct bug_entry *bug_table ;
   Elf64_Sym *symtab ;
   unsigned int num_symtab ;
   char *strtab ;
   struct module_sect_attrs *sect_attrs ;
   struct module_notes_attrs *notes_attrs ;
   void *percpu ;
   char *args ;
   struct marker *markers ;
   unsigned int num_markers ;
   struct list_head modules_which_use_me ;
   struct task_struct *waiter ;
   void (*exit)(void) ;
   struct module_ref ref[8U] ;
};
struct device_driver;
struct file_operations;
struct completion {
   unsigned int done ;
   wait_queue_head_t wait ;
};
struct rcu_head {
   struct rcu_head *next ;
   void (*func)(struct rcu_head * ) ;
};
struct nameidata;
struct path;
struct vfsmount;
struct qstr {
   unsigned int hash ;
   unsigned int len ;
   unsigned char const   *name ;
};
struct dcookie_struct;
struct inode;
union __anonunion_d_u_89 {
   struct list_head d_child ;
   struct rcu_head d_rcu ;
};
struct dentry_operations;
struct super_block;
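/* VFS objects: dentry, inode, file, super_block, and their operation tables. */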
struct dentry {
   atomic_t d_count ;
   unsigned int d_flags ;
   spinlock_t d_lock ;
   struct inode *d_inode ;
   struct hlist_node d_hash ;
   struct dentry *d_parent ;
   struct qstr d_name ;
   struct list_head d_lru ;
   union __anonunion_d_u_89 d_u ;
   struct list_head d_subdirs ;
   struct list_head d_alias ;
   unsigned long d_time ;
   struct dentry_operations *d_op ;
   struct super_block *d_sb ;
   void *d_fsdata ;
   struct dcookie_struct *d_cookie ;
   int d_mounted ;
   unsigned char d_iname[36U] ;
};
struct dentry_operations {
   int (*d_revalidate)(struct dentry * , struct nameidata * ) ;
   int (*d_hash)(struct dentry * , struct qstr * ) ;
   int (*d_compare)(struct dentry * , struct qstr * , struct qstr * ) ;
   int (*d_delete)(struct dentry * ) ;
   void (*d_release)(struct dentry * ) ;
   void (*d_iput)(struct dentry * , struct inode * ) ;
   char *(*d_dname)(struct dentry * , char * , int  ) ;
};
struct path {
   struct vfsmount *mnt ;
   struct dentry *dentry ;
};
struct radix_tree_node;
struct radix_tree_root {
   unsigned int height ;
   gfp_t gfp_mask ;
   struct radix_tree_node *rnode ;
};
struct prio_tree_node;
struct raw_prio_tree_node {
   struct prio_tree_node *left ;
   struct prio_tree_node *right ;
   struct prio_tree_node *parent ;
};
struct prio_tree_node {
   struct prio_tree_node *left ;
   struct prio_tree_node *right ;
   struct prio_tree_node *parent ;
   unsigned long start ;
   unsigned long last ;
};
struct prio_tree_root {
   struct prio_tree_node *prio_tree_node ;
   unsigned short index_bits ;
   unsigned short raw ;
};
enum pid_type {
    PIDTYPE_PID = 0,
    PIDTYPE_PGID = 1,
    PIDTYPE_SID = 2,
    PIDTYPE_MAX = 3
} ;
struct pid_namespace;
struct upid {
   int nr ;
   struct pid_namespace *ns ;
   struct hlist_node pid_chain ;
};
struct pid {
   atomic_t count ;
   unsigned int level ;
   struct hlist_head tasks[3U] ;
   struct rcu_head rcu ;
   struct upid numbers[1U] ;
};
struct pid_link {
   struct hlist_node node ;
   struct pid *pid ;
};
struct kernel_cap_struct {
   __u32 cap[2U] ;
};
typedef struct kernel_cap_struct kernel_cap_t;
struct semaphore {
   spinlock_t lock ;
   unsigned int count ;
   struct list_head wait_list ;
};
struct fiemap_extent {
   __u64 fe_logical ;
   __u64 fe_physical ;
   __u64 fe_length ;
   __u64 fe_reserved64[2U] ;
   __u32 fe_flags ;
   __u32 fe_reserved[3U] ;
};
struct export_operations;
struct iovec;
struct kiocb;
struct pipe_inode_info;
struct poll_table_struct;
struct kstatfs;
struct iattr {
   unsigned int ia_valid ;
   umode_t ia_mode ;
   uid_t ia_uid ;
   gid_t ia_gid ;
   loff_t ia_size ;
   struct timespec ia_atime ;
   struct timespec ia_mtime ;
   struct timespec ia_ctime ;
   struct file *ia_file ;
};
struct if_dqblk {
   __u64 dqb_bhardlimit ;
   __u64 dqb_bsoftlimit ;
   __u64 dqb_curspace ;
   __u64 dqb_ihardlimit ;
   __u64 dqb_isoftlimit ;
   __u64 dqb_curinodes ;
   __u64 dqb_btime ;
   __u64 dqb_itime ;
   __u32 dqb_valid ;
};
struct if_dqinfo {
   __u64 dqi_bgrace ;
   __u64 dqi_igrace ;
   __u32 dqi_flags ;
   __u32 dqi_valid ;
};
struct fs_disk_quota {
   __s8 d_version ;
   __s8 d_flags ;
   __u16 d_fieldmask ;
   __u32 d_id ;
   __u64 d_blk_hardlimit ;
   __u64 d_blk_softlimit ;
   __u64 d_ino_hardlimit ;
   __u64 d_ino_softlimit ;
   __u64 d_bcount ;
   __u64 d_icount ;
   __s32 d_itimer ;
   __s32 d_btimer ;
   __u16 d_iwarns ;
   __u16 d_bwarns ;
   __s32 d_padding2 ;
   __u64 d_rtb_hardlimit ;
   __u64 d_rtb_softlimit ;
   __u64 d_rtbcount ;
   __s32 d_rtbtimer ;
   __u16 d_rtbwarns ;
   __s16 d_padding3 ;
   char d_padding4[8U] ;
};
struct fs_qfilestat {
   __u64 qfs_ino ;
   __u64 qfs_nblks ;
   __u32 qfs_nextents ;
};
typedef struct fs_qfilestat fs_qfilestat_t;
struct fs_quota_stat {
   __s8 qs_version ;
   __u16 qs_flags ;
   __s8 qs_pad ;
   fs_qfilestat_t qs_uquota ;
   fs_qfilestat_t qs_gquota ;
   __u32 qs_incoredqs ;
   __s32 qs_btimelimit ;
   __s32 qs_itimelimit ;
   __s32 qs_rtbtimelimit ;
   __u16 qs_bwarnlimit ;
   __u16 qs_iwarnlimit ;
};
struct v1_mem_dqinfo {

};
struct v2_mem_dqinfo {
   unsigned int dqi_blocks ;
   unsigned int dqi_free_blk ;
   unsigned int dqi_free_entry ;
};
typedef __kernel_uid32_t qid_t;
typedef __u64 qsize_t;
struct mem_dqblk {
   __u32 dqb_bhardlimit ;
   __u32 dqb_bsoftlimit ;
   qsize_t dqb_curspace ;
   __u32 dqb_ihardlimit ;
   __u32 dqb_isoftlimit ;
   __u32 dqb_curinodes ;
   time_t dqb_btime ;
   time_t dqb_itime ;
};
struct quota_format_type;
union __anonunion_u_91 {
   struct v1_mem_dqinfo v1_i ;
   struct v2_mem_dqinfo v2_i ;
};
struct mem_dqinfo {
   struct quota_format_type *dqi_format ;
   int dqi_fmt_id ;
   struct list_head dqi_dirty_list ;
   unsigned long dqi_flags ;
   unsigned int dqi_bgrace ;
   unsigned int dqi_igrace ;
   qsize_t dqi_maxblimit ;
   qsize_t dqi_maxilimit ;
   union __anonunion_u_91 u ;
};
struct dquot {
   struct hlist_node dq_hash ;
   struct list_head dq_inuse ;
   struct list_head dq_free ;
   struct list_head dq_dirty ;
   struct mutex dq_lock ;
   atomic_t dq_count ;
   wait_queue_head_t dq_wait_unused ;
   struct super_block *dq_sb ;
   unsigned int dq_id ;
   loff_t dq_off ;
   unsigned long dq_flags ;
   short dq_type ;
   struct mem_dqblk dq_dqb ;
};
struct quota_format_ops {
   int (*check_quota_file)(struct super_block * , int  ) ;
   int (*read_file_info)(struct super_block * , int  ) ;
   int (*write_file_info)(struct super_block * , int  ) ;
   int (*free_file_info)(struct super_block * , int  ) ;
   int (*read_dqblk)(struct dquot * ) ;
   int (*commit_dqblk)(struct dquot * ) ;
   int (*release_dqblk)(struct dquot * ) ;
};
struct dquot_operations {
   int (*initialize)(struct inode * , int  ) ;
   int (*drop)(struct inode * ) ;
   int (*alloc_space)(struct inode * , qsize_t  , int  ) ;
   int (*alloc_inode)(struct inode  const  * , unsigned long  ) ;
   int (*free_space)(struct inode * , qsize_t  ) ;
   int (*free_inode)(struct inode  const  * , unsigned long  ) ;
   int (*transfer)(struct inode * , struct iattr * ) ;
   int (*write_dquot)(struct dquot * ) ;
   int (*acquire_dquot)(struct dquot * ) ;
   int (*release_dquot)(struct dquot * ) ;
   int (*mark_dirty)(struct dquot * ) ;
   int (*write_info)(struct super_block * , int  ) ;
};
struct quotactl_ops {
   int (*quota_on)(struct super_block * , int  , int  , char * , int  ) ;
   int (*quota_off)(struct super_block * , int  , int  ) ;
   int (*quota_sync)(struct super_block * , int  ) ;
   int (*get_info)(struct super_block * , int  , struct if_dqinfo * ) ;
   int (*set_info)(struct super_block * , int  , struct if_dqinfo * ) ;
   int (*get_dqblk)(struct super_block * , int  , qid_t  , struct if_dqblk * ) ;
   int (*set_dqblk)(struct super_block * , int  , qid_t  , struct if_dqblk * ) ;
   int (*get_xstate)(struct super_block * , struct fs_quota_stat * ) ;
   int (*set_xstate)(struct super_block * , unsigned int  , int  ) ;
   int (*get_xquota)(struct super_block * , int  , qid_t  , struct fs_disk_quota * ) ;
   int (*set_xquota)(struct super_block * , int  , qid_t  , struct fs_disk_quota * ) ;
};
struct quota_format_type {
   int qf_fmt_id ;
   struct quota_format_ops *qf_ops ;
   struct module *qf_owner ;
   struct quota_format_type *qf_next ;
};
struct quota_info {
   unsigned int flags ;
   struct mutex dqio_mutex ;
   struct mutex dqonoff_mutex ;
   struct rw_semaphore dqptr_sem ;
   struct inode *files[2U] ;
   struct mem_dqinfo info[2U] ;
   struct quota_format_ops *ops[2U] ;
};
struct address_space;
struct writeback_control;
union __anonunion_arg_93 {
   char *buf ;
   void *data ;
};
struct __anonstruct_read_descriptor_t_92 {
   size_t written ;
   size_t count ;
   union __anonunion_arg_93 arg ;
   int error ;
};
typedef struct __anonstruct_read_descriptor_t_92 read_descriptor_t;
struct address_space_operations {
   int (*writepage)(struct page * , struct writeback_control * ) ;
   int (*readpage)(struct file * , struct page * ) ;
   void (*sync_page)(struct page * ) ;
   int (*writepages)(struct address_space * , struct writeback_control * ) ;
   int (*set_page_dirty)(struct page * ) ;
   int (*readpages)(struct file * , struct address_space * , struct list_head * ,
                    unsigned int  ) ;
   int (*prepare_write)(struct file * , struct page * , unsigned int  , unsigned int  ) ;
   int (*commit_write)(struct file * , struct page * , unsigned int  , unsigned int  ) ;
   int (*write_begin)(struct file * , struct address_space * , loff_t  , unsigned int  ,
                      unsigned int  , struct page ** , void ** ) ;
   int (*write_end)(struct file * , struct address_space * , loff_t  , unsigned int  ,
                    unsigned int  , struct page * , void * ) ;
   sector_t (*bmap)(struct address_space * , sector_t  ) ;
   void (*invalidatepage)(struct page * , unsigned long  ) ;
   int (*releasepage)(struct page * , gfp_t  ) ;
   ssize_t (*direct_IO)(int  , struct kiocb * , struct iovec  const  * , loff_t  ,
                        unsigned long  ) ;
   int (*get_xip_mem)(struct address_space * , unsigned long  , int  , void ** , unsigned long * ) ;
   int (*migratepage)(struct address_space * , struct page * , struct page * ) ;
   int (*launder_page)(struct page * ) ;
   int (*is_partially_uptodate)(struct page * , read_descriptor_t * , unsigned long  ) ;
};
struct backing_dev_info;
struct address_space {
   struct inode *host ;
   struct radix_tree_root page_tree ;
   spinlock_t tree_lock ;
   unsigned int i_mmap_writable ;
   struct prio_tree_root i_mmap ;
   struct list_head i_mmap_nonlinear ;
   spinlock_t i_mmap_lock ;
   unsigned int truncate_count ;
   unsigned long nrpages ;
   unsigned long writeback_index ;
   struct address_space_operations  const  *a_ops ;
   unsigned long flags ;
   struct backing_dev_info *backing_dev_info ;
   spinlock_t private_lock ;
   struct list_head private_list ;
   struct address_space *assoc_mapping ;
};
struct hd_struct;
struct gendisk;
struct block_device {
   dev_t bd_dev ;
   struct inode *bd_inode ;
   int bd_openers ;
   struct mutex bd_mutex ;
   struct semaphore bd_mount_sem ;
   struct list_head bd_inodes ;
   void *bd_holder ;
   int bd_holders ;
   struct list_head bd_holder_list ;
   struct block_device *bd_contains ;
   unsigned int bd_block_size ;
   struct hd_struct *bd_part ;
   unsigned int bd_part_count ;
   int bd_invalidated ;
   struct gendisk *bd_disk ;
   struct list_head bd_list ;
   struct backing_dev_info *bd_inode_backing_dev_info ;
   unsigned long bd_private ;
};
struct inode_operations;
struct file_lock;
struct cdev;
union __anonunion_ldv_10830_94 {
   struct pipe_inode_info *i_pipe ;
   struct block_device *i_bdev ;
   struct cdev *i_cdev ;
};
struct dnotify_struct;
struct inode {
   struct hlist_node i_hash ;
   struct list_head i_list ;
   struct list_head i_sb_list ;
   struct list_head i_dentry ;
   unsigned long i_ino ;
   atomic_t i_count ;
   unsigned int i_nlink ;
   uid_t i_uid ;
   gid_t i_gid ;
   dev_t i_rdev ;
   u64 i_version ;
   loff_t i_size ;
   struct timespec i_atime ;
   struct timespec i_mtime ;
   struct timespec i_ctime ;
   unsigned int i_blkbits ;
   blkcnt_t i_blocks ;
   unsigned short i_bytes ;
   umode_t i_mode ;
   spinlock_t i_lock ;
   struct mutex i_mutex ;
   struct rw_semaphore i_alloc_sem ;
   struct inode_operations  const  *i_op ;
   struct file_operations  const  *i_fop ;
   struct super_block *i_sb ;
   struct file_lock *i_flock ;
   struct address_space *i_mapping ;
   struct address_space i_data ;
   struct dquot *i_dquot[2U] ;
   struct list_head i_devices ;
   union __anonunion_ldv_10830_94 ldv_10830 ;
   int i_cindex ;
   __u32 i_generation ;
   unsigned long i_dnotify_mask ;
   struct dnotify_struct *i_dnotify ;
   struct list_head inotify_watches ;
   struct mutex inotify_mutex ;
   unsigned long i_state ;
   unsigned long dirtied_when ;
   unsigned int i_flags ;
   atomic_t i_writecount ;
   void *i_security ;
   void *i_private ;
};
struct fown_struct {
   rwlock_t lock ;
   struct pid *pid ;
   enum pid_type pid_type ;
   uid_t uid ;
   uid_t euid ;
   int signum ;
};
struct file_ra_state {
   unsigned long start ;
   unsigned int size ;
   unsigned int async_size ;
   unsigned int ra_pages ;
   int mmap_miss ;
   loff_t prev_pos ;
};
union __anonunion_f_u_95 {
   struct list_head fu_list ;
   struct rcu_head fu_rcuhead ;
};
struct file {
   union __anonunion_f_u_95 f_u ;
   struct path f_path ;
   struct file_operations  const  *f_op ;
   atomic_long_t f_count ;
   unsigned int f_flags ;
   mode_t f_mode ;
   loff_t f_pos ;
   struct fown_struct f_owner ;
   unsigned int f_uid ;
   unsigned int f_gid ;
   struct file_ra_state f_ra ;
   u64 f_version ;
   void *f_security ;
   void *private_data ;
   struct list_head f_ep_links ;
   spinlock_t f_ep_lock ;
   struct address_space *f_mapping ;
   unsigned long f_mnt_write_state ;
};
struct files_struct;
typedef struct files_struct *fl_owner_t;
struct file_lock_operations {
   void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ;
   void (*fl_release_private)(struct file_lock * ) ;
};
struct lock_manager_operations {
   int (*fl_compare_owner)(struct file_lock * , struct file_lock * ) ;
   void (*fl_notify)(struct file_lock * ) ;
   int (*fl_grant)(struct file_lock * , struct file_lock * , int  ) ;
   void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ;
   void (*fl_release_private)(struct file_lock * ) ;
   void (*fl_break)(struct file_lock * ) ;
   int (*fl_mylease)(struct file_lock * , struct file_lock * ) ;
   int (*fl_change)(struct file_lock ** , int  ) ;
};
struct nlm_lockowner;
struct nfs_lock_info {
   u32 state ;
   struct nlm_lockowner *owner ;
   struct list_head list ;
};
struct nfs4_lock_state;
struct nfs4_lock_info {
   struct nfs4_lock_state *owner ;
};
struct fasync_struct;
struct __anonstruct_afs_97 {
   struct list_head link ;
   int state ;
};
union __anonunion_fl_u_96 {
   struct nfs_lock_info nfs_fl ;
   struct nfs4_lock_info nfs4_fl ;
   struct __anonstruct_afs_97 afs ;
};
struct file_lock {
   struct file_lock *fl_next ;
   struct list_head fl_link ;
   struct list_head fl_block ;
   fl_owner_t fl_owner ;
   unsigned char fl_flags ;
   unsigned char fl_type ;
   unsigned int fl_pid ;
   struct pid *fl_nspid ;
   wait_queue_head_t fl_wait ;
   struct file *fl_file ;
   loff_t fl_start ;
   loff_t fl_end ;
   struct fasync_struct *fl_fasync ;
   unsigned long fl_break_time ;
   struct file_lock_operations *fl_ops ;
   struct lock_manager_operations *fl_lmops ;
   union __anonunion_fl_u_96 fl_u ;
};
struct fasync_struct {
   int magic ;
   int fa_fd ;
   struct fasync_struct *fa_next ;
   struct file *fa_file ;
};
struct file_system_type;
struct super_operations;
struct xattr_handler;
struct mtd_info;
struct super_block {
   struct list_head s_list ;
   dev_t s_dev ;
   unsigned long s_blocksize ;
   unsigned char s_blocksize_bits ;
   unsigned char s_dirt ;
   unsigned long long s_maxbytes ;
   struct file_system_type *s_type ;
   struct super_operations  const  *s_op ;
   struct dquot_operations *dq_op ;
   struct quotactl_ops *s_qcop ;
   struct export_operations  const  *s_export_op ;
   unsigned long s_flags ;
   unsigned long s_magic ;
   struct dentry *s_root ;
   struct rw_semaphore s_umount ;
   struct mutex s_lock ;
   int s_count ;
   int s_syncing ;
   int s_need_sync_fs ;
   atomic_t s_active ;
   void *s_security ;
   struct xattr_handler **s_xattr ;
   struct list_head s_inodes ;
   struct list_head s_dirty ;
   struct list_head s_io ;
   struct list_head s_more_io ;
   struct hlist_head s_anon ;
   struct list_head s_files ;
   struct list_head s_dentry_lru ;
   int s_nr_dentry_unused ;
   struct block_device *s_bdev ;
   struct mtd_info *s_mtd ;
   struct list_head s_instances ;
   struct quota_info s_dquot ;
   int s_frozen ;
   wait_queue_head_t s_wait_unfrozen ;
   char s_id[32U] ;
   void *s_fs_info ;
   struct mutex s_vfs_rename_mutex ;
   u32 s_time_gran ;
   char *s_subtype ;
   char *s_options ;
};
struct fiemap_extent_info {
   unsigned int fi_flags ;
   unsigned int fi_extents_mapped ;
   unsigned int fi_extents_max ;
   struct fiemap_extent *fi_extents_start ;
};
struct file_operations {
   struct module *owner ;
   loff_t (*llseek)(struct file * , loff_t  , int  ) ;
   ssize_t (*read)(struct file * , char * , size_t  , loff_t * ) ;
   ssize_t (*write)(struct file * , char const   * , size_t  , loff_t * ) ;
   ssize_t (*aio_read)(struct kiocb * , struct iovec  const  * , unsigned long  ,
                       loff_t  ) ;
   ssize_t (*aio_write)(struct kiocb * , struct iovec  const  * , unsigned long  ,
                        loff_t  ) ;
   int (*readdir)(struct file * , void * , int (*)(void * , char const   * , int  ,
                                                   loff_t  , u64  , unsigned int  ) ) ;
   unsigned int (*poll)(struct file * , struct poll_table_struct * ) ;
   int (*ioctl)(struct inode * , struct file * , unsigned int  , unsigned long  ) ;
   long (*unlocked_ioctl)(struct file * , unsigned int  , unsigned long  ) ;
   long (*compat_ioctl)(struct file * , unsigned int  , unsigned long  ) ;
   int (*mmap)(struct file * , struct vm_area_struct * ) ;
   int (*open)(struct inode * , struct file * ) ;
   int (*flush)(struct file * , fl_owner_t  ) ;
   int (*release)(struct inode * , struct file * ) ;
   int (*fsync)(struct file * , struct dentry * , int  ) ;
   int (*aio_fsync)(struct kiocb * , int  ) ;
   int (*fasync)(int  , struct file * , int  ) ;
   int (*lock)(struct file * , int  , struct file_lock * ) ;
   ssize_t (*sendpage)(struct file * , struct page * , int  , size_t  , loff_t * ,
                       int  ) ;
   unsigned long (*get_unmapped_area)(struct file * , unsigned long  , unsigned long  ,
                                      unsigned long  , unsigned long  ) ;
   int (*check_flags)(int  ) ;
   int (*dir_notify)(struct file * , unsigned long  ) ;
   int (*flock)(struct file * , int  , struct file_lock * ) ;
   ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t  ,
                           unsigned int  ) ;
   ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t  ,
                          unsigned int  ) ;
   int (*setlease)(struct file * , long  , struct file_lock ** ) ;
};
struct inode_operations {
   int (*create)(struct inode * , struct dentry * , int  , struct nameidata * ) ;
   struct dentry *(*lookup)(struct inode * , struct dentry * , struct nameidata * ) ;
   int (*link)(struct dentry * , struct inode * , struct dentry * ) ;
   int (*unlink)(struct inode * , struct dentry * ) ;
   int (*symlink)(struct inode * , struct dentry * , char const   * ) ;
   int (*mkdir)(struct inode * , struct dentry * , int  ) ;
   int (*rmdir)(struct inode * , struct dentry * ) ;
   int (*mknod)(struct inode * , struct dentry * , int  , dev_t  ) ;
   int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ;
   int (*readlink)(struct dentry * , char * , int  ) ;
   void *(*follow_link)(struct dentry * , struct nameidata * ) ;
   void (*put_link)(struct dentry * , struct nameidata * , void * ) ;
   void (*truncate)(struct inode * ) ;
   int (*permission)(struct inode * , int  ) ;
   int (*setattr)(struct dentry * , struct iattr * ) ;
   int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ;
   int (*setxattr)(struct dentry * , char const   * , void const   * , size_t  , int  ) ;
   ssize_t (*getxattr)(struct dentry * , char const   * , void * , size_t  ) ;
   ssize_t (*listxattr)(struct dentry * , char * , size_t  ) ;
   int (*removexattr)(struct dentry * , char const   * ) ;
   void (*truncate_range)(struct inode * , loff_t  , loff_t  ) ;
   long (*fallocate)(struct inode * , int  , loff_t  , loff_t  ) ;
   int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64  , u64  ) ;
};
struct seq_file;
struct super_operations {
   struct inode *(*alloc_inode)(struct super_block * ) ;
   void (*destroy_inode)(struct inode * ) ;
   void (*dirty_inode)(struct inode * ) ;
   int (*write_inode)(struct inode * , int  ) ;
   void (*drop_inode)(struct inode * ) ;
   void (*delete_inode)(struct inode * ) ;
   void (*put_super)(struct super_block * ) ;
   void (*write_super)(struct super_block * ) ;
   int (*sync_fs)(struct super_block * , int  ) ;
   void (*write_super_lockfs)(struct super_block * ) ;
   void (*unlockfs)(struct super_block * ) ;
   int (*statfs)(struct dentry * , struct kstatfs * ) ;
   int (*remount_fs)(struct super_block * , int * , char * ) ;
   void (*clear_inode)(struct inode * ) ;
   void (*umount_begin)(struct super_block * ) ;
   int (*show_options)(struct seq_file * , struct vfsmount * ) ;
   int (*show_stats)(struct seq_file * , struct vfsmount * ) ;
   ssize_t (*quota_read)(struct super_block * , int  , char * , size_t  , loff_t  ) ;
   ssize_t (*quota_write)(struct super_block * , int  , char const   * , size_t  ,
                          loff_t  ) ;
};
struct file_system_type {
   char const   *name ;
   int fs_flags ;
   int (*get_sb)(struct file_system_type * , int  , char const   * , void * , struct vfsmount * ) ;
   void (*kill_sb)(struct super_block * ) ;
   struct module *owner ;
   struct file_system_type *next ;
   struct list_head fs_supers ;
   struct lock_class_key s_lock_key ;
   struct lock_class_key s_umount_key ;
   struct lock_class_key i_lock_key ;
   struct lock_class_key i_mutex_key ;
   struct lock_class_key i_mutex_dir_key ;
   struct lock_class_key i_alloc_sem_key ;
};
struct bio;
typedef int read_proc_t(char * , char ** , off_t  , int  , int * , void * );
typedef int write_proc_t(struct file * , char const   * , unsigned long  , void * );
struct proc_dir_entry {
   unsigned int low_ino ;
   unsigned short namelen ;
   char const   *name ;
   mode_t mode ;
   nlink_t nlink ;
   uid_t uid ;
   gid_t gid ;
   loff_t size ;
   struct inode_operations  const  *proc_iops ;
   struct file_operations  const  *proc_fops ;
   struct module *owner ;
   struct proc_dir_entry *next ;
   struct proc_dir_entry *parent ;
   struct proc_dir_entry *subdir ;
   void *data ;
   read_proc_t *read_proc ;
   write_proc_t *write_proc ;
   atomic_t count ;
   int pde_users ;
   spinlock_t pde_unload_lock ;
   struct completion *pde_unload_completion ;
   struct list_head pde_openers ;
};
typedef unsigned long kernel_ulong_t;
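/* PCI device model: device IDs, resources, bus, device, and driver descriptors. */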
struct pci_device_id {
   __u32 vendor ;
   __u32 device ;
   __u32 subvendor ;
   __u32 subdevice ;
   __u32 class ;
   __u32 class_mask ;
   kernel_ulong_t driver_data ;
};
struct resource {
   resource_size_t start ;
   resource_size_t end ;
   char const   *name ;
   unsigned long flags ;
   struct resource *parent ;
   struct resource *sibling ;
   struct resource *child ;
};
struct pci_dev;
struct klist_node;
struct klist {
   spinlock_t k_lock ;
   struct list_head k_list ;
   void (*get)(struct klist_node * ) ;
   void (*put)(struct klist_node * ) ;
};
struct klist_node {
   void *n_klist ;
   struct list_head n_node ;
   struct kref n_ref ;
   struct completion n_removed ;
};
struct dma_mapping_ops;
struct dev_archdata {
   void *acpi_handle ;
   struct dma_mapping_ops *dma_ops ;
   void *iommu ;
};
struct driver_private;
struct class;
struct class_private;
struct bus_type;
struct bus_type_private;
struct bus_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct bus_type * , char * ) ;
   ssize_t (*store)(struct bus_type * , char const   * , size_t  ) ;
};
struct device_attribute;
struct driver_attribute;
struct bus_type {
   char const   *name ;
   struct bus_attribute *bus_attrs ;
   struct device_attribute *dev_attrs ;
   struct driver_attribute *drv_attrs ;
   int (*match)(struct device * , struct device_driver * ) ;
   int (*uevent)(struct device * , struct kobj_uevent_env * ) ;
   int (*probe)(struct device * ) ;
   int (*remove)(struct device * ) ;
   void (*shutdown)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*suspend_late)(struct device * , pm_message_t  ) ;
   int (*resume_early)(struct device * ) ;
   int (*resume)(struct device * ) ;
   struct pm_ext_ops *pm ;
   struct bus_type_private *p ;
};
struct device_driver {
   char const   *name ;
   struct bus_type *bus ;
   struct module *owner ;
   char const   *mod_name ;
   int (*probe)(struct device * ) ;
   int (*remove)(struct device * ) ;
   void (*shutdown)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*resume)(struct device * ) ;
   struct attribute_group **groups ;
   struct pm_ops *pm ;
   struct driver_private *p ;
};
struct driver_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct device_driver * , char * ) ;
   ssize_t (*store)(struct device_driver * , char const   * , size_t  ) ;
};
struct class_attribute;
struct class {
   char const   *name ;
   struct module *owner ;
   struct class_attribute *class_attrs ;
   struct device_attribute *dev_attrs ;
   struct kobject *dev_kobj ;
   int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ;
   void (*class_release)(struct class * ) ;
   void (*dev_release)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*resume)(struct device * ) ;
   struct pm_ops *pm ;
   struct class_private *p ;
};
struct device_type;
struct class_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct class * , char * ) ;
   ssize_t (*store)(struct class * , char const   * , size_t  ) ;
};
struct device_type {
   char const   *name ;
   struct attribute_group **groups ;
   int (*uevent)(struct device * , struct kobj_uevent_env * ) ;
   void (*release)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*resume)(struct device * ) ;
   struct pm_ops *pm ;
};
struct device_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct device * , struct device_attribute * , char * ) ;
   ssize_t (*store)(struct device * , struct device_attribute * , char const   * ,
                    size_t  ) ;
};
struct device_dma_parameters {
   unsigned int max_segment_size ;
   unsigned long segment_boundary_mask ;
};
struct dma_coherent_mem;
struct device {
   struct klist klist_children ;
   struct klist_node knode_parent ;
   struct klist_node knode_driver ;
   struct klist_node knode_bus ;
   struct device *parent ;
   struct kobject kobj ;
   char bus_id[20U] ;
   char const   *init_name ;
   struct device_type *type ;
   unsigned char uevent_suppress : 1 ;
   struct semaphore sem ;
   struct bus_type *bus ;
   struct device_driver *driver ;
   void *driver_data ;
   void *platform_data ;
   struct dev_pm_info power ;
   int numa_node ;
   u64 *dma_mask ;
   u64 coherent_dma_mask ;
   struct device_dma_parameters *dma_parms ;
   struct list_head dma_pools ;
   struct dma_coherent_mem *dma_mem ;
   struct dev_archdata archdata ;
   spinlock_t devres_lock ;
   struct list_head devres_head ;
   struct klist_node knode_class ;
   struct class *class ;
   dev_t devt ;
   struct attribute_group **groups ;
   void (*release)(struct device * ) ;
};
struct hotplug_slot;
struct pci_slot {
   struct pci_bus *bus ;
   struct list_head list ;
   struct hotplug_slot *hotplug ;
   unsigned char number ;
   struct kobject kobj ;
};
typedef int pci_power_t;
typedef unsigned int pci_channel_state_t;
enum pci_channel_state {
    pci_channel_io_normal = 1,
    pci_channel_io_frozen = 2,
    pci_channel_io_perm_failure = 3
} ;
typedef unsigned short pci_dev_flags_t;
typedef unsigned short pci_bus_flags_t;
struct pcie_link_state;
struct pci_vpd;
struct pci_driver;
struct pci_dev {
   struct list_head bus_list ;
   struct pci_bus *bus ;
   struct pci_bus *subordinate ;
   void *sysdata ;
   struct proc_dir_entry *procent ;
   struct pci_slot *slot ;
   unsigned int devfn ;
   unsigned short vendor ;
   unsigned short device ;
   unsigned short subsystem_vendor ;
   unsigned short subsystem_device ;
   unsigned int class ;
   u8 revision ;
   u8 hdr_type ;
   u8 pcie_type ;
   u8 rom_base_reg ;
   u8 pin ;
   struct pci_driver *driver ;
   u64 dma_mask ;
   struct device_dma_parameters dma_parms ;
   pci_power_t current_state ;
   int pm_cap ;
   unsigned char pme_support : 5 ;
   unsigned char d1_support : 1 ;
   unsigned char d2_support : 1 ;
   unsigned char no_d1d2 : 1 ;
   struct pcie_link_state *link_state ;
   pci_channel_state_t error_state ;
   struct device dev ;
   int cfg_size ;
   unsigned int irq ;
   struct resource resource[12U] ;
   unsigned char transparent : 1 ;
   unsigned char multifunction : 1 ;
   unsigned char is_added : 1 ;
   unsigned char is_busmaster : 1 ;
   unsigned char no_msi : 1 ;
   unsigned char block_ucfg_access : 1 ;
   unsigned char broken_parity_status : 1 ;
   unsigned char msi_enabled : 1 ;
   unsigned char msix_enabled : 1 ;
   unsigned char is_managed : 1 ;
   unsigned char is_pcie : 1 ;
   pci_dev_flags_t dev_flags ;
   atomic_t enable_cnt ;
   u32 saved_config_space[16U] ;
   struct hlist_head saved_cap_space ;
   struct bin_attribute *rom_attr ;
   int rom_attr_enabled ;
   struct bin_attribute *res_attr[12U] ;
   struct bin_attribute *res_attr_wc[12U] ;
   struct list_head msi_list ;
   struct pci_vpd *vpd ;
};
struct pci_ops;
struct pci_bus {
   struct list_head node ;
   struct pci_bus *parent ;
   struct list_head children ;
   struct list_head devices ;
   struct pci_dev *self ;
   struct list_head slots ;
   struct resource *resource[16U] ;
   struct pci_ops *ops ;
   void *sysdata ;
   struct proc_dir_entry *procdir ;
   unsigned char number ;
   unsigned char primary ;
   unsigned char secondary ;
   unsigned char subordinate ;
   char name[48U] ;
   unsigned short bridge_ctl ;
   pci_bus_flags_t bus_flags ;
   struct device *bridge ;
   struct device dev ;
   struct bin_attribute *legacy_io ;
   struct bin_attribute *legacy_mem ;
   unsigned char is_added : 1 ;
};
struct pci_ops {
   int (*read)(struct pci_bus * , unsigned int  , int  , int  , u32 * ) ;
   int (*write)(struct pci_bus * , unsigned int  , int  , int  , u32  ) ;
};
struct pci_dynids {
   spinlock_t lock ;
   struct list_head list ;
   unsigned char use_driver_data : 1 ;
};
typedef unsigned int pci_ers_result_t;
struct pci_error_handlers {
   pci_ers_result_t (*error_detected)(struct pci_dev * , enum pci_channel_state  ) ;
   pci_ers_result_t (*mmio_enabled)(struct pci_dev * ) ;
   pci_ers_result_t (*link_reset)(struct pci_dev * ) ;
   pci_ers_result_t (*slot_reset)(struct pci_dev * ) ;
   void (*resume)(struct pci_dev * ) ;
};
struct pci_driver {
   struct list_head node ;
   char *name ;
   struct pci_device_id  const  *id_table ;
   int (*probe)(struct pci_dev * , struct pci_device_id  const  * ) ;
   void (*remove)(struct pci_dev * ) ;
   int (*suspend)(struct pci_dev * , pm_message_t  ) ;
   int (*suspend_late)(struct pci_dev * , pm_message_t  ) ;
   int (*resume_early)(struct pci_dev * ) ;
   int (*resume)(struct pci_dev * ) ;
   void (*shutdown)(struct pci_dev * ) ;
   struct pm_ext_ops *pm ;
   struct pci_error_handlers *err_handler ;
   struct device_driver driver ;
   struct pci_dynids dynids ;
};
struct scatterlist {
   unsigned long sg_magic ;
   unsigned long page_link ;
   unsigned int offset ;
   unsigned int length ;
   dma_addr_t dma_address ;
   unsigned int dma_length ;
};
struct rb_node {
   unsigned long rb_parent_color ;
   struct rb_node *rb_right ;
   struct rb_node *rb_left ;
};
struct rb_root {
   struct rb_node *rb_node ;
};
typedef atomic_long_t mm_counter_t;
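/* Memory-management core: struct page, vm_area_struct, and mm_struct definitions. */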
struct __anonstruct_ldv_14864_100 {
   u16 inuse ;
   u16 objects ;
};
union __anonunion_ldv_14865_99 {
   atomic_t _mapcount ;
   struct __anonstruct_ldv_14864_100 ldv_14864 ;
};
struct __anonstruct_ldv_14870_102 {
   unsigned long private ;
   struct address_space *mapping ;
};
union __anonunion_ldv_14874_101 {
   struct __anonstruct_ldv_14870_102 ldv_14870 ;
   spinlock_t ptl ;
   struct kmem_cache *slab ;
   struct page *first_page ;
};
union __anonunion_ldv_14878_103 {
   unsigned long index ;
   void *freelist ;
};
struct page {
   unsigned long flags ;
   atomic_t _count ;
   union __anonunion_ldv_14865_99 ldv_14865 ;
   union __anonunion_ldv_14874_101 ldv_14874 ;
   union __anonunion_ldv_14878_103 ldv_14878 ;
   struct list_head lru ;
   unsigned long page_cgroup ;
};
struct __anonstruct_vm_set_105 {
   struct list_head list ;
   void *parent ;
   struct vm_area_struct *head ;
};
union __anonunion_shared_104 {
   struct __anonstruct_vm_set_105 vm_set ;
   struct raw_prio_tree_node prio_tree_node ;
};
struct anon_vma;
struct vm_operations_struct;
struct mempolicy;
struct vm_area_struct {
   struct mm_struct *vm_mm ;
   unsigned long vm_start ;
   unsigned long vm_end ;
   struct vm_area_struct *vm_next ;
   pgprot_t vm_page_prot ;
   unsigned long vm_flags ;
   struct rb_node vm_rb ;
   union __anonunion_shared_104 shared ;
   struct list_head anon_vma_node ;
   struct anon_vma *anon_vma ;
   struct vm_operations_struct *vm_ops ;
   unsigned long vm_pgoff ;
   struct file *vm_file ;
   void *vm_private_data ;
   unsigned long vm_truncate_count ;
   struct mempolicy *vm_policy ;
};
struct core_thread {
   struct task_struct *task ;
   struct core_thread *next ;
};
struct core_state {
   atomic_t nr_threads ;
   struct core_thread dumper ;
   struct completion startup ;
};
struct kioctx;
struct mmu_notifier_mm;
struct mm_struct {
   struct vm_area_struct *mmap ;
   struct rb_root mm_rb ;
   struct vm_area_struct *mmap_cache ;
   unsigned long (*get_unmapped_area)(struct file * , unsigned long  , unsigned long  ,
                                      unsigned long  , unsigned long  ) ;
   void (*unmap_area)(struct mm_struct * , unsigned long  ) ;
   unsigned long mmap_base ;
   unsigned long task_size ;
   unsigned long cached_hole_size ;
   unsigned long free_area_cache ;
   pgd_t *pgd ;
   atomic_t mm_users ;
   atomic_t mm_count ;
   int map_count ;
   struct rw_semaphore mmap_sem ;
   spinlock_t page_table_lock ;
   struct list_head mmlist ;
   mm_counter_t _file_rss ;
   mm_counter_t _anon_rss ;
   unsigned long hiwater_rss ;
   unsigned long hiwater_vm ;
   unsigned long total_vm ;
   unsigned long locked_vm ;
   unsigned long shared_vm ;
   unsigned long exec_vm ;
   unsigned long stack_vm ;
   unsigned long reserved_vm ;
   unsigned long def_flags ;
   unsigned long nr_ptes ;
   unsigned long start_code ;
   unsigned long end_code ;
   unsigned long start_data ;
   unsigned long end_data ;
   unsigned long start_brk ;
   unsigned long brk ;
   unsigned long start_stack ;
   unsigned long arg_start ;
   unsigned long arg_end ;
   unsigned long env_start ;
   unsigned long env_end ;
   unsigned long saved_auxv[42U] ;
   cpumask_t cpu_vm_mask ;
   mm_context_t context ;
   unsigned int faultstamp ;
   unsigned int token_priority ;
   unsigned int last_interval ;
   unsigned long flags ;
   struct core_state *core_state ;
   rwlock_t ioctx_list_lock ;
   struct kioctx *ioctx_list ;
   struct task_struct *owner ;
   struct file *exe_file ;
   unsigned long num_exe_file_vmas ;
   struct mmu_notifier_mm *mmu_notifier_mm ;
};
struct user_struct;
struct vm_fault {
   unsigned int flags ;
   unsigned long pgoff ;
   void *virtual_address ;
   struct page *page ;
};
struct vm_operations_struct {
   void (*open)(struct vm_area_struct * ) ;
   void (*close)(struct vm_area_struct * ) ;
   int (*fault)(struct vm_area_struct * , struct vm_fault * ) ;
   int (*page_mkwrite)(struct vm_area_struct * , struct page * ) ;
   int (*access)(struct vm_area_struct * , unsigned long  , void * , int  , int  ) ;
   int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ;
   struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long  ) ;
   int (*migrate)(struct vm_area_struct * , nodemask_t const   * , nodemask_t const   * ,
                  unsigned long  ) ;
};
struct dma_mapping_ops {
   int (*mapping_error)(struct device * , dma_addr_t  ) ;
   void *(*alloc_coherent)(struct device * , size_t  , dma_addr_t * , gfp_t  ) ;
   void (*free_coherent)(struct device * , size_t  , void * , dma_addr_t  ) ;
   dma_addr_t (*map_single)(struct device * , phys_addr_t  , size_t  , int  ) ;
   void (*unmap_single)(struct device * , dma_addr_t  , size_t  , int  ) ;
   void (*sync_single_for_cpu)(struct device * , dma_addr_t  , size_t  , int  ) ;
   void (*sync_single_for_device)(struct device * , dma_addr_t  , size_t  , int  ) ;
   void (*sync_single_range_for_cpu)(struct device * , dma_addr_t  , unsigned long  ,
                                     size_t  , int  ) ;
   void (*sync_single_range_for_device)(struct device * , dma_addr_t  , unsigned long  ,
                                        size_t  , int  ) ;
   void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int  , int  ) ;
   void (*sync_sg_for_device)(struct device * , struct scatterlist * , int  , int  ) ;
   int (*map_sg)(struct device * , struct scatterlist * , int  , int  ) ;
   void (*unmap_sg)(struct device * , struct scatterlist * , int  , int  ) ;
   int (*dma_supported)(struct device * , u64  ) ;
   int is_phys ;
};
typedef unsigned long cputime_t;
struct sem_undo_list;
struct sem_undo_list {
   atomic_t refcnt ;
   spinlock_t lock ;
   struct list_head list_proc ;
};
struct sysv_sem {
   struct sem_undo_list *undo_list ;
};
struct siginfo;
struct __anonstruct_sigset_t_106 {
   unsigned long sig[1U] ;
};
typedef struct __anonstruct_sigset_t_106 sigset_t;
typedef void __signalfn_t(int  );
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
struct sigaction {
   __sighandler_t sa_handler ;
   unsigned long sa_flags ;
   __sigrestore_t sa_restorer ;
   sigset_t sa_mask ;
};
struct k_sigaction {
   struct sigaction sa ;
};
union sigval {
   int sival_int ;
   void *sival_ptr ;
};
typedef union sigval sigval_t;
struct __anonstruct__kill_108 {
   pid_t _pid ;
   uid_t _uid ;
};
struct __anonstruct__timer_109 {
   timer_t _tid ;
   int _overrun ;
   char _pad[0U] ;
   sigval_t _sigval ;
   int _sys_private ;
};
struct __anonstruct__rt_110 {
   pid_t _pid ;
   uid_t _uid ;
   sigval_t _sigval ;
};
struct __anonstruct__sigchld_111 {
   pid_t _pid ;
   uid_t _uid ;
   int _status ;
   clock_t _utime ;
   clock_t _stime ;
};
struct __anonstruct__sigfault_112 {
   void *_addr ;
};
struct __anonstruct__sigpoll_113 {
   long _band ;
   int _fd ;
};
union __anonunion__sifields_107 {
   int _pad[28U] ;
   struct __anonstruct__kill_108 _kill ;
   struct __anonstruct__timer_109 _timer ;
   struct __anonstruct__rt_110 _rt ;
   struct __anonstruct__sigchld_111 _sigchld ;
   struct __anonstruct__sigfault_112 _sigfault ;
   struct __anonstruct__sigpoll_113 _sigpoll ;
};
struct siginfo {
   int si_signo ;
   int si_errno ;
   int si_code ;
   union __anonunion__sifields_107 _sifields ;
};
typedef struct siginfo siginfo_t;
struct sigpending {
   struct list_head list ;
   sigset_t signal ;
};
struct fs_struct {
   atomic_t count ;
   rwlock_t lock ;
   int umask ;
   struct path root ;
   struct path pwd ;
};
struct prop_local_single {
   unsigned long events ;
   unsigned long period ;
   int shift ;
   spinlock_t lock ;
};
struct __anonstruct_seccomp_t_116 {
   int mode ;
};
typedef struct __anonstruct_seccomp_t_116 seccomp_t;
struct plist_head {
   struct list_head prio_list ;
   struct list_head node_list ;
   spinlock_t *lock ;
};
struct rt_mutex_waiter;
struct rlimit {
   unsigned long rlim_cur ;
   unsigned long rlim_max ;
};
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
enum hrtimer_restart {
    HRTIMER_NORESTART = 0,
    HRTIMER_RESTART = 1
} ;
enum hrtimer_cb_mode {
    HRTIMER_CB_SOFTIRQ = 0,
    HRTIMER_CB_IRQSAFE = 1,
    HRTIMER_CB_IRQSAFE_NO_RESTART = 2,
    HRTIMER_CB_IRQSAFE_PERCPU = 3,
    HRTIMER_CB_IRQSAFE_UNLOCKED = 4
} ;
struct hrtimer {
   struct rb_node node ;
   ktime_t expires ;
   enum hrtimer_restart (*function)(struct hrtimer * ) ;
   struct hrtimer_clock_base *base ;
   unsigned long state ;
   enum hrtimer_cb_mode cb_mode ;
   struct list_head cb_entry ;
   void *start_site ;
   char start_comm[16U] ;
   int start_pid ;
};
struct hrtimer_clock_base {
   struct hrtimer_cpu_base *cpu_base ;
   clockid_t index ;
   struct rb_root active ;
   struct rb_node *first ;
   ktime_t resolution ;
   ktime_t (*get_time)(void) ;
   ktime_t (*get_softirq_time)(void) ;
   ktime_t softirq_time ;
   ktime_t offset ;
   int (*reprogram)(struct hrtimer * , struct hrtimer_clock_base * , ktime_t  ) ;
};
struct hrtimer_cpu_base {
   spinlock_t lock ;
   struct hrtimer_clock_base clock_base[2U] ;
   struct list_head cb_pending ;
   ktime_t expires_next ;
   int hres_active ;
   unsigned long nr_events ;
};
struct task_io_accounting {
   u64 rchar ;
   u64 wchar ;
   u64 syscr ;
   u64 syscw ;
   u64 read_bytes ;
   u64 write_bytes ;
   u64 cancelled_write_bytes ;
};
struct latency_record {
   unsigned long backtrace[12U] ;
   unsigned int count ;
   unsigned long time ;
   unsigned long max ;
};
struct futex_pi_state;
struct robust_list_head;
struct cfs_rq;
struct task_group;
struct nsproxy;
struct io_event {
   __u64 data ;
   __u64 obj ;
   __s64 res ;
   __s64 res2 ;
};
struct iovec {
   void *iov_base ;
   __kernel_size_t iov_len ;
};
union __anonunion_ki_obj_117 {
   void *user ;
   struct task_struct *tsk ;
};
struct kiocb {
   struct list_head ki_run_list ;
   unsigned long ki_flags ;
   int ki_users ;
   unsigned int ki_key ;
   struct file *ki_filp ;
   struct kioctx *ki_ctx ;
   int (*ki_cancel)(struct kiocb * , struct io_event * ) ;
   ssize_t (*ki_retry)(struct kiocb * ) ;
   void (*ki_dtor)(struct kiocb * ) ;
   union __anonunion_ki_obj_117 ki_obj ;
   __u64 ki_user_data ;
   wait_queue_t ki_wait ;
   loff_t ki_pos ;
   void *private ;
   unsigned short ki_opcode ;
   size_t ki_nbytes ;
   char *ki_buf ;
   size_t ki_left ;
   struct iovec ki_inline_vec ;
   struct iovec *ki_iovec ;
   unsigned long ki_nr_segs ;
   unsigned long ki_cur_seg ;
   struct list_head ki_list ;
   struct file *ki_eventfd ;
};
struct aio_ring_info {
   unsigned long mmap_base ;
   unsigned long mmap_size ;
   struct page **ring_pages ;
   spinlock_t ring_lock ;
   long nr_pages ;
   unsigned int nr ;
   unsigned int tail ;
   struct page *internal_pages[8U] ;
};
struct kioctx {
   atomic_t users ;
   int dead ;
   struct mm_struct *mm ;
   unsigned long user_id ;
   struct kioctx *next ;
   wait_queue_head_t wait ;
   spinlock_t ctx_lock ;
   int reqs_active ;
   struct list_head active_reqs ;
   struct list_head run_list ;
   unsigned int max_reqs ;
   struct aio_ring_info ring_info ;
   struct delayed_work wq ;
};
struct sighand_struct {
   atomic_t count ;
   struct k_sigaction action[64U] ;
   spinlock_t siglock ;
   wait_queue_head_t signalfd_wqh ;
};
struct pacct_struct {
   int ac_flag ;
   long ac_exitcode ;
   unsigned long ac_mem ;
   cputime_t ac_utime ;
   cputime_t ac_stime ;
   unsigned long ac_minflt ;
   unsigned long ac_majflt ;
};
union __anonunion_ldv_18331_118 {
   pid_t pgrp ;
   pid_t __pgrp ;
};
union __anonunion_ldv_18336_119 {
   pid_t session ;
   pid_t __session ;
};
struct tty_struct;
struct taskstats;
struct tty_audit_buf;
struct signal_struct {
   atomic_t count ;
   atomic_t live ;
   wait_queue_head_t wait_chldexit ;
   struct task_struct *curr_target ;
   struct sigpending shared_pending ;
   int group_exit_code ;
   int notify_count ;
   struct task_struct *group_exit_task ;
   int group_stop_count ;
   unsigned int flags ;
   struct list_head posix_timers ;
   struct hrtimer real_timer ;
   struct pid *leader_pid ;
   ktime_t it_real_incr ;
   cputime_t it_prof_expires ;
   cputime_t it_virt_expires ;
   cputime_t it_prof_incr ;
   cputime_t it_virt_incr ;
   union __anonunion_ldv_18331_118 ldv_18331 ;
   struct pid *tty_old_pgrp ;
   union __anonunion_ldv_18336_119 ldv_18336 ;
   int leader ;
   struct tty_struct *tty ;
   cputime_t utime ;
   cputime_t stime ;
   cputime_t cutime ;
   cputime_t cstime ;
   cputime_t gtime ;
   cputime_t cgtime ;
   unsigned long nvcsw ;
   unsigned long nivcsw ;
   unsigned long cnvcsw ;
   unsigned long cnivcsw ;
   unsigned long min_flt ;
   unsigned long maj_flt ;
   unsigned long cmin_flt ;
   unsigned long cmaj_flt ;
   unsigned long inblock ;
   unsigned long oublock ;
   unsigned long cinblock ;
   unsigned long coublock ;
   struct task_io_accounting ioac ;
   unsigned long long sum_sched_runtime ;
   struct rlimit rlim[16U] ;
   struct list_head cpu_timers[3U] ;
   struct key *session_keyring ;
   struct key *process_keyring ;
   struct pacct_struct pacct ;
   struct taskstats *stats ;
   unsigned int audit_tty ;
   struct tty_audit_buf *tty_audit_buf ;
};
struct user_struct {
   atomic_t __count ;
   atomic_t processes ;
   atomic_t files ;
   atomic_t sigpending ;
   atomic_t inotify_watches ;
   atomic_t inotify_devs ;
   unsigned long mq_bytes ;
   unsigned long locked_shm ;
   struct key *uid_keyring ;
   struct key *session_keyring ;
   struct hlist_node uidhash_node ;
   uid_t uid ;
   struct task_group *tg ;
   struct kobject kobj ;
   struct work_struct work ;
};
struct reclaim_state;
struct sched_info {
   unsigned long pcount ;
   unsigned long long cpu_time ;
   unsigned long long run_delay ;
   unsigned long long last_arrival ;
   unsigned long long last_queued ;
   unsigned int bkl_count ;
};
struct task_delay_info {
   spinlock_t lock ;
   unsigned int flags ;
   struct timespec blkio_start ;
   struct timespec blkio_end ;
   u64 blkio_delay ;
   u64 swapin_delay ;
   u32 blkio_count ;
   u32 swapin_count ;
   struct timespec freepages_start ;
   struct timespec freepages_end ;
   u64 freepages_delay ;
   u32 freepages_count ;
};
enum cpu_idle_type {
    CPU_IDLE = 0,
    CPU_NOT_IDLE = 1,
    CPU_NEWLY_IDLE = 2,
    CPU_MAX_IDLE_TYPES = 3
} ;
struct sched_group {
   struct sched_group *next ;
   cpumask_t cpumask ;
   unsigned int __cpu_power ;
   u32 reciprocal_cpu_power ;
};
enum sched_domain_level {
    SD_LV_NONE = 0,
    SD_LV_SIBLING = 1,
    SD_LV_MC = 2,
    SD_LV_CPU = 3,
    SD_LV_NODE = 4,
    SD_LV_ALLNODES = 5,
    SD_LV_MAX = 6
} ;
struct sched_domain {
   struct sched_domain *parent ;
   struct sched_domain *child ;
   struct sched_group *groups ;
   cpumask_t span ;
   unsigned long min_interval ;
   unsigned long max_interval ;
   unsigned int busy_factor ;
   unsigned int imbalance_pct ;
   unsigned int cache_nice_tries ;
   unsigned int busy_idx ;
   unsigned int idle_idx ;
   unsigned int newidle_idx ;
   unsigned int wake_idx ;
   unsigned int forkexec_idx ;
   int flags ;
   enum sched_domain_level level ;
   unsigned long last_balance ;
   unsigned int balance_interval ;
   unsigned int nr_balance_failed ;
   u64 last_update ;
   unsigned int lb_count[3U] ;
   unsigned int lb_failed[3U] ;
   unsigned int lb_balanced[3U] ;
   unsigned int lb_imbalance[3U] ;
   unsigned int lb_gained[3U] ;
   unsigned int lb_hot_gained[3U] ;
   unsigned int lb_nobusyg[3U] ;
   unsigned int lb_nobusyq[3U] ;
   unsigned int alb_count ;
   unsigned int alb_failed ;
   unsigned int alb_pushed ;
   unsigned int sbe_count ;
   unsigned int sbe_balanced ;
   unsigned int sbe_pushed ;
   unsigned int sbf_count ;
   unsigned int sbf_balanced ;
   unsigned int sbf_pushed ;
   unsigned int ttwu_wake_remote ;
   unsigned int ttwu_move_affine ;
   unsigned int ttwu_move_balance ;
   char *name ;
};
struct io_context;
struct group_info {
   int ngroups ;
   atomic_t usage ;
   gid_t small_block[32U] ;
   int nblocks ;
   gid_t *blocks[0U] ;
};
struct audit_context;
struct rq;
struct sched_class {
   struct sched_class  const  *next ;
   void (*enqueue_task)(struct rq * , struct task_struct * , int  ) ;
   void (*dequeue_task)(struct rq * , struct task_struct * , int  ) ;
   void (*yield_task)(struct rq * ) ;
   int (*select_task_rq)(struct task_struct * , int  ) ;
   void (*check_preempt_curr)(struct rq * , struct task_struct * , int  ) ;
   struct task_struct *(*pick_next_task)(struct rq * ) ;
   void (*put_prev_task)(struct rq * , struct task_struct * ) ;
   unsigned long (*load_balance)(struct rq * , int  , struct rq * , unsigned long  ,
                                 struct sched_domain * , enum cpu_idle_type  , int * ,
                                 int * ) ;
   int (*move_one_task)(struct rq * , int  , struct rq * , struct sched_domain * ,
                        enum cpu_idle_type  ) ;
   void (*pre_schedule)(struct rq * , struct task_struct * ) ;
   void (*post_schedule)(struct rq * ) ;
   void (*task_wake_up)(struct rq * , struct task_struct * ) ;
   void (*set_curr_task)(struct rq * ) ;
   void (*task_tick)(struct rq * , struct task_struct * , int  ) ;
   void (*task_new)(struct rq * , struct task_struct * ) ;
   void (*set_cpus_allowed)(struct task_struct * , cpumask_t const   * ) ;
   void (*rq_online)(struct rq * ) ;
   void (*rq_offline)(struct rq * ) ;
   void (*switched_from)(struct rq * , struct task_struct * , int  ) ;
   void (*switched_to)(struct rq * , struct task_struct * , int  ) ;
   void (*prio_changed)(struct rq * , struct task_struct * , int  , int  ) ;
   void (*moved_group)(struct task_struct * ) ;
};
struct load_weight {
   unsigned long weight ;
   unsigned long inv_weight ;
};
struct sched_entity {
   struct load_weight load ;
   struct rb_node run_node ;
   struct list_head group_node ;
   unsigned int on_rq ;
   u64 exec_start ;
   u64 sum_exec_runtime ;
   u64 vruntime ;
   u64 prev_sum_exec_runtime ;
   u64 last_wakeup ;
   u64 avg_overlap ;
   u64 wait_start ;
   u64 wait_max ;
   u64 wait_count ;
   u64 wait_sum ;
   u64 sleep_start ;
   u64 sleep_max ;
   s64 sum_sleep_runtime ;
   u64 block_start ;
   u64 block_max ;
   u64 exec_max ;
   u64 slice_max ;
   u64 nr_migrations ;
   u64 nr_migrations_cold ;
   u64 nr_failed_migrations_affine ;
   u64 nr_failed_migrations_running ;
   u64 nr_failed_migrations_hot ;
   u64 nr_forced_migrations ;
   u64 nr_forced2_migrations ;
   u64 nr_wakeups ;
   u64 nr_wakeups_sync ;
   u64 nr_wakeups_migrate ;
   u64 nr_wakeups_local ;
   u64 nr_wakeups_remote ;
   u64 nr_wakeups_affine ;
   u64 nr_wakeups_affine_attempts ;
   u64 nr_wakeups_passive ;
   u64 nr_wakeups_idle ;
   struct sched_entity *parent ;
   struct cfs_rq *cfs_rq ;
   struct cfs_rq *my_q ;
};
struct rt_rq;
struct sched_rt_entity {
   struct list_head run_list ;
   unsigned long timeout ;
   unsigned int time_slice ;
   int nr_cpus_allowed ;
   struct sched_rt_entity *back ;
   struct sched_rt_entity *parent ;
   struct rt_rq *rt_rq ;
   struct rt_rq *my_q ;
};
struct linux_binfmt;
struct css_set;
struct compat_robust_list_head;
struct task_struct {
   long volatile   state ;
   void *stack ;
   atomic_t usage ;
   unsigned int flags ;
   unsigned int ptrace ;
   int lock_depth ;
   int prio ;
   int static_prio ;
   int normal_prio ;
   unsigned int rt_priority ;
   struct sched_class  const  *sched_class ;
   struct sched_entity se ;
   struct sched_rt_entity rt ;
   struct hlist_head preempt_notifiers ;
   unsigned char fpu_counter ;
   s8 oomkilladj ;
   unsigned int btrace_seq ;
   unsigned int policy ;
   cpumask_t cpus_allowed ;
   struct sched_info sched_info ;
   struct list_head tasks ;
   struct mm_struct *mm ;
   struct mm_struct *active_mm ;
   struct linux_binfmt *binfmt ;
   int exit_state ;
   int exit_code ;
   int exit_signal ;
   int pdeath_signal ;
   unsigned int personality ;
   unsigned char did_exec : 1 ;
   pid_t pid ;
   pid_t tgid ;
   struct task_struct *real_parent ;
   struct task_struct *parent ;
   struct list_head children ;
   struct list_head sibling ;
   struct task_struct *group_leader ;
   struct list_head ptraced ;
   struct list_head ptrace_entry ;
   struct pid_link pids[3U] ;
   struct list_head thread_group ;
   struct completion *vfork_done ;
   int *set_child_tid ;
   int *clear_child_tid ;
   cputime_t utime ;
   cputime_t stime ;
   cputime_t utimescaled ;
   cputime_t stimescaled ;
   cputime_t gtime ;
   cputime_t prev_utime ;
   cputime_t prev_stime ;
   unsigned long nvcsw ;
   unsigned long nivcsw ;
   struct timespec start_time ;
   struct timespec real_start_time ;
   unsigned long min_flt ;
   unsigned long maj_flt ;
   cputime_t it_prof_expires ;
   cputime_t it_virt_expires ;
   unsigned long long it_sched_expires ;
   struct list_head cpu_timers[3U] ;
   uid_t uid ;
   uid_t euid ;
   uid_t suid ;
   uid_t fsuid ;
   gid_t gid ;
   gid_t egid ;
   gid_t sgid ;
   gid_t fsgid ;
   struct group_info *group_info ;
   kernel_cap_t cap_effective ;
   kernel_cap_t cap_inheritable ;
   kernel_cap_t cap_permitted ;
   kernel_cap_t cap_bset ;
   struct user_struct *user ;
   unsigned int securebits ;
   unsigned char jit_keyring ;
   struct key *request_key_auth ;
   struct key *thread_keyring ;
   char comm[16U] ;
   int link_count ;
   int total_link_count ;
   struct sysv_sem sysvsem ;
   unsigned long last_switch_timestamp ;
   unsigned long last_switch_count ;
   struct thread_struct thread ;
   struct fs_struct *fs ;
   struct files_struct *files ;
   struct nsproxy *nsproxy ;
   struct signal_struct *signal ;
   struct sighand_struct *sighand ;
   sigset_t blocked ;
   sigset_t real_blocked ;
   sigset_t saved_sigmask ;
   struct sigpending pending ;
   unsigned long sas_ss_sp ;
   size_t sas_ss_size ;
   int (*notifier)(void * ) ;
   void *notifier_data ;
   sigset_t *notifier_mask ;
   void *security ;
   struct audit_context *audit_context ;
   uid_t loginuid ;
   unsigned int sessionid ;
   seccomp_t seccomp ;
   u32 parent_exec_id ;
   u32 self_exec_id ;
   spinlock_t alloc_lock ;
   spinlock_t pi_lock ;
   struct plist_head pi_waiters ;
   struct rt_mutex_waiter *pi_blocked_on ;
   struct mutex_waiter *blocked_on ;
   unsigned int irq_events ;
   int hardirqs_enabled ;
   unsigned long hardirq_enable_ip ;
   unsigned int hardirq_enable_event ;
   unsigned long hardirq_disable_ip ;
   unsigned int hardirq_disable_event ;
   int softirqs_enabled ;
   unsigned long softirq_disable_ip ;
   unsigned int softirq_disable_event ;
   unsigned long softirq_enable_ip ;
   unsigned int softirq_enable_event ;
   int hardirq_context ;
   int softirq_context ;
   u64 curr_chain_key ;
   int lockdep_depth ;
   unsigned int lockdep_recursion ;
   struct held_lock held_locks[48U] ;
   void *journal_info ;
   struct bio *bio_list ;
   struct bio **bio_tail ;
   struct reclaim_state *reclaim_state ;
   struct backing_dev_info *backing_dev_info ;
   struct io_context *io_context ;
   unsigned long ptrace_message ;
   siginfo_t *last_siginfo ;
   struct task_io_accounting ioac ;
   u64 acct_rss_mem1 ;
   u64 acct_vm_mem1 ;
   cputime_t acct_timexpd ;
   nodemask_t mems_allowed ;
   int cpuset_mems_generation ;
   int cpuset_mem_spread_rotor ;
   struct css_set *cgroups ;
   struct list_head cg_list ;
   struct robust_list_head *robust_list ;
   struct compat_robust_list_head *compat_robust_list ;
   struct list_head pi_state_list ;
   struct futex_pi_state *pi_state_cache ;
   struct mempolicy *mempolicy ;
   short il_next ;
   atomic_t fs_excl ;
   struct rcu_head rcu ;
   struct pipe_inode_info *splice_pipe ;
   struct task_delay_info *delays ;
   int make_it_fail ;
   struct prop_local_single dirties ;
   int latency_record_count ;
   struct latency_record latency_record[32U] ;
};
struct cdev {
   struct kobject kobj ;
   struct module *owner ;
   struct file_operations  const  *ops ;
   struct list_head list ;
   dev_t dev ;
   unsigned int count ;
};
struct exception_table_entry {
   unsigned long insn ;
   unsigned long fixup ;
};
typedef s32 compat_time_t;
typedef s32 compat_long_t;
struct compat_timespec {
   compat_time_t tv_sec ;
   s32 tv_nsec ;
};
typedef u32 compat_uptr_t;
struct compat_robust_list {
   compat_uptr_t next ;
};
struct compat_robust_list_head {
   struct compat_robust_list list ;
   compat_long_t futex_offset ;
   compat_uptr_t list_op_pending ;
};
enum chipset_type {
    NOT_SUPPORTED = 0,
    SUPPORTED = 1
} ;
struct agp_version {
   u16 major ;
   u16 minor ;
};
struct agp_kern_info {
   struct agp_version version ;
   struct pci_dev *device ;
   enum chipset_type chipset ;
   unsigned long mode ;
   unsigned long aper_base ;
   size_t aper_size ;
   int max_memory ;
   int current_memory ;
   bool cant_use_aperture ;
   unsigned long page_mask ;
   struct vm_operations_struct *vm_ops ;
};
struct agp_bridge_data;
struct poll_table_struct {
   void (*qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ;
};
typedef int irqreturn_t;
typedef unsigned int drm_handle_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
struct drm_tex_region {
   unsigned char next ;
   unsigned char prev ;
   unsigned char in_use ;
   unsigned char padding ;
   unsigned int age ;
};
struct drm_hw_lock {
   unsigned int volatile   lock ;
   char padding[60U] ;
};
enum drm_map_type {
    _DRM_FRAME_BUFFER = 0,
    _DRM_REGISTERS = 1,
    _DRM_SHM = 2,
    _DRM_AGP = 3,
    _DRM_SCATTER_GATHER = 4,
    _DRM_CONSISTENT = 5
} ;
enum drm_map_flags {
    _DRM_RESTRICTED = 1,
    _DRM_READ_ONLY = 2,
    _DRM_LOCKED = 4,
    _DRM_KERNEL = 8,
    _DRM_WRITE_COMBINING = 16,
    _DRM_CONTAINS_LOCK = 32,
    _DRM_REMOVABLE = 64,
    _DRM_DRIVER = 128
} ;
struct drm_map {
   unsigned long offset ;
   unsigned long size ;
   enum drm_map_type type ;
   enum drm_map_flags flags ;
   void *handle ;
   int mtrr ;
};
enum drm_stat_type {
    _DRM_STAT_LOCK = 0,
    _DRM_STAT_OPENS = 1,
    _DRM_STAT_CLOSES = 2,
    _DRM_STAT_IOCTLS = 3,
    _DRM_STAT_LOCKS = 4,
    _DRM_STAT_UNLOCKS = 5,
    _DRM_STAT_VALUE = 6,
    _DRM_STAT_BYTE = 7,
    _DRM_STAT_COUNT = 8,
    _DRM_STAT_IRQ = 9,
    _DRM_STAT_PRIMARY = 10,
    _DRM_STAT_SECONDARY = 11,
    _DRM_STAT_DMA = 12,
    _DRM_STAT_SPECIAL = 13,
    _DRM_STAT_MISSED = 14
} ;
enum drm_ctx_flags {
    _DRM_CONTEXT_PRESERVED = 1,
    _DRM_CONTEXT_2DONLY = 2
} ;
struct drm_set_version {
   int drm_di_major ;
   int drm_di_minor ;
   int drm_dd_major ;
   int drm_dd_minor ;
};
struct idr_layer {
   unsigned long bitmap ;
   struct idr_layer *ary[64U] ;
   int count ;
   struct rcu_head rcu_head ;
};
struct idr {
   struct idr_layer *top ;
   struct idr_layer *id_free ;
   int layers ;
   int id_free_cnt ;
   spinlock_t lock ;
};
struct drm_file;
struct drm_device;
struct drm_open_hash {
   unsigned int size ;
   unsigned int order ;
   unsigned int fill ;
   struct hlist_head *table ;
   int use_vmalloc ;
};
typedef int drm_ioctl_t(struct drm_device * , void * , struct drm_file * );
struct drm_ioctl_desc {
   unsigned int cmd ;
   drm_ioctl_t *func ;
   int flags ;
};
enum ldv_18039 {
    DRM_LIST_NONE = 0,
    DRM_LIST_FREE = 1,
    DRM_LIST_WAIT = 2,
    DRM_LIST_PEND = 3,
    DRM_LIST_PRIO = 4,
    DRM_LIST_RECLAIM = 5
} ;
struct drm_buf {
   int idx ;
   int total ;
   int order ;
   int used ;
   unsigned long offset ;
   void *address ;
   unsigned long bus_address ;
   struct drm_buf *next ;
   int volatile   waiting ;
   int volatile   pending ;
   wait_queue_head_t dma_wait ;
   struct drm_file *file_priv ;
   int context ;
   int while_locked ;
   enum ldv_18039 list ;
   int dev_priv_size ;
   void *dev_private ;
};
struct drm_waitlist {
   int count ;
   struct drm_buf **bufs ;
   struct drm_buf **rp ;
   struct drm_buf **wp ;
   struct drm_buf **end ;
   spinlock_t read_lock ;
   spinlock_t write_lock ;
};
struct drm_freelist {
   int initialized ;
   atomic_t count ;
   struct drm_buf *next ;
   wait_queue_head_t waiting ;
   int low_mark ;
   int high_mark ;
   atomic_t wfh ;
   spinlock_t lock ;
};
struct drm_dma_handle {
   dma_addr_t busaddr ;
   void *vaddr ;
   size_t size ;
};
typedef struct drm_dma_handle drm_dma_handle_t;
struct drm_buf_entry {
   int buf_size ;
   int buf_count ;
   struct drm_buf *buflist ;
   int seg_count ;
   int page_order ;
   struct drm_dma_handle **seglist ;
   struct drm_freelist freelist ;
};
struct drm_minor;
struct drm_file {
   int authenticated ;
   int master ;
   pid_t pid ;
   uid_t uid ;
   drm_magic_t magic ;
   unsigned long ioctl_count ;
   struct list_head lhead ;
   struct drm_minor *minor ;
   int remove_auth_on_close ;
   unsigned long lock_count ;
   struct idr object_idr ;
   spinlock_t table_lock ;
   struct file *filp ;
   void *driver_priv ;
};
struct drm_queue {
   atomic_t use_count ;
   atomic_t finalization ;
   atomic_t block_count ;
   atomic_t block_read ;
   wait_queue_head_t read_queue ;
   atomic_t block_write ;
   wait_queue_head_t write_queue ;
   atomic_t total_queued ;
   atomic_t total_flushed ;
   atomic_t total_locks ;
   enum drm_ctx_flags flags ;
   struct drm_waitlist waitlist ;
   wait_queue_head_t flush_queue ;
};
struct drm_lock_data {
   struct drm_hw_lock *hw_lock ;
   struct drm_file *file_priv ;
   wait_queue_head_t lock_queue ;
   unsigned long lock_time ;
   spinlock_t spinlock ;
   uint32_t kernel_waiters ;
   uint32_t user_waiters ;
   int idle_has_lock ;
};
enum ldv_18057 {
    _DRM_DMA_USE_AGP = 1,
    _DRM_DMA_USE_SG = 2,
    _DRM_DMA_USE_FB = 4,
    _DRM_DMA_USE_PCI_RO = 8
} ;
struct drm_device_dma {
   struct drm_buf_entry bufs[23U] ;
   int buf_count ;
   struct drm_buf **buflist ;
   int seg_count ;
   int page_count ;
   unsigned long *pagelist ;
   unsigned long byte_count ;
   enum ldv_18057 flags ;
};
struct drm_agp_head {
   struct agp_kern_info agp_info ;
   struct list_head memory ;
   unsigned long mode ;
   struct agp_bridge_data *bridge ;
   int enabled ;
   int acquired ;
   unsigned long base ;
   int agp_mtrr ;
   int cant_use_aperture ;
   unsigned long page_mask ;
};
struct drm_sg_mem {
   unsigned long handle ;
   void *virtual ;
   int pages ;
   struct page **pagelist ;
   dma_addr_t *busaddr ;
};
struct drm_sigdata {
   int context ;
   struct drm_hw_lock *lock ;
};
struct drm_mm;
struct drm_mm {
   struct list_head fl_entry ;
   struct list_head ml_entry ;
};
typedef struct drm_map drm_local_map_t;
struct drm_gem_object {
   struct kref refcount ;
   struct kref handlecount ;
   struct drm_device *dev ;
   struct file *filp ;
   size_t size ;
   int name ;
   uint32_t read_domains ;
   uint32_t write_domain ;
   uint32_t pending_read_domains ;
   uint32_t pending_write_domain ;
   void *driver_private ;
};
struct drm_driver {
   int (*load)(struct drm_device * , unsigned long  ) ;
   int (*firstopen)(struct drm_device * ) ;
   int (*open)(struct drm_device * , struct drm_file * ) ;
   void (*preclose)(struct drm_device * , struct drm_file * ) ;
   void (*postclose)(struct drm_device * , struct drm_file * ) ;
   void (*lastclose)(struct drm_device * ) ;
   int (*unload)(struct drm_device * ) ;
   int (*suspend)(struct drm_device * , pm_message_t  ) ;
   int (*resume)(struct drm_device * ) ;
   int (*dma_ioctl)(struct drm_device * , void * , struct drm_file * ) ;
   void (*dma_ready)(struct drm_device * ) ;
   int (*dma_quiescent)(struct drm_device * ) ;
   int (*context_ctor)(struct drm_device * , int  ) ;
   int (*context_dtor)(struct drm_device * , int  ) ;
   int (*kernel_context_switch)(struct drm_device * , int  , int  ) ;
   void (*kernel_context_switch_unlock)(struct drm_device * ) ;
   int (*dri_library_name)(struct drm_device * , char * ) ;
   u32 (*get_vblank_counter)(struct drm_device * , int  ) ;
   int (*enable_vblank)(struct drm_device * , int  ) ;
   void (*disable_vblank)(struct drm_device * , int  ) ;
   int (*device_is_agp)(struct drm_device * ) ;
   irqreturn_t (*irq_handler)(int  , void * ) ;
   void (*irq_preinstall)(struct drm_device * ) ;
   int (*irq_postinstall)(struct drm_device * ) ;
   void (*irq_uninstall)(struct drm_device * ) ;
   void (*reclaim_buffers)(struct drm_device * , struct drm_file * ) ;
   void (*reclaim_buffers_locked)(struct drm_device * , struct drm_file * ) ;
   void (*reclaim_buffers_idlelocked)(struct drm_device * , struct drm_file * ) ;
   unsigned long (*get_map_ofs)(struct drm_map * ) ;
   unsigned long (*get_reg_ofs)(struct drm_device * ) ;
   void (*set_version)(struct drm_device * , struct drm_set_version * ) ;
   int (*proc_init)(struct drm_minor * ) ;
   void (*proc_cleanup)(struct drm_minor * ) ;
   int (*gem_init_object)(struct drm_gem_object * ) ;
   void (*gem_free_object)(struct drm_gem_object * ) ;
   int major ;
   int minor ;
   int patchlevel ;
   char *name ;
   char *desc ;
   char *date ;
   u32 driver_features ;
   int dev_priv_size ;
   struct drm_ioctl_desc *ioctls ;
   int num_ioctls ;
   struct file_operations fops ;
   struct pci_driver pci_driver ;
};
struct drm_minor {
   int index ;
   int type ;
   dev_t device ;
   struct device kdev ;
   struct drm_device *dev ;
   struct proc_dir_entry *dev_root ;
};
struct drm_device {
   char *unique ;
   int unique_len ;
   char *devname ;
   int if_version ;
   int blocked ;
   spinlock_t count_lock ;
   struct mutex struct_mutex ;
   int open_count ;
   atomic_t ioctl_count ;
   atomic_t vma_count ;
   int buf_use ;
   atomic_t buf_alloc ;
   unsigned long counters ;
   enum drm_stat_type types[15U] ;
   atomic_t counts[15U] ;
   struct list_head filelist ;
   struct drm_open_hash magiclist ;
   struct list_head magicfree ;
   struct list_head maplist ;
   int map_count ;
   struct drm_open_hash map_hash ;
   struct list_head ctxlist ;
   int ctx_count ;
   struct mutex ctxlist_mutex ;
   struct idr ctx_idr ;
   struct list_head vmalist ;
   struct drm_lock_data lock ;
   int queue_count ;
   int queue_reserved ;
   int queue_slots ;
   struct drm_queue **queuelist ;
   struct drm_device_dma *dma ;
   int irq ;
   int irq_enabled ;
   long volatile   context_flag ;
   long volatile   interrupt_flag ;
   long volatile   dma_flag ;
   struct timer_list timer ;
   wait_queue_head_t context_wait ;
   int last_checked ;
   int last_context ;
   unsigned long last_switch ;
   struct work_struct work ;
   int vblank_disable_allowed ;
   wait_queue_head_t *vbl_queue ;
   atomic_t *_vblank_count ;
   spinlock_t vbl_lock ;
   struct list_head *vbl_sigs ;
   atomic_t vbl_signal_pending ;
   atomic_t *vblank_refcount ;
   u32 *last_vblank ;
   int *vblank_enabled ;
   int *vblank_inmodeset ;
   struct timer_list vblank_disable_timer ;
   u32 max_vblank_count ;
   spinlock_t tasklet_lock ;
   void (*locked_tasklet_func)(struct drm_device * ) ;
   cycles_t ctx_start ;
   cycles_t lck_start ;
   struct fasync_struct *buf_async ;
   wait_queue_head_t buf_readers ;
   wait_queue_head_t buf_writers ;
   struct drm_agp_head *agp ;
   struct pci_dev *pdev ;
   int pci_vendor ;
   int pci_device ;
   struct drm_sg_mem *sg ;
   int num_crtcs ;
   void *dev_private ;
   struct drm_sigdata sigdata ;
   sigset_t sigmask ;
   struct drm_driver *driver ;
   drm_local_map_t *agp_buffer_map ;
   unsigned int agp_buffer_token ;
   struct drm_minor *primary ;
   spinlock_t drw_lock ;
   struct idr drw_idr ;
   spinlock_t object_name_lock ;
   struct idr object_name_idr ;
   atomic_t object_count ;
   atomic_t object_memory ;
   atomic_t pin_count ;
   atomic_t pin_memory ;
   atomic_t gtt_count ;
   atomic_t gtt_memory ;
   uint32_t gtt_total ;
   uint32_t invalidate_domains ;
   uint32_t flush_domains ;
};
struct _drm_i915_sarea {
   struct drm_tex_region texList[256U] ;
   int last_upload ;
   int last_enqueue ;
   int last_dispatch ;
   int ctxOwner ;
   int texAge ;
   int pf_enabled ;
   int pf_active ;
   int pf_current_page ;
   int perf_boxes ;
   int width ;
   int height ;
   drm_handle_t front_handle ;
   int front_offset ;
   int front_size ;
   drm_handle_t back_handle ;
   int back_offset ;
   int back_size ;
   drm_handle_t depth_handle ;
   int depth_offset ;
   int depth_size ;
   drm_handle_t tex_handle ;
   int tex_offset ;
   int tex_size ;
   int log_tex_granularity ;
   int pitch ;
   int rotation ;
   int rotated_offset ;
   int rotated_size ;
   int rotated_pitch ;
   int virtualX ;
   int virtualY ;
   unsigned int front_tiled ;
   unsigned int back_tiled ;
   unsigned int depth_tiled ;
   unsigned int rotated_tiled ;
   unsigned int rotated2_tiled ;
   int pipeA_x ;
   int pipeA_y ;
   int pipeA_w ;
   int pipeA_h ;
   int pipeB_x ;
   int pipeB_y ;
   int pipeB_w ;
   int pipeB_h ;
};
typedef struct _drm_i915_sarea drm_i915_sarea_t;
struct _drm_i915_ring_buffer {
   int tail_mask ;
   unsigned long Size ;
   u8 *virtual_start ;
   int head ;
   int tail ;
   int space ;
   drm_local_map_t map ;
   struct drm_gem_object *ring_obj ;
};
typedef struct _drm_i915_ring_buffer drm_i915_ring_buffer_t;
struct mem_block {
   struct mem_block *next ;
   struct mem_block *prev ;
   int start ;
   int size ;
   struct drm_file *file_priv ;
};
struct _drm_i915_vbl_swap {
   struct list_head head ;
   drm_drawable_t drw_id ;
   unsigned int plane ;
   unsigned int sequence ;
};
typedef struct _drm_i915_vbl_swap drm_i915_vbl_swap_t;
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct intel_opregion {
   struct opregion_header *header ;
   struct opregion_acpi *acpi ;
   struct opregion_swsci *swsci ;
   struct opregion_asle *asle ;
   int enabled ;
};
struct __anonstruct_mm_125 {
   struct drm_mm gtt_space ;
   struct list_head active_list ;
   struct list_head flushing_list ;
   struct list_head inactive_list ;
   struct list_head request_list ;
   struct delayed_work retire_work ;
   struct work_struct vblank_work ;
   uint32_t next_gem_seqno ;
   uint32_t waiting_gem_seqno ;
   uint32_t irq_gem_seqno ;
   int suspended ;
   int wedged ;
   uint32_t bit_6_swizzle_x ;
   uint32_t bit_6_swizzle_y ;
};
struct drm_i915_private {
   struct drm_device *dev ;
   void *regs ;
   drm_local_map_t *sarea ;
   drm_i915_sarea_t *sarea_priv ;
   drm_i915_ring_buffer_t ring ;
   drm_dma_handle_t *status_page_dmah ;
   void *hw_status_page ;
   dma_addr_t dma_status_page ;
   uint32_t counter ;
   unsigned int status_gfx_addr ;
   drm_local_map_t hws_map ;
   struct drm_gem_object *hws_obj ;
   unsigned int cpp ;
   int back_offset ;
   int front_offset ;
   int current_page ;
   int page_flipping ;
   wait_queue_head_t irq_queue ;
   atomic_t irq_received ;
   spinlock_t user_irq_lock ;
   int user_irq_refcount ;
   u32 irq_mask_reg ;
   int tex_lru_log_granularity ;
   int allow_batchbuffer ;
   struct mem_block *agp_heap ;
   unsigned int sr01 ;
   unsigned int adpa ;
   unsigned int ppcr ;
   unsigned int dvob ;
   unsigned int dvoc ;
   unsigned int lvds ;
   int vblank_pipe ;
   spinlock_t swaps_lock ;
   drm_i915_vbl_swap_t vbl_swaps ;
   unsigned int swaps_pending ;
   struct intel_opregion opregion ;
   u8 saveLBB ;
   u32 saveDSPACNTR ;
   u32 saveDSPBCNTR ;
   u32 saveDSPARB ;
   u32 savePIPEACONF ;
   u32 savePIPEBCONF ;
   u32 savePIPEASRC ;
   u32 savePIPEBSRC ;
   u32 saveFPA0 ;
   u32 saveFPA1 ;
   u32 saveDPLL_A ;
   u32 saveDPLL_A_MD ;
   u32 saveHTOTAL_A ;
   u32 saveHBLANK_A ;
   u32 saveHSYNC_A ;
   u32 saveVTOTAL_A ;
   u32 saveVBLANK_A ;
   u32 saveVSYNC_A ;
   u32 saveBCLRPAT_A ;
   u32 savePIPEASTAT ;
   u32 saveDSPASTRIDE ;
   u32 saveDSPASIZE ;
   u32 saveDSPAPOS ;
   u32 saveDSPAADDR ;
   u32 saveDSPASURF ;
   u32 saveDSPATILEOFF ;
   u32 savePFIT_PGM_RATIOS ;
   u32 saveBLC_PWM_CTL ;
   u32 saveBLC_PWM_CTL2 ;
   u32 saveFPB0 ;
   u32 saveFPB1 ;
   u32 saveDPLL_B ;
   u32 saveDPLL_B_MD ;
   u32 saveHTOTAL_B ;
   u32 saveHBLANK_B ;
   u32 saveHSYNC_B ;
   u32 saveVTOTAL_B ;
   u32 saveVBLANK_B ;
   u32 saveVSYNC_B ;
   u32 saveBCLRPAT_B ;
   u32 savePIPEBSTAT ;
   u32 saveDSPBSTRIDE ;
   u32 saveDSPBSIZE ;
   u32 saveDSPBPOS ;
   u32 saveDSPBADDR ;
   u32 saveDSPBSURF ;
   u32 saveDSPBTILEOFF ;
   u32 saveVGA0 ;
   u32 saveVGA1 ;
   u32 saveVGA_PD ;
   u32 saveVGACNTRL ;
   u32 saveADPA ;
   u32 saveLVDS ;
   u32 savePP_ON_DELAYS ;
   u32 savePP_OFF_DELAYS ;
   u32 saveDVOA ;
   u32 saveDVOB ;
   u32 saveDVOC ;
   u32 savePP_ON ;
   u32 savePP_OFF ;
   u32 savePP_CONTROL ;
   u32 savePP_DIVISOR ;
   u32 savePFIT_CONTROL ;
   u32 save_palette_a[256U] ;
   u32 save_palette_b[256U] ;
   u32 saveFBC_CFB_BASE ;
   u32 saveFBC_LL_BASE ;
   u32 saveFBC_CONTROL ;
   u32 saveFBC_CONTROL2 ;
   u32 saveIER ;
   u32 saveIIR ;
   u32 saveIMR ;
   u32 saveCACHE_MODE_0 ;
   u32 saveD_STATE ;
   u32 saveCG_2D_DIS ;
   u32 saveMI_ARB_STATE ;
   u32 saveSWF0[16U] ;
   u32 saveSWF1[16U] ;
   u32 saveSWF2[3U] ;
   u8 saveMSR ;
   u8 saveSR[8U] ;
   u8 saveGR[25U] ;
   u8 saveAR_INDEX ;
   u8 saveAR[21U] ;
   u8 saveDACMASK ;
   u8 saveDACDATA[768U] ;
   u8 saveCR[37U] ;
   struct __anonstruct_mm_125 mm ;
};
typedef __u64 uint64_t;
struct x8664_pda {
   struct task_struct *pcurrent ;
   unsigned long data_offset ;
   unsigned long kernelstack ;
   unsigned long oldrsp ;
   int irqcount ;
   unsigned int cpunumber ;
   char *irqstackptr ;
   short nodenumber ;
   short in_bootmem ;
   unsigned int __softirq_pending ;
   unsigned int __nmi_count ;
   short mmu_state ;
   short isidle ;
   struct mm_struct *active_mm ;
   unsigned int apic_timer_irqs ;
   unsigned int irq0_irqs ;
   unsigned int irq_resched_count ;
   unsigned int irq_call_count ;
   unsigned int irq_tlb_count ;
   unsigned int irq_thermal_count ;
   unsigned int irq_threshold_count ;
   unsigned int irq_spurious_count ;
};
enum hrtimer_restart;
struct __large_struct {
   unsigned long buf[100U] ;
};
struct drm_clip_rect {
   unsigned short x1 ;
   unsigned short y1 ;
   unsigned short x2 ;
   unsigned short y2 ;
};
enum ldv_18367 {
    I915_INIT_DMA = 1,
    I915_CLEANUP_DMA = 2,
    I915_RESUME_DMA = 3
} ;
struct _drm_i915_init {
   enum ldv_18367 func ;
   unsigned int mmio_offset ;
   int sarea_priv_offset ;
   unsigned int ring_start ;
   unsigned int ring_end ;
   unsigned int ring_size ;
   unsigned int front_offset ;
   unsigned int back_offset ;
   unsigned int depth_offset ;
   unsigned int w ;
   unsigned int h ;
   unsigned int pitch ;
   unsigned int pitch_bits ;
   unsigned int back_pitch ;
   unsigned int depth_pitch ;
   unsigned int cpp ;
   unsigned int chipset ;
};
typedef struct _drm_i915_init drm_i915_init_t;
struct _drm_i915_batchbuffer {
   int start ;
   int used ;
   int DR1 ;
   int DR4 ;
   int num_cliprects ;
   struct drm_clip_rect *cliprects ;
};
typedef struct _drm_i915_batchbuffer drm_i915_batchbuffer_t;
struct _drm_i915_cmdbuffer {
   char *buf ;
   int sz ;
   int DR1 ;
   int DR4 ;
   int num_cliprects ;
   struct drm_clip_rect *cliprects ;
};
typedef struct _drm_i915_cmdbuffer drm_i915_cmdbuffer_t;
struct drm_i915_getparam {
   int param ;
   int *value ;
};
typedef struct drm_i915_getparam drm_i915_getparam_t;
struct drm_i915_setparam {
   int param ;
   int value ;
};
typedef struct drm_i915_setparam drm_i915_setparam_t;
struct drm_i915_hws_addr {
   uint64_t addr ;
};
typedef struct drm_i915_hws_addr drm_i915_hws_addr_t;
typedef struct drm_i915_private drm_i915_private_t;
struct __anonstruct_mm_126 {
   uint32_t last_gem_seqno ;
   uint32_t last_gem_throttle_seqno ;
};
struct drm_i915_file_private {
   struct __anonstruct_mm_126 mm ;
};
enum hrtimer_restart;
struct drm_drawable_info {
   unsigned int num_rects ;
   struct drm_clip_rect *rects ;
};
enum drm_vblank_seq_type {
    _DRM_VBLANK_ABSOLUTE = 0,
    _DRM_VBLANK_RELATIVE = 1,
    _DRM_VBLANK_FLIP = 134217728,
    _DRM_VBLANK_NEXTONMISS = 268435456,
    _DRM_VBLANK_SECONDARY = 536870912,
    _DRM_VBLANK_SIGNAL = 1073741824
} ;
struct drm_i915_irq_emit {
   int *irq_seq ;
};
typedef struct drm_i915_irq_emit drm_i915_irq_emit_t;
struct drm_i915_irq_wait {
   int irq_seq ;
};
typedef struct drm_i915_irq_wait drm_i915_irq_wait_t;
struct drm_i915_vblank_pipe {
   int pipe ;
};
typedef struct drm_i915_vblank_pipe drm_i915_vblank_pipe_t;
struct drm_i915_vblank_swap {
   drm_drawable_t drawable ;
   enum drm_vblank_seq_type seqtype ;
   unsigned int sequence ;
};
typedef struct drm_i915_vblank_swap drm_i915_vblank_swap_t;
enum hrtimer_restart;
struct drm_i915_mem_alloc {
   int region ;
   int alignment ;
   int size ;
   int *region_offset ;
};
typedef struct drm_i915_mem_alloc drm_i915_mem_alloc_t;
struct drm_i915_mem_free {
   int region ;
   int region_offset ;
};
typedef struct drm_i915_mem_free drm_i915_mem_free_t;
struct drm_i915_mem_init_heap {
   int region ;
   int size ;
   int start ;
};
typedef struct drm_i915_mem_init_heap drm_i915_mem_init_heap_t;
struct drm_i915_mem_destroy_heap {
   int region ;
};
typedef struct drm_i915_mem_destroy_heap drm_i915_mem_destroy_heap_t;
struct notifier_block {
   int (*notifier_call)(struct notifier_block * , unsigned long  , void * ) ;
   struct notifier_block *next ;
   int priority ;
};
enum hrtimer_restart;
struct opregion_header {
   u8 signature[16U] ;
   u32 size ;
   u32 opregion_ver ;
   u8 bios_ver[32U] ;
   u8 vbios_ver[16U] ;
   u8 driver_ver[16U] ;
   u32 mboxes ;
   u8 reserved[164U] ;
};
struct opregion_acpi {
   u32 drdy ;
   u32 csts ;
   u32 cevt ;
   u8 rsvd1[20U] ;
   u32 didl[8U] ;
   u32 cpdl[8U] ;
   u32 cadl[8U] ;
   u32 nadl[8U] ;
   u32 aslp ;
   u32 tidx ;
   u32 chpd ;
   u32 clid ;
   u32 cdck ;
   u32 sxsw ;
   u32 evts ;
   u32 cnot ;
   u32 nrdy ;
   u8 rsvd2[60U] ;
};
struct opregion_swsci {
   u32 scic ;
   u32 parm ;
   u32 dslp ;
   u8 rsvd[244U] ;
};
struct opregion_asle {
   u32 ardy ;
   u32 aslc ;
   u32 tche ;
   u32 alsi ;
   u32 bclp ;
   u32 pfit ;
   u32 cblv ;
   u16 bclm[20U] ;
   u32 cpfm ;
   u32 epfm ;
   u8 plut[74U] ;
   u32 pfmb ;
   u8 rsvd[102U] ;
};
enum hrtimer_restart;
enum pipe {
    PIPE_A = 0,
    PIPE_B = 1
} ;
typedef unsigned char __u8;
typedef __u8 uint8_t;
enum hrtimer_restart;
struct agp_memory {
   struct agp_memory *next ;
   struct agp_memory *prev ;
   struct agp_bridge_data *bridge ;
   unsigned long *memory ;
   size_t page_count ;
   int key ;
   int num_scratch_pages ;
   off_t pg_start ;
   u32 type ;
   u32 physical ;
   bool is_bound ;
   bool is_flushed ;
   bool vmalloc_flag ;
   struct list_head mapped_list ;
};
typedef int filler_t(void * , struct page * );
struct drm_mm_node {
   struct list_head fl_entry ;
   struct list_head ml_entry ;
   int free ;
   unsigned long start ;
   unsigned long size ;
   struct drm_mm *mm ;
   void *private ;
};
struct drm_i915_gem_init {
   uint64_t gtt_start ;
   uint64_t gtt_end ;
};
struct drm_i915_gem_create {
   uint64_t size ;
   uint32_t handle ;
   uint32_t pad ;
};
struct drm_i915_gem_pread {
   uint32_t handle ;
   uint32_t pad ;
   uint64_t offset ;
   uint64_t size ;
   uint64_t data_ptr ;
};
struct drm_i915_gem_pwrite {
   uint32_t handle ;
   uint32_t pad ;
   uint64_t offset ;
   uint64_t size ;
   uint64_t data_ptr ;
};
struct drm_i915_gem_mmap {
   uint32_t handle ;
   uint32_t pad ;
   uint64_t offset ;
   uint64_t size ;
   uint64_t addr_ptr ;
};
struct drm_i915_gem_set_domain {
   uint32_t handle ;
   uint32_t read_domains ;
   uint32_t write_domain ;
};
struct drm_i915_gem_sw_finish {
   uint32_t handle ;
};
struct drm_i915_gem_relocation_entry {
   uint32_t target_handle ;
   uint32_t delta ;
   uint64_t offset ;
   uint64_t presumed_offset ;
   uint32_t read_domains ;
   uint32_t write_domain ;
};
struct drm_i915_gem_exec_object {
   uint32_t handle ;
   uint32_t relocation_count ;
   uint64_t relocs_ptr ;
   uint64_t alignment ;
   uint64_t offset ;
};
struct drm_i915_gem_execbuffer {
   uint64_t buffers_ptr ;
   uint32_t buffer_count ;
   uint32_t batch_start_offset ;
   uint32_t batch_len ;
   uint32_t DR1 ;
   uint32_t DR4 ;
   uint32_t num_cliprects ;
   uint64_t cliprects_ptr ;
};
struct drm_i915_gem_pin {
   uint32_t handle ;
   uint32_t pad ;
   uint64_t alignment ;
   uint64_t offset ;
};
struct drm_i915_gem_busy {
   uint32_t handle ;
   uint32_t busy ;
};
struct drm_i915_gem_object {
   struct drm_gem_object *obj ;
   struct drm_mm_node *gtt_space ;
   struct list_head list ;
   int active ;
   int dirty ;
   struct agp_memory *agp_mem ;
   struct page **page_list ;
   uint32_t gtt_offset ;
   int gtt_bound ;
   int pin_count ;
   uint32_t last_rendering_seqno ;
   uint32_t tiling_mode ;
   uint8_t *page_cpu_valid ;
};
struct drm_i915_gem_request {
   uint32_t seqno ;
   unsigned long emitted_jiffies ;
   uint32_t flush_domains ;
   struct list_head list ;
};
struct reclaim_state {
   unsigned long reclaimed_slab ;
};
enum hrtimer_restart;
enum hrtimer_restart;
struct drm_proc_list {
   char const   *name ;
   int (*f)(char * , char ** , off_t  , int  , int * , void * ) ;
};
enum hrtimer_restart;
struct drm_i915_gem_set_tiling {
   uint32_t handle ;
   uint32_t tiling_mode ;
   uint32_t stride ;
   uint32_t swizzle_mode ;
};
struct drm_i915_gem_get_tiling {
   uint32_t handle ;
   uint32_t tiling_mode ;
   uint32_t swizzle_mode ;
};
enum hrtimer_restart;
typedef int drm_ioctl_compat_t(struct file * , unsigned int  , unsigned long  );
struct _drm_i915_batchbuffer32 {
   int start ;
   int used ;
   int DR1 ;
   int DR4 ;
   int num_cliprects ;
   u32 cliprects ;
};
typedef struct _drm_i915_batchbuffer32 drm_i915_batchbuffer32_t;
struct _drm_i915_cmdbuffer32 {
   u32 buf ;
   int sz ;
   int DR1 ;
   int DR4 ;
   int num_cliprects ;
   u32 cliprects ;
};
typedef struct _drm_i915_cmdbuffer32 drm_i915_cmdbuffer32_t;
struct drm_i915_irq_emit32 {
   u32 irq_seq ;
};
typedef struct drm_i915_irq_emit32 drm_i915_irq_emit32_t;
struct drm_i915_getparam32 {
   int param ;
   u32 value ;
};
typedef struct drm_i915_getparam32 drm_i915_getparam32_t;
struct drm_i915_mem_alloc32 {
   int region ;
   int alignment ;
   int size ;
   u32 region_offset ;
};
typedef struct drm_i915_mem_alloc32 drm_i915_mem_alloc32_t;
enum __anonenum_96 {
    LDV_SPIN_UNLOCKED = 0,
    LDV_SPIN_LOCKED = 1
} ;
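/*
 * End of the type and structure declarations; the remainder of the file is
 * the i915 driver code together with the LDV environment model.  The ldv_*
 * functions, nondet_int() and LDV_IN_INTERRUPT below belong to the
 * verification harness, not to the original driver.
 */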
extern int printk(char const   *  , ...) ;
extern struct page *alloc_page_vma(gfp_t  , struct vm_area_struct * , unsigned long  ) ;
struct page *ldv_alloc_page_vma_12(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) ;
extern unsigned long __get_free_pages(gfp_t  , unsigned int  ) ;
unsigned long ldv___get_free_pages_2(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
extern void *kmem_cache_alloc(struct kmem_cache * , gfp_t  ) ;
void *ldv_kmem_cache_alloc_4(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_8(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
extern struct module __this_module ;
void ldv_check_alloc_flags(gfp_t flags ) ;
extern int pci_enable_device(struct pci_dev * ) ;
extern void pci_disable_device(struct pci_dev * ) ;
extern void pci_set_master(struct pci_dev * ) ;
extern int pci_save_state(struct pci_dev * ) ;
extern int pci_restore_state(struct pci_dev * ) ;
extern int pci_set_power_state(struct pci_dev * , pci_power_t  ) ;
extern int drm_init(struct drm_driver * ) ;
extern void drm_exit(struct drm_driver * ) ;
extern int drm_ioctl(struct inode * , struct file * , unsigned int  , unsigned long  ) ;
extern int drm_open(struct inode * , struct file * ) ;
extern int drm_fasync(int  , struct file * , int  ) ;
extern int drm_release(struct inode * , struct file * ) ;
extern int drm_mmap(struct file * , struct vm_area_struct * ) ;
extern unsigned long drm_core_get_map_ofs(struct drm_map * ) ;
extern unsigned long drm_core_get_reg_ofs(struct drm_device * ) ;
extern unsigned int drm_poll(struct file * , struct poll_table_struct * ) ;
extern void drm_core_reclaim_buffers(struct drm_device * , struct drm_file * ) ;
struct drm_ioctl_desc i915_ioctls[35U] ;
int i915_max_ioctl ;
int i915_driver_load(struct drm_device *dev , unsigned long flags ) ;
int i915_driver_unload(struct drm_device *dev ) ;
int i915_driver_open(struct drm_device *dev , struct drm_file *file_priv ) ;
void i915_driver_lastclose(struct drm_device *dev ) ;
void i915_driver_preclose(struct drm_device *dev , struct drm_file *file_priv ) ;
void i915_driver_postclose(struct drm_device *dev , struct drm_file *file_priv ) ;
int i915_driver_device_is_agp(struct drm_device *dev ) ;
long i915_compat_ioctl(struct file *filp , unsigned int cmd , unsigned long arg ) ;
irqreturn_t i915_driver_irq_handler(int irq , void *arg ) ;
void i915_driver_irq_preinstall(struct drm_device *dev ) ;
int i915_driver_irq_postinstall(struct drm_device *dev ) ;
void i915_driver_irq_uninstall(struct drm_device *dev ) ;
int i915_enable_vblank(struct drm_device *dev , int plane ) ;
void i915_disable_vblank(struct drm_device *dev , int plane ) ;
u32 i915_get_vblank_counter(struct drm_device *dev , int plane ) ;
int i915_gem_proc_init(struct drm_minor *minor ) ;
void i915_gem_proc_cleanup(struct drm_minor *minor ) ;
int i915_gem_init_object(struct drm_gem_object *obj ) ;
void i915_gem_free_object(struct drm_gem_object *obj ) ;
int i915_save_state(struct drm_device *dev ) ;
int i915_restore_state(struct drm_device *dev ) ;
int intel_opregion_init(struct drm_device *dev ) ;
void intel_opregion_free(struct drm_device *dev ) ;
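/*
 * PCI match table: each entry pairs vendor 32902 (0x8086, Intel) with a
 * specific device id and uses 4294967295U (0xffffffff, PCI_ANY_ID) for the
 * subvendor/subdevice fields; the all-zero entry terminates the table.
 */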
static struct pci_device_id pciidlist[24U]  = 
  {      {32902U, 13687U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 9570U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 13698U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 9586U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 9602U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 9610U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 9618U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10098U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10146U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10158U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10610U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10626U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10642U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10658U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10674U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10690U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10706U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10754U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10770U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 10818U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 11778U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 11794U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {32902U, 11810U, 4294967295U, 4294967295U, 0U, 0U, 0UL}, 
        {0U, 0U, 0U, 0U, 0U, 0U, 0UL}};
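/*
 * Suspend handler.  Refuses with -19 (-ENODEV) when the device or its
 * private data is missing, returns early for event 8 (apparently
 * PM_EVENT_PRETHAW in this kernel), then saves the PCI config space and the
 * GPU register state, frees the opregion, and for event 2 (PM_EVENT_SUSPEND)
 * disables the PCI device and puts it into power state 3 (D3hot).
 */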
static int i915_suspend(struct drm_device *dev , pm_message_t state ) 
{ 
  struct drm_i915_private *dev_priv ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  if ((unsigned long )dev == (unsigned long )((struct drm_device *)0) || (unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) {
    printk("<3>dev: %p, dev_priv: %p\n", dev, dev_priv);
    printk("<3>DRM not initialized, aborting suspend.\n");
    return (-19);
  } else {

  }
  if (state.event == 8) {
    return (0);
  } else {

  }
  pci_save_state(dev->pdev);
  i915_save_state(dev);
  intel_opregion_free(dev);
  if (state.event == 2) {
    pci_disable_device(dev->pdev);
    pci_set_power_state(dev->pdev, 3);
  } else {

  }
  return (0);
}
}
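/*
 * Resume handler: the inverse of i915_suspend().  It powers the device back
 * to D0, restores the PCI config space, re-enables the device (returning -1
 * on failure), restores the saved GPU state and re-initializes the opregion.
 */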
static int i915_resume(struct drm_device *dev ) 
{ 
  int tmp ;

  {
  pci_set_power_state(dev->pdev, 0);
  pci_restore_state(dev->pdev);
  tmp = pci_enable_device(dev->pdev);
  if (tmp != 0) {
    return (-1);
  } else {

  }
  pci_set_master(dev->pdev);
  i915_restore_state(dev);
  intel_opregion_init(dev);
  return (0);
}
}
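/*
 * The drm_driver instance below is initialized positionally in the field
 * order of the struct drm_driver declaration above (load, firstopen, open,
 * ..., fops, pci_driver); the zeroes stand for hooks the i915 driver does
 * not implement, the 1, 6, 0 triple is the driver version and 4291U is the
 * driver_features mask.
 */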
static struct drm_driver driver  = 
     {& i915_driver_load, 0, & i915_driver_open, & i915_driver_preclose, & i915_driver_postclose,
    & i915_driver_lastclose, & i915_driver_unload, & i915_suspend, & i915_resume,
    0, 0, 0, 0, 0, 0, 0, 0, & i915_get_vblank_counter, & i915_enable_vblank, & i915_disable_vblank,
    & i915_driver_device_is_agp, & i915_driver_irq_handler, & i915_driver_irq_preinstall,
    & i915_driver_irq_postinstall, & i915_driver_irq_uninstall, & drm_core_reclaim_buffers,
    0, 0, & drm_core_get_map_ofs, & drm_core_get_reg_ofs, 0, & i915_gem_proc_init,
    & i915_gem_proc_cleanup, & i915_gem_init_object, & i915_gem_free_object, 1, 6,
    0, (char *)"i915", (char *)"Intel Graphics", (char *)"20080730", 4291U, 0, (struct drm_ioctl_desc *)(& i915_ioctls),
    0, {& __this_module, 0, 0, 0, 0, 0, 0, & drm_poll, & drm_ioctl, 0, & i915_compat_ioctl,
        & drm_mmap, & drm_open, 0, & drm_release, 0, 0, & drm_fasync, 0, 0, 0, 0,
        0, 0, 0, 0, 0}, {{0, 0}, (char *)"i915", (struct pci_device_id  const  *)(& pciidlist),
                         0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
                         {{{0U}, 0U, 0U, 0, {0, 0, 0, 0}}, {0, 0}, (unsigned char)0}}};
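/*
 * Module entry points: i915_init() fills in the ioctl count and registers
 * the driver with the DRM core, i915_exit() unregisters it.
 */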
static int i915_init(void) 
{ 
  int tmp ;

  {
  driver.num_ioctls = i915_max_ioctl;
  tmp = drm_init(& driver);
  return (tmp);
}
}
static void i915_exit(void) 
{ 


  {
  drm_exit(& driver);
  return;
}
}
extern void ldv_check_final_state(void) ;
extern void ldv_initialize(void) ;
extern void ldv_handler_precall(void) ;
extern int nondet_int(void) ;
int LDV_IN_INTERRUPT  ;
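/*
 * Entry point of the LDV verification harness.  After registering the driver
 * it loops on a nondeterministic choice, invoking the suspend or resume
 * callback with unconstrained arguments, and finally unregisters the driver
 * and lets ldv_check_final_state() check the environment model.
 */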
int main(void) 
{ 
  struct drm_device *var_group1 ;
  pm_message_t var_i915_suspend_0_p1 ;
  int tmp ;
  int tmp___0 ;
  int tmp___1 ;

  {
  LDV_IN_INTERRUPT = 1;
  ldv_initialize();
  ldv_handler_precall();
  tmp = i915_init();
  if (tmp != 0) {
    goto ldv_final;
  } else {

  }
  goto ldv_23295;
  ldv_23294: 
  tmp___0 = nondet_int();
  switch (tmp___0) {
  case 0: 
  ldv_handler_precall();
  i915_suspend(var_group1, var_i915_suspend_0_p1);
  goto ldv_23291;
  case 1: 
  ldv_handler_precall();
  i915_resume(var_group1);
  goto ldv_23291;
  default: ;
  goto ldv_23291;
  }
  ldv_23291: ;
  ldv_23295: 
  tmp___1 = nondet_int();
  if (tmp___1 != 0) {
    goto ldv_23294;
  } else {

  }

  ldv_handler_precall();
  i915_exit();
  ldv_final: 
  ldv_check_final_state();
  return 0;
}
}
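/*
 * LDV allocation wrappers: each passes the gfp flags to
 * ldv_check_alloc_flags() before calling the real allocator.  Note that the
 * kmem_cache_alloc wrappers below discard the allocation and always return a
 * null pointer, presumably to model an allocation that may fail.
 */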
unsigned long ldv___get_free_pages_2(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_4(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_8(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_12(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
long ldv__builtin_expect(long exp , long c ) ;
extern void *memset(void * , int  , size_t  ) ;
extern void __bad_pda_field(void) ;
extern struct x8664_pda _proxy_pda ;
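/*
 * current_thread_info(): reads the kernelstack field of the per-CPU PDA at
 * %gs offset 16 (only the 8-byte case of the switch is reachable, since it
 * switches on the constant 8UL) and subtracts 8152, consistent with
 * THREAD_SIZE (8192) minus the PDA stack offset, to reach the thread_info
 * at the bottom of the kernel stack.
 */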
__inline static struct thread_info *current_thread_info(void) 
{ 
  struct thread_info *ti ;
  unsigned long ret__ ;

  {
  switch (8UL) {
  case 2UL: 
  __asm__  ("movw %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack));
  goto ldv_4932;
  case 4UL: 
  __asm__  ("movl %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack));
  goto ldv_4932;
  case 8UL: 
  __asm__  ("movq %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack));
  goto ldv_4932;
  default: 
  __bad_pda_field();
  }
  ldv_4932: 
  ti = (struct thread_info *)(ret__ - 8152UL);
  return (ti);
}
}
extern void __spin_lock_init(spinlock_t * , char const   * , struct lock_class_key * ) ;
extern void mutex_lock_nested(struct mutex * , unsigned int  ) ;
extern void mutex_unlock(struct mutex * ) ;
extern unsigned long msleep_interruptible(unsigned int  ) ;
struct page *ldv_alloc_page_vma_28(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_18(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
extern void kfree(void const   * ) ;
void *ldv_kmem_cache_alloc_20(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_24(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
extern void *__kmalloc(size_t  , gfp_t  ) ;
__inline static void *ldv_kmalloc_19(size_t size , gfp_t flags ) 
{ 
  void *tmp___2 ;

  {
  tmp___2 = __kmalloc(size, flags);
  return (tmp___2);
}
}
__inline static void *kmalloc(size_t size , gfp_t flags ) ;
__inline static unsigned int readl(void const volatile   *addr ) 
{ 
  unsigned int ret ;

  {
  __asm__  volatile   ("movl %1,%0": "=r" (ret): "m" (*((unsigned int volatile   *)addr)): "memory");
  return (ret);
}
}
__inline static void writel(unsigned int val , void volatile   *addr ) 
{ 


  {
  __asm__  volatile   ("movl %0,%1": : "r" (val), "m" (*((unsigned int volatile   *)addr)): "memory");
  return;
}
}
extern void *ioremap_nocache(resource_size_t  , unsigned long  ) ;
__inline static void *ioremap(resource_size_t offset , unsigned long size ) 
{ 
  void *tmp ;

  {
  tmp = ioremap_nocache(offset, size);
  return (tmp);
}
}
extern void iounmap(void volatile   * ) ;
extern int pci_enable_msi(struct pci_dev * ) ;
extern void pci_disable_msi(struct pci_dev * ) ;
extern unsigned long copy_user_generic(void * , void const   * , unsigned int  ) ;
extern unsigned long copy_to_user(void * , void const   * , unsigned int  ) ;
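/* __copy_from_user as flattened by CIL: the unconditional copy_user_generic()
 * call and return at the top make the size-specialized inline-asm switch
 * below unreachable; it is apparently retained verbatim from the original
 * uaccess header. */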
__inline static int __copy_from_user(void *dst , void const   *src , unsigned int size ) 
{ 
  int ret ;
  unsigned long tmp ;
  long tmp___0 ;
  long tmp___1 ;
  unsigned long tmp___2 ;

  {
  ret = 0;
  tmp = copy_user_generic(dst, src, size);
  return ((int )tmp);
  switch (size) {
  case 1U: 
  __asm__  volatile   ("1:\tmovb %2,%b1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorb %b1,%b1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=q" (*((u8 *)dst)): "m" (*((struct __large_struct *)src)),
                       "i" (1), "0" (ret));
  return (ret);
  case 2U: 
  __asm__  volatile   ("1:\tmovw %2,%w1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u16 *)dst)): "m" (*((struct __large_struct *)src)),
                       "i" (2), "0" (ret));
  return (ret);
  case 4U: 
  __asm__  volatile   ("1:\tmovl %2,%k1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorl %k1,%k1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u32 *)dst)): "m" (*((struct __large_struct *)src)),
                       "i" (4), "0" (ret));
  return (ret);
  case 8U: 
  __asm__  volatile   ("1:\tmovq %2,%1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u64 *)dst)): "m" (*((struct __large_struct *)src)),
                       "i" (8), "0" (ret));
  return (ret);
  case 10U: 
  __asm__  volatile   ("1:\tmovq %2,%1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u64 *)dst)): "m" (*((struct __large_struct *)src)),
                       "i" (16), "0" (ret));
  tmp___0 = ldv__builtin_expect(ret != 0, 0L);
  if (tmp___0 != 0L) {
    return (ret);
  } else {

  }
  __asm__  volatile   ("1:\tmovw %2,%w1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorw %w1,%w1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u16 *)dst + 8U)): "m" (*((struct __large_struct *)src + 8U)),
                       "i" (2), "0" (ret));
  return (ret);
  case 16U: 
  __asm__  volatile   ("1:\tmovq %2,%1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u64 *)dst)): "m" (*((struct __large_struct *)src)),
                       "i" (16), "0" (ret));
  tmp___1 = ldv__builtin_expect(ret != 0, 0L);
  if (tmp___1 != 0L) {
    return (ret);
  } else {

  }
  __asm__  volatile   ("1:\tmovq %2,%1\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\txorq %1,%1\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (ret),
                       "=r" (*((u64 *)dst + 8U)): "m" (*((struct __large_struct *)src + 8U)),
                       "i" (8), "0" (ret));
  return (ret);
  default: 
  tmp___2 = copy_user_generic(dst, src, size);
  return ((int )tmp___2);
  }
}
}
extern unsigned long drm_get_resource_start(struct drm_device * , unsigned int  ) ;
extern unsigned long drm_get_resource_len(struct drm_device * , unsigned int  ) ;
extern int drm_irq_uninstall(struct drm_device * ) ;
extern unsigned int drm_debug ;
extern drm_local_map_t *drm_getsarea(struct drm_device * ) ;
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device * , size_t  , size_t  , dma_addr_t  ) ;
extern void drm_pci_free(struct drm_device * , drm_dma_handle_t * ) ;
extern void drm_core_ioremap(struct drm_map * , struct drm_device * ) ;
extern void drm_core_ioremapfree(struct drm_map * , struct drm_device * ) ;
__inline static void *drm_alloc(size_t size , int area ) 
{ 
  void *tmp ;

  {
  tmp = kmalloc(size, 208U);
  return (tmp);
}
}
__inline static void drm_free(void *pt , size_t size , int area ) 
{ 


  {
  kfree((void const   *)pt);
  return;
}
}
void i915_kernel_lost_context(struct drm_device *dev ) ;
int i915_emit_box(struct drm_device *dev , struct drm_clip_rect *boxes , int i , int DR1 ,
                  int DR4 ) ;
int i915_irq_emit(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_irq_wait(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_vblank_pipe_set(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_vblank_pipe_get(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_vblank_swap(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_mem_alloc(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_mem_free(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_mem_init_heap(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_mem_destroy_heap(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
void i915_mem_takedown(struct mem_block **heap ) ;
void i915_mem_release(struct drm_device *dev , struct drm_file *file_priv , struct mem_block *heap ) ;
int i915_gem_init_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_create_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_pread_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_pwrite_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_mmap_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_set_domain_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_sw_finish_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_execbuffer(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_pin_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_unpin_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_busy_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_throttle_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_entervt_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_leavevt_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_set_tiling(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
int i915_gem_get_tiling(struct drm_device *dev , void *data , struct drm_file *file_priv ) ;
void i915_gem_load(struct drm_device *dev ) ;
void i915_gem_lastclose(struct drm_device *dev ) ;
int i915_wait_ring(struct drm_device *dev , int n , char const   *caller ) ;
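/* Busy-waits for at least n free bytes in the ring buffer.  Each iteration
 * re-reads the ring head (offset 8244, i.e. 0x2034) and the active-head
 * register selected by acthd_reg (0x2074 for the newer PCI IDs, 0x20C8
 * otherwise), sleeps 10 ms, and resets the retry counter whenever either
 * value moves; after 100000 quiet iterations it gives up and returns
 * -16 (-EBUSY). */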
int i915_wait_ring(struct drm_device *dev , int n , char const   *caller ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_ring_buffer_t *ring ;
  u32 acthd_reg ;
  u32 last_acthd ;
  unsigned int tmp ;
  u32 acthd ;
  u32 last_head ;
  unsigned int tmp___0 ;
  int i ;
  unsigned int tmp___1 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  ring = & dev_priv->ring;
  acthd_reg = ((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810 ? 8308U : 8392U;
  tmp = readl((void const volatile   *)dev_priv->regs + (unsigned long )acthd_reg);
  last_acthd = tmp;
  tmp___0 = readl((void const volatile   *)dev_priv->regs + 8244U);
  last_head = tmp___0 & 2097148U;
  i = 0;
  goto ldv_23255;
  ldv_23254: 
  tmp___1 = readl((void const volatile   *)dev_priv->regs + 8244U);
  ring->head = (int )tmp___1 & 2097148;
  acthd = readl((void const volatile   *)dev_priv->regs + (unsigned long )acthd_reg);
  ring->space = ring->head + (-8 - ring->tail);
  if (ring->space < 0) {
    ring->space = (int )((unsigned int )ring->space + (unsigned int )ring->Size);
  } else {

  }
  if (ring->space >= n) {
    return (0);
  } else {

  }
  if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->perf_boxes = (dev_priv->sarea_priv)->perf_boxes | 4;
  } else {

  }
  if ((u32 )ring->head != last_head) {
    i = 0;
  } else {

  }
  if (acthd != last_acthd) {
    i = 0;
  } else {

  }
  last_head = (u32 )ring->head;
  last_acthd = acthd;
  msleep_interruptible(10U);
  i = i + 1;
  ldv_23255: ;
  if (i <= 99999) {
    goto ldv_23254;
  } else {

  }

  return (-16);
}
}
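/* Allocates a 4 KiB DMA-coherent hardware status page below 4 GiB, clears it,
 * and writes its bus address to register offset 8320 (0x2080, likely HWS_PGA).
 * Returns -12 (-ENOMEM) if the allocation fails. */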
static int i915_init_phys_hws(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  dev_priv->status_page_dmah = drm_pci_alloc(dev, 4096UL, 4096UL, 4294967295ULL);
  if ((unsigned long )dev_priv->status_page_dmah == (unsigned long )((drm_dma_handle_t *)0)) {
    printk("<3>[drm:%s] *ERROR* Can not allocate hardware status page\n", "i915_init_phys_hws");
    return (-12);
  } else {

  }
  dev_priv->hw_status_page = (dev_priv->status_page_dmah)->vaddr;
  dev_priv->dma_status_page = (dev_priv->status_page_dmah)->busaddr;
  memset(dev_priv->hw_status_page, 0, 4096UL);
  writel((unsigned int )dev_priv->dma_status_page, (void volatile   *)dev_priv->regs + 8320U);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] Enabled hardware status page\n", "i915_init_phys_hws");
  } else {

  }
  return (0);
}
}
static void i915_free_hws(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )dev_priv->status_page_dmah != (unsigned long )((drm_dma_handle_t *)0)) {
    drm_pci_free(dev, dev_priv->status_page_dmah);
    dev_priv->status_page_dmah = 0;
  } else {

  }
  if (dev_priv->status_gfx_addr != 0U) {
    dev_priv->status_gfx_addr = 0U;
    drm_core_ioremapfree(& dev_priv->hws_map, dev);
  } else {

  }
  writel(536866816U, (void volatile   *)dev_priv->regs + 8320U);
  return;
}
}
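/* Resynchronizes the software ring state after another context may have
 * touched the hardware: head and tail are re-read from the ring registers,
 * the free space is recomputed, and a perf box is flagged in the shared
 * sarea when the ring turns out to be completely idle. */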
void i915_kernel_lost_context(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_ring_buffer_t *ring ;
  unsigned int tmp ;
  unsigned int tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  ring = & dev_priv->ring;
  tmp = readl((void const volatile   *)dev_priv->regs + 8244U);
  ring->head = (int )tmp & 2097148;
  tmp___0 = readl((void const volatile   *)dev_priv->regs + 8240U);
  ring->tail = (int )tmp___0 & 2097144;
  ring->space = ring->head + (-8 - ring->tail);
  if (ring->space < 0) {
    ring->space = (int )((unsigned int )ring->space + (unsigned int )ring->Size);
  } else {

  }
  if (ring->head == ring->tail && (unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->perf_boxes = (dev_priv->sarea_priv)->perf_boxes | 1;
  } else {

  }
  return;
}
}
static int i915_dma_cleanup(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if (dev->irq_enabled != 0) {
    drm_irq_uninstall(dev);
  } else {

  }
  if ((unsigned long )dev_priv->ring.virtual_start != (unsigned long )((u8 *)0)) {
    drm_core_ioremapfree(& dev_priv->ring.map, dev);
    dev_priv->ring.virtual_start = 0;
    dev_priv->ring.map.handle = 0;
    dev_priv->ring.map.size = 0UL;
  } else {

  }
  if ((((dev->pci_device == 10690 || dev->pci_device == 10674) || dev->pci_device == 10706) || dev->pci_device == 10818) || ((dev->pci_device == 11778 || dev->pci_device == 11794) || dev->pci_device == 11810)) {
    i915_free_hws(dev);
  } else {

  }
  return (0);
}
}
static int i915_initialize(struct drm_device *dev , drm_i915_init_t *init ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  dev_priv->sarea = drm_getsarea(dev);
  if ((unsigned long )dev_priv->sarea == (unsigned long )((drm_local_map_t *)0)) {
    printk("<3>[drm:%s] *ERROR* can not find sarea!\n", "i915_initialize");
    i915_dma_cleanup(dev);
    return (-22);
  } else {

  }
  dev_priv->sarea_priv = (drm_i915_sarea_t *)(dev_priv->sarea)->handle + (unsigned long )init->sarea_priv_offset;
  if (init->ring_size != 0U) {
    if ((unsigned long )dev_priv->ring.ring_obj != (unsigned long )((struct drm_gem_object *)0)) {
      i915_dma_cleanup(dev);
      printk("<3>[drm:%s] *ERROR* Client tried to initialize ringbuffer in GEM mode\n",
             "i915_initialize");
      return (-22);
    } else {

    }
    dev_priv->ring.Size = (unsigned long )init->ring_size;
    dev_priv->ring.tail_mask = (int )((unsigned int )dev_priv->ring.Size - 1U);
    dev_priv->ring.map.offset = (unsigned long )init->ring_start;
    dev_priv->ring.map.size = (unsigned long )init->ring_size;
    dev_priv->ring.map.type = _DRM_FRAME_BUFFER;
    dev_priv->ring.map.flags = 0;
    dev_priv->ring.map.mtrr = 0;
    drm_core_ioremap(& dev_priv->ring.map, dev);
    if ((unsigned long )dev_priv->ring.map.handle == (unsigned long )((void *)0)) {
      i915_dma_cleanup(dev);
      printk("<3>[drm:%s] *ERROR* can not ioremap virtual address for ring buffer\n",
             "i915_initialize");
      return (-12);
    } else {

    }
  } else {

  }
  dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.handle;
  dev_priv->cpp = init->cpp;
  dev_priv->back_offset = (int )init->back_offset;
  dev_priv->front_offset = (int )init->front_offset;
  dev_priv->current_page = 0;
  (dev_priv->sarea_priv)->pf_current_page = dev_priv->current_page;
  dev_priv->allow_batchbuffer = 1;
  return (0);
}
}
static int i915_dma_resume(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] %s\n", "i915_dma_resume", "i915_dma_resume");
  } else {

  }
  if ((unsigned long )dev_priv->sarea == (unsigned long )((drm_local_map_t *)0)) {
    printk("<3>[drm:%s] *ERROR* can not find sarea!\n", "i915_dma_resume");
    return (-22);
  } else {

  }
  if ((unsigned long )dev_priv->ring.map.handle == (unsigned long )((void *)0)) {
    printk("<3>[drm:%s] *ERROR* can not ioremap virtual address for ring buffer\n",
           "i915_dma_resume");
    return (-12);
  } else {

  }
  if ((unsigned long )dev_priv->hw_status_page == (unsigned long )((void *)0)) {
    printk("<3>[drm:%s] *ERROR* Can not find hardware status page\n", "i915_dma_resume");
    return (-22);
  } else {

  }
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] hw status page @ %p\n", "i915_dma_resume", dev_priv->hw_status_page);
  } else {

  }
  if (dev_priv->status_gfx_addr != 0U) {
    writel(dev_priv->status_gfx_addr, (void volatile   *)dev_priv->regs + 8320U);
  } else {
    writel((unsigned int )dev_priv->dma_status_page, (void volatile   *)dev_priv->regs + 8320U);
  }
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] Enabled hardware status page\n", "i915_dma_resume");
  } else {

  }
  return (0);
}
}
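/* DMA init ioctl: dispatches on init->func to i915_initialize(),
 * i915_dma_cleanup() or i915_dma_resume(); any other value yields
 * -22 (-EINVAL). */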
static int i915_dma_init(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_init_t *init ;
  int retcode ;

  {
  init = (drm_i915_init_t *)data;
  retcode = 0;
  switch ((unsigned int )init->func) {
  case 1U: 
  retcode = i915_initialize(dev, init);
  goto ldv_23294;
  case 2U: 
  retcode = i915_dma_cleanup(dev);
  goto ldv_23294;
  case 3U: 
  retcode = i915_dma_resume(dev);
  goto ldv_23294;
  default: 
  retcode = -22;
  goto ldv_23294;
  }
  ldv_23294: ;
  return (retcode);
}
}
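/* Validates a single command header from a user-supplied buffer and returns
 * its length in dwords (including the header), or 0 if the opcode is not
 * allowed.  The nested switches decode the client, opcode and sub-opcode
 * bit fields of the command word. */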
static int do_validate_cmd(int cmd ) 
{ 


  {
  switch ((int )((unsigned int )cmd >> 29)) {
  case 0: ;
  switch ((cmd >> 23) & 63) {
  case 0: ;
  return (1);
  case 4: ;
  return (1);
  default: ;
  return (0);
  }
  goto ldv_23305;
  case 1: ;
  return (0);
  case 2: ;
  return ((cmd & 255) + 2);
  case 3: ;
  if (((cmd >> 24) & 31) <= 24) {
    return (1);
  } else {

  }
  switch ((cmd >> 24) & 31) {
  case 28: ;
  return (1);
  case 29: ;
  switch ((cmd >> 16) & 255) {
  case 3: ;
  return ((cmd & 31) + 2);
  case 4: ;
  return ((cmd & 15) + 2);
  default: ;
  return ((cmd & 65535) + 2);
  }
  case 30: ;
  if ((cmd & 8388608) != 0) {
    return ((cmd & 65535) + 1);
  } else {
    return (1);
  }
  case 31: ;
  if ((cmd & 8388608) == 0) {
    return ((cmd & 131071) + 2);
  } else
  if ((cmd & 131072) != 0) {
    if ((cmd & 65535) == 0) {
      return (0);
    } else {
      return (((cmd & 65535) + 1) / 2 + 1);
    }
  } else {
    return (2);
  }
  default: ;
  return (0);
  }
  default: ;
  return (0);
  }
  ldv_23305: ;
  return (0);
}
}
static int validate_cmd(int cmd ) 
{ 
  int ret ;
  int tmp ;

  {
  tmp = do_validate_cmd(cmd);
  ret = tmp;
  return (ret);
}
}
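/* Copies `dwords` command dwords from user space into the ring buffer.  Each
 * command header is validated with validate_cmd(); its payload dwords are
 * copied one at a time while the local tail advances (masked by tail_mask),
 * an extra zero dword pads odd-length buffers, and the new tail is finally
 * written back to the ring tail register (offset 8240, i.e. 0x2030). */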
static int i915_emit_cmds(struct drm_device *dev , int *buffer , int dwords ) 
{ 
  drm_i915_private_t *dev_priv ;
  int i ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  int cmd ;
  int sz ;
  int tmp ;
  int tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )(dwords + 1) * 4UL >= dev_priv->ring.Size - 8UL) {
    return (-22);
  } else {

  }
  if (dev_priv->ring.space < ((dwords + 1) & -2) * 4) {
    i915_wait_ring(dev, ((dwords + 1) & -2) * 4, "i915_emit_cmds");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  i = 0;
  goto ldv_23340;
  ldv_23339: 
  tmp = __copy_from_user((void *)(& cmd), (void const   *)buffer + (unsigned long )i,
                         4U);
  if (tmp != 0) {
    return (-22);
  } else {

  }
  sz = validate_cmd(cmd);
  if (sz == 0 || i + sz > dwords) {
    return (-22);
  } else {

  }
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )cmd;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  goto ldv_23337;
  ldv_23336: 
  tmp___0 = __copy_from_user((void *)(& cmd), (void const   *)buffer + (unsigned long )i,
                             4U);
  if (tmp___0 != 0) {
    return (-22);
  } else {

  }
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )cmd;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  ldv_23337: 
  i = i + 1;
  sz = sz - 1;
  if (sz != 0) {
    goto ldv_23336;
  } else {

  }

  ldv_23340: ;
  if (i < dwords) {
    goto ldv_23339;
  } else {

  }

  if (dwords & 1) {
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
  } else {

  }
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  return (0);
}
}
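/* Copies one clip rectangle from user space, rejects degenerate boxes, and
 * emits a draw-rectangle command to the ring.  The first branch (the newer
 * PCI IDs) emits the 4-dword form with header 0x79000002; the fallback path
 * emits the 6-dword form with header 0x7D800003, which additionally carries
 * DR1 and a trailing padding dword. */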
int i915_emit_box(struct drm_device *dev , struct drm_clip_rect *boxes , int i , int DR1 ,
                  int DR4 ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_clip_rect box ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  tmp = __copy_from_user((void *)(& box), (void const   *)boxes + (unsigned long )i,
                         8U);
  if (tmp != 0) {
    return (-14);
  } else {

  }
  if ((((int )box.y2 <= (int )box.y1 || (int )box.x2 <= (int )box.x1) || (unsigned int )box.y2 == 0U) || (unsigned int )box.x2 == 0U) {
    printk("<3>[drm:%s] *ERROR* Bad box %d,%d..%d,%d\n", "i915_emit_box", (int )box.x1,
           (int )box.y1, (int )box.x2, (int )box.y2);
    return (-22);
  } else {

  }
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    if (dev_priv->ring.space <= 15) {
      i915_wait_ring(dev, 16, "i915_emit_box");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 2030043138U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((int )box.x1 | ((int )box.y1 << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((((int )box.x2 + -1) & 65535) | (((int )box.y2 + -1) << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )DR4;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  } else {
    if (dev_priv->ring.space <= 23) {
      i915_wait_ring(dev, 24, "i915_emit_box");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 2105540611U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )DR1;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((int )box.x1 | ((int )box.y1 << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((((int )box.x2 + -1) & 65535) | (((int )box.y2 + -1) << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )DR4;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  }
  return (0);
}
}
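/* Emits a breadcrumb: increments dev_priv->counter (reset on signed
 * overflow), records it in the sarea, and writes a 4-dword packet whose
 * header 276824065U (0x10800001, apparently MI_STORE_DWORD_INDEX) stores the
 * counter into the hardware status page. */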
static void i915_emit_breadcrumb(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  dev_priv->counter = dev_priv->counter + (uint32_t )1;
  if ((int )dev_priv->counter < 0) {
    dev_priv->counter = 0U;
  } else {

  }
  if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->last_enqueue = (int )dev_priv->counter;
  } else {

  }
  if (dev_priv->ring.space <= 15) {
    i915_wait_ring(dev, 16, "i915_emit_breadcrumb");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 276824065U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 20U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = dev_priv->counter;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  return;
}
}
static int i915_dispatch_cmdbuffer(struct drm_device *dev , drm_i915_cmdbuffer_t *cmd ) 
{ 
  int nbox ;
  int i ;
  int count ;
  int ret ;

  {
  nbox = cmd->num_cliprects;
  i = 0;
  if ((cmd->sz & 3) != 0) {
    printk("<3>[drm:%s] *ERROR* alignment", "i915_dispatch_cmdbuffer");
    return (-22);
  } else {

  }
  i915_kernel_lost_context(dev);
  count = nbox != 0 ? nbox : 1;
  i = 0;
  goto ldv_23375;
  ldv_23374: ;
  if (i < nbox) {
    ret = i915_emit_box(dev, cmd->cliprects, i, cmd->DR1, cmd->DR4);
    if (ret != 0) {
      return (ret);
    } else {

    }
  } else {

  }
  ret = i915_emit_cmds(dev, (int *)cmd->buf, cmd->sz / 4);
  if (ret != 0) {
    return (ret);
  } else {

  }
  i = i + 1;
  ldv_23375: ;
  if (i < count) {
    goto ldv_23374;
  } else {

  }
  i915_emit_breadcrumb(dev);
  return (0);
}
}
static int i915_dispatch_batchbuffer(struct drm_device *dev , drm_i915_batchbuffer_t *batch ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_clip_rect *boxes ;
  int nbox ;
  int i ;
  int count ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  int ret ;
  int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  boxes = batch->cliprects;
  nbox = batch->num_cliprects;
  i = 0;
  if (((batch->start | batch->used) & 7) != 0) {
    printk("<3>[drm:%s] *ERROR* alignment", "i915_dispatch_batchbuffer");
    return (-22);
  } else {

  }
  i915_kernel_lost_context(dev);
  count = nbox != 0 ? nbox : 1;
  i = 0;
  goto ldv_23393;
  ldv_23392: ;
  if (i < nbox) {
    tmp = i915_emit_box(dev, boxes, i, batch->DR1, batch->DR4);
    ret = tmp;
    if (ret != 0) {
      return (ret);
    } else {

    }
  } else {

  }
  if (dev->pci_device != 13687 && dev->pci_device != 9570) {
    if (dev_priv->ring.space <= 7) {
      i915_wait_ring(dev, 8, "i915_dispatch_batchbuffer");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
      *((unsigned int volatile   *)virt + (unsigned long )outring) = 411042176U;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
      *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )batch->start;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
    } else {
      *((unsigned int volatile   *)virt + (unsigned long )outring) = 411041920U;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
      *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )(batch->start | 1);
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
    }
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  } else {
    if (dev_priv->ring.space <= 15) {
      i915_wait_ring(dev, 16, "i915_dispatch_batchbuffer");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 402653185U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )(batch->start | 1);
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((batch->start + batch->used) + -4);
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  }
  i = i + 1;
  ldv_23393: ;
  if (i < count) {
    goto ldv_23392;
  } else {

  }
  i915_emit_breadcrumb(dev);
  return (0);
}
}
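/* Emits the page-flip sequence: a flush packet (0x02000001), a display-buffer
 * flip packet (0x0A400002, apparently with the async-flip bit set) that
 * selects back_offset or front_offset depending on current_page and toggles
 * it, a wait-for-event packet (0x01800004), and finally a breadcrumb storing
 * the incremented counter to the status page; the new page is then mirrored
 * into the sarea. */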
static int i915_dispatch_flip(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  uint32_t tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )dev_priv->sarea_priv == (unsigned long )((drm_i915_sarea_t *)0)) {
    return (-22);
  } else {

  }
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] %s: page=%d pfCurrentPage=%d\n", "i915_dispatch_flip", "i915_dispatch_flip",
           dev_priv->current_page, (dev_priv->sarea_priv)->pf_current_page);
  } else {

  }
  i915_kernel_lost_context(dev);
  if (dev_priv->ring.space <= 7) {
    i915_wait_ring(dev, 8, "i915_dispatch_flip");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 33554433U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  if (dev_priv->ring.space <= 23) {
    i915_wait_ring(dev, 24, "i915_dispatch_flip");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 171966466U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  if (dev_priv->current_page == 0) {
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )dev_priv->back_offset;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->current_page = 1;
  } else {
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )dev_priv->front_offset;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->current_page = 0;
  }
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  if (dev_priv->ring.space <= 7) {
    i915_wait_ring(dev, 8, "i915_dispatch_flip");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 25165828U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  tmp = dev_priv->counter;
  dev_priv->counter = dev_priv->counter + (uint32_t )1;
  (dev_priv->sarea_priv)->last_enqueue = (int )tmp;
  if (dev_priv->ring.space <= 15) {
    i915_wait_ring(dev, 16, "i915_dispatch_flip");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 276824065U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 20U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = dev_priv->counter;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  (dev_priv->sarea_priv)->pf_current_page = dev_priv->current_page;
  return (0);
}
}
static int i915_quiescent(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  i915_kernel_lost_context(dev);
  tmp = i915_wait_ring(dev, (int )((unsigned int )dev_priv->ring.Size - 8U), "i915_quiescent");
  return (tmp);
}
}
static int i915_flush_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  int ret ;

  {
  if ((unsigned long )((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
    if ((int )(dev->lock.hw_lock)->lock >= 0 || (unsigned long )dev->lock.file_priv != (unsigned long )file_priv) {
      printk("<3>[drm:%s] *ERROR* %s called without lock held, held  %d owner %p %p\n",
             "i915_flush_ioctl", "i915_flush_ioctl", (unsigned int )(dev->lock.hw_lock)->lock & 2147483648U,
             dev->lock.file_priv, file_priv);
      return (-22);
    } else {

    }
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_quiescent(dev);
  mutex_unlock(& dev->struct_mutex);
  return (ret);
}
}
static int i915_batchbuffer(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  u32 *hw_status ;
  drm_i915_sarea_t *sarea_priv ;
  drm_i915_batchbuffer_t *batch ;
  int ret ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp ;
  long tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  hw_status = (u32 *)dev_priv->hw_status_page;
  sarea_priv = dev_priv->sarea_priv;
  batch = (drm_i915_batchbuffer_t *)data;
  if (dev_priv->allow_batchbuffer == 0) {
    printk("<3>[drm:%s] *ERROR* Batchbuffer ioctl disabled\n", "i915_batchbuffer");
    return (-22);
  } else {

  }
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] i915 batchbuffer, start %x used %d cliprects %d\n", "i915_batchbuffer",
           batch->start, batch->used, batch->num_cliprects);
  } else {

  }
  if ((unsigned long )((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
    if ((int )(dev->lock.hw_lock)->lock >= 0 || (unsigned long )dev->lock.file_priv != (unsigned long )file_priv) {
      printk("<3>[drm:%s] *ERROR* %s called without lock held, held  %d owner %p %p\n",
             "i915_batchbuffer", "i915_batchbuffer", (unsigned int )(dev->lock.hw_lock)->lock & 2147483648U,
             dev->lock.file_priv, file_priv);
      return (-22);
    } else {

    }
  } else {

  }
  if (batch->num_cliprects != 0) {
    tmp = current_thread_info();
    __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (batch->cliprects),
              "g" ((long )((unsigned long )batch->num_cliprects * 8UL)), "rm" (tmp->addr_limit.seg));
    tmp___0 = ldv__builtin_expect(flag == 0UL, 1L);
    if (tmp___0 == 0L) {
      return (-14);
    } else {

    }
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_dispatch_batchbuffer(dev, batch);
  mutex_unlock(& dev->struct_mutex);
  if ((unsigned long )sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    sarea_priv->last_dispatch = (int )*(hw_status + 5UL);
  } else {

  }
  return (ret);
}
}
static int i915_cmdbuffer(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  u32 *hw_status ;
  drm_i915_sarea_t *sarea_priv ;
  drm_i915_cmdbuffer_t *cmdbuf ;
  int ret ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp ;
  long tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  hw_status = (u32 *)dev_priv->hw_status_page;
  sarea_priv = dev_priv->sarea_priv;
  cmdbuf = (drm_i915_cmdbuffer_t *)data;
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] i915 cmdbuffer, buf %p sz %d cliprects %d\n", "i915_cmdbuffer",
           cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
  } else {

  }
  if ((unsigned long )((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
    if ((int )(dev->lock.hw_lock)->lock >= 0 || (unsigned long )dev->lock.file_priv != (unsigned long )file_priv) {
      printk("<3>[drm:%s] *ERROR* %s called without lock held, held  %d owner %p %p\n",
             "i915_cmdbuffer", "i915_cmdbuffer", (unsigned int )(dev->lock.hw_lock)->lock & 2147483648U,
             dev->lock.file_priv, file_priv);
      return (-22);
    } else {

    }
  } else {

  }
  if (cmdbuf->num_cliprects != 0) {
    tmp = current_thread_info();
    __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (cmdbuf->cliprects),
              "g" ((long )((unsigned long )cmdbuf->num_cliprects * 8UL)), "rm" (tmp->addr_limit.seg));
    tmp___0 = ldv__builtin_expect(flag == 0UL, 1L);
    if (tmp___0 == 0L) {
      printk("<3>[drm:%s] *ERROR* Fault accessing cliprects\n", "i915_cmdbuffer");
      return (-14);
    } else {

    }
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
  mutex_unlock(& dev->struct_mutex);
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* i915_dispatch_cmdbuffer failed\n", "i915_cmdbuffer");
    return (ret);
  } else {

  }
  if ((unsigned long )sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    sarea_priv->last_dispatch = (int )*(hw_status + 5UL);
  } else {

  }
  return (0);
}
}
static int i915_flip_bufs(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  int ret ;

  {
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] %s\n", "i915_flip_bufs", "i915_flip_bufs");
  } else {

  }
  if ((unsigned long )((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
    if ((int )(dev->lock.hw_lock)->lock >= 0 || (unsigned long )dev->lock.file_priv != (unsigned long )file_priv) {
      printk("<3>[drm:%s] *ERROR* %s called without lock held, held  %d owner %p %p\n",
             "i915_flip_bufs", "i915_flip_bufs", (unsigned int )(dev->lock.hw_lock)->lock & 2147483648U,
             dev->lock.file_priv, file_priv);
      return (-22);
    } else {

    }
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_dispatch_flip(dev);
  mutex_unlock(& dev->struct_mutex);
  return (ret);
}
}
static int i915_getparam(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_getparam_t *param ;
  int value ;
  unsigned long tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  param = (drm_i915_getparam_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_getparam");
    return (-22);
  } else {

  }
  switch (param->param) {
  case 1: 
  value = (dev->pdev)->irq != 0U;
  goto ldv_23461;
  case 2: 
  value = dev_priv->allow_batchbuffer != 0;
  goto ldv_23461;
  case 3: 
  value = (int )*((u32 volatile   *)dev_priv->hw_status_page + 5UL);
  goto ldv_23461;
  case 4: 
  value = dev->pci_device;
  goto ldv_23461;
  case 5: 
  value = 1;
  goto ldv_23461;
  default: 
  printk("<3>[drm:%s] *ERROR* Unknown parameter %d\n", "i915_getparam", param->param);
  return (-22);
  }
  ldv_23461: 
  tmp = copy_to_user((void *)param->value, (void const   *)(& value), 4U);
  if (tmp != 0UL) {
    printk("<3>[drm:%s] *ERROR* DRM_COPY_TO_USER failed\n", "i915_getparam");
    return (-14);
  } else {

  }
  return (0);
}
}
static int i915_setparam(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_setparam_t *param ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  param = (drm_i915_setparam_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_setparam");
    return (-22);
  } else {

  }
  switch (param->param) {
  case 1: ;
  goto ldv_23476;
  case 2: 
  dev_priv->tex_lru_log_granularity = param->value;
  goto ldv_23476;
  case 3: 
  dev_priv->allow_batchbuffer = param->value;
  goto ldv_23476;
  default: 
  printk("<3>[drm:%s] *ERROR* unknown parameter %d\n", "i915_setparam", param->param);
  return (-22);
  }
  ldv_23476: ;
  return (0);
}
}
static int i915_set_status_page(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_hws_addr_t *hws ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  hws = (drm_i915_hws_addr_t *)data;
  if ((((dev->pci_device != 10690 && dev->pci_device != 10674) && dev->pci_device != 10706) && dev->pci_device != 10818) && ((dev->pci_device != 11778 && dev->pci_device != 11794) && dev->pci_device != 11810)) {
    return (-22);
  } else {

  }
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_set_status_page");
    return (-22);
  } else {

  }
  printk("<7>set status page addr 0x%08x\n", (unsigned int )hws->addr);
  dev_priv->status_gfx_addr = (unsigned int )hws->addr & 536866816U;
  dev_priv->hws_map.offset = (unsigned long )((unsigned long long )(dev->agp)->base + hws->addr);
  dev_priv->hws_map.size = 4096UL;
  dev_priv->hws_map.type = _DRM_FRAME_BUFFER;
  dev_priv->hws_map.flags = 0;
  dev_priv->hws_map.mtrr = 0;
  drm_core_ioremap(& dev_priv->hws_map, dev);
  if ((unsigned long )dev_priv->hws_map.handle == (unsigned long )((void *)0)) {
    i915_dma_cleanup(dev);
    dev_priv->status_gfx_addr = 0U;
    printk("<3>[drm:%s] *ERROR* can not ioremap virtual address for G33 hw status page\n",
           "i915_set_status_page");
    return (-12);
  } else {

  }
  dev_priv->hw_status_page = dev_priv->hws_map.handle;
  memset(dev_priv->hw_status_page, 0, 4096UL);
  writel(dev_priv->status_gfx_addr, (void volatile   *)dev_priv->regs + 8320U);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] load hws HWS_PGA with gfx mem 0x%x\n", "i915_set_status_page",
           dev_priv->status_gfx_addr);
  } else {

  }
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] load hws at %p\n", "i915_set_status_page", dev_priv->hw_status_page);
  } else {

  }
  return (0);
}
}
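/* Driver load: selects the MMIO BAR from the PCI device ID, allocates and
 * zeroes the 4240-byte private structure, ioremaps the register BAR, loads
 * the GEM state, sets up the physical hardware status page unless the device
 * is in the ID list that apparently uses a GTT-based status page, enables
 * MSI where supported, initializes the opregion, and creates the user-irq
 * spinlock. */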
int i915_driver_load(struct drm_device *dev , unsigned long flags ) 
{ 
  struct drm_i915_private *dev_priv ;
  unsigned long base ;
  unsigned long size ;
  int ret ;
  int mmio_bar ;
  void *tmp ;
  int tmp___0 ;
  struct lock_class_key __key ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  ret = 0;
  mmio_bar = (((((dev->pci_device != 9602 && dev->pci_device != 9610) && dev->pci_device != 9618) && dev->pci_device != 10098) && (dev->pci_device != 10146 && dev->pci_device != 10158)) && (((((((((dev->pci_device != 10610 && dev->pci_device != 10626) && dev->pci_device != 10642) && dev->pci_device != 10658) && dev->pci_device != 10754) && dev->pci_device != 10770) && dev->pci_device != 10818) && dev->pci_device != 11778) && dev->pci_device != 11794) && dev->pci_device != 11810)) && ((dev->pci_device != 10690 && dev->pci_device != 10674) && dev->pci_device != 10706);
  dev->counters = dev->counters + 4UL;
  dev->types[6] = _DRM_STAT_IRQ;
  dev->types[7] = _DRM_STAT_PRIMARY;
  dev->types[8] = _DRM_STAT_SECONDARY;
  dev->types[9] = _DRM_STAT_DMA;
  tmp = drm_alloc(4240UL, 2);
  dev_priv = (struct drm_i915_private *)tmp;
  if ((unsigned long )dev_priv == (unsigned long )((struct drm_i915_private *)0)) {
    return (-12);
  } else {

  }
  memset((void *)dev_priv, 0, 4240UL);
  dev->dev_private = (void *)dev_priv;
  dev_priv->dev = dev;
  base = drm_get_resource_start(dev, (unsigned int )mmio_bar);
  size = drm_get_resource_len(dev, (unsigned int )mmio_bar);
  dev_priv->regs = ioremap((resource_size_t )base, size);
  i915_gem_load(dev);
  if ((((dev->pci_device != 10690 && dev->pci_device != 10674) && dev->pci_device != 10706) && dev->pci_device != 10818) && ((dev->pci_device != 11778 && dev->pci_device != 11794) && dev->pci_device != 11810)) {
    ret = i915_init_phys_hws(dev);
    if (ret != 0) {
      return (ret);
    } else {

    }
  } else {

  }
  if (dev->pci_device != 10098 && (dev->pci_device != 10146 && dev->pci_device != 10158)) {
    tmp___0 = pci_enable_msi(dev->pdev);
    if (tmp___0 != 0) {
      printk("<3>[drm:%s] *ERROR* failed to enable MSI\n", "i915_driver_load");
    } else {

    }
  } else {

  }
  intel_opregion_init(dev);
  __spin_lock_init(& dev_priv->user_irq_lock, "&dev_priv->user_irq_lock", & __key);
  return (ret);
}
}
int i915_driver_unload(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  if ((unsigned int )*((unsigned char *)dev->pdev + 1808UL) != 0U) {
    pci_disable_msi(dev->pdev);
  } else {

  }
  i915_free_hws(dev);
  if ((unsigned long )dev_priv->regs != (unsigned long )((void *)0)) {
    iounmap((void volatile   *)dev_priv->regs);
  } else {

  }
  intel_opregion_free(dev);
  drm_free(dev->dev_private, 4240UL, 2);
  return (0);
}
}
int i915_driver_open(struct drm_device *dev , struct drm_file *file_priv ) 
{ 
  struct drm_i915_file_private *i915_file_priv ;
  void *tmp ;

  {
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] \n", "i915_driver_open");
  } else {

  }
  tmp = drm_alloc(8UL, 10);
  i915_file_priv = (struct drm_i915_file_private *)tmp;
  if ((unsigned long )i915_file_priv == (unsigned long )((struct drm_i915_file_private *)0)) {
    return (-12);
  } else {

  }
  file_priv->driver_priv = (void *)i915_file_priv;
  i915_file_priv->mm.last_gem_seqno = 0U;
  i915_file_priv->mm.last_gem_throttle_seqno = 0U;
  return (0);
}
}
void i915_driver_lastclose(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    return;
  } else {

  }
  i915_gem_lastclose(dev);
  if ((unsigned long )dev_priv->agp_heap != (unsigned long )((struct mem_block *)0)) {
    i915_mem_takedown(& dev_priv->agp_heap);
  } else {

  }
  i915_dma_cleanup(dev);
  return;
}
}
void i915_driver_preclose(struct drm_device *dev , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  i915_mem_release(dev, file_priv, dev_priv->agp_heap);
  return;
}
}
void i915_driver_postclose(struct drm_device *dev , struct drm_file *file_priv ) 
{ 
  struct drm_i915_file_private *i915_file_priv ;

  {
  i915_file_priv = (struct drm_i915_file_private *)file_priv->driver_priv;
  drm_free((void *)i915_file_priv, 8UL, 10);
  return;
}
}
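/* Ioctl dispatch table (35 entries).  The third field appears to encode the
 * DRM permission flags (7 = AUTH|MASTER|ROOT_ONLY, 1 = AUTH, 5 = AUTH|ROOT_ONLY,
 * 0 = none); slots 16 and 18 are intentionally left empty. */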
struct drm_ioctl_desc i915_ioctls[35U]  = 
  {      {0U, & i915_dma_init, 7}, 
        {1U, & i915_flush_ioctl, 1}, 
        {2U, & i915_flip_bufs, 1}, 
        {3U, & i915_batchbuffer, 1}, 
        {4U, & i915_irq_emit, 1}, 
        {5U, & i915_irq_wait, 1}, 
        {6U, & i915_getparam, 1}, 
        {7U, & i915_setparam, 7}, 
        {8U, & i915_mem_alloc, 1}, 
        {9U, & i915_mem_free, 1}, 
        {10U, & i915_mem_init_heap, 7}, 
        {11U, & i915_cmdbuffer, 1}, 
        {12U, & i915_mem_destroy_heap, 7}, 
        {13U, & i915_vblank_pipe_set, 7}, 
        {14U, & i915_vblank_pipe_get, 1}, 
        {15U, & i915_vblank_swap, 1}, 
        {0U, 0, 0}, 
        {17U, & i915_set_status_page, 1}, 
        {0U, 0, 0}, 
        {19U, & i915_gem_init_ioctl, 7}, 
        {20U, & i915_gem_execbuffer, 1}, 
        {21U, & i915_gem_pin_ioctl, 5}, 
        {22U, & i915_gem_unpin_ioctl, 5}, 
        {23U, & i915_gem_busy_ioctl, 1}, 
        {24U, & i915_gem_throttle_ioctl, 1}, 
        {25U, & i915_gem_entervt_ioctl, 7}, 
        {26U, & i915_gem_leavevt_ioctl, 7}, 
        {27U, & i915_gem_create_ioctl, 0}, 
        {28U, & i915_gem_pread_ioctl, 0}, 
        {29U, & i915_gem_pwrite_ioctl, 0}, 
        {30U, & i915_gem_mmap_ioctl, 0}, 
        {31U, & i915_gem_set_domain_ioctl, 0}, 
        {32U, & i915_gem_sw_finish_ioctl, 0}, 
        {33U, & i915_gem_set_tiling, 0}, 
        {34U, & i915_gem_get_tiling, 0}};
int i915_max_ioctl  =    35;
int i915_driver_device_is_agp(struct drm_device *dev ) 
{ 


  {
  return (1);
}
}
unsigned long ldv___get_free_pages_18(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
__inline static void *kmalloc(size_t size , gfp_t flags ) 
{ 


  {
  ldv_check_alloc_flags(flags);
  ldv_kmalloc_19(size, flags);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_20(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_24(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_28(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
void __builtin_prefetch(void const   *  , ...) ;
void ldv_spin_lock(void) ;
void ldv_spin_unlock(void) ;
__inline static int variable_test_bit(int nr , unsigned long const volatile   *addr ) 
{ 
  int oldbit ;

  {
  __asm__  volatile   ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)),
                       "Ir" (nr));
  return (oldbit);
}
}
__inline static struct task_struct *get_current(void) 
{ 
  struct task_struct *ret__ ;

  {
  switch (8UL) {
  case 2UL: 
  __asm__  ("movw %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent));
  goto ldv_4064;
  case 4UL: 
  __asm__  ("movl %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent));
  goto ldv_4064;
  case 8UL: 
  __asm__  ("movq %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent));
  goto ldv_4064;
  default: 
  __bad_pda_field();
  }
  ldv_4064: ;
  return (ret__);
}
}
__inline static void INIT_LIST_HEAD(struct list_head *list ) 
{ 


  {
  list->next = list;
  list->prev = list;
  return;
}
}
extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ;
__inline static void list_add_tail(struct list_head *new , struct list_head *head ) 
{ 


  {
  __list_add(new, head->prev, head);
  return;
}
}
extern void list_del(struct list_head * ) ;
__inline static int test_ti_thread_flag(struct thread_info *ti , int flag ) 
{ 
  int tmp ;

  {
  tmp = variable_test_bit(flag, (unsigned long const volatile   *)(& ti->flags));
  return (tmp);
}
}
extern void _spin_lock(spinlock_t * ) ;
extern void _spin_unlock(spinlock_t * ) ;
extern int default_wake_function(wait_queue_t * , unsigned int  , int  , void * ) ;
extern void init_waitqueue_head(wait_queue_head_t * ) ;
extern void add_wait_queue(wait_queue_head_t * , wait_queue_t * ) ;
extern void remove_wait_queue(wait_queue_head_t * , wait_queue_t * ) ;
extern void __wake_up(wait_queue_head_t * , unsigned int  , int  , void * ) ;
struct page *ldv_alloc_page_vma_44(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_34(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
extern unsigned long volatile   jiffies ;
extern int schedule_work(struct work_struct * ) ;
void *ldv_kmem_cache_alloc_36(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_40(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
__inline static void *ldv_kcalloc_38(size_t n , size_t size , gfp_t flags ) 
{ 
  void *tmp ;

  {
  if (size != 0UL && 0xffffffffffffffffUL / size < n) {
    return (0);
  } else {

  }
  tmp = __kmalloc(n * size, flags | 32768U);
  return (tmp);
}
}
__inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) ;
extern long schedule_timeout(long  ) ;
__inline static int test_tsk_thread_flag(struct task_struct *tsk , int flag ) 
{ 
  int tmp ;

  {
  tmp = test_ti_thread_flag((struct thread_info *)tsk->stack, flag);
  return (tmp);
}
}
__inline static int signal_pending(struct task_struct *p ) 
{ 
  int tmp ;
  long tmp___0 ;

  {
  tmp = test_tsk_thread_flag(p, 2);
  tmp___0 = ldv__builtin_expect(tmp != 0, 0L);
  return ((int )tmp___0);
}
}
extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device * , drm_drawable_t  ) ;
extern int drm_vblank_init(struct drm_device * , int  ) ;
extern void drm_locked_tasklet(struct drm_device * , void (*)(struct drm_device * ) ) ;
extern u32 drm_vblank_count(struct drm_device * , int  ) ;
extern void drm_handle_vblank(struct drm_device * , int  ) ;
extern int drm_vblank_get(struct drm_device * , int  ) ;
extern void drm_vblank_put(struct drm_device * , int  ) ;
__inline static void *drm_calloc(size_t nmemb , size_t size , int area ) 
{ 
  void *tmp ;

  {
  tmp = kcalloc(nmemb, size, 208U);
  return (tmp);
}
}
void i915_user_irq_get(struct drm_device *dev ) ;
void i915_user_irq_put(struct drm_device *dev ) ;
void i915_gem_vblank_work_handler(struct work_struct *work ) ;
void i915_enable_irq(drm_i915_private_t *dev_priv , u32 mask ) ;
uint32_t i915_get_gem_seqno(struct drm_device *dev ) ;
void opregion_asle_intr(struct drm_device *dev ) ;
void opregion_enable_asle(struct drm_device *dev ) ;
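/* IRQ mask helpers. The register at offset 0x20a8 is the interrupt mask
 * register (IMR): enabling a source clears its bit in the cached mask and
 * writes the mask back, and the trailing readl() is presumably a posting
 * read to flush the write. i915_disable_irq() below is the mirror image. */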
void i915_enable_irq(drm_i915_private_t *dev_priv , u32 mask ) 
{ 


  {
  if ((dev_priv->irq_mask_reg & mask) != 0U) {
    dev_priv->irq_mask_reg = dev_priv->irq_mask_reg & ~ mask;
    writel(dev_priv->irq_mask_reg, (void volatile   *)dev_priv->regs + 8360U);
    readl((void const volatile   *)dev_priv->regs + 8360U);
  } else {

  }
  return;
}
}
__inline static void i915_disable_irq(drm_i915_private_t *dev_priv , u32 mask ) 
{ 


  {
  if ((dev_priv->irq_mask_reg & mask) != mask) {
    dev_priv->irq_mask_reg = dev_priv->irq_mask_reg | mask;
    writel(dev_priv->irq_mask_reg, (void volatile   *)dev_priv->regs + 8360U);
    readl((void const volatile   *)dev_priv->regs + 8360U);
  } else {

  }
  return;
}
}
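/* Plane/pipe mapping: i915_get_pipe() reads DSPBCNTR (0x71180) or DSPACNTR
 * (0x70180) and tests the pipe-select bit (bit 24); i915_get_plane() below
 * inverts that mapping. */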
static int i915_get_pipe(struct drm_device *dev , int plane ) 
{ 
  drm_i915_private_t *dev_priv ;
  u32 dspcntr ;
  unsigned int tmp ;
  unsigned int tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if (plane != 0) {
    tmp = readl((void const volatile   *)dev_priv->regs + 463232U);
    dspcntr = tmp;
  } else {
    tmp___0 = readl((void const volatile   *)dev_priv->regs + 459136U);
    dspcntr = tmp___0;
  }
  return ((dspcntr & 16777216U) != 0U);
}
}
static int i915_get_plane(struct drm_device *dev , int pipe ) 
{ 
  int tmp ;

  {
  tmp = i915_get_pipe(dev, 0);
  if (tmp == pipe) {
    return (0);
  } else {

  }
  return (1);
}
}
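/* Pipe-enabled check: reads PIPEBCONF (0x71008) or PIPEACONF (0x70008); the
 * "(int )tmp < 0" test checks bit 31, the pipe enable bit. */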
static int i915_pipe_enabled(struct drm_device *dev , int pipe ) 
{ 
  drm_i915_private_t *dev_priv ;
  unsigned long pipeconf ;
  unsigned int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  pipeconf = pipe != 0 ? 462856UL : 458760UL;
  tmp = readl((void const volatile   *)(dev_priv->regs + pipeconf));
  if ((int )tmp < 0) {
    return (1);
  } else {

  }
  return (0);
}
}
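/* Vblank swap tasklet. It collects queued swaps whose target vblank sequence
 * has passed, sorts the hits by the y1 of their drawable's first clip rect,
 * emits a drawing-rectangle command (GFX_OP_DRAWRECT_INFO, 0x7d800003, or the
 * 965 variant 0x79000002), and then blits back buffer to front buffer with one
 * XY_SRC_COPY_BLT per clip rect, working down the screen in horizontal slices
 * interleaved across the queued swaps. The repeated "store dword, bump
 * outcount/outring, mask with ringmask" runs are the CIL expansion of the ring
 * emit idiom the driver source presumably spells with BEGIN_LP_RING()/
 * OUT_RING()/ADVANCE_LP_RING(); the final writel() to offset 0x2030 publishes
 * the new ring tail. */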
static void i915_vblank_tasklet(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct list_head *list ;
  struct list_head *tmp ;
  struct list_head hits ;
  struct list_head *hit ;
  int nhits ;
  int nrects ;
  int slice[2U] ;
  int upper[2U] ;
  int lower[2U] ;
  int i ;
  unsigned int counter[2U] ;
  struct drm_drawable_info *drw ;
  drm_i915_sarea_t *sarea_priv ;
  u32 cpp ;
  u32 cmd ;
  u32 src_pitch ;
  u32 dst_pitch ;
  u32 ropcpp ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  drm_i915_vbl_swap_t *vbl_swap ;
  struct list_head  const  *__mptr ;
  int pipe ;
  int tmp___0 ;
  drm_i915_vbl_swap_t *swap_cmp ;
  struct list_head  const  *__mptr___0 ;
  struct drm_drawable_info *drw_cmp ;
  struct drm_drawable_info *tmp___1 ;
  int _max1 ;
  int _max2 ;
  int _max1___0 ;
  int _max2___0 ;
  drm_i915_vbl_swap_t *swap_hit ;
  struct list_head  const  *__mptr___1 ;
  struct drm_clip_rect *rect ;
  int num_rects ;
  int plane ;
  unsigned short top ;
  unsigned short bottom ;
  int y1 ;
  unsigned short _max1___1 ;
  unsigned short _max2___1 ;
  int y2 ;
  unsigned short _min1 ;
  unsigned short _min2 ;
  int tmp___2 ;
  int tmp___3 ;
  drm_i915_vbl_swap_t *swap_hit___0 ;
  struct list_head  const  *__mptr___2 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  sarea_priv = dev_priv->sarea_priv;
  cpp = dev_priv->cpp;
  cmd = cpp == 4U ? 1425014790U : 1421869062U;
  src_pitch = (u32 )sarea_priv->pitch * cpp;
  dst_pitch = (u32 )sarea_priv->pitch * cpp;
  ropcpp = ((cpp - 1U) << 24) | 13369344U;
  if ((((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) && sarea_priv->front_tiled != 0U) {
    cmd = cmd | 2048U;
    dst_pitch = dst_pitch >> 2;
  } else {

  }
  if ((((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) && sarea_priv->back_tiled != 0U) {
    cmd = cmd | 32768U;
    src_pitch = src_pitch >> 2;
  } else {

  }
  counter[0] = drm_vblank_count(dev, 0);
  counter[1] = drm_vblank_count(dev, 1);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] \n", "i915_vblank_tasklet");
  } else {

  }
  INIT_LIST_HEAD(& hits);
  nrects = 0;
  nhits = nrects;
  ldv_spin_lock();
  list = dev_priv->vbl_swaps.head.next;
  tmp = list->next;
  goto ldv_23307;
  ldv_23306: 
  __mptr = (struct list_head  const  *)list;
  vbl_swap = (drm_i915_vbl_swap_t *)__mptr;
  tmp___0 = i915_get_pipe(dev, (int )vbl_swap->plane);
  pipe = tmp___0;
  if (counter[pipe] - vbl_swap->sequence > 8388608U) {
    goto ldv_23298;
  } else {

  }
  list_del(list);
  dev_priv->swaps_pending = dev_priv->swaps_pending - 1U;
  drm_vblank_put(dev, pipe);
  _spin_unlock(& dev_priv->swaps_lock);
  _spin_lock(& dev->drw_lock);
  drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
  if ((unsigned long )drw == (unsigned long )((struct drm_drawable_info *)0)) {
    _spin_unlock(& dev->drw_lock);
    drm_free((void *)vbl_swap, 32UL, 2);
    _spin_lock(& dev_priv->swaps_lock);
    goto ldv_23298;
  } else {

  }
  hit = hits.next;
  goto ldv_23305;
  ldv_23304: 
  __mptr___0 = (struct list_head  const  *)hit;
  swap_cmp = (drm_i915_vbl_swap_t *)__mptr___0;
  tmp___1 = drm_get_drawable_info(dev, swap_cmp->drw_id);
  drw_cmp = tmp___1;
  if ((unsigned long )drw_cmp != (unsigned long )((struct drm_drawable_info *)0) && (int )(drw_cmp->rects)->y1 > (int )(drw->rects)->y1) {
    list_add_tail(list, hit);
    goto ldv_23303;
  } else {

  }
  hit = hit->next;
  ldv_23305: 
  __builtin_prefetch((void const   *)hit->next);
  if ((unsigned long )(& hits) != (unsigned long )hit) {
    goto ldv_23304;
  } else {

  }
  ldv_23303: 
  _spin_unlock(& dev->drw_lock);
  if ((unsigned long )(& hits) == (unsigned long )hit) {
    list_add_tail(list, hits.prev);
  } else {

  }
  nhits = nhits + 1;
  _spin_lock(& dev_priv->swaps_lock);
  ldv_23298: 
  list = tmp;
  tmp = list->next;
  ldv_23307: ;
  if ((unsigned long )(& dev_priv->vbl_swaps.head) != (unsigned long )list) {
    goto ldv_23306;
  } else {

  }

  if (nhits == 0) {
    ldv_spin_unlock();
    return;
  } else {

  }
  _spin_unlock(& dev_priv->swaps_lock);
  i915_kernel_lost_context(dev);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    if (dev_priv->ring.space <= 15) {
      i915_wait_ring(dev, 16, "i915_vblank_tasklet");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 2030043138U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )(((sarea_priv->width + -1) & 65535) | ((sarea_priv->height + -1) << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  } else {
    if (dev_priv->ring.space <= 23) {
      i915_wait_ring(dev, 24, "i915_vblank_tasklet");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 2105540611U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )(sarea_priv->width | (sarea_priv->height << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )(sarea_priv->width | (sarea_priv->height << 16));
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  }
  sarea_priv->ctxOwner = 0;
  upper[1] = 0;
  upper[0] = upper[1];
  _max1 = sarea_priv->pipeA_h / nhits;
  _max2 = 1;
  slice[0] = _max1 > _max2 ? _max1 : _max2;
  _max1___0 = sarea_priv->pipeB_h / nhits;
  _max2___0 = 1;
  slice[1] = _max1___0 > _max2___0 ? _max1___0 : _max2___0;
  lower[0] = sarea_priv->pipeA_y + slice[0];
  lower[1] = sarea_priv->pipeB_y + slice[0];
  _spin_lock(& dev->drw_lock);
  i = 0;
  goto ldv_23340;
  ldv_23339: ;
  if (i == nhits) {
    lower[1] = sarea_priv->height;
    lower[0] = lower[1];
  } else {

  }
  hit = hits.next;
  goto ldv_23337;
  ldv_23336: 
  __mptr___1 = (struct list_head  const  *)hit;
  swap_hit = (drm_i915_vbl_swap_t *)__mptr___1;
  drw = drm_get_drawable_info(dev, swap_hit->drw_id);
  if ((unsigned long )drw == (unsigned long )((struct drm_drawable_info *)0)) {
    goto ldv_23323;
  } else {

  }
  rect = drw->rects;
  plane = (int )swap_hit->plane;
  top = (unsigned short )upper[plane];
  bottom = (unsigned short )lower[plane];
  num_rects = (int )drw->num_rects;
  goto ldv_23334;
  ldv_23333: 
  _max1___1 = rect->y1;
  _max2___1 = top;
  y1 = (int )_max1___1 > (int )_max2___1 ? (int )_max1___1 : (int )_max2___1;
  _min1 = rect->y2;
  _min2 = bottom;
  y2 = (int )_min1 < (int )_min2 ? (int )_min1 : (int )_min2;
  if (y1 >= y2) {
    goto ldv_23332;
  } else {

  }
  if (dev_priv->ring.space <= 31) {
    i915_wait_ring(dev, 32, "i915_vblank_tasklet");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = cmd;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = ropcpp | dst_pitch;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((y1 << 16) | (int )rect->x1);
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((y2 << 16) | (int )rect->x2);
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )sarea_priv->front_offset;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )((y1 << 16) | (int )rect->x1);
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = src_pitch;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = (unsigned int volatile   )sarea_priv->back_offset;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  ldv_23332: 
  rect = rect + 1;
  ldv_23334: 
  tmp___2 = num_rects;
  num_rects = num_rects - 1;
  if (tmp___2 != 0) {
    goto ldv_23333;
  } else {

  }

  ldv_23323: 
  hit = hit->next;
  ldv_23337: 
  __builtin_prefetch((void const   *)hit->next);
  if ((unsigned long )(& hits) != (unsigned long )hit) {
    goto ldv_23336;
  } else {

  }
  upper[0] = lower[0];
  lower[0] = lower[0] + slice[0];
  upper[1] = lower[1];
  lower[1] = lower[1] + slice[1];
  ldv_23340: 
  tmp___3 = i;
  i = i + 1;
  if (tmp___3 < nhits) {
    goto ldv_23339;
  } else {

  }
  ldv_spin_unlock();
  hit = hits.next;
  tmp = hit->next;
  goto ldv_23346;
  ldv_23345: 
  __mptr___2 = (struct list_head  const  *)hit;
  swap_hit___0 = (drm_i915_vbl_swap_t *)__mptr___2;
  list_del(hit);
  drm_free((void *)swap_hit___0, 32UL, 2);
  hit = tmp;
  tmp = hit->next;
  ldv_23346: ;
  if ((unsigned long )(& hits) != (unsigned long )hit) {
    goto ldv_23345;
  } else {

  }

  return;
}
}
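/* Frame counter readout: the high 16 bits come from PIPE{A,B}FRAMEHIGH
 * (0x70040/0x71040) and the low 8 bits from the top byte of
 * PIPE{A,B}FRAMEPIXEL (0x70044/0x71044). The high register is read again
 * after the low one and the loop retries until both high reads agree, so a
 * rollover between the reads cannot yield a torn count. */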
u32 i915_get_vblank_counter(struct drm_device *dev , int plane ) 
{ 
  drm_i915_private_t *dev_priv ;
  unsigned long high_frame ;
  unsigned long low_frame ;
  u32 high1 ;
  u32 high2 ;
  u32 low ;
  u32 count ;
  int pipe ;
  int tmp ;
  unsigned int tmp___0 ;
  unsigned int tmp___1 ;
  unsigned int tmp___2 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  pipe = i915_get_pipe(dev, plane);
  high_frame = pipe != 0 ? 462912UL : 458816UL;
  low_frame = pipe != 0 ? 462916UL : 458820UL;
  tmp = i915_pipe_enabled(dev, pipe);
  if (tmp == 0) {
    printk("<3>[drm:%s] *ERROR* trying to get vblank count for disabled pipe %d\n",
           "i915_get_vblank_counter", pipe);
    return (0U);
  } else {

  }
  ldv_23361: 
  tmp___0 = readl((void const volatile   *)(dev_priv->regs + high_frame));
  high1 = tmp___0 & 65535U;
  tmp___1 = readl((void const volatile   *)(dev_priv->regs + low_frame));
  low = tmp___1 >> 24;
  tmp___2 = readl((void const volatile   *)(dev_priv->regs + high_frame));
  high2 = tmp___2 & 65535U;
  if (high1 != high2) {
    goto ldv_23361;
  } else {

  }
  count = (high1 << 8) | low;
  return (count);
}
}
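/* Work-queue entry point used when GEM owns the ring. Adding
 * 0xffffffffffffefd0UL (-0x1030) to the work_struct pointer is CIL's rendering
 * of container_of(); the offset presumably corresponds to
 * offsetof(drm_i915_private_t, mm.vblank_work). */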
void i915_gem_vblank_work_handler(struct work_struct *work ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_device *dev ;
  struct work_struct  const  *__mptr ;

  {
  __mptr = (struct work_struct  const  *)work;
  dev_priv = (drm_i915_private_t *)__mptr + 0xffffffffffffefd0UL;
  dev = dev_priv->dev;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  i915_vblank_tasklet(dev);
  mutex_unlock(& dev->struct_mutex);
  return;
}
}
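/* Top-level interrupt handler. When MSI is enabled (the pdev bitfield test,
 * presumably pdev->msi_enabled) all sources are first masked via IMR (0x20a8).
 * IIR (0x20a4) is read; pipe A/B event bits lead to PIPE{A,B}STAT
 * (0x70024/0x71024) handling and acking, the IIR value is written back to ack
 * it, the breadcrumb is mirrored into the SAREA, user-interrupt waiters are
 * woken, and the swap tasklet (or its GEM work item) is kicked if swaps are
 * pending. Returns 0 (IRQ_NONE) if IIR was empty, 1 (IRQ_HANDLED) otherwise. */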
irqreturn_t i915_driver_irq_handler(int irq , void *arg ) 
{ 
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  u32 iir ;
  u32 pipea_stats ;
  u32 pipeb_stats ;
  int vblank ;
  int tmp ;
  int tmp___0 ;

  {
  dev = (struct drm_device *)arg;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  vblank = 0;
  if ((unsigned int )*((unsigned char *)dev->pdev + 1808UL) != 0U) {
    writel(4294967295U, (void volatile   *)dev_priv->regs + 8360U);
  } else {

  }
  iir = readl((void const volatile   *)dev_priv->regs + 8356U);
  if (iir == 0U) {
    if ((unsigned int )*((unsigned char *)dev->pdev + 1808UL) != 0U) {
      writel(dev_priv->irq_mask_reg, (void volatile   *)dev_priv->regs + 8360U);
      readl((void const volatile   *)dev_priv->regs + 8360U);
    } else {

    }
    return (0);
  } else {

  }
  if ((iir & 64U) != 0U) {
    pipea_stats = readl((void const volatile   *)dev_priv->regs + 458788U);
    if ((dev_priv->vblank_pipe & 1) == 0) {
      pipea_stats = pipea_stats & 4294574079U;
    } else
    if (((unsigned long )pipea_stats & 6UL) != 0UL) {
      vblank = vblank + 1;
      tmp = i915_get_plane(dev, 0);
      drm_handle_vblank(dev, tmp);
    } else {

    }
    writel(pipea_stats, (void volatile   *)dev_priv->regs + 458788U);
  } else {

  }
  if ((iir & 16U) != 0U) {
    pipeb_stats = readl((void const volatile   *)dev_priv->regs + 462884U);
    writel(pipeb_stats, (void volatile   *)dev_priv->regs + 462884U);
    if ((dev_priv->vblank_pipe & 2) == 0) {
      pipeb_stats = pipeb_stats & 4294574079U;
    } else
    if (((unsigned long )pipeb_stats & 6UL) != 0UL) {
      vblank = vblank + 1;
      tmp___0 = i915_get_plane(dev, 1);
      drm_handle_vblank(dev, tmp___0);
    } else {

    }
    if (((unsigned long )pipeb_stats & 64UL) != 0UL) {
      opregion_asle_intr(dev);
    } else {

    }
    writel(pipeb_stats, (void volatile   *)dev_priv->regs + 462884U);
  } else {

  }
  writel(iir, (void volatile   *)dev_priv->regs + 8356U);
  if ((unsigned int )*((unsigned char *)dev->pdev + 1808UL) != 0U) {
    writel(dev_priv->irq_mask_reg, (void volatile   *)dev_priv->regs + 8360U);
  } else {

  }
  readl((void const volatile   *)dev_priv->regs + 8356U);
  if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->last_dispatch = (int )*((u32 volatile   *)dev_priv->hw_status_page + 5UL);
  } else {

  }
  if ((iir & 2U) != 0U) {
    dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
    __wake_up(& dev_priv->irq_queue, 1U, 1, 0);
  } else {

  }
  if ((int )iir & 1) {
    opregion_asle_intr(dev);
  } else {

  }
  if (vblank != 0 && dev_priv->swaps_pending != 0U) {
    if ((unsigned long )dev_priv->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
      drm_locked_tasklet(dev, & i915_vblank_tasklet);
    } else {
      schedule_work(& dev_priv->mm.vblank_work);
    }
  } else {

  }
  return (1);
}
}
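/* Emits a breadcrumb: bumps dev_priv->counter (wrapping back to 1 when it
 * would go negative as an int), stores it at dword 5 of the hardware status
 * page with MI_STORE_DWORD_INDEX (0x10800001, index 5*4), and follows it with
 * MI_USER_INTERRUPT (0x01000000) so i915_wait_irq() gets woken when the ring
 * reaches it. Returns the emitted sequence number. */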
static int i915_emit_irq(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  i915_kernel_lost_context(dev);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] \n", "i915_emit_irq");
  } else {

  }
  dev_priv->counter = dev_priv->counter + (uint32_t )1;
  if ((int )dev_priv->counter < 0) {
    dev_priv->counter = 1U;
  } else {

  }
  if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->last_enqueue = (int )dev_priv->counter;
  } else {

  }
  if (dev_priv->ring.space <= 23) {
    i915_wait_ring(dev, 24, "i915_emit_irq");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 276824065U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 20U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = dev_priv->counter;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 16777216U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  return ((int )dev_priv->counter);
}
}
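/* Reference-counted enable/disable of the user interrupt (mask bit 0x2) under
 * user_irq_lock. The first get unmasks it, the last put masks it again; the
 * inline asm in i915_user_irq_put() is the expansion of
 * BUG_ON(user_irq_refcount <= 0). */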
void i915_user_irq_get(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  _spin_lock(& dev_priv->user_irq_lock);
  if (dev->irq_enabled != 0) {
    dev_priv->user_irq_refcount = dev_priv->user_irq_refcount + 1;
    if (dev_priv->user_irq_refcount == 1) {
      i915_enable_irq(dev_priv, 2U);
    } else {

    }
  } else {

  }
  _spin_unlock(& dev_priv->user_irq_lock);
  return;
}
}
void i915_user_irq_put(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  long tmp ;
  long tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  _spin_lock(& dev_priv->user_irq_lock);
  tmp = ldv__builtin_expect(dev->irq_enabled != 0, 0L);
  if (tmp != 0L) {
    tmp___0 = ldv__builtin_expect(dev_priv->user_irq_refcount <= 0, 0L);
    if (tmp___0 != 0L) {
      __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_irq.c.prepared"),
                           "i" (508), "i" (24UL));
      ldv_23397: ;
      goto ldv_23397;
    } else {

    }
  } else {

  }
  if (dev->irq_enabled != 0) {
    dev_priv->user_irq_refcount = dev_priv->user_irq_refcount - 1;
    if (dev_priv->user_irq_refcount == 0) {
      i915_disable_irq(dev_priv, 2U);
    } else {

    }
  } else {

  }
  _spin_unlock(& dev_priv->user_irq_lock);
  return;
}
}
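/* Waits for the breadcrumb at dword 5 of the hardware status page to reach
 * irq_nr, giving up after 750 jiffies with -EBUSY (-16) or on a pending signal
 * with -EINTR (-4). The body is an open-coded interruptible wait: it builds a
 * wait_queue_t around the current task, marks it TASK_INTERRUPTIBLE and polls
 * via schedule_timeout(). A rough sketch of the equivalent intent, assuming
 * the driver's READ_BREADCRUMB() helper:
 *
 *     ret = wait_event_interruptible_timeout(dev_priv->irq_queue,
 *               READ_BREADCRUMB(dev_priv) >= irq_nr, 750);
 */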
static int i915_wait_irq(struct drm_device *dev , int irq_nr ) 
{ 
  drm_i915_private_t *dev_priv ;
  int ret ;
  wait_queue_t entry ;
  struct task_struct *tmp ;
  unsigned long end ;
  struct task_struct *tmp___0 ;
  struct task_struct *tmp___1 ;
  int tmp___2 ;
  struct task_struct *tmp___3 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  ret = 0;
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] irq_nr=%d breadcrumb=%d\n", "i915_wait_irq", irq_nr, *((u32 volatile   *)dev_priv->hw_status_page + 5UL));
  } else {

  }
  if ((unsigned int )*((u32 volatile   *)dev_priv->hw_status_page + 5UL) >= (unsigned int )irq_nr) {
    if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
      (dev_priv->sarea_priv)->last_dispatch = (int )*((u32 volatile   *)dev_priv->hw_status_page + 5UL);
    } else {

    }
    return (0);
  } else {

  }
  if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->perf_boxes = (dev_priv->sarea_priv)->perf_boxes | 4;
  } else {

  }
  i915_user_irq_get(dev);
  tmp = get_current();
  entry.flags = 0U;
  entry.private = (void *)tmp;
  entry.func = & default_wake_function;
  entry.task_list.next = 0;
  entry.task_list.prev = 0;
  end = (unsigned long )jiffies + 750UL;
  add_wait_queue(& dev_priv->irq_queue, & entry);
  ldv_23414: 
  tmp___0 = get_current();
  tmp___0->state = 1L;
  if ((unsigned int )*((u32 volatile   *)dev_priv->hw_status_page + 5UL) >= (unsigned int )irq_nr) {
    goto ldv_23407;
  } else {

  }
  if ((1 != 0 && 1 != 0) && (long )jiffies - (long )end >= 0L) {
    ret = -16;
    goto ldv_23407;
  } else {

  }
  schedule_timeout(2L);
  tmp___1 = get_current();
  tmp___2 = signal_pending(tmp___1);
  if (tmp___2 != 0) {
    ret = -4;
    goto ldv_23407;
  } else {

  }
  goto ldv_23414;
  ldv_23407: 
  tmp___3 = get_current();
  tmp___3->state = 0L;
  remove_wait_queue(& dev_priv->irq_queue, & entry);
  i915_user_irq_put(dev);
  if (ret == -16) {
    printk("<3>[drm:%s] *ERROR* EBUSY -- rec: %d emitted: %d\n", "i915_wait_irq",
           *((u32 volatile   *)dev_priv->hw_status_page + 5UL), (int )dev_priv->counter);
  } else {

  }
  if ((unsigned long )dev_priv->sarea_priv != (unsigned long )((drm_i915_sarea_t *)0)) {
    (dev_priv->sarea_priv)->last_dispatch = (int )*((u32 volatile   *)dev_priv->hw_status_page + 5UL);
  } else {

  }
  return (ret);
}
}
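/* IRQ_EMIT ioctl. Unless GEM manages the ring, the caller must hold the
 * hardware lock (the hw_lock sign-bit/file_priv test is presumably the
 * expansion of LOCK_TEST_WITH_RETURN); a breadcrumb is emitted under
 * struct_mutex and the sequence number is copied back to emit->irq_seq
 * (-EFAULT on failure). */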
int i915_irq_emit(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_irq_emit_t *emit ;
  int result ;
  unsigned long tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  emit = (drm_i915_irq_emit_t *)data;
  if ((unsigned long )((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
    if ((int )(dev->lock.hw_lock)->lock >= 0 || (unsigned long )dev->lock.file_priv != (unsigned long )file_priv) {
      printk("<3>[drm:%s] *ERROR* %s called without lock held, held  %d owner %p %p\n",
             "i915_irq_emit", "i915_irq_emit", (unsigned int )(dev->lock.hw_lock)->lock & 2147483648U,
             dev->lock.file_priv, file_priv);
      return (-22);
    } else {

    }
  } else {

  }
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_irq_emit");
    return (-22);
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  result = i915_emit_irq(dev);
  mutex_unlock(& dev->struct_mutex);
  tmp = copy_to_user((void *)emit->irq_seq, (void const   *)(& result), 4U);
  if (tmp != 0UL) {
    printk("<3>[drm:%s] *ERROR* copy_to_user\n", "i915_irq_emit");
    return (-14);
  } else {

  }
  return (0);
}
}
int i915_irq_wait(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_irq_wait_t *irqwait ;
  int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  irqwait = (drm_i915_irq_wait_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_irq_wait");
    return (-22);
  } else {

  }
  tmp = i915_wait_irq(dev, irqwait->irq_seq);
  return (tmp);
}
}
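/* Vblank enable/disable: map the plane to its pipe, unmask or mask the pipe
 * event bit in IMR, and set or clear the vblank status-enable bits in
 * PIPE{A,B}STAT. 965-class devices (the pci_device list) use the "start
 * vblank" enable (0x40000), older parts the plain vblank enable (0x20000);
 * writing bits 1:2 back acks any latched vblank status. */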
int i915_enable_vblank(struct drm_device *dev , int plane ) 
{ 
  drm_i915_private_t *dev_priv ;
  int pipe ;
  int tmp ;
  u32 pipestat_reg ;
  u32 pipestat ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  tmp = i915_get_pipe(dev, plane);
  pipe = tmp;
  pipestat_reg = 0U;
  switch (pipe) {
  case 0: 
  pipestat_reg = 458788U;
  i915_enable_irq(dev_priv, 64U);
  goto ldv_23441;
  case 1: 
  pipestat_reg = 462884U;
  i915_enable_irq(dev_priv, 16U);
  goto ldv_23441;
  default: 
  printk("<3>[drm:%s] *ERROR* tried to enable vblank on non-existent pipe %d\n", "i915_enable_vblank",
         pipe);
  goto ldv_23441;
  }
  ldv_23441: ;
  if (pipestat_reg != 0U) {
    pipestat = readl((void const volatile   *)dev_priv->regs + (unsigned long )pipestat_reg);
    if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
      pipestat = pipestat | 262144U;
    } else {
      pipestat = pipestat | 131072U;
    }
    pipestat = pipestat | 6U;
    writel(pipestat, (void volatile   *)dev_priv->regs + (unsigned long )pipestat_reg);
  } else {

  }
  return (0);
}
}
void i915_disable_vblank(struct drm_device *dev , int plane ) 
{ 
  drm_i915_private_t *dev_priv ;
  int pipe ;
  int tmp ;
  u32 pipestat_reg ;
  u32 pipestat ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  tmp = i915_get_pipe(dev, plane);
  pipe = tmp;
  pipestat_reg = 0U;
  switch (pipe) {
  case 0: 
  pipestat_reg = 458788U;
  i915_disable_irq(dev_priv, 64U);
  goto ldv_23454;
  case 1: 
  pipestat_reg = 462884U;
  i915_disable_irq(dev_priv, 16U);
  goto ldv_23454;
  default: 
  printk("<3>[drm:%s] *ERROR* tried to disable vblank on non-existent pipe %d\n",
         "i915_disable_vblank", pipe);
  goto ldv_23454;
  }
  ldv_23454: ;
  if (pipestat_reg != 0U) {
    pipestat = readl((void const volatile   *)dev_priv->regs + (unsigned long )pipestat_reg);
    pipestat = pipestat & 4294574079U;
    pipestat = pipestat | 6U;
    writel(pipestat, (void volatile   *)dev_priv->regs + (unsigned long )pipestat_reg);
  } else {

  }
  return;
}
}
int i915_vblank_pipe_set(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_vblank_pipe_set");
    return (-22);
  } else {

  }
  return (0);
}
}
int i915_vblank_pipe_get(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_vblank_pipe_t *pipe ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  pipe = (drm_i915_vblank_pipe_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_vblank_pipe_get");
    return (-22);
  } else {

  }
  pipe->pipe = 3;
  return (0);
}
}
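/* VBLANK_SWAP ioctl: rejects rotated setups and unknown sequence-type flags,
 * maps the requested plane to a pipe, converts a relative sequence to an
 * absolute one, retargets to the next vblank when the target already passed
 * and the caller asked for that (flag 0x10000000, presumably NEXTONMISS),
 * refuses duplicates and more than 100 pending swaps, and otherwise queues a
 * drm_i915_vbl_swap_t for the tasklet above. */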
int i915_vblank_swap(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_vblank_swap_t *swap ;
  drm_i915_vbl_swap_t *vbl_swap ;
  unsigned int pipe ;
  unsigned int seqtype ;
  unsigned int curseq ;
  unsigned int plane ;
  struct list_head *list ;
  int ret ;
  int tmp ;
  struct drm_drawable_info *tmp___0 ;
  struct list_head  const  *__mptr ;
  void *tmp___1 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  swap = (drm_i915_vblank_swap_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0) || (unsigned long )dev_priv->sarea_priv == (unsigned long )((drm_i915_sarea_t *)0)) {
    printk("<3>[drm:%s] *ERROR* %s called with no initialization\n", "i915_vblank_swap",
           "i915_vblank_swap");
    return (-22);
  } else {

  }
  if ((dev_priv->sarea_priv)->rotation != 0) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] Rotation not supported\n", "i915_vblank_swap");
    } else {

    }
    return (-22);
  } else {

  }
  if (((unsigned int )swap->seqtype & 3489660926U) != 0U) {
    printk("<3>[drm:%s] *ERROR* Invalid sequence type 0x%x\n", "i915_vblank_swap",
           (unsigned int )swap->seqtype);
    return (-22);
  } else {

  }
  plane = ((unsigned int )swap->seqtype & 536870912U) != 0U;
  tmp = i915_get_pipe(dev, (int )plane);
  pipe = (unsigned int )tmp;
  seqtype = (unsigned int )swap->seqtype & 1U;
  if (((dev_priv->vblank_pipe >> (int )pipe) & 1) == 0) {
    printk("<3>[drm:%s] *ERROR* Invalid pipe %d\n", "i915_vblank_swap", pipe);
    return (-22);
  } else {

  }
  ldv_spin_lock();
  tmp___0 = drm_get_drawable_info(dev, swap->drawable);
  if ((unsigned long )tmp___0 == (unsigned long )((struct drm_drawable_info *)0)) {
    ldv_spin_unlock();
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] Invalid drawable ID %d\n", "i915_vblank_swap", swap->drawable);
    } else {

    }
    return (-22);
  } else {

  }
  ldv_spin_unlock();
  ret = drm_vblank_get(dev, (int )pipe);
  if (ret != 0) {
    return (ret);
  } else {

  }
  curseq = drm_vblank_count(dev, (int )pipe);
  if (seqtype == 1U) {
    swap->sequence = swap->sequence + curseq;
  } else {

  }
  if (curseq - swap->sequence <= 8388608U) {
    if (((unsigned int )swap->seqtype & 268435456U) != 0U) {
      swap->sequence = curseq + 1U;
    } else {
      if (drm_debug != 0U) {
        printk("<7>[drm:%s] Missed target sequence\n", "i915_vblank_swap");
      } else {

      }
      drm_vblank_put(dev, (int )pipe);
      return (-22);
    }
  } else {

  }
  ldv_spin_lock();
  list = dev_priv->vbl_swaps.head.next;
  goto ldv_23492;
  ldv_23491: 
  __mptr = (struct list_head  const  *)list;
  vbl_swap = (drm_i915_vbl_swap_t *)__mptr;
  if ((vbl_swap->drw_id == swap->drawable && vbl_swap->plane == plane) && vbl_swap->sequence == swap->sequence) {
    ldv_spin_unlock();
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] Already scheduled\n", "i915_vblank_swap");
    } else {

    }
    return (0);
  } else {

  }
  list = list->next;
  ldv_23492: 
  __builtin_prefetch((void const   *)list->next);
  if ((unsigned long )(& dev_priv->vbl_swaps.head) != (unsigned long )list) {
    goto ldv_23491;
  } else {

  }
  ldv_spin_unlock();
  if (dev_priv->swaps_pending > 99U) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] Too many swaps queued\n", "i915_vblank_swap");
    } else {

    }
    drm_vblank_put(dev, (int )pipe);
    return (-16);
  } else {

  }
  tmp___1 = drm_calloc(1UL, 32UL, 2);
  vbl_swap = (drm_i915_vbl_swap_t *)tmp___1;
  if ((unsigned long )vbl_swap == (unsigned long )((drm_i915_vbl_swap_t *)0)) {
    printk("<3>[drm:%s] *ERROR* Failed to allocate memory to queue swap\n", "i915_vblank_swap");
    drm_vblank_put(dev, (int )pipe);
    return (-12);
  } else {

  }
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] \n", "i915_vblank_swap");
  } else {

  }
  vbl_swap->drw_id = swap->drawable;
  vbl_swap->plane = plane;
  vbl_swap->sequence = swap->sequence;
  ldv_spin_lock();
  list_add_tail(& vbl_swap->head, & dev_priv->vbl_swaps.head);
  dev_priv->swaps_pending = dev_priv->swaps_pending + 1U;
  ldv_spin_unlock();
  return (0);
}
}
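/* IRQ (un)install hooks. preinstall masks everything (HWSTAM 0x2098 = 0xeffe,
 * IMR 0x20a8 = ~0, IER 0x20a0 = 0). postinstall initialises the swap lock and
 * list, registers two vblank pipes with the DRM core, writes the cached mask
 * to IMR and the fixed enable set (0x53) to IER; individual sources are
 * unmasked later via i915_enable_irq(). uninstall masks everything again and
 * acks any latched PIPE{A,B}STAT and IIR bits by writing them back. */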
void i915_driver_irq_preinstall(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  writel(61438U, (void volatile   *)dev_priv->regs + 8344U);
  writel(4294967295U, (void volatile   *)dev_priv->regs + 8360U);
  writel(0U, (void volatile   *)dev_priv->regs + 8352U);
  return;
}
}
int i915_driver_irq_postinstall(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  int ret ;
  int num_pipes ;
  struct lock_class_key __key ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  num_pipes = 2;
  __spin_lock_init(& dev_priv->swaps_lock, "&dev_priv->swaps_lock", & __key);
  INIT_LIST_HEAD(& dev_priv->vbl_swaps.head);
  dev_priv->swaps_pending = 0U;
  dev_priv->irq_mask_reg = 4294967295U;
  ret = drm_vblank_init(dev, num_pipes);
  if (ret != 0) {
    return (ret);
  } else {

  }
  dev_priv->vblank_pipe = 3;
  dev_priv->irq_mask_reg = dev_priv->irq_mask_reg & 4294967167U;
  dev_priv->irq_mask_reg = dev_priv->irq_mask_reg & 4294967263U;
  dev->max_vblank_count = 16777215U;
  dev_priv->irq_mask_reg = dev_priv->irq_mask_reg & 83U;
  writel(dev_priv->irq_mask_reg, (void volatile   *)dev_priv->regs + 8360U);
  writel(83U, (void volatile   *)dev_priv->regs + 8352U);
  readl((void const volatile   *)dev_priv->regs + 8352U);
  opregion_enable_asle(dev);
  init_waitqueue_head(& dev_priv->irq_queue);
  return (0);
}
}
void i915_driver_irq_uninstall(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  u32 temp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    return;
  } else {

  }
  dev_priv->vblank_pipe = 0;
  writel(4294967295U, (void volatile   *)dev_priv->regs + 8344U);
  writel(4294967295U, (void volatile   *)dev_priv->regs + 8360U);
  writel(0U, (void volatile   *)dev_priv->regs + 8352U);
  temp = readl((void const volatile   *)dev_priv->regs + 458788U);
  writel(temp, (void volatile   *)dev_priv->regs + 458788U);
  temp = readl((void const volatile   *)dev_priv->regs + 462884U);
  writel(temp, (void volatile   *)dev_priv->regs + 462884U);
  temp = readl((void const volatile   *)dev_priv->regs + 8356U);
  writel(temp, (void volatile   *)dev_priv->regs + 8356U);
  return;
}
}
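/* The ldv_* wrappers below are generated by the LDV toolchain: each checks the
 * gfp flags against the rule under verification (ldv_check_alloc_flags) and
 * then either forwards to the real allocator or stubs out its result. */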
unsigned long ldv___get_free_pages_34(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_36(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
__inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) 
{ 


  {
  ldv_check_alloc_flags(flags);
  ldv_kcalloc_38(n, size, flags);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_40(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_44(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
struct page *ldv_alloc_page_vma_60(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_50(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_52(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_56(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
__inline static void *kmalloc(size_t size , gfp_t flags ) ;
__inline static void *drm_alloc___0(size_t size , int area ) 
{ 
  void *tmp ;

  {
  tmp = kmalloc(size, 208U);
  return (tmp);
}
}
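/* Legacy (pre-GEM) AGP texture memory manager. mark_block() updates the
 * texture LRU kept in the shared SAREA: every granularity-sized region covered
 * by the block is stamped with the new texAge, unlinked, and reinserted just
 * after sentinel entry 255, i.e. at the most-recently-used position. */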
static void mark_block(struct drm_device *dev , struct mem_block *p , int in_use ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_sarea_t *sarea_priv ;
  struct drm_tex_region *list ;
  unsigned int shift ;
  unsigned int nr ;
  unsigned int start ;
  unsigned int end ;
  unsigned int i ;
  int age ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  sarea_priv = dev_priv->sarea_priv;
  shift = (unsigned int )dev_priv->tex_lru_log_granularity;
  nr = 255U;
  start = (unsigned int )(p->start >> (int )shift);
  end = (unsigned int )(((p->start + p->size) + -1) >> (int )shift);
  sarea_priv->texAge = sarea_priv->texAge + 1;
  age = sarea_priv->texAge;
  list = (struct drm_tex_region *)(& sarea_priv->texList);
  i = start;
  goto ldv_23257;
  ldv_23256: 
  (list + (unsigned long )i)->in_use = (unsigned char )in_use;
  (list + (unsigned long )i)->age = (unsigned int )age;
  (list + (unsigned long )(list + (unsigned long )i)->next)->prev = (list + (unsigned long )i)->prev;
  (list + (unsigned long )(list + (unsigned long )i)->prev)->next = (list + (unsigned long )i)->next;
  (list + (unsigned long )i)->prev = (unsigned char )nr;
  (list + (unsigned long )i)->next = (list + (unsigned long )nr)->next;
  (list + (unsigned long )(list + (unsigned long )nr)->next)->prev = (unsigned char )i;
  (list + (unsigned long )nr)->next = (unsigned char )i;
  i = i + 1U;
  ldv_23257: ;
  if (i <= end) {
    goto ldv_23256;
  } else {

  }

  return;
}
}
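/* split_block() carves [start, start+size) out of free block p, allocating new
 * mem_block nodes for any leading or trailing remainder, and tags the result
 * with file_priv; if either allocation fails it falls through to "out" and
 * hands back whatever block p currently denotes. alloc_block() below is a
 * first-fit scan of the circular list honouring the power-of-two alignment
 * (align2). */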
static struct mem_block *split_block(struct mem_block *p , int start , int size ,
                                     struct drm_file *file_priv ) 
{ 
  struct mem_block *newblock ;
  void *tmp ;
  struct mem_block *newblock___0 ;
  void *tmp___0 ;

  {
  if (p->start < start) {
    tmp = drm_alloc___0(32UL, 14);
    newblock = (struct mem_block *)tmp;
    if ((unsigned long )newblock == (unsigned long )((struct mem_block *)0)) {
      goto out;
    } else {

    }
    newblock->start = start;
    newblock->size = p->size + (p->start - start);
    newblock->file_priv = 0;
    newblock->next = p->next;
    newblock->prev = p;
    (p->next)->prev = newblock;
    p->next = newblock;
    p->size = p->size - newblock->size;
    p = newblock;
  } else {

  }
  if (p->size > size) {
    tmp___0 = drm_alloc___0(32UL, 14);
    newblock___0 = (struct mem_block *)tmp___0;
    if ((unsigned long )newblock___0 == (unsigned long )((struct mem_block *)0)) {
      goto out;
    } else {

    }
    newblock___0->start = start + size;
    newblock___0->size = p->size - size;
    newblock___0->file_priv = 0;
    newblock___0->next = p->next;
    newblock___0->prev = p;
    (p->next)->prev = newblock___0;
    p->next = newblock___0;
    p->size = size;
  } else {

  }
  out: 
  p->file_priv = file_priv;
  return (p);
}
}
static struct mem_block *alloc_block(struct mem_block *heap , int size , int align2 ,
                                     struct drm_file *file_priv ) 
{ 
  struct mem_block *p ;
  int mask ;
  int start ;
  struct mem_block *tmp ;

  {
  mask = (1 << align2) + -1;
  p = heap->next;
  goto ldv_23278;
  ldv_23277: 
  start = (p->start + mask) & ~ mask;
  if ((unsigned long )p->file_priv == (unsigned long )((struct drm_file *)0) && start + size <= p->start + p->size) {
    tmp = split_block(p, start, size, file_priv);
    return (tmp);
  } else {

  }
  p = p->next;
  ldv_23278: ;
  if ((unsigned long )p != (unsigned long )heap) {
    goto ldv_23277;
  } else {

  }

  return (0);
}
}
static struct mem_block *find_block(struct mem_block *heap , int start ) 
{ 
  struct mem_block *p ;

  {
  p = heap->next;
  goto ldv_23286;
  ldv_23285: ;
  if (p->start == start) {
    return (p);
  } else {

  }
  p = p->next;
  ldv_23286: ;
  if ((unsigned long )p != (unsigned long )heap) {
    goto ldv_23285;
  } else {

  }

  return (0);
}
}
static void free_block(struct mem_block *p ) 
{ 
  struct mem_block *q ;
  struct mem_block *q___0 ;

  {
  p->file_priv = 0;
  if ((unsigned long )(p->next)->file_priv == (unsigned long )((struct drm_file *)0)) {
    q = p->next;
    p->size = p->size + q->size;
    p->next = q->next;
    (p->next)->prev = p;
    drm_free((void *)q, 32UL, 14);
  } else {

  }
  if ((unsigned long )(p->prev)->file_priv == (unsigned long )((struct drm_file *)0)) {
    q___0 = p->prev;
    q___0->size = q___0->size + p->size;
    q___0->next = p->next;
    (q___0->next)->prev = q___0;
    drm_free((void *)p, 32UL, 14);
  } else {

  }
  return;
}
}
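/* init_heap() builds the circular block list: a zeroed sentinel node (*heap)
 * whose file_priv is set to -1 so it never looks free, plus one free block
 * covering [start, start+size). free_block() above clears the owner and
 * coalesces with free neighbours. */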
static int init_heap(struct mem_block **heap , int start , int size ) 
{ 
  struct mem_block *blocks ;
  void *tmp ;
  void *tmp___0 ;
  struct mem_block *tmp___1 ;
  struct mem_block *tmp___2 ;

  {
  tmp = drm_alloc___0(32UL, 14);
  blocks = (struct mem_block *)tmp;
  if ((unsigned long )blocks == (unsigned long )((struct mem_block *)0)) {
    return (-12);
  } else {

  }
  tmp___0 = drm_alloc___0(32UL, 14);
  *heap = (struct mem_block *)tmp___0;
  if ((unsigned long )*heap == (unsigned long )((struct mem_block *)0)) {
    drm_free((void *)blocks, 32UL, 14);
    return (-12);
  } else {

  }
  blocks->start = start;
  blocks->size = size;
  blocks->file_priv = 0;
  tmp___1 = *heap;
  blocks->prev = tmp___1;
  blocks->next = tmp___1;
  memset((void *)*heap, 0, 32UL);
  (*heap)->file_priv = 0xffffffffffffffffUL;
  tmp___2 = blocks;
  (*heap)->prev = tmp___2;
  (*heap)->next = tmp___2;
  return (0);
}
}
void i915_mem_release(struct drm_device *dev , struct drm_file *file_priv , struct mem_block *heap ) 
{ 
  struct mem_block *p ;
  struct mem_block *q ;

  {
  if ((unsigned long )heap == (unsigned long )((struct mem_block *)0) || (unsigned long )heap->next == (unsigned long )((struct mem_block *)0)) {
    return;
  } else {

  }
  p = heap->next;
  goto ldv_23306;
  ldv_23305: ;
  if ((unsigned long )p->file_priv == (unsigned long )file_priv) {
    p->file_priv = 0;
    mark_block(dev, p, 0);
  } else {

  }
  p = p->next;
  ldv_23306: ;
  if ((unsigned long )p != (unsigned long )heap) {
    goto ldv_23305;
  } else {

  }
  p = heap->next;
  goto ldv_23313;
  ldv_23312: ;
  goto ldv_23310;
  ldv_23309: 
  q = p->next;
  p->size = p->size + q->size;
  p->next = q->next;
  (p->next)->prev = p;
  drm_free((void *)q, 32UL, 14);
  ldv_23310: ;
  if ((unsigned long )p->file_priv == (unsigned long )((struct drm_file *)0) && (unsigned long )(p->next)->file_priv == (unsigned long )((struct drm_file *)0)) {
    goto ldv_23309;
  } else {

  }
  p = p->next;
  ldv_23313: ;
  if ((unsigned long )p != (unsigned long )heap) {
    goto ldv_23312;
  } else {

  }

  return;
}
}
void i915_mem_takedown(struct mem_block **heap ) 
{ 
  struct mem_block *p ;
  struct mem_block *q ;

  {
  if ((unsigned long )*heap == (unsigned long )((struct mem_block *)0)) {
    return;
  } else {

  }
  p = (*heap)->next;
  goto ldv_23321;
  ldv_23320: 
  q = p;
  p = p->next;
  drm_free((void *)q, 32UL, 14);
  ldv_23321: ;
  if ((unsigned long )*heap != (unsigned long )p) {
    goto ldv_23320;
  } else {

  }
  drm_free((void *)*heap, 32UL, 14);
  *heap = 0;
  return;
}
}
static struct mem_block **get_heap(drm_i915_private_t *dev_priv , int region ) 
{ 


  {
  switch (region) {
  case 1: ;
  return (& dev_priv->agp_heap);
  default: ;
  return (0);
  }
}
}
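/* Memory-manager ioctls. alloc enforces a minimum alignment of 2^12, marks the
 * block in use in the texture LRU and copies its start offset to user space
 * (-EFAULT on copy failure); free only succeeds for the owning file_priv;
 * init_heap/destroy_heap manage the single AGP heap returned by get_heap(). */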
int i915_mem_alloc(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_mem_alloc_t *alloc ;
  struct mem_block *block ;
  struct mem_block **heap ;
  unsigned long tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  alloc = (drm_i915_mem_alloc_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_mem_alloc");
    return (-22);
  } else {

  }
  heap = get_heap(dev_priv, alloc->region);
  if ((unsigned long )heap == (unsigned long )((struct mem_block **)0) || (unsigned long )*heap == (unsigned long )((struct mem_block *)0)) {
    return (-14);
  } else {

  }
  if (alloc->alignment <= 11) {
    alloc->alignment = 12;
  } else {

  }
  block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
  if ((unsigned long )block == (unsigned long )((struct mem_block *)0)) {
    return (-12);
  } else {

  }
  mark_block(dev, block, 1);
  tmp = copy_to_user((void *)alloc->region_offset, (void const   *)(& block->start),
                     4U);
  if (tmp != 0UL) {
    printk("<3>[drm:%s] *ERROR* copy_to_user\n", "i915_mem_alloc");
    return (-14);
  } else {

  }
  return (0);
}
}
int i915_mem_free(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_mem_free_t *memfree ;
  struct mem_block *block ;
  struct mem_block **heap ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  memfree = (drm_i915_mem_free_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_mem_free");
    return (-22);
  } else {

  }
  heap = get_heap(dev_priv, memfree->region);
  if ((unsigned long )heap == (unsigned long )((struct mem_block **)0) || (unsigned long )*heap == (unsigned long )((struct mem_block *)0)) {
    return (-14);
  } else {

  }
  block = find_block(*heap, memfree->region_offset);
  if ((unsigned long )block == (unsigned long )((struct mem_block *)0)) {
    return (-14);
  } else {

  }
  if ((unsigned long )block->file_priv != (unsigned long )file_priv) {
    return (-1);
  } else {

  }
  mark_block(dev, block, 0);
  free_block(block);
  return (0);
}
}
int i915_mem_init_heap(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_mem_init_heap_t *initheap ;
  struct mem_block **heap ;
  int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  initheap = (drm_i915_mem_init_heap_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_mem_init_heap");
    return (-22);
  } else {

  }
  heap = get_heap(dev_priv, initheap->region);
  if ((unsigned long )heap == (unsigned long )((struct mem_block **)0)) {
    return (-14);
  } else {

  }
  if ((unsigned long )*heap != (unsigned long )((struct mem_block *)0)) {
    printk("<3>[drm:%s] *ERROR* heap already initialized?", "i915_mem_init_heap");
    return (-14);
  } else {

  }
  tmp = init_heap(heap, initheap->start, initheap->size);
  return (tmp);
}
}
int i915_mem_destroy_heap(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  drm_i915_mem_destroy_heap_t *destroyheap ;
  struct mem_block **heap ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  destroyheap = (drm_i915_mem_destroy_heap_t *)data;
  if ((unsigned long )dev_priv == (unsigned long )((drm_i915_private_t *)0)) {
    printk("<3>[drm:%s] *ERROR* called with no initialization\n", "i915_mem_destroy_heap");
    return (-22);
  } else {

  }
  heap = get_heap(dev_priv, destroyheap->region);
  if ((unsigned long )heap == (unsigned long )((struct mem_block **)0)) {
    printk("<3>[drm:%s] *ERROR* get_heap failed", "i915_mem_destroy_heap");
    return (-14);
  } else {

  }
  if ((unsigned long )*heap == (unsigned long )((struct mem_block *)0)) {
    printk("<3>[drm:%s] *ERROR* heap not initialized?", "i915_mem_destroy_heap");
    return (-14);
  } else {

  }
  i915_mem_takedown(heap);
  return (0);
}
}
unsigned long ldv___get_free_pages_50(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_52(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_56(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_60(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
extern int memcmp(void const   * , void const   * , size_t  ) ;
struct page *ldv_alloc_page_vma_76(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_66(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_68(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_72(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
extern int register_acpi_notifier(struct notifier_block * ) ;
extern int unregister_acpi_notifier(struct notifier_block * ) ;
extern int pci_bus_read_config_dword(struct pci_bus * , unsigned int  , int  , u32 * ) ;
extern int pci_bus_write_config_dword(struct pci_bus * , unsigned int  , int  , u32  ) ;
__inline static int pci_read_config_dword(struct pci_dev *dev , int where , u32 *val ) 
{ 
  int tmp ;

  {
  tmp = pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
  return (tmp);
}
}
__inline static int pci_write_config_dword(struct pci_dev *dev , int where , u32 val ) 
{ 
  int tmp ;

  {
  tmp = pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
  return (tmp);
}
}
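/* ACPI OpRegion ASLE backlight request. bclp must have its valid bit (bit 31)
 * set and a duty cycle of at most 255; otherwise 0x2000 (a failure status) is
 * returned. If BLC_PWM_CTL2 (0x61250) reports combination-mode control the
 * value is written to PCI config offset 0xf4 (the legacy backlight register),
 * otherwise it is scaled into the duty-cycle field of BLC_PWM_CTL (0x61254).
 * The resulting level is reported back in asle->cblv as a percentage with the
 * valid bit set. */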
static u32 asle_set_backlight(struct drm_device *dev , u32 bclp ) 
{ 
  struct drm_i915_private *dev_priv ;
  struct opregion_asle *asle ;
  u32 blc_pwm_ctl ;
  u32 blc_pwm_ctl2 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  asle = dev_priv->opregion.asle;
  if ((int )bclp >= 0) {
    return (8192U);
  } else {

  }
  bclp = bclp & 2147483647U;
  if (bclp > 255U) {
    return (8192U);
  } else {

  }
  blc_pwm_ctl = readl((void const volatile   *)dev_priv->regs + 397908U);
  blc_pwm_ctl = blc_pwm_ctl & 4294901760U;
  blc_pwm_ctl2 = readl((void const volatile   *)dev_priv->regs + 397904U);
  if ((blc_pwm_ctl2 & 1073741824U) != 0U) {
    pci_write_config_dword(dev->pdev, 244, bclp);
  } else {
    writel((bclp * 257U - 1U) | blc_pwm_ctl, (void volatile   *)dev_priv->regs + 397908U);
  }
  asle->cblv = (bclp * 100U) / 255U | 2147483648U;
  return (0U);
}
}
static u32 asle_set_als_illum(struct drm_device *dev , u32 alsi ) 
{ 


  {
  return (0U);
}
}
static u32 asle_set_pwm_freq(struct drm_device *dev , u32 pfmb ) 
{ 
  struct drm_i915_private *dev_priv ;
  u32 blc_pwm_ctl ;
  unsigned int tmp ;
  u32 pwm ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  if ((int )pfmb < 0) {
    tmp = readl((void const volatile   *)dev_priv->regs + 397908U);
    blc_pwm_ctl = tmp;
    pwm = pfmb & 2147483136U;
    blc_pwm_ctl = blc_pwm_ctl & 65535U;
    pwm = pwm >> 9;
  } else {

  }
  return (0U);
}
}
static u32 asle_set_pfit(struct drm_device *dev , u32 pfit ) 
{ 


  {
  if ((int )pfit >= 0) {
    return (32768U);
  } else {

  }
  return (0U);
}
}
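/* ASLE interrupt service: asle->aslc carries the request bits (bit 0 ALS
 * illuminance, bit 1 backlight, bit 2 panel fitting, bit 3 PWM frequency);
 * each requested handler runs and the accumulated status is written back to
 * aslc. */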
void opregion_asle_intr(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  struct opregion_asle *asle ;
  u32 asle_stat ;
  u32 asle_req ;
  u32 tmp ;
  u32 tmp___0 ;
  u32 tmp___1 ;
  u32 tmp___2 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  asle = dev_priv->opregion.asle;
  asle_stat = 0U;
  if ((unsigned long )asle == (unsigned long )((struct opregion_asle *)0)) {
    return;
  } else {

  }
  asle_req = asle->aslc & 15U;
  if (asle_req == 0U) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] non asle set request??\n", "opregion_asle_intr");
    } else {

    }
    return;
  } else {

  }
  if ((int )asle_req & 1) {
    tmp = asle_set_als_illum(dev, asle->alsi);
    asle_stat = tmp | asle_stat;
  } else {

  }
  if ((asle_req & 2U) != 0U) {
    tmp___0 = asle_set_backlight(dev, asle->bclp);
    asle_stat = tmp___0 | asle_stat;
  } else {

  }
  if ((asle_req & 4U) != 0U) {
    tmp___1 = asle_set_pfit(dev, asle->pfit);
    asle_stat = tmp___1 | asle_stat;
  } else {

  }
  if ((asle_req & 8U) != 0U) {
    tmp___2 = asle_set_pwm_freq(dev, asle->pfmb);
    asle_stat = tmp___2 | asle_stat;
  } else {

  }
  asle->aslc = asle_stat;
  return;
}
}
void opregion_enable_asle(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  struct opregion_asle *asle ;
  u32 pipeb_stats ;
  unsigned int tmp ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  asle = dev_priv->opregion.asle;
  if ((unsigned long )asle != (unsigned long )((struct opregion_asle *)0)) {
    tmp = readl((void const volatile   *)dev_priv->regs + 462884U);
    pipeb_stats = tmp;
    if (((((dev->pci_device == 13687 || dev->pci_device == 13698) || dev->pci_device == 9618) || (dev->pci_device == 10146 || dev->pci_device == 10158)) || dev->pci_device == 10754) || dev->pci_device == 10818) {
      pipeb_stats = pipeb_stats | 4194304U;
      writel(pipeb_stats, (void volatile   *)dev_priv->regs + 462884U);
      i915_enable_irq(dev_priv, 17U);
    } else {
      i915_enable_irq(dev_priv, 1U);
    }
    asle->tche = 15U;
    asle->ardy = 1U;
  } else {

  }
  return;
}
}
static struct intel_opregion *system_opregion  ;
int intel_opregion_video_event(struct notifier_block *nb , unsigned long val , void *data ) 
{ 
  struct opregion_acpi *acpi ;

  {
  if ((unsigned long )system_opregion == (unsigned long )((struct intel_opregion *)0)) {
    return (0);
  } else {

  }
  acpi = system_opregion->acpi;
  acpi->csts = 0U;
  return (1);
}
}
static struct notifier_block intel_opregion_notifier  =    {& intel_opregion_video_event, 0, 0};
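/* OpRegion discovery. The region's physical address is read from the ASLS
 * dword at PCI config offset 0xfc, 8 KiB are ioremap()ed, and the
 * "IntelGraphicsMem" signature is verified. The acpi, swsci and asle mailbox
 * pointers are then wired up according to the header's mboxes field, drdy is
 * asserted, and an ACPI notifier is registered so video events can clear
 * acpi->csts. Failure paths unmap the region and return -EINVAL or -ENOTSUPP
 * (-524). */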
int intel_opregion_init(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  struct intel_opregion *opregion ;
  void *base ;
  u32 asls ;
  u32 mboxes ;
  int err ;
  int tmp ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  opregion = & dev_priv->opregion;
  err = 0;
  pci_read_config_dword(dev->pdev, 252, & asls);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] graphic opregion physical addr: 0x%x\n", "intel_opregion_init",
           asls);
  } else {

  }
  if (asls == 0U) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] ACPI OpRegion not supported!\n", "intel_opregion_init");
    } else {

    }
    return (-524);
  } else {

  }
  base = ioremap((resource_size_t )asls, 8192UL);
  if ((unsigned long )base == (unsigned long )((void *)0)) {
    return (-12);
  } else {

  }
  opregion->header = (struct opregion_header *)base;
  tmp = memcmp((void const   *)(& (opregion->header)->signature), (void const   *)"IntelGraphicsMem",
               16UL);
  if (tmp != 0) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] opregion signature mismatch\n", "intel_opregion_init");
    } else {

    }
    err = -22;
    goto err_out;
  } else {

  }
  mboxes = (opregion->header)->mboxes;
  if ((int )mboxes & 1) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] Public ACPI methods supported\n", "intel_opregion_init");
    } else {

    }
    opregion->acpi = (struct opregion_acpi *)base + 256U;
  } else {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] Public ACPI methods not supported\n", "intel_opregion_init");
    } else {

    }
    err = -524;
    goto err_out;
  }
  opregion->enabled = 1;
  if ((mboxes & 2U) != 0U) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] SWSCI supported\n", "intel_opregion_init");
    } else {

    }
    opregion->swsci = (struct opregion_swsci *)base + 512U;
  } else {

  }
  if ((mboxes & 4U) != 0U) {
    if (drm_debug != 0U) {
      printk("<7>[drm:%s] ASLE supported\n", "intel_opregion_init");
    } else {

    }
    opregion->asle = (struct opregion_asle *)base + 768U;
  } else {

  }
  (opregion->acpi)->csts = 0U;
  (opregion->acpi)->drdy = 1U;
  system_opregion = opregion;
  register_acpi_notifier(& intel_opregion_notifier);
  return (0);
  err_out: 
  iounmap((void volatile   *)opregion->header);
  opregion->header = 0;
  return (err);
}
}
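/* Teardown counterpart of intel_opregion_init(): clears drdy, drops the
 * global system_opregion pointer, unregisters the ACPI notifier and
 * unmaps the OpRegion header. */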
void intel_opregion_free(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  struct intel_opregion *opregion ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  opregion = & dev_priv->opregion;
  if (opregion->enabled == 0) {
    return;
  } else {

  }
  (opregion->acpi)->drdy = 0U;
  system_opregion = 0;
  unregister_acpi_notifier(& intel_opregion_notifier);
  iounmap((void volatile   *)opregion->header);
  opregion->header = 0;
  opregion->acpi = 0;
  opregion->swsci = 0;
  opregion->asle = 0;
  opregion->enabled = 0;
  return;
}
}
void ldv_main4_sequence_infinite_withcheck_stateful(void) 
{ 
  struct notifier_block *var_group1 ;
  unsigned long var_intel_opregion_video_event_6_p1 ;
  void *var_intel_opregion_video_event_6_p2 ;
  int tmp ;
  int tmp___0 ;

  {
  LDV_IN_INTERRUPT = 1;
  ldv_initialize();
  goto ldv_26737;
  ldv_26736: 
  tmp = nondet_int();
  switch (tmp) {
  case 0: 
  ldv_handler_precall();
  intel_opregion_video_event(var_group1, var_intel_opregion_video_event_6_p1, var_intel_opregion_video_event_6_p2);
  goto ldv_26734;
  default: ;
  goto ldv_26734;
  }
  ldv_26734: ;
  ldv_26737: 
  tmp___0 = nondet_int();
  if (tmp___0 != 0) {
    goto ldv_26736;
  } else {

  }


  ldv_check_final_state();
  return;
}
}
unsigned long ldv___get_free_pages_66(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_68(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_72(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_76(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
extern void __const_udelay(unsigned long  ) ;
struct page *ldv_alloc_page_vma_92(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_82(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_84(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_88(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
extern int pci_bus_read_config_byte(struct pci_bus * , unsigned int  , int  , u8 * ) ;
extern int pci_bus_write_config_byte(struct pci_bus * , unsigned int  , int  , u8  ) ;
__inline static int pci_read_config_byte(struct pci_dev *dev , int where , u8 *val ) 
{ 
  int tmp ;

  {
  tmp = pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
  return (tmp);
}
}
__inline static int pci_write_config_byte(struct pci_dev *dev , int where , u8 val ) 
{ 
  int tmp ;

  {
  tmp = pci_bus_write_config_byte(dev->bus, dev->devfn, where, (int )val);
  return (tmp);
}
}
__inline static unsigned char readb(void const volatile   *addr ) 
{ 
  unsigned char ret ;

  {
  __asm__  volatile   ("movb %1,%0": "=q" (ret): "m" (*((unsigned char volatile   *)addr)): "memory");
  return (ret);
}
}
__inline static void writeb(unsigned char val , void volatile   *addr ) 
{ 


  {
  __asm__  volatile   ("movb %0,%1": : "q" (val), "m" (*((unsigned char volatile   *)addr)): "memory");
  return;
}
}
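/* Reports whether a pipe is enabled by testing bit 31 (the enable bit)
 * of the per-pipe PLL control register at MMIO offset 0x6014 (pipe A)
 * or 0x6018 (pipe B). */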
static bool i915_pipe_enabled___0(struct drm_device *dev , enum pipe pipe ) 
{ 
  struct drm_i915_private *dev_priv ;
  unsigned int tmp ;
  unsigned int tmp___0 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  if ((unsigned int )pipe == 0U) {
    tmp = readl((void const volatile   *)dev_priv->regs + 24596U);
    return ((tmp & 2147483648U) != 0U);
  } else {
    tmp___0 = readl((void const volatile   *)dev_priv->regs + 24600U);
    return ((tmp___0 & 2147483648U) != 0U);
  }
}
}
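/* Saves the 256-entry palette of the given pipe into
 * dev_priv->save_palette_a or save_palette_b, but only if
 * i915_pipe_enabled___0() reports the pipe as active;
 * i915_restore_palette() below is the mirror write-back path. */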
static void i915_save_palette(struct drm_device *dev , enum pipe pipe ) 
{ 
  struct drm_i915_private *dev_priv ;
  unsigned long reg ;
  u32 *array ;
  int i ;
  bool tmp ;
  int tmp___0 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  reg = (unsigned int )pipe == 0U ? 40960UL : 43008UL;
  tmp = i915_pipe_enabled___0(dev, pipe);
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  if (tmp___0) {
    return;
  } else {

  }
  if ((unsigned int )pipe == 0U) {
    array = (u32 *)(& dev_priv->save_palette_a);
  } else {
    array = (u32 *)(& dev_priv->save_palette_b);
  }
  i = 0;
  goto ldv_23256;
  ldv_23255: 
  *(array + (unsigned long )i) = readl((void const volatile   *)(dev_priv->regs + ((unsigned long )(i << 2) + reg)));
  i = i + 1;
  ldv_23256: ;
  if (i <= 255) {
    goto ldv_23255;
  } else {

  }

  return;
}
}
static void i915_restore_palette(struct drm_device *dev , enum pipe pipe ) 
{ 
  struct drm_i915_private *dev_priv ;
  unsigned long reg ;
  u32 *array ;
  int i ;
  bool tmp ;
  int tmp___0 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  reg = (unsigned int )pipe == 0U ? 40960UL : 43008UL;
  tmp = i915_pipe_enabled___0(dev, pipe);
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  if (tmp___0) {
    return;
  } else {

  }
  if ((unsigned int )pipe == 0U) {
    array = (u32 *)(& dev_priv->save_palette_a);
  } else {
    array = (u32 *)(& dev_priv->save_palette_b);
  }
  i = 0;
  goto ldv_23267;
  ldv_23266: 
  writel(*(array + (unsigned long )i), (void volatile   *)(dev_priv->regs + ((unsigned long )(i << 2) + reg)));
  i = i + 1;
  ldv_23267: ;
  if (i <= 255) {
    goto ldv_23266;
  } else {

  }

  return;
}
}
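/* Legacy VGA access helpers: i915_read_indexed()/i915_write_indexed()
 * go through an index/data port pair in the MMIO VGA window, while
 * i915_read_ar()/i915_write_ar() first read ST01 to reset the
 * attribute controller's index/data flip-flop before touching port
 * 0x3C0. */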
static u8 i915_read_indexed(struct drm_device *dev , u16 index_port , u16 data_port ,
                            u8 reg ) 
{ 
  struct drm_i915_private *dev_priv ;
  unsigned char tmp ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  writeb((int )reg, (void volatile   *)dev_priv->regs + (unsigned long )index_port);
  tmp = readb((void const volatile   *)dev_priv->regs + (unsigned long )data_port);
  return (tmp);
}
}
static u8 i915_read_ar(struct drm_device *dev , u16 st01 , u8 reg , u16 palette_enable ) 
{ 
  struct drm_i915_private *dev_priv ;
  unsigned char tmp ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  writeb((int )((unsigned char )palette_enable) | (int )reg, (void volatile   *)dev_priv->regs + 960U);
  tmp = readb((void const volatile   *)dev_priv->regs + 961U);
  return (tmp);
}
}
static void i915_write_ar(struct drm_device *dev , u16 st01 , u8 reg , u8 val , u16 palette_enable ) 
{ 
  struct drm_i915_private *dev_priv ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  writeb((int )((unsigned char )palette_enable) | (int )reg, (void volatile   *)dev_priv->regs + 960U);
  writeb((int )val, (void volatile   *)dev_priv->regs + 960U);
  return;
}
}
static void i915_write_indexed(struct drm_device *dev , u16 index_port , u16 data_port ,
                               u8 reg , u8 val ) 
{ 
  struct drm_i915_private *dev_priv ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  writeb((int )reg, (void volatile   *)dev_priv->regs + (unsigned long )index_port);
  writeb((int )val, (void volatile   *)dev_priv->regs + (unsigned long )data_port);
  return;
}
}
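/* Saves the legacy VGA state: DAC mask plus 768 palette bytes, the MSR,
 * the CRTC registers (after clearing the CR11 write-protect bit), and
 * the attribute, graphics and sequencer registers. i915_restore_vga()
 * below replays the same state in the opposite direction. */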
static void i915_save_vga(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  int i ;
  u16 cr_index ;
  u16 cr_data ;
  u16 st01 ;
  u8 tmp ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  dev_priv->saveDACMASK = readb((void const volatile   *)dev_priv->regs + 966U);
  writeb(0, (void volatile   *)dev_priv->regs + 967U);
  i = 0;
  goto ldv_23308;
  ldv_23307: 
  dev_priv->saveDACDATA[i] = readb((void const volatile   *)dev_priv->regs + 969U);
  i = i + 1;
  ldv_23308: ;
  if (i <= 767) {
    goto ldv_23307;
  } else {

  }
  dev_priv->saveMSR = readb((void const volatile   *)dev_priv->regs + 972U);
  if ((int )dev_priv->saveMSR & 1) {
    cr_index = 980U;
    cr_data = 981U;
    st01 = 986U;
  } else {
    cr_index = 948U;
    cr_data = 949U;
    st01 = 954U;
  }
  tmp = i915_read_indexed(dev, (int )cr_index, (int )cr_data, 17);
  i915_write_indexed(dev, (int )cr_index, (int )cr_data, 17, (int )tmp & 127);
  i = 0;
  goto ldv_23311;
  ldv_23310: 
  dev_priv->saveCR[i] = i915_read_indexed(dev, (int )cr_index, (int )cr_data, (int )((u8 )i));
  i = i + 1;
  ldv_23311: ;
  if (i <= 36) {
    goto ldv_23310;
  } else {

  }
  dev_priv->saveCR[17] = (unsigned int )dev_priv->saveCR[17] & 127U;
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  dev_priv->saveAR_INDEX = readb((void const volatile   *)dev_priv->regs + 960U);
  i = 0;
  goto ldv_23314;
  ldv_23313: 
  dev_priv->saveAR[i] = i915_read_ar(dev, (int )st01, (int )((u8 )i), 0);
  i = i + 1;
  ldv_23314: ;
  if (i <= 20) {
    goto ldv_23313;
  } else {

  }
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  writeb((int )dev_priv->saveAR_INDEX, (void volatile   *)dev_priv->regs + 960U);
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  i = 0;
  goto ldv_23317;
  ldv_23316: 
  dev_priv->saveGR[i] = i915_read_indexed(dev, 974, 975, (int )((u8 )i));
  i = i + 1;
  ldv_23317: ;
  if (i <= 8) {
    goto ldv_23316;
  } else {

  }
  dev_priv->saveGR[16] = i915_read_indexed(dev, 974, 975, 16);
  dev_priv->saveGR[17] = i915_read_indexed(dev, 974, 975, 17);
  dev_priv->saveGR[24] = i915_read_indexed(dev, 974, 975, 24);
  i = 0;
  goto ldv_23320;
  ldv_23319: 
  dev_priv->saveSR[i] = i915_read_indexed(dev, 964, 965, (int )((u8 )i));
  i = i + 1;
  ldv_23320: ;
  if (i <= 7) {
    goto ldv_23319;
  } else {

  }

  return;
}
}
static void i915_restore_vga(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  int i ;
  u16 cr_index ;
  u16 cr_data ;
  u16 st01 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  writeb((int )dev_priv->saveMSR, (void volatile   *)dev_priv->regs + 962U);
  if ((int )dev_priv->saveMSR & 1) {
    cr_index = 980U;
    cr_data = 981U;
    st01 = 986U;
  } else {
    cr_index = 948U;
    cr_data = 949U;
    st01 = 954U;
  }
  i = 0;
  goto ldv_23331;
  ldv_23330: 
  i915_write_indexed(dev, 964, 965, (int )((u8 )i), (int )dev_priv->saveSR[i]);
  i = i + 1;
  ldv_23331: ;
  if (i <= 6) {
    goto ldv_23330;
  } else {

  }
  i915_write_indexed(dev, (int )cr_index, (int )cr_data, 17, (int )dev_priv->saveCR[17]);
  i = 0;
  goto ldv_23334;
  ldv_23333: 
  i915_write_indexed(dev, (int )cr_index, (int )cr_data, (int )((u8 )i), (int )dev_priv->saveCR[i]);
  i = i + 1;
  ldv_23334: ;
  if (i <= 36) {
    goto ldv_23333;
  } else {

  }
  i = 0;
  goto ldv_23337;
  ldv_23336: 
  i915_write_indexed(dev, 974, 975, (int )((u8 )i), (int )dev_priv->saveGR[i]);
  i = i + 1;
  ldv_23337: ;
  if (i <= 8) {
    goto ldv_23336;
  } else {

  }
  i915_write_indexed(dev, 974, 975, 16, (int )dev_priv->saveGR[16]);
  i915_write_indexed(dev, 974, 975, 17, (int )dev_priv->saveGR[17]);
  i915_write_indexed(dev, 974, 975, 24, (int )dev_priv->saveGR[24]);
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  i = 0;
  goto ldv_23340;
  ldv_23339: 
  i915_write_ar(dev, (int )st01, (int )((u8 )i), (int )dev_priv->saveAR[i], 0);
  i = i + 1;
  ldv_23340: ;
  if (i <= 20) {
    goto ldv_23339;
  } else {

  }
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  writeb((int )((unsigned int )dev_priv->saveAR_INDEX | 32U), (void volatile   *)dev_priv->regs + 960U);
  readb((void const volatile   *)dev_priv->regs + (unsigned long )st01);
  writeb((int )dev_priv->saveDACMASK, (void volatile   *)dev_priv->regs + 966U);
  writeb(0, (void volatile   *)dev_priv->regs + 968U);
  i = 0;
  goto ldv_23343;
  ldv_23342: 
  writeb((int )dev_priv->saveDACDATA[i], (void volatile   *)dev_priv->regs + 969U);
  i = i + 1;
  ldv_23343: ;
  if (i <= 767) {
    goto ldv_23342;
  } else {

  }

  return;
}
}
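/* Suspend-time register save: captures display arbitration, pipe A/B
 * timing, PLL and plane registers, panel power / backlight / LVDS
 * state (some of it gated on specific PCI device IDs), FBC, interrupt,
 * VGA PLL, clock-gating and scratch (SWF) registers, and finally the
 * legacy VGA core via i915_save_vga(). */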
int i915_save_state(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  int i ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  pci_read_config_byte(dev->pdev, 244, & dev_priv->saveLBB);
  dev_priv->saveDSPARB = readl((void const volatile   *)dev_priv->regs + 458800U);
  dev_priv->savePIPEACONF = readl((void const volatile   *)dev_priv->regs + 458760U);
  dev_priv->savePIPEASRC = readl((void const volatile   *)dev_priv->regs + 393244U);
  dev_priv->saveFPA0 = readl((void const volatile   *)dev_priv->regs + 24640U);
  dev_priv->saveFPA1 = readl((void const volatile   *)dev_priv->regs + 24644U);
  dev_priv->saveDPLL_A = readl((void const volatile   *)dev_priv->regs + 24596U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    dev_priv->saveDPLL_A_MD = readl((void const volatile   *)dev_priv->regs + 24604U);
  } else {

  }
  dev_priv->saveHTOTAL_A = readl((void const volatile   *)dev_priv->regs + 393216U);
  dev_priv->saveHBLANK_A = readl((void const volatile   *)dev_priv->regs + 393220U);
  dev_priv->saveHSYNC_A = readl((void const volatile   *)dev_priv->regs + 393224U);
  dev_priv->saveVTOTAL_A = readl((void const volatile   *)dev_priv->regs + 393228U);
  dev_priv->saveVBLANK_A = readl((void const volatile   *)dev_priv->regs + 393232U);
  dev_priv->saveVSYNC_A = readl((void const volatile   *)dev_priv->regs + 393236U);
  dev_priv->saveBCLRPAT_A = readl((void const volatile   *)dev_priv->regs + 393248U);
  dev_priv->saveDSPACNTR = readl((void const volatile   *)dev_priv->regs + 459136U);
  dev_priv->saveDSPASTRIDE = readl((void const volatile   *)dev_priv->regs + 459144U);
  dev_priv->saveDSPASIZE = readl((void const volatile   *)dev_priv->regs + 459152U);
  dev_priv->saveDSPAPOS = readl((void const volatile   *)dev_priv->regs + 459148U);
  dev_priv->saveDSPAADDR = readl((void const volatile   *)dev_priv->regs + 459140U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    dev_priv->saveDSPASURF = readl((void const volatile   *)dev_priv->regs + 459164U);
    dev_priv->saveDSPATILEOFF = readl((void const volatile   *)dev_priv->regs + 459172U);
  } else {

  }
  i915_save_palette(dev, PIPE_A);
  dev_priv->savePIPEASTAT = readl((void const volatile   *)dev_priv->regs + 458788U);
  dev_priv->savePIPEBCONF = readl((void const volatile   *)dev_priv->regs + 462856U);
  dev_priv->savePIPEBSRC = readl((void const volatile   *)dev_priv->regs + 397340U);
  dev_priv->saveFPB0 = readl((void const volatile   *)dev_priv->regs + 24648U);
  dev_priv->saveFPB1 = readl((void const volatile   *)dev_priv->regs + 24652U);
  dev_priv->saveDPLL_B = readl((void const volatile   *)dev_priv->regs + 24600U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    dev_priv->saveDPLL_B_MD = readl((void const volatile   *)dev_priv->regs + 24608U);
  } else {

  }
  dev_priv->saveHTOTAL_B = readl((void const volatile   *)dev_priv->regs + 397312U);
  dev_priv->saveHBLANK_B = readl((void const volatile   *)dev_priv->regs + 397316U);
  dev_priv->saveHSYNC_B = readl((void const volatile   *)dev_priv->regs + 397320U);
  dev_priv->saveVTOTAL_B = readl((void const volatile   *)dev_priv->regs + 397324U);
  dev_priv->saveVBLANK_B = readl((void const volatile   *)dev_priv->regs + 397328U);
  dev_priv->saveVSYNC_B = readl((void const volatile   *)dev_priv->regs + 397332U);
  /* pipe B border color; restored from saveBCLRPAT_B in i915_restore_state() */
  dev_priv->saveBCLRPAT_B = readl((void const volatile   *)dev_priv->regs + 397344U);
  dev_priv->saveDSPBCNTR = readl((void const volatile   *)dev_priv->regs + 463232U);
  dev_priv->saveDSPBSTRIDE = readl((void const volatile   *)dev_priv->regs + 463240U);
  dev_priv->saveDSPBSIZE = readl((void const volatile   *)dev_priv->regs + 463248U);
  dev_priv->saveDSPBPOS = readl((void const volatile   *)dev_priv->regs + 463244U);
  dev_priv->saveDSPBADDR = readl((void const volatile   *)dev_priv->regs + 463236U);
  if (dev->pci_device == 10754 || dev->pci_device == 10818) {
    dev_priv->saveDSPBSURF = readl((void const volatile   *)dev_priv->regs + 463260U);
    dev_priv->saveDSPBTILEOFF = readl((void const volatile   *)dev_priv->regs + 463268U);
  } else {

  }
  i915_save_palette(dev, PIPE_B);
  dev_priv->savePIPEBSTAT = readl((void const volatile   *)dev_priv->regs + 462884U);
  dev_priv->saveADPA = readl((void const volatile   *)dev_priv->regs + 397568U);
  dev_priv->savePP_CONTROL = readl((void const volatile   *)dev_priv->regs + 397828U);
  dev_priv->savePFIT_PGM_RATIOS = readl((void const volatile   *)dev_priv->regs + 397876U);
  dev_priv->saveBLC_PWM_CTL = readl((void const volatile   *)dev_priv->regs + 397908U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    dev_priv->saveBLC_PWM_CTL2 = readl((void const volatile   *)dev_priv->regs + 397904U);
  } else {

  }
  if ((((((dev->pci_device == 13687 || dev->pci_device == 13698) || dev->pci_device == 9618) || (dev->pci_device == 10146 || dev->pci_device == 10158)) || dev->pci_device == 10754) || dev->pci_device == 10818) && dev->pci_device != 13687) {
    dev_priv->saveLVDS = readl((void const volatile   *)dev_priv->regs + 397696U);
  } else {

  }
  if (dev->pci_device != 13687 && dev->pci_device != 9570) {
    dev_priv->savePFIT_CONTROL = readl((void const volatile   *)dev_priv->regs + 397872U);
  } else {

  }
  dev_priv->savePP_ON_DELAYS = readl((void const volatile   *)dev_priv->regs + 397832U);
  dev_priv->savePP_OFF_DELAYS = readl((void const volatile   *)dev_priv->regs + 397836U);
  dev_priv->savePP_DIVISOR = readl((void const volatile   *)dev_priv->regs + 397840U);
  dev_priv->saveFBC_CFB_BASE = readl((void const volatile   *)dev_priv->regs + 12800U);
  dev_priv->saveFBC_LL_BASE = readl((void const volatile   *)dev_priv->regs + 12804U);
  dev_priv->saveFBC_CONTROL2 = readl((void const volatile   *)dev_priv->regs + 12820U);
  dev_priv->saveFBC_CONTROL = readl((void const volatile   *)dev_priv->regs + 12808U);
  dev_priv->saveIIR = readl((void const volatile   *)dev_priv->regs + 8356U);
  dev_priv->saveIER = readl((void const volatile   *)dev_priv->regs + 8352U);
  dev_priv->saveIMR = readl((void const volatile   *)dev_priv->regs + 8360U);
  dev_priv->saveVGA0 = readl((void const volatile   *)dev_priv->regs + 24576U);
  dev_priv->saveVGA1 = readl((void const volatile   *)dev_priv->regs + 24580U);
  dev_priv->saveVGA_PD = readl((void const volatile   *)dev_priv->regs + 24592U);
  dev_priv->saveVGACNTRL = readl((void const volatile   *)dev_priv->regs + 463872U);
  dev_priv->saveD_STATE = readl((void const volatile   *)dev_priv->regs + 24836U);
  dev_priv->saveCG_2D_DIS = readl((void const volatile   *)dev_priv->regs + 25088U);
  dev_priv->saveCACHE_MODE_0 = readl((void const volatile   *)dev_priv->regs + 8480U);
  dev_priv->saveMI_ARB_STATE = readl((void const volatile   *)dev_priv->regs + 8420U);
  i = 0;
  goto ldv_23351;
  ldv_23350: 
  dev_priv->saveSWF0[i] = readl((void const volatile   *)dev_priv->regs + (unsigned long )((i << 2) + 463888));
  dev_priv->saveSWF1[i] = readl((void const volatile   *)dev_priv->regs + (unsigned long )((i << 2) + 459792));
  i = i + 1;
  ldv_23351: ;
  if (i <= 15) {
    goto ldv_23350;
  } else {

  }
  i = 0;
  goto ldv_23354;
  ldv_23353: 
  dev_priv->saveSWF2[i] = readl((void const volatile   *)dev_priv->regs + (unsigned long )((i << 2) + 467988));
  i = i + 1;
  ldv_23354: ;
  if (i <= 2) {
    goto ldv_23353;
  } else {

  }
  i915_save_vga(dev);
  return (0);
}
}
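/* Resume-time mirror of i915_save_state(): re-programs each DPLL with
 * settle delays around the writes, restores pipe timing, plane, panel,
 * FBC, VGA PLL, clock-gating and scratch registers, then replays the
 * legacy VGA state via i915_restore_vga(). */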
int i915_restore_state(struct drm_device *dev ) 
{ 
  struct drm_i915_private *dev_priv ;
  int i ;
  unsigned int tmp ;
  unsigned int tmp___0 ;

  {
  dev_priv = (struct drm_i915_private *)dev->dev_private;
  pci_write_config_byte(dev->pdev, 244, (int )dev_priv->saveLBB);
  writel(dev_priv->saveDSPARB, (void volatile   *)dev_priv->regs + 458800U);
  if ((int )dev_priv->saveDPLL_A < 0) {
    writel(dev_priv->saveDPLL_A & 2147483647U, (void volatile   *)dev_priv->regs + 24596U);
    __const_udelay(644250UL);
  } else {

  }
  writel(dev_priv->saveFPA0, (void volatile   *)dev_priv->regs + 24640U);
  writel(dev_priv->saveFPA1, (void volatile   *)dev_priv->regs + 24644U);
  writel(dev_priv->saveDPLL_A, (void volatile   *)dev_priv->regs + 24596U);
  __const_udelay(644250UL);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    writel(dev_priv->saveDPLL_A_MD, (void volatile   *)dev_priv->regs + 24604U);
  } else {

  }
  __const_udelay(644250UL);
  writel(dev_priv->saveHTOTAL_A, (void volatile   *)dev_priv->regs + 393216U);
  writel(dev_priv->saveHBLANK_A, (void volatile   *)dev_priv->regs + 393220U);
  writel(dev_priv->saveHSYNC_A, (void volatile   *)dev_priv->regs + 393224U);
  writel(dev_priv->saveVTOTAL_A, (void volatile   *)dev_priv->regs + 393228U);
  writel(dev_priv->saveVBLANK_A, (void volatile   *)dev_priv->regs + 393232U);
  writel(dev_priv->saveVSYNC_A, (void volatile   *)dev_priv->regs + 393236U);
  writel(dev_priv->saveBCLRPAT_A, (void volatile   *)dev_priv->regs + 393248U);
  writel(dev_priv->saveDSPASIZE, (void volatile   *)dev_priv->regs + 459152U);
  writel(dev_priv->saveDSPAPOS, (void volatile   *)dev_priv->regs + 459148U);
  writel(dev_priv->savePIPEASRC, (void volatile   *)dev_priv->regs + 393244U);
  writel(dev_priv->saveDSPAADDR, (void volatile   *)dev_priv->regs + 459140U);
  writel(dev_priv->saveDSPASTRIDE, (void volatile   *)dev_priv->regs + 459144U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    writel(dev_priv->saveDSPASURF, (void volatile   *)dev_priv->regs + 459164U);
    writel(dev_priv->saveDSPATILEOFF, (void volatile   *)dev_priv->regs + 459172U);
  } else {

  }
  writel(dev_priv->savePIPEACONF, (void volatile   *)dev_priv->regs + 458760U);
  i915_restore_palette(dev, PIPE_A);
  writel(dev_priv->saveDSPACNTR, (void volatile   *)dev_priv->regs + 459136U);
  tmp = readl((void const volatile   *)dev_priv->regs + 459140U);
  writel(tmp, (void volatile   *)dev_priv->regs + 459140U);
  if ((int )dev_priv->saveDPLL_B < 0) {
    writel(dev_priv->saveDPLL_B & 2147483647U, (void volatile   *)dev_priv->regs + 24600U);
    __const_udelay(644250UL);
  } else {

  }
  writel(dev_priv->saveFPB0, (void volatile   *)dev_priv->regs + 24648U);
  writel(dev_priv->saveFPB1, (void volatile   *)dev_priv->regs + 24652U);
  writel(dev_priv->saveDPLL_B, (void volatile   *)dev_priv->regs + 24600U);
  __const_udelay(644250UL);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    writel(dev_priv->saveDPLL_B_MD, (void volatile   *)dev_priv->regs + 24608U);
  } else {

  }
  __const_udelay(644250UL);
  writel(dev_priv->saveHTOTAL_B, (void volatile   *)dev_priv->regs + 397312U);
  writel(dev_priv->saveHBLANK_B, (void volatile   *)dev_priv->regs + 397316U);
  writel(dev_priv->saveHSYNC_B, (void volatile   *)dev_priv->regs + 397320U);
  writel(dev_priv->saveVTOTAL_B, (void volatile   *)dev_priv->regs + 397324U);
  writel(dev_priv->saveVBLANK_B, (void volatile   *)dev_priv->regs + 397328U);
  writel(dev_priv->saveVSYNC_B, (void volatile   *)dev_priv->regs + 397332U);
  writel(dev_priv->saveBCLRPAT_B, (void volatile   *)dev_priv->regs + 397344U);
  writel(dev_priv->saveDSPBSIZE, (void volatile   *)dev_priv->regs + 463248U);
  writel(dev_priv->saveDSPBPOS, (void volatile   *)dev_priv->regs + 463244U);
  writel(dev_priv->savePIPEBSRC, (void volatile   *)dev_priv->regs + 397340U);
  writel(dev_priv->saveDSPBADDR, (void volatile   *)dev_priv->regs + 463236U);
  writel(dev_priv->saveDSPBSTRIDE, (void volatile   *)dev_priv->regs + 463240U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    writel(dev_priv->saveDSPBSURF, (void volatile   *)dev_priv->regs + 463260U);
    writel(dev_priv->saveDSPBTILEOFF, (void volatile   *)dev_priv->regs + 463268U);
  } else {

  }
  writel(dev_priv->savePIPEBCONF, (void volatile   *)dev_priv->regs + 462856U);
  i915_restore_palette(dev, PIPE_B);
  writel(dev_priv->saveDSPBCNTR, (void volatile   *)dev_priv->regs + 463232U);
  tmp___0 = readl((void const volatile   *)dev_priv->regs + 463236U);
  writel(tmp___0, (void volatile   *)dev_priv->regs + 463236U);
  writel(dev_priv->saveADPA, (void volatile   *)dev_priv->regs + 397568U);
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    writel(dev_priv->saveBLC_PWM_CTL2, (void volatile   *)dev_priv->regs + 397904U);
  } else {

  }
  if ((((((dev->pci_device == 13687 || dev->pci_device == 13698) || dev->pci_device == 9618) || (dev->pci_device == 10146 || dev->pci_device == 10158)) || dev->pci_device == 10754) || dev->pci_device == 10818) && dev->pci_device != 13687) {
    writel(dev_priv->saveLVDS, (void volatile   *)dev_priv->regs + 397696U);
  } else {

  }
  if (dev->pci_device != 13687 && dev->pci_device != 9570) {
    writel(dev_priv->savePFIT_CONTROL, (void volatile   *)dev_priv->regs + 397872U);
  } else {

  }
  writel(dev_priv->savePFIT_PGM_RATIOS, (void volatile   *)dev_priv->regs + 397876U);
  writel(dev_priv->saveBLC_PWM_CTL, (void volatile   *)dev_priv->regs + 397908U);
  writel(dev_priv->savePP_ON_DELAYS, (void volatile   *)dev_priv->regs + 397832U);
  writel(dev_priv->savePP_OFF_DELAYS, (void volatile   *)dev_priv->regs + 397836U);
  writel(dev_priv->savePP_DIVISOR, (void volatile   *)dev_priv->regs + 397840U);
  writel(dev_priv->savePP_CONTROL, (void volatile   *)dev_priv->regs + 397828U);
  writel(dev_priv->saveFBC_CFB_BASE, (void volatile   *)dev_priv->regs + 12800U);
  writel(dev_priv->saveFBC_LL_BASE, (void volatile   *)dev_priv->regs + 12804U);
  writel(dev_priv->saveFBC_CONTROL2, (void volatile   *)dev_priv->regs + 12820U);
  writel(dev_priv->saveFBC_CONTROL, (void volatile   *)dev_priv->regs + 12808U);
  writel(dev_priv->saveVGACNTRL, (void volatile   *)dev_priv->regs + 463872U);
  writel(dev_priv->saveVGA0, (void volatile   *)dev_priv->regs + 24576U);
  writel(dev_priv->saveVGA1, (void volatile   *)dev_priv->regs + 24580U);
  writel(dev_priv->saveVGA_PD, (void volatile   *)dev_priv->regs + 24592U);
  __const_udelay(644250UL);
  writel(dev_priv->saveD_STATE, (void volatile   *)dev_priv->regs + 24836U);
  writel(dev_priv->saveCG_2D_DIS, (void volatile   *)dev_priv->regs + 25088U);
  writel(dev_priv->saveCACHE_MODE_0 | 4294901760U, (void volatile   *)dev_priv->regs + 8480U);
  writel(dev_priv->saveMI_ARB_STATE | 4294901760U, (void volatile   *)dev_priv->regs + 8420U);
  i = 0;
  goto ldv_23362;
  ldv_23361: 
  writel(dev_priv->saveSWF0[i], (void volatile   *)dev_priv->regs + (unsigned long )((i << 2) + 463888));
  writel(dev_priv->saveSWF1[i], (void volatile   *)dev_priv->regs + (unsigned long )((i << 2) + 459792));
  i = i + 1;
  ldv_23362: ;
  if (i <= 15) {
    goto ldv_23361;
  } else {

  }
  i = 0;
  goto ldv_23365;
  ldv_23364: 
  writel(dev_priv->saveSWF2[i], (void volatile   *)dev_priv->regs + (unsigned long )((i << 2) + 467988));
  i = i + 1;
  ldv_23365: ;
  if (i <= 2) {
    goto ldv_23364;
  } else {

  }
  i915_restore_vga(dev);
  return (0);
}
}
unsigned long ldv___get_free_pages_82(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_84(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_88(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_92(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                   unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
__inline static void __list_del(struct list_head *prev , struct list_head *next ) 
{ 


  {
  next->prev = prev;
  prev->next = next;
  return;
}
}
__inline static void list_del_init(struct list_head *entry ) 
{ 


  {
  __list_del(entry->prev, entry->next);
  INIT_LIST_HEAD(entry);
  return;
}
}
__inline static void list_move_tail(struct list_head *list , struct list_head *head ) 
{ 


  {
  __list_del(list->prev, list->next);
  list_add_tail(list, head);
  return;
}
}
__inline static int list_empty(struct list_head  const  *head ) 
{ 


  {
  return ((unsigned long )((struct list_head  const  *)head->next) == (unsigned long )head);
}
}
extern void lockdep_init_map(struct lockdep_map * , char const   * , struct lock_class_key * ,
                             int  ) ;
__inline static void atomic_add(int i , atomic_t *v ) 
{ 


  {
  __asm__  volatile   (".section .smp_locks,\"a\"\n .balign 8 \n .quad 661f\n.previous\n661:\n\tlock; addl %1,%0": "=m" (v->counter): "ir" (i),
                       "m" (v->counter));
  return;
}
}
__inline static void atomic_sub(int i , atomic_t *v ) 
{ 


  {
  __asm__  volatile   (".section .smp_locks,\"a\"\n .balign 8 \n .quad 661f\n.previous\n661:\n\tlock; subl %1,%0": "=m" (v->counter): "ir" (i),
                       "m" (v->counter));
  return;
}
}
__inline static void atomic_inc(atomic_t *v ) 
{ 


  {
  __asm__  volatile   (".section .smp_locks,\"a\"\n .balign 8 \n .quad 661f\n.previous\n661:\n\tlock; incl %0": "=m" (v->counter): "m" (v->counter));
  return;
}
}
__inline static void atomic_dec(atomic_t *v ) 
{ 


  {
  __asm__  volatile   (".section .smp_locks,\"a\"\n .balign 8 \n .quad 661f\n.previous\n661:\n\tlock; decl %0": "=m" (v->counter): "m" (v->counter));
  return;
}
}
extern void prepare_to_wait(wait_queue_head_t * , wait_queue_t * , int  ) ;
extern void finish_wait(wait_queue_head_t * , wait_queue_t * ) ;
extern int autoremove_wake_function(wait_queue_t * , unsigned int  , int  , void * ) ;
__inline static int mutex_is_locked(struct mutex *lock ) 
{ 


  {
  return (lock->count.counter != 1);
}
}
extern void down_write(struct rw_semaphore * ) ;
extern void up_write(struct rw_semaphore * ) ;
extern void msleep(unsigned int  ) ;
struct page *ldv_alloc_page_vma_108(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_98(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
extern void kref_get(struct kref * ) ;
extern int kref_put(struct kref * , void (*)(struct kref * ) ) ;
extern void init_timer(struct timer_list * ) ;
extern int schedule_delayed_work(struct delayed_work * , unsigned long  ) ;
void *ldv_kmem_cache_alloc_100(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_104(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
__inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) ;
extern ssize_t vfs_read(struct file * , char * , size_t  , loff_t * ) ;
extern ssize_t vfs_write(struct file * , char const   * , size_t  , loff_t * ) ;
__inline static long PTR_ERR(void const   *ptr ) 
{ 


  {
  return ((long )ptr);
}
}
__inline static long IS_ERR(void const   *ptr ) 
{ 
  long tmp ;

  {
  tmp = ldv__builtin_expect((unsigned long )ptr > 0xfffffffffffff000UL, 0L);
  return (tmp);
}
}
extern void *ioremap_wc(unsigned long  , unsigned long  ) ;
extern void put_page(struct page * ) ;
extern int set_page_dirty(struct page * ) ;
extern unsigned long do_mmap_pgoff(struct file * , unsigned long  , unsigned long  ,
                                   unsigned long  , unsigned long  , unsigned long  ) ;
__inline static unsigned long do_mmap(struct file *file , unsigned long addr , unsigned long len ,
                                      unsigned long prot , unsigned long flag , unsigned long offset ) 
{ 
  unsigned long ret ;

  {
  ret = 0xffffffffffffffeaUL;
  if (((len + 4095UL) & 0xfffffffffffff000UL) + offset < offset) {
    goto out;
  } else {

  }
  if ((offset & 4095UL) == 0UL) {
    ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> 12);
  } else {

  }
  out: ;
  return (ret);
}
}
extern void schedule(void) ;
extern unsigned long copy_from_user(void * , void const   * , unsigned int  ) ;
extern struct page *read_cache_page(struct address_space * , unsigned long  , filler_t * ,
                                    void * ) ;
__inline static struct page *read_mapping_page(struct address_space *mapping , unsigned long index ,
                                               void *data ) 
{ 
  filler_t *filler ;
  struct page *tmp ;

  {
  filler = (filler_t *)(mapping->a_ops)->readpage;
  tmp = read_cache_page(mapping, index, filler, data);
  return (tmp);
}
}
extern int drm_free_agp(struct agp_memory * , int  ) ;
extern struct agp_memory *drm_agp_bind_pages(struct drm_device * , struct page ** ,
                                             unsigned long  , uint32_t  ) ;
extern int drm_unbind_agp(struct agp_memory * ) ;
extern void drm_clflush_pages(struct page ** , unsigned long  ) ;
extern int drm_irq_install(struct drm_device * ) ;
extern void drm_agp_chipset_flush(struct drm_device * ) ;
extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * , unsigned long  ,
                                            unsigned int  ) ;
extern void drm_mm_put_block(struct drm_mm_node * ) ;
extern struct drm_mm_node *drm_mm_search_free(struct drm_mm  const  * , unsigned long  ,
                                              unsigned int  , int  ) ;
extern int drm_mm_init(struct drm_mm * , unsigned long  , unsigned long  ) ;
extern void drm_gem_object_free(struct kref * ) ;
extern struct drm_gem_object *drm_gem_object_alloc(struct drm_device * , size_t  ) ;
extern void drm_gem_object_handle_free(struct kref * ) ;
__inline static void drm_gem_object_reference(struct drm_gem_object *obj ) 
{ 


  {
  kref_get(& obj->refcount);
  return;
}
}
__inline static void drm_gem_object_unreference(struct drm_gem_object *obj ) 
{ 


  {
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return;
  } else {

  }
  kref_put(& obj->refcount, & drm_gem_object_free);
  return;
}
}
extern int drm_gem_handle_create(struct drm_file * , struct drm_gem_object * , int * ) ;
__inline static void drm_gem_object_handle_unreference(struct drm_gem_object *obj ) 
{ 


  {
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return;
  } else {

  }
  kref_put(& obj->handlecount, & drm_gem_object_handle_free);
  drm_gem_object_unreference(obj);
  return;
}
}
extern struct drm_gem_object *drm_gem_object_lookup(struct drm_device * , struct drm_file * ,
                                                    int  ) ;
extern void drm_core_ioremap_wc(struct drm_map * , struct drm_device * ) ;
__inline static void *drm_calloc___0(size_t nmemb , size_t size , int area ) 
{ 
  void *tmp ;

  {
  tmp = kcalloc(nmemb, size, 208U);
  return (tmp);
}
}
int i915_gem_object_pin(struct drm_gem_object *obj , uint32_t alignment ) ;
void i915_gem_object_unpin(struct drm_gem_object *obj ) ;
void i915_gem_retire_requests(struct drm_device *dev ) ;
void i915_gem_retire_work_handler(struct work_struct *work ) ;
void i915_gem_clflush_object(struct drm_gem_object *obj ) ;
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev ) ;
extern void mark_page_accessed(struct page * ) ;
static int i915_gem_object_set_domain(struct drm_gem_object *obj , uint32_t read_domains ,
                                      uint32_t write_domain ) ;
static int i915_gem_object_set_domain_range(struct drm_gem_object *obj , uint64_t offset ,
                                            uint64_t size , uint32_t read_domains ,
                                            uint32_t write_domain ) ;
static int i915_gem_set_domain(struct drm_gem_object *obj , struct drm_file *file_priv ,
                               uint32_t read_domains , uint32_t write_domain ) ;
static int i915_gem_object_get_page_list(struct drm_gem_object *obj ) ;
static void i915_gem_object_free_page_list(struct drm_gem_object *obj ) ;
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj ) ;
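/* GEM ioctl entry points follow. i915_gem_init_ioctl() validates the
 * page-aligned GTT range supplied by userspace and hands it to the
 * drm_mm range allocator that manages GTT space. */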
int i915_gem_init_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_init *args ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  args = (struct drm_i915_gem_init *)data;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  if ((args->gtt_start >= args->gtt_end || (args->gtt_start & 4095ULL) != 0ULL) || (args->gtt_end & 4095ULL) != 0ULL) {
    mutex_unlock(& dev->struct_mutex);
    return (-22);
  } else {

  }
  drm_mm_init(& dev_priv->mm.gtt_space, (unsigned long )args->gtt_start, (unsigned long )(args->gtt_end - args->gtt_start));
  dev->gtt_total = (unsigned int )args->gtt_end - (unsigned int )args->gtt_start;
  mutex_unlock(& dev->struct_mutex);
  return (0);
}
}
int i915_gem_create_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_create *args ;
  struct drm_gem_object *obj ;
  int handle ;
  int ret ;

  {
  args = (struct drm_i915_gem_create *)data;
  args->size = ((args->size + 4095ULL) / 4096ULL) * 4096ULL;
  obj = drm_gem_object_alloc(dev, (size_t )args->size);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-12);
  } else {

  }
  ret = drm_gem_handle_create(file_priv, obj, & handle);
  mutex_lock_nested(& dev->struct_mutex, 0U);
  drm_gem_object_handle_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  if (ret != 0) {
    return (ret);
  } else {

  }
  args->handle = (uint32_t )handle;
  return (0);
}
}
int i915_gem_pread_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_pread *args ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  ssize_t read ;
  loff_t offset ;
  int ret ;

  {
  args = (struct drm_i915_gem_pread *)data;
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-9);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((args->offset > (unsigned long long )obj->size || args->size > (unsigned long long )obj->size) || args->offset + args->size > (unsigned long long )obj->size) {
    drm_gem_object_unreference(obj);
    return (-22);
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 1U, 0U);
  if (ret != 0) {
    drm_gem_object_unreference(obj);
    mutex_unlock(& dev->struct_mutex);
    return (ret);
  } else {

  }
  offset = (loff_t )args->offset;
  read = vfs_read(obj->filp, (char *)args->data_ptr, (size_t )args->size, & offset);
  if ((unsigned long long )read != args->size) {
    drm_gem_object_unreference(obj);
    mutex_unlock(& dev->struct_mutex);
    if (read < 0L) {
      return ((int )read);
    } else {
      return (-22);
    }
  } else {

  }
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  return (0);
}
}
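/* Fast pwrite path through the GTT aperture: verifies user access,
 * pins the object, moves it to the GTT domain, then copies the user
 * data one 4KB page at a time through a transient ioremap_wc() mapping
 * of the corresponding aperture page. */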
static int i915_gem_gtt_pwrite(struct drm_device *dev , struct drm_gem_object *obj ,
                               struct drm_i915_gem_pwrite *args , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_object *obj_priv ;
  ssize_t remain ;
  loff_t offset ;
  char *user_data ;
  char *vaddr ;
  int i ;
  int o ;
  int l ;
  int ret ;
  unsigned long pfn ;
  unsigned long unwritten ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp ;
  long tmp___0 ;
  void *tmp___1 ;
  int tmp___2 ;

  {
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  ret = 0;
  user_data = (char *)args->data_ptr;
  remain = (ssize_t )args->size;
  tmp = current_thread_info();
  __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (user_data),
            "g" (remain), "rm" (tmp->addr_limit.seg));
  tmp___0 = ldv__builtin_expect(flag == 0UL, 1L);
  if (tmp___0 == 0L) {
    return (-14);
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_gem_object_pin(obj, 0U);
  if (ret != 0) {
    mutex_unlock(& dev->struct_mutex);
    return (ret);
  } else {

  }
  ret = i915_gem_set_domain(obj, file_priv, 64U, 64U);
  if (ret != 0) {
    goto fail;
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  offset = (loff_t )((uint64_t )obj_priv->gtt_offset + args->offset);
  obj_priv->dirty = 1;
  goto ldv_23572;
  ldv_23571: 
  i = (int )(offset >> 12);
  o = (int )offset & 4095;
  l = (int )remain;
  if ((unsigned int )(o + l) > 4096U) {
    l = (int )(4096U - (unsigned int )o);
  } else {

  }
  pfn = ((dev->agp)->base >> 12) + (unsigned long )i;
  tmp___1 = ioremap_wc(pfn << 12, 4096UL);
  vaddr = (char *)tmp___1;
  if ((unsigned long )vaddr == (unsigned long )((char *)0)) {
    ret = -14;
    goto fail;
  } else {

  }
  tmp___2 = __copy_from_user((void *)vaddr + (unsigned long )o, (void const   *)user_data,
                             (unsigned int )l);
  unwritten = (unsigned long )tmp___2;
  iounmap((void volatile   *)vaddr);
  if (unwritten != 0UL) {
    ret = -14;
    goto fail;
  } else {

  }
  remain = remain - (ssize_t )l;
  user_data = user_data + (unsigned long )l;
  offset = (loff_t )l + offset;
  ldv_23572: ;
  if (remain > 0L) {
    goto ldv_23571;
  } else {

  }

  fail: 
  i915_gem_object_unpin(obj);
  mutex_unlock(& dev->struct_mutex);
  return (ret);
}
}
static int i915_gem_shmem_pwrite(struct drm_device *dev , struct drm_gem_object *obj ,
                                 struct drm_i915_gem_pwrite *args , struct drm_file *file_priv ) 
{ 
  int ret ;
  loff_t offset ;
  ssize_t written ;

  {
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_gem_set_domain(obj, file_priv, 1U, 1U);
  if (ret != 0) {
    mutex_unlock(& dev->struct_mutex);
    return (ret);
  } else {

  }
  offset = (loff_t )args->offset;
  written = vfs_write(obj->filp, (char const   *)args->data_ptr, (size_t )args->size,
                      & offset);
  if ((unsigned long long )written != args->size) {
    mutex_unlock(& dev->struct_mutex);
    if (written < 0L) {
      return ((int )written);
    } else {
      return (-22);
    }
  } else {

  }
  mutex_unlock(& dev->struct_mutex);
  return (0);
}
}
int i915_gem_pwrite_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_pwrite *args ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;

  {
  args = (struct drm_i915_gem_pwrite *)data;
  ret = 0;
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-9);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((args->offset > (unsigned long long )obj->size || args->size > (unsigned long long )obj->size) || args->offset + args->size > (unsigned long long )obj->size) {
    drm_gem_object_unreference(obj);
    return (-22);
  } else {

  }
  if (obj_priv->tiling_mode == 0U && dev->gtt_total != 0U) {
    ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
  } else {
    ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
  }
  drm_gem_object_unreference(obj);
  return (ret);
}
}
int i915_gem_set_domain_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_set_domain *args ;
  struct drm_gem_object *obj ;
  int ret ;

  {
  args = (struct drm_i915_gem_set_domain *)data;
  if (((dev->driver)->driver_features & 4096U) == 0U) {
    return (-19);
  } else {

  }
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-9);
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_gem_set_domain(obj, file_priv, args->read_domains, args->write_domain);
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  return (ret);
}
}
int i915_gem_sw_finish_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_sw_finish *args ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;

  {
  args = (struct drm_i915_gem_sw_finish *)data;
  ret = 0;
  if (((dev->driver)->driver_features & 4096U) == 0U) {
    return (-19);
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    mutex_unlock(& dev->struct_mutex);
    return (-9);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((int )obj->write_domain & 1 && obj_priv->pin_count != 0) {
    i915_gem_clflush_object(obj);
    drm_agp_chipset_flush(dev);
  } else {

  }
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  return (ret);
}
}
int i915_gem_mmap_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_mmap *args ;
  struct drm_gem_object *obj ;
  loff_t offset ;
  unsigned long addr ;
  struct task_struct *tmp ;
  struct task_struct *tmp___0 ;
  long tmp___1 ;

  {
  args = (struct drm_i915_gem_mmap *)data;
  if (((dev->driver)->driver_features & 4096U) == 0U) {
    return (-19);
  } else {

  }
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-9);
  } else {

  }
  offset = (loff_t )args->offset;
  tmp = get_current();
  down_write(& (tmp->mm)->mmap_sem);
  addr = do_mmap(obj->filp, 0UL, (unsigned long )args->size, 3UL, 1UL, (unsigned long )args->offset);
  tmp___0 = get_current();
  up_write(& (tmp___0->mm)->mmap_sem);
  mutex_lock_nested(& dev->struct_mutex, 0U);
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  tmp___1 = IS_ERR((void const   *)addr);
  if (tmp___1 != 0L) {
    return ((int )addr);
  } else {

  }
  args->addr_ptr = (unsigned long long )addr;
  return (0);
}
}
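/* Releases the object's backing pages: marks each page dirty when
 * obj_priv->dirty is set, touches and releases it, then frees the page
 * pointer array. */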
static void i915_gem_object_free_page_list(struct drm_gem_object *obj ) 
{ 
  struct drm_i915_gem_object *obj_priv ;
  int page_count___0 ;
  int i ;

  {
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  page_count___0 = (int )(obj->size / 4096UL);
  if ((unsigned long )obj_priv->page_list == (unsigned long )((struct page **)0)) {
    return;
  } else {

  }
  i = 0;
  goto ldv_23625;
  ldv_23624: ;
  if ((unsigned long )*(obj_priv->page_list + (unsigned long )i) != (unsigned long )((struct page *)0)) {
    if (obj_priv->dirty != 0) {
      set_page_dirty(*(obj_priv->page_list + (unsigned long )i));
    } else {

    }
    mark_page_accessed(*(obj_priv->page_list + (unsigned long )i));
    put_page(*(obj_priv->page_list + (unsigned long )i));
  } else {

  }
  i = i + 1;
  ldv_23625: ;
  if (i < page_count___0) {
    goto ldv_23624;
  } else {

  }
  obj_priv->dirty = 0;
  drm_free((void *)obj_priv->page_list, (unsigned long )page_count___0 * 8UL, 2);
  obj_priv->page_list = 0;
  return;
}
}
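/* Active-list bookkeeping: an object takes an extra reference and sits
 * on the active list while the GPU may still touch it, and moves back
 * to the inactive list (or off any list while pinned) once its
 * rendering has retired, dropping that reference. */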
static void i915_gem_object_move_to_active(struct drm_gem_object *obj ) 
{ 
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;

  {
  dev = obj->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if (obj_priv->active == 0) {
    drm_gem_object_reference(obj);
    obj_priv->active = 1;
  } else {

  }
  list_move_tail(& obj_priv->list, & dev_priv->mm.active_list);
  return;
}
}
static void i915_gem_object_move_to_inactive(struct drm_gem_object *obj ) 
{ 
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;

  {
  dev = obj->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if (obj_priv->pin_count != 0) {
    list_del_init(& obj_priv->list);
  } else {
    list_move_tail(& obj_priv->list, & dev_priv->mm.inactive_list);
  }
  if (obj_priv->active != 0) {
    obj_priv->active = 0;
    drm_gem_object_unreference(obj);
  } else {

  }
  return;
}
}
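/* Emits a request at the ring tail: MI_STORE_DWORD_INDEX (0x10800001)
 * makes the hardware write the new seqno into status-page dword 16
 * (byte offset 64, the slot i915_get_gem_seqno() reads), followed by
 * MI_USER_INTERRUPT (0x01000000). The request is then queued on
 * mm.request_list and, if the list was empty, the retire worker is
 * scheduled. */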
static uint32_t i915_add_request(struct drm_device *dev , uint32_t flush_domains ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_request *request ;
  uint32_t seqno ;
  int was_empty ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  void *tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  tmp = drm_calloc___0(1UL, 40UL, 2);
  request = (struct drm_i915_gem_request *)tmp;
  if ((unsigned long )request == (unsigned long )((struct drm_i915_gem_request *)0)) {
    return (0U);
  } else {

  }
  seqno = dev_priv->mm.next_gem_seqno;
  dev_priv->mm.next_gem_seqno = dev_priv->mm.next_gem_seqno + (uint32_t )1;
  if (dev_priv->mm.next_gem_seqno == 0U) {
    dev_priv->mm.next_gem_seqno = dev_priv->mm.next_gem_seqno + (uint32_t )1;
  } else {

  }
  if (dev_priv->ring.space <= 15) {
    i915_wait_ring(dev, 16, "i915_add_request");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 276824065U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 64U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = seqno;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 16777216U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] %d\n", "i915_add_request", seqno);
  } else {

  }
  request->seqno = seqno;
  request->emitted_jiffies = jiffies;
  request->flush_domains = flush_domains;
  was_empty = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  list_add_tail(& request->list, & dev_priv->mm.request_list);
  if (was_empty != 0) {
    schedule_delayed_work(& dev_priv->mm.retire_work, 250UL);
  } else {

  }
  return (seqno);
}
}
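/* Emits an MI_FLUSH command padded to a quadword; on the 965-class
 * device IDs checked below the returned flush_domains additionally
 * includes the sampler domain (0x4). */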
static uint32_t i915_retire_commands(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  uint32_t cmd ;
  uint32_t flush_domains ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  cmd = 33554436U;
  flush_domains = 0U;
  if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
    flush_domains = flush_domains | 4U;
  } else {

  }
  if (dev_priv->ring.space <= 7) {
    i915_wait_ring(dev, 8, "i915_retire_commands");
  } else {

  }
  outcount = 0U;
  outring = (unsigned int )dev_priv->ring.tail;
  ringmask = (unsigned int )dev_priv->ring.tail_mask;
  virt = (char volatile   *)dev_priv->ring.virtual_start;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = cmd;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
  outcount = outcount + 1U;
  outring = outring + 4U;
  outring = outring & ringmask;
  dev_priv->ring.tail = (int )outring;
  dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
  writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  return (flush_domains);
}
}
static void i915_gem_retire_request(struct drm_device *dev , struct drm_i915_gem_request *request ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  struct list_head  const  *__mptr ;
  int tmp ;
  struct drm_i915_gem_object *obj_priv___0 ;
  struct drm_i915_gem_object *next ;
  struct list_head  const  *__mptr___0 ;
  struct list_head  const  *__mptr___1 ;
  struct drm_gem_object *obj___0 ;
  struct list_head  const  *__mptr___2 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  goto ldv_23673;
  ldv_23672: 
  __mptr = (struct list_head  const  *)dev_priv->mm.active_list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffff0UL;
  obj = obj_priv->obj;
  if (obj_priv->last_rendering_seqno != request->seqno) {
    return;
  } else {

  }
  if (obj->write_domain != 0U) {
    list_move_tail(& obj_priv->list, & dev_priv->mm.flushing_list);
  } else {
    i915_gem_object_move_to_inactive(obj);
  }
  ldv_23673: 
  tmp = list_empty((struct list_head  const  *)(& dev_priv->mm.active_list));
  if (tmp == 0) {
    goto ldv_23672;
  } else {

  }

  if (request->flush_domains != 0U) {
    __mptr___0 = (struct list_head  const  *)dev_priv->mm.flushing_list.next;
    obj_priv___0 = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffff0UL;
    __mptr___1 = (struct list_head  const  *)obj_priv___0->list.next;
    next = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffff0UL;
    goto ldv_23685;
    ldv_23684: 
    obj___0 = obj_priv___0->obj;
    if ((obj___0->write_domain & request->flush_domains) != 0U) {
      obj___0->write_domain = 0U;
      i915_gem_object_move_to_inactive(obj___0);
    } else {

    }
    obj_priv___0 = next;
    __mptr___2 = (struct list_head  const  *)next->list.next;
    next = (struct drm_i915_gem_object *)__mptr___2 + 0xfffffffffffffff0UL;
    ldv_23685: ;
    if ((unsigned long )(& obj_priv___0->list) != (unsigned long )(& dev_priv->mm.flushing_list)) {
      goto ldv_23684;
    } else {

    }

  } else {

  }
  return;
}
}
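/* True when seq1 is at or after seq2, using a signed 32-bit difference so
 * the comparison remains correct across sequence-number wrap-around. */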
static int i915_seqno_passed(uint32_t seq1 , uint32_t seq2 ) 
{ 


  {
  return ((int )(seq1 - seq2) >= 0);
}
}
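/* Read the last sequence number the GPU reported, stored in dword 16 of
 * the hardware status page. */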
uint32_t i915_get_gem_seqno(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  return ((uint32_t )*((u32 volatile   *)dev_priv->hw_status_page + 16UL));
}
}
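/* Retire, in order, every request whose seqno the hardware has passed
 * (or all requests if the GPU is marked wedged), freeing each request
 * structure after processing it. */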
void i915_gem_retire_requests(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  uint32_t seqno ;
  struct drm_i915_gem_request *request ;
  uint32_t retiring_seqno ;
  struct list_head  const  *__mptr ;
  int tmp ;
  int tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  seqno = i915_get_gem_seqno(dev);
  goto ldv_23706;
  ldv_23705: 
  __mptr = (struct list_head  const  *)dev_priv->mm.request_list.next;
  request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffe8UL;
  retiring_seqno = request->seqno;
  tmp = i915_seqno_passed(seqno, retiring_seqno);
  if (tmp != 0 || dev_priv->mm.wedged != 0) {
    i915_gem_retire_request(dev, request);
    list_del(& request->list);
    drm_free((void *)request, 40UL, 2);
  } else {
    goto ldv_23704;
  }
  ldv_23706: 
  tmp___0 = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  if (tmp___0 == 0) {
    goto ldv_23705;
  } else {

  }
  ldv_23704: ;
  return;
}
}
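/* Delayed-work handler: retire finished requests under struct_mutex and
 * re-arm itself (250 jiffies) while requests are still outstanding. */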
void i915_gem_retire_work_handler(struct work_struct *work ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_device *dev ;
  struct work_struct  const  *__mptr ;
  int tmp ;

  {
  __mptr = (struct work_struct  const  *)work;
  dev_priv = (drm_i915_private_t *)__mptr + 0xfffffffffffff060UL;
  dev = dev_priv->dev;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  i915_gem_retire_requests(dev);
  tmp = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  if (tmp == 0) {
    schedule_delayed_work(& dev_priv->mm.retire_work, 250UL);
  } else {

  }
  mutex_unlock(& dev->struct_mutex);
  return;
}
}
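/* Wait interruptibly for the given seqno to pass.  The expanded wait-queue
 * loop below sleeps on dev_priv->irq_queue with the user interrupt
 * enabled; a pending signal yields -512 (-ERESTARTSYS) and a wedged GPU
 * yields -5 (-EIO).  On success, completed requests are retired before
 * returning. */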
static int i915_wait_request(struct drm_device *dev , uint32_t seqno ) 
{ 
  drm_i915_private_t *dev_priv ;
  int ret ;
  long tmp ;
  int __ret ;
  wait_queue_t __wait ;
  struct task_struct *tmp___0 ;
  uint32_t tmp___1 ;
  int tmp___2 ;
  struct task_struct *tmp___3 ;
  int tmp___4 ;
  uint32_t tmp___5 ;
  int tmp___6 ;
  uint32_t tmp___7 ;
  int tmp___8 ;
  uint32_t tmp___9 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  ret = 0;
  tmp = ldv__builtin_expect(seqno == 0U, 0L);
  if (tmp != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (763), "i" (24UL));
    ldv_23720: ;
    goto ldv_23720;
  } else {

  }
  tmp___7 = i915_get_gem_seqno(dev);
  tmp___8 = i915_seqno_passed(tmp___7, seqno);
  if (tmp___8 == 0) {
    dev_priv->mm.waiting_gem_seqno = seqno;
    i915_user_irq_get(dev);
    __ret = 0;
    tmp___5 = i915_get_gem_seqno(dev);
    tmp___6 = i915_seqno_passed(tmp___5, seqno);
    if (tmp___6 == 0 && dev_priv->mm.wedged == 0) {
      tmp___0 = get_current();
      __wait.flags = 0U;
      __wait.private = (void *)tmp___0;
      __wait.func = & autoremove_wake_function;
      __wait.task_list.next = & __wait.task_list;
      __wait.task_list.prev = & __wait.task_list;
      ldv_23725: 
      prepare_to_wait(& dev_priv->irq_queue, & __wait, 1);
      tmp___1 = i915_get_gem_seqno(dev);
      tmp___2 = i915_seqno_passed(tmp___1, seqno);
      if (tmp___2 != 0 || dev_priv->mm.wedged != 0) {
        goto ldv_23723;
      } else {

      }
      tmp___3 = get_current();
      tmp___4 = signal_pending(tmp___3);
      if (tmp___4 == 0) {
        schedule();
        goto ldv_23724;
      } else {

      }
      __ret = -512;
      goto ldv_23723;
      ldv_23724: ;
      goto ldv_23725;
      ldv_23723: 
      finish_wait(& dev_priv->irq_queue, & __wait);
    } else {

    }
    ret = __ret;
    i915_user_irq_put(dev);
    dev_priv->mm.waiting_gem_seqno = 0U;
  } else {

  }
  if (dev_priv->mm.wedged != 0) {
    ret = -5;
  } else {

  }
  if (ret != 0 && ret != -512) {
    tmp___9 = i915_get_gem_seqno(dev);
    printk("<3>[drm:%s] *ERROR* %s returns %d (awaiting %d at %d)\n", "i915_wait_request",
           "i915_wait_request", ret, seqno, tmp___9);
  } else {

  }
  if (ret == 0) {
    i915_gem_retire_requests(dev);
  } else {

  }
  return (ret);
}
}
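/* Flush the chipset for CPU-domain writes and, when any GPU domain is to
 * be flushed or invalidated, emit a two-dword flush command to the ring,
 * adjusting its low command bits for render-cache, sampler and instruction
 * invalidation depending on the device and the requested domains. */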
static void i915_gem_flush(struct drm_device *dev , uint32_t invalidate_domains ,
                           uint32_t flush_domains ) 
{ 
  drm_i915_private_t *dev_priv ;
  uint32_t cmd ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((int )flush_domains & 1) {
    drm_agp_chipset_flush(dev);
  } else {

  }
  if (((invalidate_domains | flush_domains) & 4294967230U) != 0U) {
    cmd = 33554436U;
    if (((invalidate_domains | flush_domains) & 2U) != 0U) {
      cmd = cmd & 4294967291U;
    } else {

    }
    if (((((((((dev->pci_device != 10610 && dev->pci_device != 10626) && dev->pci_device != 10642) && dev->pci_device != 10658) && dev->pci_device != 10754) && dev->pci_device != 10770) && dev->pci_device != 10818) && dev->pci_device != 11778) && dev->pci_device != 11794) && dev->pci_device != 11810) {
      if ((invalidate_domains & 4U) != 0U) {
        cmd = cmd | 1U;
      } else {

      }
    } else {

    }
    if ((invalidate_domains & 16U) != 0U) {
      cmd = cmd | 2U;
    } else {

    }
    if (dev_priv->ring.space <= 7) {
      i915_wait_ring(dev, 8, "i915_gem_flush");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = cmd;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  } else {

  }
  return;
}
}
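/* If the object still has pending GPU write domains, flush them, put the
 * object on the active list behind a new request, and then block until its
 * last rendering request has completed. */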
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj ) 
{ 
  struct drm_device *dev ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;
  uint32_t write_domain ;
  long tmp ;

  {
  dev = obj->dev;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((obj->write_domain & 4294967230U) != 0U) {
    write_domain = obj->write_domain;
    i915_gem_flush(dev, 0U, write_domain);
    i915_gem_object_move_to_active(obj);
    obj_priv->last_rendering_seqno = i915_add_request(dev, write_domain);
    tmp = ldv__builtin_expect(obj_priv->last_rendering_seqno == 0U, 0L);
    if (tmp != 0L) {
      __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                           "i" (890), "i" (24UL));
      ldv_23747: ;
      goto ldv_23747;
    } else {

    }
  } else {

  }
  if (obj_priv->active != 0) {
    ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
    if (ret != 0) {
      return (ret);
    } else {

    }
  } else {

  }
  return (0);
}
}
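/* Unbind an object from the GTT: refuse if it is pinned, wait for
 * rendering, move it back to the CPU domain, release the AGP binding and
 * page list, free its GTT node and drop it from any LRU list. */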
static int i915_gem_object_unbind(struct drm_gem_object *obj ) 
{ 
  struct drm_device *dev ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;
  long tmp ;
  int tmp___0 ;

  {
  dev = obj->dev;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  ret = 0;
  if ((unsigned long )obj_priv->gtt_space == (unsigned long )((struct drm_mm_node *)0)) {
    return (0);
  } else {

  }
  if (obj_priv->pin_count != 0) {
    printk("<3>[drm:%s] *ERROR* Attempting to unbind pinned buffer\n", "i915_gem_object_unbind");
    return (-22);
  } else {

  }
  ret = i915_gem_object_wait_rendering(obj);
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* wait_rendering failed: %d\n", "i915_gem_object_unbind",
           ret);
    return (ret);
  } else {

  }
  ret = i915_gem_object_set_domain(obj, 1U, 1U);
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* set_domain failed: %d\n", "i915_gem_object_unbind",
           ret);
    return (ret);
  } else {

  }
  if ((unsigned long )obj_priv->agp_mem != (unsigned long )((struct agp_memory *)0)) {
    drm_unbind_agp(obj_priv->agp_mem);
    drm_free_agp(obj_priv->agp_mem, (int )(obj->size / 4096UL));
    obj_priv->agp_mem = 0;
  } else {

  }
  tmp = ldv__builtin_expect(obj_priv->active != 0, 0L);
  if (tmp != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (961), "i" (24UL));
    ldv_23755: ;
    goto ldv_23755;
  } else {

  }
  i915_gem_object_free_page_list(obj);
  if ((unsigned long )obj_priv->gtt_space != (unsigned long )((struct drm_mm_node *)0)) {
    atomic_dec(& dev->gtt_count);
    atomic_sub((int )obj->size, & dev->gtt_memory);
    drm_mm_put_block(obj_priv->gtt_space);
    obj_priv->gtt_space = 0;
  } else {

  }
  tmp___0 = list_empty((struct list_head  const  *)(& obj_priv->list));
  if (tmp___0 == 0) {
    list_del_init(& obj_priv->list);
  } else {

  }
  return (0);
}
}
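/* Make room in the GTT: unbind the oldest inactive object if there is one;
 * otherwise wait for the oldest outstanding request; otherwise flush the
 * oldest flushing-list object so it can be retired later.  Returns -12
 * (-ENOMEM) only when all three lists are empty. */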
static int i915_gem_evict_something(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;
  struct list_head  const  *__mptr ;
  long tmp ;
  long tmp___0 ;
  int tmp___1 ;
  struct drm_i915_gem_request *request ;
  struct list_head  const  *__mptr___0 ;
  int tmp___2 ;
  int tmp___3 ;
  struct list_head  const  *__mptr___1 ;
  int tmp___4 ;
  int tmp___5 ;
  int tmp___6 ;
  int tmp___7 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  ret = 0;
  ldv_23775: 
  tmp___1 = list_empty((struct list_head  const  *)(& dev_priv->mm.inactive_list));
  if (tmp___1 == 0) {
    __mptr = (struct list_head  const  *)dev_priv->mm.inactive_list.next;
    obj_priv = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffff0UL;
    obj = obj_priv->obj;
    tmp = ldv__builtin_expect(obj_priv->pin_count != 0, 0L);
    if (tmp != 0L) {
      __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                           "i" (997), "i" (24UL));
      ldv_23765: ;
      goto ldv_23765;
    } else {

    }
    tmp___0 = ldv__builtin_expect(obj_priv->active != 0, 0L);
    if (tmp___0 != 0L) {
      __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                           "i" (1001), "i" (24UL));
      ldv_23766: ;
      goto ldv_23766;
    } else {

    }
    ret = i915_gem_object_unbind(obj);
    goto ldv_23767;
  } else {

  }
  tmp___3 = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  if (tmp___3 == 0) {
    __mptr___0 = (struct list_head  const  *)dev_priv->mm.request_list.next;
    request = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffe8UL;
    ret = i915_wait_request(dev, request->seqno);
    if (ret != 0) {
      goto ldv_23767;
    } else {

    }
    tmp___2 = list_empty((struct list_head  const  *)(& dev_priv->mm.inactive_list));
    if (tmp___2 == 0) {
      goto ldv_23771;
    } else {

    }
    goto ldv_23767;
  } else {

  }
  tmp___4 = list_empty((struct list_head  const  *)(& dev_priv->mm.flushing_list));
  if (tmp___4 == 0) {
    __mptr___1 = (struct list_head  const  *)dev_priv->mm.flushing_list.next;
    obj_priv = (struct drm_i915_gem_object *)__mptr___1 + 0xfffffffffffffff0UL;
    obj = obj_priv->obj;
    i915_gem_flush(dev, obj->write_domain, obj->write_domain);
    i915_add_request(dev, obj->write_domain);
    obj = 0;
    goto ldv_23771;
  } else {

  }
  tmp___5 = list_empty((struct list_head  const  *)(& dev_priv->mm.flushing_list));
  tmp___6 = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  tmp___7 = list_empty((struct list_head  const  *)(& dev_priv->mm.inactive_list));
  printk("<3>[drm:%s] *ERROR* inactive empty %d request empty %d flushing empty %d\n",
         "i915_gem_evict_something", tmp___7, tmp___6, tmp___5);
  return (-12);
  ldv_23771: ;
  goto ldv_23775;
  ldv_23767: ;
  return (ret);
}
}
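/* Populate obj_priv->page_list with one page pointer per 4 KiB of the
 * object, reading each page through the backing file's address space. */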
static int i915_gem_object_get_page_list(struct drm_gem_object *obj ) 
{ 
  struct drm_i915_gem_object *obj_priv ;
  int page_count___0 ;
  int i ;
  struct address_space *mapping ;
  struct inode *inode ;
  struct page *page ;
  int ret ;
  long tmp ;
  void *tmp___0 ;
  long tmp___1 ;
  long tmp___2 ;

  {
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((unsigned long )obj_priv->page_list != (unsigned long )((struct page **)0)) {
    return (0);
  } else {

  }
  page_count___0 = (int )(obj->size / 4096UL);
  tmp = ldv__builtin_expect((unsigned long )obj_priv->page_list != (unsigned long )((struct page **)0),
                         0L);
  if (tmp != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (1083), "i" (24UL));
    ldv_23786: ;
    goto ldv_23786;
  } else {

  }
  tmp___0 = drm_calloc___0((size_t )page_count___0, 8UL, 2);
  obj_priv->page_list = (struct page **)tmp___0;
  if ((unsigned long )obj_priv->page_list == (unsigned long )((struct page **)0)) {
    printk("<3>[drm:%s] *ERROR* Faled to allocate page list\n", "i915_gem_object_get_page_list");
    return (-12);
  } else {

  }
  inode = ((obj->filp)->f_path.dentry)->d_inode;
  mapping = inode->i_mapping;
  i = 0;
  goto ldv_23789;
  ldv_23788: 
  page = read_mapping_page(mapping, (unsigned long )i, 0);
  tmp___2 = IS_ERR((void const   *)page);
  if (tmp___2 != 0L) {
    tmp___1 = PTR_ERR((void const   *)page);
    ret = (int )tmp___1;
    printk("<3>[drm:%s] *ERROR* read_mapping_page failed: %d\n", "i915_gem_object_get_page_list",
           ret);
    i915_gem_object_free_page_list(obj);
    return (ret);
  } else {

  }
  *(obj_priv->page_list + (unsigned long )i) = page;
  i = i + 1;
  ldv_23789: ;
  if (i < page_count___0) {
    goto ldv_23788;
  } else {

  }

  return (0);
}
}
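/* Find (evicting objects if necessary) an aligned free block in the GTT,
 * pull in the object's pages and bind them through AGP at the chosen
 * offset, updating the device's GTT usage counters. */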
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj , unsigned int alignment ) 
{ 
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;
  struct drm_mm_node *free_space ;
  int page_count___0 ;
  int ret ;
  int tmp ;
  int tmp___0 ;
  int tmp___1 ;
  long tmp___2 ;
  long tmp___3 ;

  {
  dev = obj->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if (alignment == 0U) {
    alignment = 4096U;
  } else {

  }
  if (((unsigned long )alignment & 4095UL) != 0UL) {
    printk("<3>[drm:%s] *ERROR* Invalid object alignment requested %u\n", "i915_gem_object_bind_to_gtt",
           alignment);
    return (-22);
  } else {

  }
  search_free: 
  free_space = drm_mm_search_free((struct drm_mm  const  *)(& dev_priv->mm.gtt_space),
                                  obj->size, alignment, 0);
  if ((unsigned long )free_space != (unsigned long )((struct drm_mm_node *)0)) {
    obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, alignment);
    if ((unsigned long )obj_priv->gtt_space != (unsigned long )((struct drm_mm_node *)0)) {
      (obj_priv->gtt_space)->private = (void *)obj;
      obj_priv->gtt_offset = (uint32_t )(obj_priv->gtt_space)->start;
    } else {

    }
  } else {

  }
  if ((unsigned long )obj_priv->gtt_space == (unsigned long )((struct drm_mm_node *)0)) {
    tmp = list_empty((struct list_head  const  *)(& dev_priv->mm.inactive_list));
    if (tmp != 0) {
      tmp___0 = list_empty((struct list_head  const  *)(& dev_priv->mm.flushing_list));
      if (tmp___0 != 0) {
        tmp___1 = list_empty((struct list_head  const  *)(& dev_priv->mm.active_list));
        if (tmp___1 != 0) {
          printk("<3>[drm:%s] *ERROR* GTT full, but LRU list empty\n", "i915_gem_object_bind_to_gtt");
          return (-12);
        } else {

        }
      } else {

      }
    } else {

    }
    ret = i915_gem_evict_something(dev);
    if (ret != 0) {
      printk("<3>[drm:%s] *ERROR* Failed to evict a buffer %d\n", "i915_gem_object_bind_to_gtt",
             ret);
      return (ret);
    } else {

    }
    goto search_free;
  } else {

  }
  ret = i915_gem_object_get_page_list(obj);
  if (ret != 0) {
    drm_mm_put_block(obj_priv->gtt_space);
    obj_priv->gtt_space = 0;
    return (ret);
  } else {

  }
  page_count___0 = (int )(obj->size / 4096UL);
  obj_priv->agp_mem = drm_agp_bind_pages(dev, obj_priv->page_list, (unsigned long )page_count___0,
                                         obj_priv->gtt_offset);
  if ((unsigned long )obj_priv->agp_mem == (unsigned long )((struct agp_memory *)0)) {
    i915_gem_object_free_page_list(obj);
    drm_mm_put_block(obj_priv->gtt_space);
    obj_priv->gtt_space = 0;
    return (-12);
  } else {

  }
  atomic_inc(& dev->gtt_count);
  atomic_add((int )obj->size, & dev->gtt_memory);
  tmp___2 = ldv__builtin_expect((obj->read_domains & 4294967230U) != 0U, 0L);
  if (tmp___2 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (1190), "i" (24UL));
    ldv_23803: ;
    goto ldv_23803;
  } else {

  }
  tmp___3 = ldv__builtin_expect((obj->write_domain & 4294967230U) != 0U, 0L);
  if (tmp___3 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (1191), "i" (24UL));
    ldv_23804: ;
    goto ldv_23804;
  } else {

  }
  return (0);
}
}
void i915_gem_clflush_object(struct drm_gem_object *obj ) 
{ 
  struct drm_i915_gem_object *obj_priv ;

  {
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((unsigned long )obj_priv->page_list == (unsigned long )((struct page **)0)) {
    return;
  } else {

  }
  drm_clflush_pages(obj_priv->page_list, obj->size / 4096UL);
  return;
}
}
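/* Work out which domains must be invalidated or flushed to move the object
 * into the requested read/write domains, clflushing (and, if needed,
 * waiting for rendering) when the CPU domain is involved, and accumulate
 * the result into the device-wide invalidate_domains/flush_domains for a
 * later i915_gem_flush(). */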
static int i915_gem_object_set_domain(struct drm_gem_object *obj , uint32_t read_domains ,
                                      uint32_t write_domain ) 
{ 
  struct drm_device *dev ;
  struct drm_i915_gem_object *obj_priv ;
  uint32_t invalidate_domains ;
  uint32_t flush_domains ;
  int ret ;

  {
  dev = obj->dev;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  invalidate_domains = 0U;
  flush_domains = 0U;
  if (write_domain == 0U) {
    read_domains = obj->read_domains | read_domains;
  } else {
    obj_priv->dirty = 1;
  }
  if (obj->write_domain != 0U && obj->write_domain != read_domains) {
    flush_domains = obj->write_domain | flush_domains;
    invalidate_domains = (~ obj->write_domain & read_domains) | invalidate_domains;
  } else {

  }
  invalidate_domains = (~ obj->read_domains & read_domains) | invalidate_domains;
  if ((int )(flush_domains | invalidate_domains) & 1) {
    if ((int )invalidate_domains & 1 && (flush_domains & 4294967230U) != 0U) {
      ret = i915_gem_object_wait_rendering(obj);
      if (ret != 0) {
        return (ret);
      } else {

      }
    } else {

    }
    i915_gem_clflush_object(obj);
  } else {

  }
  if ((write_domain | flush_domains) != 0U) {
    obj->write_domain = write_domain;
  } else {

  }
  if ((unsigned long )obj_priv->page_cpu_valid != (unsigned long )((uint8_t *)0) && (write_domain != 0U || (int )read_domains & 1)) {
    drm_free((void *)obj_priv->page_cpu_valid, obj->size / 4096UL, 2);
    obj_priv->page_cpu_valid = 0;
  } else {

  }
  obj->read_domains = read_domains;
  dev->invalidate_domains = dev->invalidate_domains | invalidate_domains;
  dev->flush_domains = dev->flush_domains | flush_domains;
  return (0);
}
}
static int i915_gem_object_set_domain_range(struct drm_gem_object *obj , uint64_t offset ,
                                            uint64_t size , uint32_t read_domains ,
                                            uint32_t write_domain ) 
{ 
  struct drm_i915_gem_object *obj_priv ;
  int ret ;
  int i ;
  int tmp ;
  void *tmp___0 ;

  {
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((int )obj->read_domains & 1) {
    return (0);
  } else {

  }
  if (read_domains != 1U || write_domain != 0U) {
    tmp = i915_gem_object_set_domain(obj, read_domains, write_domain);
    return (tmp);
  } else {

  }
  if ((obj->write_domain & 4294967230U) != 0U) {
    ret = i915_gem_object_wait_rendering(obj);
    if (ret != 0) {
      return (ret);
    } else {

    }
  } else {

  }
  if ((unsigned long )obj_priv->page_cpu_valid == (unsigned long )((uint8_t *)0)) {
    tmp___0 = drm_calloc___0(1UL, obj->size / 4096UL, 2);
    obj_priv->page_cpu_valid = (uint8_t *)tmp___0;
  } else {

  }
  i = (int )(offset / 4096ULL);
  goto ldv_23831;
  ldv_23830: ;
  if ((unsigned int )*(obj_priv->page_cpu_valid + (unsigned long )i) != 0U) {
    goto ldv_23829;
  } else {

  }
  drm_clflush_pages(obj_priv->page_list + (unsigned long )i, 1UL);
  *(obj_priv->page_cpu_valid + (unsigned long )i) = 1U;
  ldv_23829: 
  i = i + 1;
  ldv_23831: ;
  if ((unsigned long long )i <= ((offset + size) - 1ULL) / 4096ULL) {
    goto ldv_23830;
  } else {

  }

  return (0);
}
}
static uint32_t i915_gem_dev_set_domain(struct drm_device *dev ) 
{ 
  uint32_t flush_domains ;

  {
  flush_domains = dev->flush_domains;
  if ((dev->invalidate_domains | dev->flush_domains) != 0U) {
    i915_gem_flush(dev, dev->invalidate_domains, dev->flush_domains);
    dev->invalidate_domains = 0U;
    dev->flush_domains = 0U;
  } else {

  }
  return (flush_domains);
}
}
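/* Pin the object into the GTT and apply each user-supplied relocation:
 * validate the target object and offset, then write the target's GTT
 * offset plus delta into the object through a write-combining mapping of
 * the aperture page containing the relocation, and copy the updated
 * presumed_offset back to user space. */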
static int i915_gem_object_pin_and_relocate(struct drm_gem_object *obj , struct drm_file *file_priv ,
                                            struct drm_i915_gem_exec_object *entry ) 
{ 
  struct drm_device *dev ;
  struct drm_i915_gem_relocation_entry reloc ;
  struct drm_i915_gem_relocation_entry *relocs ;
  struct drm_i915_gem_object *obj_priv ;
  int i ;
  int ret ;
  uint32_t last_reloc_offset ;
  void *reloc_page ;
  struct drm_gem_object *target_obj ;
  struct drm_i915_gem_object *target_obj_priv ;
  uint32_t reloc_val ;
  uint32_t reloc_offset ;
  uint32_t *reloc_entry ;
  unsigned long tmp ;
  unsigned long tmp___0 ;

  {
  dev = obj->dev;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  last_reloc_offset = 4294967295U;
  reloc_page = 0;
  ret = i915_gem_object_pin(obj, (unsigned int )entry->alignment);
  if (ret != 0) {
    return (ret);
  } else {

  }
  entry->offset = (uint64_t )obj_priv->gtt_offset;
  relocs = (struct drm_i915_gem_relocation_entry *)entry->relocs_ptr;
  i = 0;
  goto ldv_23858;
  ldv_23857: 
  tmp = copy_from_user((void *)(& reloc), (void const   *)relocs + (unsigned long )i,
                       32U);
  ret = (int )tmp;
  if (ret != 0) {
    i915_gem_object_unpin(obj);
    return (ret);
  } else {

  }
  target_obj = drm_gem_object_lookup(obj->dev, file_priv, (int )reloc.target_handle);
  if ((unsigned long )target_obj == (unsigned long )((struct drm_gem_object *)0)) {
    i915_gem_object_unpin(obj);
    return (-9);
  } else {

  }
  target_obj_priv = (struct drm_i915_gem_object *)target_obj->driver_private;
  if ((unsigned long )target_obj_priv->gtt_space == (unsigned long )((struct drm_mm_node *)0)) {
    printk("<3>[drm:%s] *ERROR* No GTT space found for object %d\n", "i915_gem_object_pin_and_relocate",
           reloc.target_handle);
    drm_gem_object_unreference(target_obj);
    i915_gem_object_unpin(obj);
    return (-22);
  } else {

  }
  if (reloc.offset > (unsigned long long )(obj->size - 4UL)) {
    printk("<3>[drm:%s] *ERROR* Relocation beyond object bounds: obj %p target %d offset %d size %d.\n",
           "i915_gem_object_pin_and_relocate", obj, reloc.target_handle, (int )reloc.offset,
           (int )obj->size);
    drm_gem_object_unreference(target_obj);
    i915_gem_object_unpin(obj);
    return (-22);
  } else {

  }
  if ((reloc.offset & 3ULL) != 0ULL) {
    printk("<3>[drm:%s] *ERROR* Relocation not 4-byte aligned: obj %p target %d offset %d.\n",
           "i915_gem_object_pin_and_relocate", obj, reloc.target_handle, (int )reloc.offset);
    drm_gem_object_unreference(target_obj);
    i915_gem_object_unpin(obj);
    return (-22);
  } else {

  }
  if ((reloc.write_domain != 0U && target_obj->pending_write_domain != 0U) && reloc.write_domain != target_obj->pending_write_domain) {
    printk("<3>[drm:%s] *ERROR* Write domain conflict: obj %p target %d offset %d new %08x old %08x\n",
           "i915_gem_object_pin_and_relocate", obj, reloc.target_handle, (int )reloc.offset,
           reloc.write_domain, target_obj->pending_write_domain);
    drm_gem_object_unreference(target_obj);
    i915_gem_object_unpin(obj);
    return (-22);
  } else {

  }
  target_obj->pending_read_domains = target_obj->pending_read_domains | reloc.read_domains;
  target_obj->pending_write_domain = target_obj->pending_write_domain | reloc.write_domain;
  if ((uint64_t )target_obj_priv->gtt_offset == reloc.presumed_offset) {
    drm_gem_object_unreference(target_obj);
    goto ldv_23856;
  } else {

  }
  i915_gem_object_wait_rendering(obj);
  if ((int )obj->write_domain & 1) {
    i915_gem_clflush_object(obj);
    drm_agp_chipset_flush(dev);
    obj->write_domain = 0U;
  } else {

  }
  reloc_offset = obj_priv->gtt_offset + (uint32_t )reloc.offset;
  if ((unsigned long )reloc_page == (unsigned long )((void *)0) || (((unsigned long )last_reloc_offset ^ (unsigned long )reloc_offset) & 0xfffffffffffff000UL) != 0UL) {
    if ((unsigned long )reloc_page != (unsigned long )((void *)0)) {
      iounmap((void volatile   *)reloc_page);
    } else {

    }
    reloc_page = ioremap_wc((dev->agp)->base + ((unsigned long )reloc_offset & 0xfffffffffffff000UL),
                            4096UL);
    last_reloc_offset = reloc_offset;
    if ((unsigned long )reloc_page == (unsigned long )((void *)0)) {
      drm_gem_object_unreference(target_obj);
      i915_gem_object_unpin(obj);
      return (-12);
    } else {

    }
  } else {

  }
  reloc_entry = (uint32_t *)(reloc_page + ((unsigned long )reloc_offset & 4095UL));
  reloc_val = target_obj_priv->gtt_offset + reloc.delta;
  writel(reloc_val, (void volatile   *)reloc_entry);
  reloc.presumed_offset = (uint64_t )target_obj_priv->gtt_offset;
  tmp___0 = copy_to_user((void *)relocs + (unsigned long )i, (void const   *)(& reloc),
                         32U);
  ret = (int )tmp___0;
  if (ret != 0) {
    drm_gem_object_unreference(target_obj);
    i915_gem_object_unpin(obj);
    return (ret);
  } else {

  }
  drm_gem_object_unreference(target_obj);
  ldv_23856: 
  i = i + 1;
  ldv_23858: ;
  if ((uint32_t )i < entry->relocation_count) {
    goto ldv_23857;
  } else {

  }

  if ((unsigned long )reloc_page != (unsigned long )((void *)0)) {
    iounmap((void volatile   *)reloc_page);
  } else {

  }
  return (0);
}
}
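/* Emit the batch-buffer start for the execbuffer once per clip rectangle
 * (or once when there are none), choosing between the four-dword and
 * two-dword batch-start command sequences by PCI ID. */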
static int i915_dispatch_gem_execbuffer(struct drm_device *dev , struct drm_i915_gem_execbuffer *exec ,
                                        uint64_t exec_offset ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_clip_rect *boxes ;
  int nbox ;
  int i ;
  int count ;
  uint32_t exec_start ;
  uint32_t exec_len ;
  unsigned int outring ;
  unsigned int ringmask ;
  unsigned int outcount ;
  char volatile   *virt ;
  int ret ;
  int tmp ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  boxes = (struct drm_clip_rect *)exec->cliprects_ptr;
  nbox = (int )exec->num_cliprects;
  i = 0;
  exec_start = (unsigned int )exec_offset + exec->batch_start_offset;
  exec_len = exec->batch_len;
  if (((exec_start | exec_len) & 7U) != 0U) {
    printk("<3>[drm:%s] *ERROR* alignment\n", "i915_dispatch_gem_execbuffer");
    return (-22);
  } else {

  }
  if (exec_start == 0U) {
    return (-22);
  } else {

  }
  count = nbox != 0 ? nbox : 1;
  i = 0;
  goto ldv_23879;
  ldv_23878: ;
  if (i < nbox) {
    tmp = i915_emit_box(dev, boxes, i, (int )exec->DR1, (int )exec->DR4);
    ret = tmp;
    if (ret != 0) {
      return (ret);
    } else {

    }
  } else {

  }
  if (dev->pci_device == 13687 || dev->pci_device == 9570) {
    if (dev_priv->ring.space <= 15) {
      i915_wait_ring(dev, 16, "i915_dispatch_gem_execbuffer");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 402653185U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = exec_start | 1U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = (exec_start + exec_len) - 4U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    *((unsigned int volatile   *)virt + (unsigned long )outring) = 0U;
    outcount = outcount + 1U;
    outring = outring + 4U;
    outring = outring & ringmask;
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  } else {
    if (dev_priv->ring.space <= 7) {
      i915_wait_ring(dev, 8, "i915_dispatch_gem_execbuffer");
    } else {

    }
    outcount = 0U;
    outring = (unsigned int )dev_priv->ring.tail;
    ringmask = (unsigned int )dev_priv->ring.tail_mask;
    virt = (char volatile   *)dev_priv->ring.virtual_start;
    if (((((((((dev->pci_device == 10610 || dev->pci_device == 10626) || dev->pci_device == 10642) || dev->pci_device == 10658) || dev->pci_device == 10754) || dev->pci_device == 10770) || dev->pci_device == 10818) || dev->pci_device == 11778) || dev->pci_device == 11794) || dev->pci_device == 11810) {
      *((unsigned int volatile   *)virt + (unsigned long )outring) = 411042176U;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
      *((unsigned int volatile   *)virt + (unsigned long )outring) = exec_start;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
    } else {
      *((unsigned int volatile   *)virt + (unsigned long )outring) = 411041920U;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
      *((unsigned int volatile   *)virt + (unsigned long )outring) = exec_start | 1U;
      outcount = outcount + 1U;
      outring = outring + 4U;
      outring = outring & ringmask;
    }
    dev_priv->ring.tail = (int )outring;
    dev_priv->ring.space = (int )((unsigned int )dev_priv->ring.space - outcount * 4U);
    writel(outring, (void volatile   *)dev_priv->regs + 8240U);
  }
  i = i + 1;
  ldv_23879: ;
  if (i < count) {
    goto ldv_23878;
  } else {

  }

  return (0);
}
}
static int i915_gem_ring_throttle(struct drm_device *dev , struct drm_file *file_priv ) 
{ 
  struct drm_i915_file_private *i915_file_priv ;
  int ret ;
  uint32_t seqno ;

  {
  i915_file_priv = (struct drm_i915_file_private *)file_priv->driver_priv;
  ret = 0;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  seqno = i915_file_priv->mm.last_gem_throttle_seqno;
  i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno;
  if (seqno != 0U) {
    ret = i915_wait_request(dev, seqno);
  } else {

  }
  mutex_unlock(& dev->struct_mutex);
  return (ret);
}
}
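/* The execbuffer ioctl: copy in the exec-object list, look up, pin and
 * relocate every object, resolve the accumulated domain changes, dispatch
 * the batch buffer (the last object in the list), emit a request and tag
 * every object with the new seqno on the active list, then copy the
 * updated offsets back to user space and drop all temporary references. */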
int i915_gem_execbuffer(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_i915_file_private *i915_file_priv ;
  struct drm_i915_gem_execbuffer *args ;
  struct drm_i915_gem_exec_object *exec_list ;
  struct drm_gem_object **object_list ;
  struct drm_gem_object *batch_obj ;
  int ret ;
  int i ;
  int pinned ;
  uint64_t exec_offset ;
  uint32_t seqno ;
  uint32_t flush_domains ;
  void *tmp ;
  void *tmp___0 ;
  unsigned long tmp___1 ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  long tmp___2 ;
  struct drm_gem_object *obj___0 ;
  struct drm_i915_gem_object *obj_priv___0 ;
  unsigned long tmp___3 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  i915_file_priv = (struct drm_i915_file_private *)file_priv->driver_priv;
  args = (struct drm_i915_gem_execbuffer *)data;
  exec_list = 0;
  object_list = 0;
  pinned = 0;
  if (args->buffer_count == 0U) {
    printk("<3>[drm:%s] *ERROR* execbuf with %d buffers\n", "i915_gem_execbuffer",
           args->buffer_count);
    return (-22);
  } else {

  }
  tmp = drm_calloc___0(32UL, (size_t )args->buffer_count, 2);
  exec_list = (struct drm_i915_gem_exec_object *)tmp;
  tmp___0 = drm_calloc___0(8UL, (size_t )args->buffer_count, 2);
  object_list = (struct drm_gem_object **)tmp___0;
  if ((unsigned long )exec_list == (unsigned long )((struct drm_i915_gem_exec_object *)0) || (unsigned long )object_list == (unsigned long )((struct drm_gem_object **)0)) {
    printk("<3>[drm:%s] *ERROR* Failed to allocate exec or object list for %d buffers\n",
           "i915_gem_execbuffer", args->buffer_count);
    ret = -12;
    goto pre_mutex_err;
  } else {

  }
  tmp___1 = copy_from_user((void *)exec_list, (void const   *)args->buffers_ptr, args->buffer_count * 32U);
  ret = (int )tmp___1;
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* copy %d exec entries failed %d\n", "i915_gem_execbuffer",
           args->buffer_count, ret);
    goto pre_mutex_err;
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  if (dev_priv->mm.wedged != 0) {
    printk("<3>[drm:%s] *ERROR* Execbuf while wedged\n", "i915_gem_execbuffer");
    mutex_unlock(& dev->struct_mutex);
    return (-5);
  } else {

  }
  if (dev_priv->mm.suspended != 0) {
    printk("<3>[drm:%s] *ERROR* Execbuf while VT-switched.\n", "i915_gem_execbuffer");
    mutex_unlock(& dev->struct_mutex);
    return (-16);
  } else {

  }
  dev->invalidate_domains = 0U;
  dev->flush_domains = 0U;
  i = 0;
  goto ldv_23910;
  ldv_23909: 
  *(object_list + (unsigned long )i) = drm_gem_object_lookup(dev, file_priv, (int )(exec_list + (unsigned long )i)->handle);
  if ((unsigned long )*(object_list + (unsigned long )i) == (unsigned long )((struct drm_gem_object *)0)) {
    printk("<3>[drm:%s] *ERROR* Invalid object handle %d at index %d\n", "i915_gem_execbuffer",
           (exec_list + (unsigned long )i)->handle, i);
    ret = -9;
    goto err;
  } else {

  }
  (*(object_list + (unsigned long )i))->pending_read_domains = 0U;
  (*(object_list + (unsigned long )i))->pending_write_domain = 0U;
  ret = i915_gem_object_pin_and_relocate(*(object_list + (unsigned long )i), file_priv,
                                         exec_list + (unsigned long )i);
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* object bind and relocate failed %d\n", "i915_gem_execbuffer",
           ret);
    goto err;
  } else {

  }
  pinned = i + 1;
  i = i + 1;
  ldv_23910: ;
  if ((uint32_t )i < args->buffer_count) {
    goto ldv_23909;
  } else {

  }
  batch_obj = *(object_list + (unsigned long )(args->buffer_count - 1U));
  batch_obj->pending_read_domains = 8U;
  batch_obj->pending_write_domain = 0U;
  i = 0;
  goto ldv_23915;
  ldv_23914: 
  obj = *(object_list + (unsigned long )i);
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((unsigned long )obj_priv->gtt_space == (unsigned long )((struct drm_mm_node *)0)) {
    ret = -12;
    goto err;
  } else {

  }
  ret = i915_gem_object_set_domain(obj, obj->pending_read_domains, obj->pending_write_domain);
  if (ret != 0) {
    goto err;
  } else {

  }
  i = i + 1;
  ldv_23915: ;
  if ((uint32_t )i < args->buffer_count) {
    goto ldv_23914;
  } else {

  }
  flush_domains = i915_gem_dev_set_domain(dev);
  exec_offset = (exec_list + (unsigned long )(args->buffer_count - 1U))->offset;
  i915_add_request(dev, flush_domains);
  ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* dispatch failed %d\n", "i915_gem_execbuffer", ret);
    goto err;
  } else {

  }
  flush_domains = i915_retire_commands(dev);
  seqno = i915_add_request(dev, flush_domains);
  tmp___2 = ldv__builtin_expect(seqno == 0U, 0L);
  if (tmp___2 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (1936), "i" (24UL));
    ldv_23917: ;
    goto ldv_23917;
  } else {

  }
  i915_file_priv->mm.last_gem_seqno = seqno;
  i = 0;
  goto ldv_23921;
  ldv_23920: 
  obj___0 = *(object_list + (unsigned long )i);
  obj_priv___0 = (struct drm_i915_gem_object *)obj___0->driver_private;
  i915_gem_object_move_to_active(obj___0);
  obj_priv___0->last_rendering_seqno = seqno;
  i = i + 1;
  ldv_23921: ;
  if ((uint32_t )i < args->buffer_count) {
    goto ldv_23920;
  } else {

  }
  tmp___3 = copy_to_user((void *)args->buffers_ptr, (void const   *)exec_list, args->buffer_count * 32U);
  ret = (int )tmp___3;
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* failed to copy %d exec entries back to user (%d)\n",
           "i915_gem_execbuffer", args->buffer_count, ret);
  } else {

  }
  err: ;
  if ((unsigned long )object_list != (unsigned long )((struct drm_gem_object **)0)) {
    i = 0;
    goto ldv_23924;
    ldv_23923: 
    i915_gem_object_unpin(*(object_list + (unsigned long )i));
    i = i + 1;
    ldv_23924: ;
    if (i < pinned) {
      goto ldv_23923;
    } else {

    }
    i = 0;
    goto ldv_23927;
    ldv_23926: 
    drm_gem_object_unreference(*(object_list + (unsigned long )i));
    i = i + 1;
    ldv_23927: ;
    if ((uint32_t )i < args->buffer_count) {
      goto ldv_23926;
    } else {

    }

  } else {

  }
  mutex_unlock(& dev->struct_mutex);
  pre_mutex_err: 
  drm_free((void *)object_list, (unsigned long )args->buffer_count * 8UL, 2);
  drm_free((void *)exec_list, (unsigned long )args->buffer_count * 32UL, 2);
  return (ret);
}
}
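/* Bind the object into the GTT if it is not already bound and take a pin
 * reference; the first pin removes an idle, clean object from the LRU
 * lists and bumps the device pin accounting. */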
int i915_gem_object_pin(struct drm_gem_object *obj , uint32_t alignment ) 
{ 
  struct drm_device *dev ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;
  int tmp ;

  {
  dev = obj->dev;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  if ((unsigned long )obj_priv->gtt_space == (unsigned long )((struct drm_mm_node *)0)) {
    ret = i915_gem_object_bind_to_gtt(obj, alignment);
    if (ret != 0) {
      printk("<3>[drm:%s] *ERROR* Failure to bind: %d", "i915_gem_object_pin", ret);
      return (ret);
    } else {

    }
  } else {

  }
  obj_priv->pin_count = obj_priv->pin_count + 1;
  if (obj_priv->pin_count == 1) {
    atomic_inc(& dev->pin_count);
    atomic_add((int )obj->size, & dev->pin_memory);
    if (obj_priv->active == 0 && (obj->write_domain & 4294967230U) == 0U) {
      tmp = list_empty((struct list_head  const  *)(& obj_priv->list));
      if (tmp == 0) {
        list_del_init(& obj_priv->list);
      } else {

      }
    } else {

    }
  } else {

  }
  return (0);
}
}
void i915_gem_object_unpin(struct drm_gem_object *obj ) 
{ 
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;
  long tmp ;
  long tmp___0 ;

  {
  dev = obj->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  obj_priv->pin_count = obj_priv->pin_count - 1;
  tmp = ldv__builtin_expect(obj_priv->pin_count < 0, 0L);
  if (tmp != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2025), "i" (24UL));
    ldv_23943: ;
    goto ldv_23943;
  } else {

  }
  tmp___0 = ldv__builtin_expect((unsigned long )obj_priv->gtt_space == (unsigned long )((struct drm_mm_node *)0),
                             0L);
  if (tmp___0 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2026), "i" (24UL));
    ldv_23944: ;
    goto ldv_23944;
  } else {

  }
  if (obj_priv->pin_count == 0) {
    if (obj_priv->active == 0 && (obj->write_domain & 4294967230U) == 0U) {
      list_move_tail(& obj_priv->list, & dev_priv->mm.inactive_list);
    } else {

    }
    atomic_dec(& dev->pin_count);
    atomic_sub((int )obj->size, & dev->pin_memory);
  } else {

  }
  return;
}
}
int i915_gem_pin_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_pin *args ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;

  {
  args = (struct drm_i915_gem_pin *)data;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    printk("<3>[drm:%s] *ERROR* Bad handle in i915_gem_pin_ioctl(): %d\n", "i915_gem_pin_ioctl",
           args->handle);
    mutex_unlock(& dev->struct_mutex);
    return (-9);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  ret = i915_gem_object_pin(obj, (uint32_t )args->alignment);
  if (ret != 0) {
    drm_gem_object_unreference(obj);
    mutex_unlock(& dev->struct_mutex);
    return (ret);
  } else {

  }
  if ((int )obj->write_domain & 1) {
    i915_gem_clflush_object(obj);
    drm_agp_chipset_flush(dev);
    obj->write_domain = 0U;
  } else {

  }
  args->offset = (uint64_t )obj_priv->gtt_offset;
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  return (0);
}
}
int i915_gem_unpin_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_pin *args ;
  struct drm_gem_object *obj ;

  {
  args = (struct drm_i915_gem_pin *)data;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    printk("<3>[drm:%s] *ERROR* Bad handle in i915_gem_unpin_ioctl(): %d\n", "i915_gem_unpin_ioctl",
           args->handle);
    mutex_unlock(& dev->struct_mutex);
    return (-9);
  } else {

  }
  i915_gem_object_unpin(obj);
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  return (0);
}
}
int i915_gem_busy_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_busy *args ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;

  {
  args = (struct drm_i915_gem_busy *)data;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    printk("<3>[drm:%s] *ERROR* Bad handle in i915_gem_busy_ioctl(): %d\n", "i915_gem_busy_ioctl",
           args->handle);
    mutex_unlock(& dev->struct_mutex);
    return (-9);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  args->busy = (uint32_t )obj_priv->active;
  drm_gem_object_unreference(obj);
  mutex_unlock(& dev->struct_mutex);
  return (0);
}
}
int i915_gem_throttle_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  int tmp ;

  {
  tmp = i915_gem_ring_throttle(dev, file_priv);
  return (tmp);
}
}
int i915_gem_init_object(struct drm_gem_object *obj ) 
{ 
  struct drm_i915_gem_object *obj_priv ;
  void *tmp ;

  {
  tmp = drm_calloc___0(1UL, 88UL, 2);
  obj_priv = (struct drm_i915_gem_object *)tmp;
  if ((unsigned long )obj_priv == (unsigned long )((struct drm_i915_gem_object *)0)) {
    return (-12);
  } else {

  }
  obj->write_domain = 1U;
  obj->read_domains = 1U;
  obj->driver_private = (void *)obj_priv;
  obj_priv->obj = obj;
  INIT_LIST_HEAD(& obj_priv->list);
  return (0);
}
}
void i915_gem_free_object(struct drm_gem_object *obj ) 
{ 
  struct drm_i915_gem_object *obj_priv ;

  {
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  goto ldv_23986;
  ldv_23985: 
  i915_gem_object_unpin(obj);
  ldv_23986: ;
  if (obj_priv->pin_count > 0) {
    goto ldv_23985;
  } else {

  }
  i915_gem_object_unbind(obj);
  drm_free((void *)obj_priv->page_cpu_valid, 1UL, 2);
  drm_free(obj->driver_private, 1UL, 2);
  return;
}
}
static int i915_gem_set_domain(struct drm_gem_object *obj , struct drm_file *file_priv ,
                               uint32_t read_domains , uint32_t write_domain ) 
{ 
  struct drm_device *dev ;
  int ret ;
  uint32_t flush_domains ;
  int tmp ;
  long tmp___0 ;

  {
  dev = obj->dev;
  tmp = mutex_is_locked(& dev->struct_mutex);
  tmp___0 = ldv__builtin_expect(tmp == 0, 0L);
  if (tmp___0 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2188), "i" (24UL));
    ldv_23997: ;
    goto ldv_23997;
  } else {

  }
  ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
  if (ret != 0) {
    return (ret);
  } else {

  }
  flush_domains = i915_gem_dev_set_domain(obj->dev);
  if ((flush_domains & 4294967230U) != 0U) {
    i915_add_request(dev, flush_domains);
  } else {

  }
  return (0);
}
}
static int i915_gem_evict_from_list(struct drm_device *dev , struct list_head *head ) 
{ 
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;
  struct list_head  const  *__mptr ;
  int tmp ;

  {
  goto ldv_24009;
  ldv_24008: 
  __mptr = (struct list_head  const  *)head->next;
  obj_priv = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffff0UL;
  obj = obj_priv->obj;
  if (obj_priv->pin_count != 0) {
    printk("<3>[drm:%s] *ERROR* Pinned object in unbind list\n", "i915_gem_evict_from_list");
    mutex_unlock(& dev->struct_mutex);
    return (-22);
  } else {

  }
  ret = i915_gem_object_unbind(obj);
  if (ret != 0) {
    printk("<3>[drm:%s] *ERROR* Error unbinding object in LeaveVT: %d\n", "i915_gem_evict_from_list",
           ret);
    mutex_unlock(& dev->struct_mutex);
    return (ret);
  } else {

  }
  ldv_24009: 
  tmp = list_empty((struct list_head  const  *)head);
  if (tmp == 0) {
    goto ldv_24008;
  } else {

  }

  return (0);
}
}
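/* Quiesce the GPU and mark GEM suspended: flush all GPU domains, emit a
 * final request and poll it in 10 ms steps, marking the hardware wedged if
 * the reported seqno stops advancing for more than 100 polls; then retire
 * everything and evict the inactive list. */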
static int i915_gem_idle(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  uint32_t seqno ;
  uint32_t cur_seqno ;
  uint32_t last_seqno ;
  int stuck ;
  int ret ;
  int tmp ;
  int tmp___0 ;
  int tmp___1 ;
  long tmp___2 ;
  int tmp___3 ;
  long tmp___4 ;
  int tmp___5 ;
  long tmp___6 ;
  int tmp___7 ;
  long tmp___8 ;
  int tmp___9 ;
  long tmp___10 ;
  int tmp___11 ;
  long tmp___12 ;
  int tmp___13 ;
  long tmp___14 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if (dev_priv->mm.suspended != 0) {
    return (0);
  } else {

  }
  dev_priv->mm.suspended = 1;
  i915_kernel_lost_context(dev);
  i915_gem_flush(dev, 4294967230U, 4294967230U);
  seqno = i915_add_request(dev, 4294967230U);
  if (seqno == 0U) {
    mutex_unlock(& dev->struct_mutex);
    return (-12);
  } else {

  }
  dev_priv->mm.waiting_gem_seqno = seqno;
  last_seqno = 0U;
  stuck = 0;
  ldv_24022: 
  cur_seqno = i915_get_gem_seqno(dev);
  tmp = i915_seqno_passed(cur_seqno, seqno);
  if (tmp != 0) {
    goto ldv_24020;
  } else {

  }
  if (last_seqno == cur_seqno) {
    tmp___0 = stuck;
    stuck = stuck + 1;
    if (tmp___0 > 100) {
      printk("<3>[drm:%s] *ERROR* hardware wedged\n", "i915_gem_idle");
      dev_priv->mm.wedged = 1;
      __wake_up(& dev_priv->irq_queue, 1U, 1, 0);
      goto ldv_24020;
    } else {

    }
  } else {

  }
  msleep(10U);
  last_seqno = cur_seqno;
  goto ldv_24022;
  ldv_24020: 
  dev_priv->mm.waiting_gem_seqno = 0U;
  i915_gem_retire_requests(dev);
  tmp___1 = list_empty((struct list_head  const  *)(& dev_priv->mm.active_list));
  tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L);
  if (tmp___2 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2288), "i" (24UL));
    ldv_24023: ;
    goto ldv_24023;
  } else {

  }
  tmp___3 = list_empty((struct list_head  const  *)(& dev_priv->mm.flushing_list));
  tmp___4 = ldv__builtin_expect(tmp___3 == 0, 0L);
  if (tmp___4 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2289), "i" (24UL));
    ldv_24024: ;
    goto ldv_24024;
  } else {

  }
  tmp___5 = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  tmp___6 = ldv__builtin_expect(tmp___5 == 0, 0L);
  if (tmp___6 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2294), "i" (24UL));
    ldv_24025: ;
    goto ldv_24025;
  } else {

  }
  ret = i915_gem_evict_from_list(dev, & dev_priv->mm.inactive_list);
  if (ret != 0) {
    return (ret);
  } else {

  }
  tmp___7 = list_empty((struct list_head  const  *)(& dev_priv->mm.active_list));
  tmp___8 = ldv__builtin_expect(tmp___7 == 0, 0L);
  if (tmp___8 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2301), "i" (24UL));
    ldv_24026: ;
    goto ldv_24026;
  } else {

  }
  tmp___9 = list_empty((struct list_head  const  *)(& dev_priv->mm.flushing_list));
  tmp___10 = ldv__builtin_expect(tmp___9 == 0, 0L);
  if (tmp___10 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2302), "i" (24UL));
    ldv_24027: ;
    goto ldv_24027;
  } else {

  }
  tmp___11 = list_empty((struct list_head  const  *)(& dev_priv->mm.inactive_list));
  tmp___12 = ldv__builtin_expect(tmp___11 == 0, 0L);
  if (tmp___12 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2303), "i" (24UL));
    ldv_24028: ;
    goto ldv_24028;
  } else {

  }
  tmp___13 = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  tmp___14 = ldv__builtin_expect(tmp___13 == 0, 0L);
  if (tmp___14 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2304), "i" (24UL));
    ldv_24029: ;
    goto ldv_24029;
  } else {

  }
  return (0);
}
}
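/*
 * i915_gem_init_hws(): allocate a 4 KiB GEM object for the hardware
 * status page, pin it, map it write-combined through the GTT, clear it
 * and program its GTT offset into the status-page register (the write
 * at offset 8320, which appears to be HWS_PGA).  The leading PCI-ID
 * check skips devices that do not use a GTT-based status page.
 */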
static int i915_gem_init_hws(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((((dev->pci_device != 10690 && dev->pci_device != 10674) && dev->pci_device != 10706) && dev->pci_device != 10818) && ((dev->pci_device != 11778 && dev->pci_device != 11794) && dev->pci_device != 11810)) {
    return (0);
  } else {

  }
  obj = drm_gem_object_alloc(dev, 4096UL);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    printk("<3>[drm:%s] *ERROR* Failed to allocate status page\n", "i915_gem_init_hws");
    return (-12);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  ret = i915_gem_object_pin(obj, 4096U);
  if (ret != 0) {
    drm_gem_object_unreference(obj);
    return (ret);
  } else {

  }
  dev_priv->status_gfx_addr = obj_priv->gtt_offset;
  dev_priv->hws_map.offset = (dev->agp)->base + (unsigned long )obj_priv->gtt_offset;
  dev_priv->hws_map.size = 4096UL;
  dev_priv->hws_map.type = _DRM_FRAME_BUFFER;
  dev_priv->hws_map.flags = 0;
  dev_priv->hws_map.mtrr = 0;
  drm_core_ioremap_wc(& dev_priv->hws_map, dev);
  if ((unsigned long )dev_priv->hws_map.handle == (unsigned long )((void *)0)) {
    printk("<3>[drm:%s] *ERROR* Failed to map status page.\n", "i915_gem_init_hws");
    memset((void *)(& dev_priv->hws_map), 0, 40UL);
    drm_gem_object_unreference(obj);
    return (-22);
  } else {

  }
  dev_priv->hws_obj = obj;
  dev_priv->hw_status_page = dev_priv->hws_map.handle;
  memset(dev_priv->hw_status_page, 0, 4096UL);
  writel(dev_priv->status_gfx_addr, (void volatile   *)dev_priv->regs + 8320U);
  if (drm_debug != 0U) {
    printk("<7>[drm:%s] hws offset: 0x%08x\n", "i915_gem_init_hws", dev_priv->status_gfx_addr);
  } else {

  }
  return (0);
}
}
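/*
 * i915_gem_init_ringbuffer(): set up the status page, then allocate
 * and pin a 128 KiB GEM object for the render ring, map it
 * write-combined and program the ring registers (the writes at
 * offsets 8240-8252 appear to be the legacy ring TAIL/HEAD/START/CTL
 * block) before resetting the software ring state with
 * i915_kernel_lost_context().
 */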
static int i915_gem_init_ringbuffer(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;
  int ret ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  ret = i915_gem_init_hws(dev);
  if (ret != 0) {
    return (ret);
  } else {

  }
  obj = drm_gem_object_alloc(dev, 131072UL);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    printk("<3>[drm:%s] *ERROR* Failed to allocate ringbuffer\n", "i915_gem_init_ringbuffer");
    return (-12);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  ret = i915_gem_object_pin(obj, 4096U);
  if (ret != 0) {
    drm_gem_object_unreference(obj);
    return (ret);
  } else {

  }
  dev_priv->ring.Size = obj->size;
  dev_priv->ring.tail_mask = (int )((unsigned int )obj->size - 1U);
  dev_priv->ring.map.offset = (dev->agp)->base + (unsigned long )obj_priv->gtt_offset;
  dev_priv->ring.map.size = obj->size;
  dev_priv->ring.map.type = _DRM_FRAME_BUFFER;
  dev_priv->ring.map.flags = 0;
  dev_priv->ring.map.mtrr = 0;
  drm_core_ioremap_wc(& dev_priv->ring.map, dev);
  if ((unsigned long )dev_priv->ring.map.handle == (unsigned long )((void *)0)) {
    printk("<3>[drm:%s] *ERROR* Failed to map ringbuffer.\n", "i915_gem_init_ringbuffer");
    memset((void *)(& dev_priv->ring), 0, 88UL);
    drm_gem_object_unreference(obj);
    return (-22);
  } else {

  }
  dev_priv->ring.ring_obj = obj;
  dev_priv->ring.virtual_start = (u8 *)dev_priv->ring.map.handle;
  writel(0U, (void volatile   *)dev_priv->regs + 8252U);
  writel(0U, (void volatile   *)dev_priv->regs + 8244U);
  writel(0U, (void volatile   *)dev_priv->regs + 8240U);
  writel(0U, (void volatile   *)dev_priv->regs + 8248U);
  writel(obj_priv->gtt_offset, (void volatile   *)dev_priv->regs + 8248U);
  writel((((unsigned int )obj->size - 4096U) & 2093056U) | 1U, (void volatile   *)dev_priv->regs + 8252U);
  i915_kernel_lost_context(dev);
  return (0);
}
}
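/*
 * i915_gem_cleanup_ringbuffer(): unmap the ring, unpin and release the
 * ring and status-page objects, and restore the status-page register
 * to its default value.
 */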
static void i915_gem_cleanup_ringbuffer(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if ((unsigned long )dev_priv->ring.ring_obj == (unsigned long )((struct drm_gem_object *)0)) {
    return;
  } else {

  }
  drm_core_ioremapfree(& dev_priv->ring.map, dev);
  i915_gem_object_unpin(dev_priv->ring.ring_obj);
  drm_gem_object_unreference(dev_priv->ring.ring_obj);
  dev_priv->ring.ring_obj = (struct drm_gem_object *)0;
  memset((void *)(& dev_priv->ring), 0, 88UL);
  if ((unsigned long )dev_priv->hws_obj != (unsigned long )((struct drm_gem_object *)0)) {
    i915_gem_object_unpin(dev_priv->hws_obj);
    drm_gem_object_unreference(dev_priv->hws_obj);
    dev_priv->hws_obj = (struct drm_gem_object *)0;
    memset((void *)(& dev_priv->hws_map), 0, 40UL);
    writel(536866816U, (void volatile   *)dev_priv->regs + 8320U);
  } else {

  }
  return;
}
}
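/*
 * i915_gem_entervt_ioctl(): re-enable GEM when the DRM master enters
 * the VT.  Clears the wedged flag, re-creates the ring buffer and,
 * under struct_mutex, asserts that all object/request lists are empty
 * (the asm blocks are CIL-expanded BUG_ON checks) before clearing
 * mm.suspended and installing the IRQ handler.
 */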
int i915_gem_entervt_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  drm_i915_private_t *dev_priv ;
  int ret ;
  int tmp ;
  long tmp___0 ;
  int tmp___1 ;
  long tmp___2 ;
  int tmp___3 ;
  long tmp___4 ;
  int tmp___5 ;
  long tmp___6 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  if (dev_priv->mm.wedged != 0) {
    printk("<3>[drm:%s] *ERROR* Reenabling wedged hardware, good luck\n", "i915_gem_entervt_ioctl");
    dev_priv->mm.wedged = 0;
  } else {

  }
  ret = i915_gem_init_ringbuffer(dev);
  if (ret != 0) {
    return (ret);
  } else {

  }
  mutex_lock_nested(& dev->struct_mutex, 0U);
  tmp = list_empty((struct list_head  const  *)(& dev_priv->mm.active_list));
  tmp___0 = ldv__builtin_expect(tmp == 0, 0L);
  if (tmp___0 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2467), "i" (24UL));
    ldv_24058: ;
    goto ldv_24058;
  } else {

  }
  tmp___1 = list_empty((struct list_head  const  *)(& dev_priv->mm.flushing_list));
  tmp___2 = ldv__builtin_expect(tmp___1 == 0, 0L);
  if (tmp___2 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2468), "i" (24UL));
    ldv_24059: ;
    goto ldv_24059;
  } else {

  }
  tmp___3 = list_empty((struct list_head  const  *)(& dev_priv->mm.inactive_list));
  tmp___4 = ldv__builtin_expect(tmp___3 == 0, 0L);
  if (tmp___4 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2469), "i" (24UL));
    ldv_24060: ;
    goto ldv_24060;
  } else {

  }
  tmp___5 = list_empty((struct list_head  const  *)(& dev_priv->mm.request_list));
  tmp___6 = ldv__builtin_expect(tmp___5 == 0, 0L);
  if (tmp___6 != 0L) {
    __asm__  volatile   ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.quad 1b, %c0\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/gpu/drm/i915/i915.ko--X--defaultlinux--X--43_1a--X--cpachecker/linux/csd_deg_dscv/21/dscv_tempdir/dscv/ri/43_1a/drivers/gpu/drm/i915/i915_gem.c.prepared"),
                         "i" (2470), "i" (24UL));
    ldv_24061: ;
    goto ldv_24061;
  } else {

  }
  dev_priv->mm.suspended = 0;
  mutex_unlock(& dev->struct_mutex);
  drm_irq_install(dev);
  return (0);
}
}
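/*
 * i915_gem_leavevt_ioctl(): quiesce the GPU via i915_gem_idle() and,
 * if that succeeds, tear down the ring buffer; the IRQ handler is
 * removed unconditionally.
 */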
int i915_gem_leavevt_ioctl(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  int ret ;

  {
  mutex_lock_nested(& dev->struct_mutex, 0U);
  ret = i915_gem_idle(dev);
  if (ret == 0) {
    i915_gem_cleanup_ringbuffer(dev);
  } else {

  }
  mutex_unlock(& dev->struct_mutex);
  drm_irq_uninstall(dev);
  return (0);
}
}
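/*
 * i915_gem_lastclose(): on last close of the device node, idle the
 * hardware (logging any failure) and tear down the ring buffer if one
 * was set up.
 */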
void i915_gem_lastclose(struct drm_device *dev ) 
{ 
  int ret ;
  drm_i915_private_t *dev_priv ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  if ((unsigned long )dev_priv->ring.ring_obj != (unsigned long )((struct drm_gem_object *)0)) {
    ret = i915_gem_idle(dev);
    if (ret != 0) {
      printk("<3>[drm:%s] *ERROR* failed to idle hardware: %d\n", "i915_gem_lastclose",
             ret);
    } else {

    }
    i915_gem_cleanup_ringbuffer(dev);
  } else {

  }
  mutex_unlock(& dev->struct_mutex);
  return;
}
}
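/*
 * i915_gem_load(): driver-load initialization of the GEM memory
 * manager: the active/flushing/inactive/request lists, the delayed
 * retire work and the vblank work (the __constr_expr_* and
 * lockdep_init_map statements are the CIL expansion of the
 * INIT_WORK-style macros), the initial seqno, and bit-6 swizzle
 * detection.
 */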
void i915_gem_load(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  struct lock_class_key __key ;
  atomic_long_t __constr_expr_0 ;
  struct lock_class_key __key___0 ;
  atomic_long_t __constr_expr_1 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  INIT_LIST_HEAD(& dev_priv->mm.active_list);
  INIT_LIST_HEAD(& dev_priv->mm.flushing_list);
  INIT_LIST_HEAD(& dev_priv->mm.inactive_list);
  INIT_LIST_HEAD(& dev_priv->mm.request_list);
  __constr_expr_0.counter = 0L;
  dev_priv->mm.retire_work.work.data = __constr_expr_0;
  lockdep_init_map(& dev_priv->mm.retire_work.work.lockdep_map, "&(&dev_priv->mm.retire_work)->work",
                   & __key, 0);
  INIT_LIST_HEAD(& dev_priv->mm.retire_work.work.entry);
  dev_priv->mm.retire_work.work.func = & i915_gem_retire_work_handler;
  init_timer(& dev_priv->mm.retire_work.timer);
  __constr_expr_1.counter = 0L;
  dev_priv->mm.vblank_work.data = __constr_expr_1;
  lockdep_init_map(& dev_priv->mm.vblank_work.lockdep_map, "&dev_priv->mm.vblank_work",
                   & __key___0, 0);
  INIT_LIST_HEAD(& dev_priv->mm.vblank_work.entry);
  dev_priv->mm.vblank_work.func = & i915_gem_vblank_work_handler;
  dev_priv->mm.next_gem_seqno = 1U;
  i915_gem_detect_bit_6_swizzle(dev);
  return;
}
}
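/*
 * The ldv_* functions below are LDV verification stubs wrapping the
 * kernel allocators: each calls ldv_check_alloc_flags() on the gfp
 * mask (the property under verification) and then forwards to the
 * real allocator.  Note that the kmem_cache_alloc wrappers discard
 * the allocation result and always return NULL, modelling allocation
 * failure.
 */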
unsigned long ldv___get_free_pages_98(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_100(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_104(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_108(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
struct page *ldv_alloc_page_vma_124(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_114(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_116(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_120(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
unsigned long ldv___get_free_pages_114(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_116(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_120(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_124(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
extern int sprintf(char * , char const   *  , ...) ;
struct page *ldv_alloc_page_vma_140(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_130(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_132(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_136(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
extern struct proc_dir_entry *create_proc_entry(char const   * , mode_t  , struct proc_dir_entry * ) ;
extern void remove_proc_entry(char const   * , struct proc_dir_entry * ) ;
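/*
 * The i915_gem_*_info() handlers below implement the legacy read_proc
 * interface: each formats one line per object (or request) from the
 * corresponding dev_priv->mm list into the 4 KiB proc page, bailing
 * out once the buffer is nearly full (the repeated "len > 4016"
 * checks) and reporting end-of-file through *eof.
 */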
static int i915_gem_active_info(char *buf , char **start , off_t offset , int request ,
                                int *eof , void *data ) 
{ 
  struct drm_minor *minor ;
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;
  int len ;
  int tmp ;
  struct list_head  const  *__mptr ;
  struct drm_gem_object *obj ;
  int tmp___0 ;
  int tmp___1 ;
  struct list_head  const  *__mptr___0 ;

  {
  minor = (struct drm_minor *)data;
  dev = minor->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  len = 0;
  if ((unsigned long )offset > 4016UL) {
    *eof = 1;
    return (0);
  } else {

  }
  *start = buf + (unsigned long )offset;
  *eof = 0;
  tmp = sprintf(buf + (unsigned long )len, "Active:\n");
  len = tmp + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  __mptr = (struct list_head  const  *)dev_priv->mm.active_list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffff0UL;
  goto ldv_23261;
  ldv_23260: 
  obj = obj_priv->obj;
  if (obj->name != 0) {
    tmp___0 = sprintf(buf + (unsigned long )len, "    %p(%d): %08x %08x %d\n", obj,
                      obj->name, obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno);
    len = tmp___0 + len;
    if ((unsigned int )len > 4016U) {
      *eof = 1;
      return ((int )((unsigned int )len - (unsigned int )offset));
    } else {

    }
  } else {
    tmp___1 = sprintf(buf + (unsigned long )len, "       %p: %08x %08x %d\n", obj,
                      obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno);
    len = tmp___1 + len;
    if ((unsigned int )len > 4016U) {
      *eof = 1;
      return ((int )((unsigned int )len - (unsigned int )offset));
    } else {

    }
  }
  __mptr___0 = (struct list_head  const  *)obj_priv->list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffff0UL;
  ldv_23261: 
  __builtin_prefetch((void const   *)obj_priv->list.next);
  if ((unsigned long )(& obj_priv->list) != (unsigned long )(& dev_priv->mm.active_list)) {
    goto ldv_23260;
  } else {

  }

  if ((off_t )len > (off_t )request + offset) {
    return (request);
  } else {

  }
  *eof = 1;
  return ((int )((unsigned int )len - (unsigned int )offset));
}
}
static int i915_gem_flushing_info(char *buf , char **start , off_t offset , int request ,
                                  int *eof , void *data ) 
{ 
  struct drm_minor *minor ;
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;
  int len ;
  int tmp ;
  struct list_head  const  *__mptr ;
  struct drm_gem_object *obj ;
  int tmp___0 ;
  int tmp___1 ;
  struct list_head  const  *__mptr___0 ;

  {
  minor = (struct drm_minor *)data;
  dev = minor->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  len = 0;
  if ((unsigned long )offset > 4016UL) {
    *eof = 1;
    return (0);
  } else {

  }
  *start = buf + (unsigned long )offset;
  *eof = 0;
  tmp = sprintf(buf + (unsigned long )len, "Flushing:\n");
  len = tmp + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  __mptr = (struct list_head  const  *)dev_priv->mm.flushing_list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffff0UL;
  goto ldv_23282;
  ldv_23281: 
  obj = obj_priv->obj;
  if (obj->name != 0) {
    tmp___0 = sprintf(buf + (unsigned long )len, "    %p(%d): %08x %08x %d\n", obj,
                      obj->name, obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno);
    len = tmp___0 + len;
    if ((unsigned int )len > 4016U) {
      *eof = 1;
      return ((int )((unsigned int )len - (unsigned int )offset));
    } else {

    }
  } else {
    tmp___1 = sprintf(buf + (unsigned long )len, "       %p: %08x %08x %d\n", obj,
                      obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno);
    len = tmp___1 + len;
    if ((unsigned int )len > 4016U) {
      *eof = 1;
      return ((int )((unsigned int )len - (unsigned int )offset));
    } else {

    }
  }
  __mptr___0 = (struct list_head  const  *)obj_priv->list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffff0UL;
  ldv_23282: 
  __builtin_prefetch((void const   *)obj_priv->list.next);
  if ((unsigned long )(& obj_priv->list) != (unsigned long )(& dev_priv->mm.flushing_list)) {
    goto ldv_23281;
  } else {

  }

  if ((off_t )len > (off_t )request + offset) {
    return (request);
  } else {

  }
  *eof = 1;
  return ((int )((unsigned int )len - (unsigned int )offset));
}
}
static int i915_gem_inactive_info(char *buf , char **start , off_t offset , int request ,
                                  int *eof , void *data ) 
{ 
  struct drm_minor *minor ;
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_object *obj_priv ;
  int len ;
  int tmp ;
  struct list_head  const  *__mptr ;
  struct drm_gem_object *obj ;
  int tmp___0 ;
  int tmp___1 ;
  struct list_head  const  *__mptr___0 ;

  {
  minor = (struct drm_minor *)data;
  dev = minor->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  len = 0;
  if ((unsigned long )offset > 4016UL) {
    *eof = 1;
    return (0);
  } else {

  }
  *start = buf + (unsigned long )offset;
  *eof = 0;
  tmp = sprintf(buf + (unsigned long )len, "Inactive:\n");
  len = tmp + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  __mptr = (struct list_head  const  *)dev_priv->mm.inactive_list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr + 0xfffffffffffffff0UL;
  goto ldv_23303;
  ldv_23302: 
  obj = obj_priv->obj;
  if (obj->name != 0) {
    tmp___0 = sprintf(buf + (unsigned long )len, "    %p(%d): %08x %08x %d\n", obj,
                      obj->name, obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno);
    len = tmp___0 + len;
    if ((unsigned int )len > 4016U) {
      *eof = 1;
      return ((int )((unsigned int )len - (unsigned int )offset));
    } else {

    }
  } else {
    tmp___1 = sprintf(buf + (unsigned long )len, "       %p: %08x %08x %d\n", obj,
                      obj->read_domains, obj->write_domain, obj_priv->last_rendering_seqno);
    len = tmp___1 + len;
    if ((unsigned int )len > 4016U) {
      *eof = 1;
      return ((int )((unsigned int )len - (unsigned int )offset));
    } else {

    }
  }
  __mptr___0 = (struct list_head  const  *)obj_priv->list.next;
  obj_priv = (struct drm_i915_gem_object *)__mptr___0 + 0xfffffffffffffff0UL;
  ldv_23303: 
  __builtin_prefetch((void const   *)obj_priv->list.next);
  if ((unsigned long )(& obj_priv->list) != (unsigned long )(& dev_priv->mm.inactive_list)) {
    goto ldv_23302;
  } else {

  }

  if ((off_t )len > (off_t )request + offset) {
    return (request);
  } else {

  }
  *eof = 1;
  return ((int )((unsigned int )len - (unsigned int )offset));
}
}
static int i915_gem_request_info(char *buf , char **start , off_t offset , int request ,
                                 int *eof , void *data ) 
{ 
  struct drm_minor *minor ;
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  struct drm_i915_gem_request *gem_request ;
  int len ;
  int tmp ;
  struct list_head  const  *__mptr ;
  int tmp___0 ;
  struct list_head  const  *__mptr___0 ;

  {
  minor = (struct drm_minor *)data;
  dev = minor->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  len = 0;
  if ((unsigned long )offset > 4016UL) {
    *eof = 1;
    return (0);
  } else {

  }
  *start = buf + (unsigned long )offset;
  *eof = 0;
  tmp = sprintf(buf + (unsigned long )len, "Request:\n");
  len = tmp + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  __mptr = (struct list_head  const  *)dev_priv->mm.request_list.next;
  gem_request = (struct drm_i915_gem_request *)__mptr + 0xffffffffffffffe8UL;
  goto ldv_23323;
  ldv_23322: 
  tmp___0 = sprintf(buf + (unsigned long )len, "    %d @ %d %08x\n", gem_request->seqno,
                    (int )((unsigned int )jiffies - (unsigned int )gem_request->emitted_jiffies),
                    gem_request->flush_domains);
  len = tmp___0 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  __mptr___0 = (struct list_head  const  *)gem_request->list.next;
  gem_request = (struct drm_i915_gem_request *)__mptr___0 + 0xffffffffffffffe8UL;
  ldv_23323: 
  __builtin_prefetch((void const   *)gem_request->list.next);
  if ((unsigned long )(& gem_request->list) != (unsigned long )(& dev_priv->mm.request_list)) {
    goto ldv_23322;
  } else {

  }

  if ((off_t )len > (off_t )request + offset) {
    return (request);
  } else {

  }
  *eof = 1;
  return ((int )((unsigned int )len - (unsigned int )offset));
}
}
static int i915_gem_seqno_info(char *buf , char **start , off_t offset , int request ,
                               int *eof , void *data ) 
{ 
  struct drm_minor *minor ;
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  int len ;
  uint32_t tmp ;
  int tmp___0 ;
  int tmp___1 ;
  int tmp___2 ;

  {
  minor = (struct drm_minor *)data;
  dev = minor->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  len = 0;
  if ((unsigned long )offset > 4016UL) {
    *eof = 1;
    return (0);
  } else {

  }
  *start = buf + (unsigned long )offset;
  *eof = 0;
  tmp = i915_get_gem_seqno(dev);
  tmp___0 = sprintf(buf + (unsigned long )len, "Current sequence: %d\n", tmp);
  len = tmp___0 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___1 = sprintf(buf + (unsigned long )len, "Waiter sequence:  %d\n", dev_priv->mm.waiting_gem_seqno);
  len = tmp___1 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___2 = sprintf(buf + (unsigned long )len, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
  len = tmp___2 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  if ((off_t )len > (off_t )request + offset) {
    return (request);
  } else {

  }
  *eof = 1;
  return ((int )((unsigned int )len - (unsigned int )offset));
}
}
static int i915_interrupt_info(char *buf , char **start , off_t offset , int request ,
                               int *eof , void *data ) 
{ 
  struct drm_minor *minor ;
  struct drm_device *dev ;
  drm_i915_private_t *dev_priv ;
  int len ;
  unsigned int tmp ;
  int tmp___0 ;
  unsigned int tmp___1 ;
  int tmp___2 ;
  unsigned int tmp___3 ;
  int tmp___4 ;
  unsigned int tmp___5 ;
  int tmp___6 ;
  unsigned int tmp___7 ;
  int tmp___8 ;
  int tmp___9 ;
  uint32_t tmp___10 ;
  int tmp___11 ;
  int tmp___12 ;
  int tmp___13 ;

  {
  minor = (struct drm_minor *)data;
  dev = minor->dev;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  len = 0;
  if ((unsigned long )offset > 4016UL) {
    *eof = 1;
    return (0);
  } else {

  }
  *start = buf + (unsigned long )offset;
  *eof = 0;
  tmp = readl((void const volatile   *)dev_priv->regs + 8352U);
  tmp___0 = sprintf(buf + (unsigned long )len, "Interrupt enable:    %08x\n", tmp);
  len = tmp___0 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___1 = readl((void const volatile   *)dev_priv->regs + 8356U);
  tmp___2 = sprintf(buf + (unsigned long )len, "Interrupt identity:  %08x\n", tmp___1);
  len = tmp___2 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___3 = readl((void const volatile   *)dev_priv->regs + 8360U);
  tmp___4 = sprintf(buf + (unsigned long )len, "Interrupt mask:      %08x\n", tmp___3);
  len = tmp___4 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___5 = readl((void const volatile   *)dev_priv->regs + 458788U);
  tmp___6 = sprintf(buf + (unsigned long )len, "Pipe A stat:         %08x\n", tmp___5);
  len = tmp___6 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___7 = readl((void const volatile   *)dev_priv->regs + 462884U);
  tmp___8 = sprintf(buf + (unsigned long )len, "Pipe B stat:         %08x\n", tmp___7);
  len = tmp___8 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___9 = sprintf(buf + (unsigned long )len, "Interrupts received: %d\n", dev_priv->irq_received.counter);
  len = tmp___9 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___10 = i915_get_gem_seqno(dev);
  tmp___11 = sprintf(buf + (unsigned long )len, "Current sequence:    %d\n", tmp___10);
  len = tmp___11 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___12 = sprintf(buf + (unsigned long )len, "Waiter sequence:     %d\n", dev_priv->mm.waiting_gem_seqno);
  len = tmp___12 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  tmp___13 = sprintf(buf + (unsigned long )len, "IRQ sequence:        %d\n", dev_priv->mm.irq_gem_seqno);
  len = tmp___13 + len;
  if ((unsigned int )len > 4016U) {
    *eof = 1;
    return ((int )((unsigned int )len - (unsigned int )offset));
  } else {

  }
  if ((off_t )len > (off_t )request + offset) {
    return (request);
  } else {

  }
  *eof = 1;
  return ((int )((unsigned int )len - (unsigned int )offset));
}
}
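/*
 * i915_gem_proc_list maps the /proc/dri/.../i915_gem_* entry names to
 * the handlers above.  i915_gem_proc_init() creates the six entries,
 * removing any already-created ones on failure, and
 * i915_gem_proc_cleanup() removes them all.
 */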
static struct drm_proc_list i915_gem_proc_list[6U]  = {      {"i915_gem_active", & i915_gem_active_info}, 
        {"i915_gem_flushing", & i915_gem_flushing_info}, 
        {"i915_gem_inactive", & i915_gem_inactive_info}, 
        {"i915_gem_request", & i915_gem_request_info}, 
        {"i915_gem_seqno", & i915_gem_seqno_info}, 
        {"i915_gem_interrupt", & i915_interrupt_info}};
int i915_gem_proc_init(struct drm_minor *minor ) 
{ 
  struct proc_dir_entry *ent ;
  int i ;
  int j ;

  {
  i = 0;
  goto ldv_23370;
  ldv_23369: 
  ent = create_proc_entry(i915_gem_proc_list[i].name, 33060U, minor->dev_root);
  if ((unsigned long )ent == (unsigned long )((struct proc_dir_entry *)0)) {
    printk("<3>[drm:%s] *ERROR* Cannot create /proc/dri/.../%s\n", "i915_gem_proc_init",
           i915_gem_proc_list[i].name);
    j = 0;
    goto ldv_23367;
    ldv_23366: 
    remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
    j = j + 1;
    ldv_23367: ;
    if (j < i) {
      goto ldv_23366;
    } else {

    }

    return (-1);
  } else {

  }
  ent->read_proc = i915_gem_proc_list[i].f;
  ent->data = (void *)minor;
  i = i + 1;
  ldv_23370: ;
  if ((unsigned int )i <= 5U) {
    goto ldv_23369;
  } else {

  }

  return (0);
}
}
void i915_gem_proc_cleanup(struct drm_minor *minor ) 
{ 
  int i ;

  {
  if ((unsigned long )minor->dev_root == (unsigned long )((struct proc_dir_entry *)0)) {
    return;
  } else {

  }
  i = 0;
  goto ldv_23377;
  ldv_23376: 
  remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
  i = i + 1;
  ldv_23377: ;
  if ((unsigned int )i <= 5U) {
    goto ldv_23376;
  } else {

  }

  return;
}
}
unsigned long ldv___get_free_pages_130(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_132(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_136(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_140(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
struct page *ldv_alloc_page_vma_156(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_146(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_148(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_152(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
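/* Inline 16-bit MMIO read, used by the swizzle detection below. */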
__inline static unsigned short readw(void const volatile   *addr ) 
{ 
  unsigned short ret ;

  {
  __asm__  volatile   ("movw %1,%0": "=r" (ret): "m" (*((unsigned short volatile   *)addr)): "memory");
  return (ret);
}
}
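/*
 * i915_gem_detect_bit_6_swizzle(): choose the X/Y swizzle pattern used
 * for tiled surfaces on this device.  The long PCI-ID chains are the
 * CIL expansion of the IS_I9XX()/IS_I965G()-style device checks; newer
 * devices decide from the DRAM controller configuration (DCC)
 * register, older ones compare two DRAM bank registers via readw().
 * The value 5 appears to mean "unknown", which disables tiling.
 */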
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev ) 
{ 
  drm_i915_private_t *dev_priv ;
  uint32_t swizzle_x ;
  uint32_t swizzle_y ;
  uint32_t dcc ;
  unsigned short tmp ;
  unsigned short tmp___0 ;

  {
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  swizzle_x = 5U;
  swizzle_y = 5U;
  if ((((((dev->pci_device != 9602 && dev->pci_device != 9610) && dev->pci_device != 9618) && dev->pci_device != 10098) && (dev->pci_device != 10146 && dev->pci_device != 10158)) && (((((((((dev->pci_device != 10610 && dev->pci_device != 10626) && dev->pci_device != 10642) && dev->pci_device != 10658) && dev->pci_device != 10754) && dev->pci_device != 10770) && dev->pci_device != 10818) && dev->pci_device != 11778) && dev->pci_device != 11794) && dev->pci_device != 11810)) && ((dev->pci_device != 10690 && dev->pci_device != 10674) && dev->pci_device != 10706)) {
    swizzle_x = 0U;
    swizzle_y = 0U;
  } else
  if (((((((((((dev->pci_device != 10610 && dev->pci_device != 10626) && dev->pci_device != 10642) && dev->pci_device != 10658) && dev->pci_device != 10754) && dev->pci_device != 10770) && dev->pci_device != 10818) && dev->pci_device != 11778) && dev->pci_device != 11794) && dev->pci_device != 11810) && ((dev->pci_device != 10690 && dev->pci_device != 10674) && dev->pci_device != 10706)) || dev->pci_device == 10754) {
    dcc = readl((void const volatile   *)dev_priv->regs + 66048U);
    switch (dcc & 3U) {
    case 0U: ;
    case 1U: 
    swizzle_x = 0U;
    swizzle_y = 0U;
    goto ldv_23251;
    case 2U: ;
    if (((dev->pci_device == 9602 || dev->pci_device == 9610) || dev->pci_device == 9618) || (dcc & 1024U) != 0U) {
      swizzle_x = 2U;
      swizzle_y = 1U;
    } else
    if (dev->pci_device == 10754) {
      swizzle_x = 4U;
      swizzle_y = 3U;
    } else {
      swizzle_x = 5U;
      swizzle_y = 5U;
    }
    goto ldv_23251;
    }
    ldv_23251: ;
    if (dcc == 4294967295U) {
      printk("<3>[drm:%s] *ERROR* Couldn\'t read from MCHBAR.  Disabling tiling.\n",
             "i915_gem_detect_bit_6_swizzle");
      swizzle_x = 5U;
      swizzle_y = 5U;
    } else {

    }
  } else {
    tmp = readw((void const volatile   *)dev_priv->regs + 66054U);
    tmp___0 = readw((void const volatile   *)dev_priv->regs + 67078U);
    if ((int )tmp != (int )tmp___0) {
      swizzle_x = 0U;
      swizzle_y = 0U;
    } else {
      swizzle_x = 2U;
      swizzle_y = 1U;
    }
  }
  dev_priv->mm.bit_6_swizzle_x = swizzle_x;
  dev_priv->mm.bit_6_swizzle_y = swizzle_y;
  return;
}
}
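/*
 * i915_gem_set_tiling(): ioctl that records the requested tiling mode
 * on a GEM object and reports the swizzle pattern detected above; an
 * unknown pattern (5) forces tiling off.
 */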
int i915_gem_set_tiling(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_set_tiling *args ;
  drm_i915_private_t *dev_priv ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;

  {
  args = (struct drm_i915_gem_set_tiling *)data;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-22);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  if (args->tiling_mode == 0U) {
    obj_priv->tiling_mode = 0U;
    args->swizzle_mode = 0U;
  } else {
    if (args->tiling_mode == 1U) {
      args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
    } else {
      args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
    }
    if (args->swizzle_mode == 5U) {
      args->tiling_mode = 0U;
      args->swizzle_mode = 0U;
    } else {

    }
  }
  obj_priv->tiling_mode = args->tiling_mode;
  mutex_unlock(& dev->struct_mutex);
  drm_gem_object_unreference(obj);
  return (0);
}
}
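/*
 * i915_gem_get_tiling(): ioctl that returns an object's current tiling
 * mode together with the matching swizzle pattern.
 */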
int i915_gem_get_tiling(struct drm_device *dev , void *data , struct drm_file *file_priv ) 
{ 
  struct drm_i915_gem_get_tiling *args ;
  drm_i915_private_t *dev_priv ;
  struct drm_gem_object *obj ;
  struct drm_i915_gem_object *obj_priv ;

  {
  args = (struct drm_i915_gem_get_tiling *)data;
  dev_priv = (drm_i915_private_t *)dev->dev_private;
  obj = drm_gem_object_lookup(dev, file_priv, (int )args->handle);
  if ((unsigned long )obj == (unsigned long )((struct drm_gem_object *)0)) {
    return (-22);
  } else {

  }
  obj_priv = (struct drm_i915_gem_object *)obj->driver_private;
  mutex_lock_nested(& dev->struct_mutex, 0U);
  args->tiling_mode = obj_priv->tiling_mode;
  switch (obj_priv->tiling_mode) {
  case (uint32_t )1: 
  args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
  goto ldv_23273;
  case (uint32_t )2: 
  args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
  goto ldv_23273;
  case (uint32_t )0: 
  args->swizzle_mode = 0U;
  goto ldv_23273;
  default: 
  printk("<3>[drm:%s] *ERROR* unknown tiling mode\n", "i915_gem_get_tiling");
  }
  ldv_23273: 
  mutex_unlock(& dev->struct_mutex);
  drm_gem_object_unreference(obj);
  return (0);
}
}
unsigned long ldv___get_free_pages_146(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_148(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_152(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_156(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
struct page *ldv_alloc_page_vma_172(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) ;
unsigned long ldv___get_free_pages_162(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_164(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
void *ldv_kmem_cache_alloc_168(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ;
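/*
 * 32-bit compatibility ioctl layer.  compat_alloc_user_space() carves
 * scratch space off the current user stack; the compat_i915_*
 * handlers copy the 32-bit argument struct in, rebuild the native
 * layout field by field (the switch-on-sizeof asm blocks are the CIL
 * expansion of put_user()), and forward the result to drm_ioctl().
 */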
__inline static void *compat_alloc_user_space(long len ) 
{ 
  struct pt_regs *regs ;
  struct task_struct *tmp ;

  {
  tmp = get_current();
  regs = (struct pt_regs *)(tmp->thread.sp0 + 0xffffffffffffffffUL);
  return ((void *)(regs->sp - (unsigned long )len));
}
}
extern void lock_kernel(void) ;
extern void unlock_kernel(void) ;
extern void __put_user_bad(void) ;
extern long drm_compat_ioctl(struct file * , unsigned int  , unsigned long  ) ;
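/* 32-bit thunk for the i915 batchbuffer ioctl. */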
static int compat_i915_batchbuffer(struct file *file , unsigned int cmd , unsigned long arg ) 
{ 
  drm_i915_batchbuffer32_t batchbuffer32 ;
  drm_i915_batchbuffer_t *batchbuffer ;
  unsigned long tmp ;
  void *tmp___0 ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp___1 ;
  long tmp___2 ;
  long __pu_err ;
  long __pu_err___0 ;
  long __pu_err___1 ;
  long __pu_err___2 ;
  long __pu_err___3 ;
  long __pu_err___4 ;
  int tmp___3 ;

  {
  tmp = copy_from_user((void *)(& batchbuffer32), (void const   *)arg, 24U);
  if (tmp != 0UL) {
    return (-14);
  } else {

  }
  tmp___0 = compat_alloc_user_space(32L);
  batchbuffer = (drm_i915_batchbuffer_t *)tmp___0;
  tmp___1 = current_thread_info();
  __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (batchbuffer),
            "g" (32L), "rm" (tmp___1->addr_limit.seg));
  tmp___2 = ldv__builtin_expect(flag == 0UL, 1L);
  if (tmp___2 == 0L) {
    return (-14);
  } else {
    __pu_err = 0L;
    switch (4UL) {
    case 1UL: 
    __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "iq" (batchbuffer32.start),
                         "m" (*((struct __large_struct *)(& batchbuffer->start))),
                         "i" (-14), "0" (__pu_err));
    goto ldv_22834;
    case 2UL: 
    __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" (batchbuffer32.start),
                         "m" (*((struct __large_struct *)(& batchbuffer->start))),
                         "i" (-14), "0" (__pu_err));
    goto ldv_22834;
    case 4UL: 
    __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" (batchbuffer32.start),
                         "m" (*((struct __large_struct *)(& batchbuffer->start))),
                         "i" (-14), "0" (__pu_err));
    goto ldv_22834;
    case 8UL: 
    __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "Zr" (batchbuffer32.start),
                         "m" (*((struct __large_struct *)(& batchbuffer->start))),
                         "i" (-14), "0" (__pu_err));
    goto ldv_22834;
    default: 
    __put_user_bad();
    }
    ldv_22834: ;
    if (__pu_err != 0L) {
      return (-14);
    } else {
      __pu_err___0 = 0L;
      switch (4UL) {
      case 1UL: 
      __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "iq" (batchbuffer32.used),
                           "m" (*((struct __large_struct *)(& batchbuffer->used))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_22842;
      case 2UL: 
      __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" (batchbuffer32.used),
                           "m" (*((struct __large_struct *)(& batchbuffer->used))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_22842;
      case 4UL: 
      __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" (batchbuffer32.used),
                           "m" (*((struct __large_struct *)(& batchbuffer->used))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_22842;
      case 8UL: 
      __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "Zr" (batchbuffer32.used),
                           "m" (*((struct __large_struct *)(& batchbuffer->used))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_22842;
      default: 
      __put_user_bad();
      }
      ldv_22842: ;
      if (__pu_err___0 != 0L) {
        return (-14);
      } else {
        __pu_err___1 = 0L;
        switch (4UL) {
        case 1UL: 
        __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "iq" (batchbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& batchbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22850;
        case 2UL: 
        __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "ir" (batchbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& batchbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22850;
        case 4UL: 
        __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "ir" (batchbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& batchbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22850;
        case 8UL: 
        __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "Zr" (batchbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& batchbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22850;
        default: 
        __put_user_bad();
        }
        ldv_22850: ;
        if (__pu_err___1 != 0L) {
          return (-14);
        } else {
          __pu_err___2 = 0L;
          switch (4UL) {
          case 1UL: 
          __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "iq" (batchbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& batchbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22858;
          case 2UL: 
          __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "ir" (batchbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& batchbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22858;
          case 4UL: 
          __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "ir" (batchbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& batchbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22858;
          case 8UL: 
          __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "Zr" (batchbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& batchbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22858;
          default: 
          __put_user_bad();
          }
          ldv_22858: ;
          if (__pu_err___2 != 0L) {
            return (-14);
          } else {
            __pu_err___3 = 0L;
            switch (4UL) {
            case 1UL: 
            __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "iq" (batchbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22866;
            case 2UL: 
            __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "ir" (batchbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22866;
            case 4UL: 
            __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "ir" (batchbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22866;
            case 8UL: 
            __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "Zr" (batchbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& batchbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22866;
            default: 
            __put_user_bad();
            }
            ldv_22866: ;
            if (__pu_err___3 != 0L) {
              return (-14);
            } else {
              __pu_err___4 = 0L;
              switch (8UL) {
              case 1UL: 
              __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "iq" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& batchbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22874;
              case 2UL: 
              __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& batchbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22874;
              case 4UL: 
              __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& batchbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22874;
              case 8UL: 
              __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "Zr" ((struct drm_clip_rect *)((unsigned long )batchbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& batchbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22874;
              default: 
              __put_user_bad();
              }
              ldv_22874: ;
              if (__pu_err___4 != 0L) {
                return (-14);
              } else {

              }
            }
          }
        }
      }
    }
  }
  tmp___3 = drm_ioctl((file->f_path.dentry)->d_inode, file, 1075864643U, (unsigned long )batchbuffer);
  return (tmp___3);
}
}
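/* 32-bit thunk for the i915 cmdbuffer ioctl. */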
static int compat_i915_cmdbuffer(struct file *file , unsigned int cmd , unsigned long arg ) 
{ 
  drm_i915_cmdbuffer32_t cmdbuffer32 ;
  drm_i915_cmdbuffer_t *cmdbuffer ;
  unsigned long tmp ;
  void *tmp___0 ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp___1 ;
  long tmp___2 ;
  long __pu_err ;
  long __pu_err___0 ;
  long __pu_err___1 ;
  long __pu_err___2 ;
  long __pu_err___3 ;
  long __pu_err___4 ;
  int tmp___3 ;

  {
  tmp = copy_from_user((void *)(& cmdbuffer32), (void const   *)arg, 24U);
  if (tmp != 0UL) {
    return (-14);
  } else {

  }
  tmp___0 = compat_alloc_user_space(32L);
  cmdbuffer = (drm_i915_cmdbuffer_t *)tmp___0;
  tmp___1 = current_thread_info();
  __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (cmdbuffer),
            "g" (32L), "rm" (tmp___1->addr_limit.seg));
  tmp___2 = ldv__builtin_expect(flag == 0UL, 1L);
  if (tmp___2 == 0L) {
    return (-14);
  } else {
    __pu_err = 0L;
    switch (8UL) {
    case 1UL: 
    __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "iq" ((char *)((unsigned long )cmdbuffer32.buf)),
                         "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22900;
    case 2UL: 
    __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" ((char *)((unsigned long )cmdbuffer32.buf)),
                         "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22900;
    case 4UL: 
    __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" ((char *)((unsigned long )cmdbuffer32.buf)),
                         "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22900;
    case 8UL: 
    __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "Zr" ((char *)((unsigned long )cmdbuffer32.buf)),
                         "m" (*((struct __large_struct *)(& cmdbuffer->buf))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22900;
    default: 
    __put_user_bad();
    }
    ldv_22900: ;
    if (__pu_err != 0L) {
      return (-14);
    } else {
      __pu_err___0 = 0L;
      switch (4UL) {
      case 1UL: 
      __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "iq" (cmdbuffer32.sz),
                           "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22908;
      case 2UL: 
      __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" (cmdbuffer32.sz),
                           "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22908;
      case 4UL: 
      __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" (cmdbuffer32.sz),
                           "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22908;
      case 8UL: 
      __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "Zr" (cmdbuffer32.sz),
                           "m" (*((struct __large_struct *)(& cmdbuffer->sz))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22908;
      default: 
      __put_user_bad();
      }
      ldv_22908: ;
      if (__pu_err___0 != 0L) {
        return (-14);
      } else {
        __pu_err___1 = 0L;
        switch (4UL) {
        case 1UL: 
        __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "iq" (cmdbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& cmdbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22916;
        case 2UL: 
        __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "ir" (cmdbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& cmdbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22916;
        case 4UL: 
        __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "ir" (cmdbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& cmdbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22916;
        case 8UL: 
        __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "Zr" (cmdbuffer32.DR1),
                             "m" (*((struct __large_struct *)(& cmdbuffer->DR1))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_22916;
        default: 
        __put_user_bad();
        }
        ldv_22916: ;
        if (__pu_err___1 != 0L) {
          return (-14);
        } else {
          __pu_err___2 = 0L;
          switch (4UL) {
          case 1UL: 
          __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "iq" (cmdbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& cmdbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22924;
          case 2UL: 
          __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "ir" (cmdbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& cmdbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22924;
          case 4UL: 
          __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "ir" (cmdbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& cmdbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22924;
          case 8UL: 
          __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "Zr" (cmdbuffer32.DR4),
                               "m" (*((struct __large_struct *)(& cmdbuffer->DR4))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_22924;
          default: 
          __put_user_bad();
          }
          ldv_22924: ;
          if (__pu_err___2 != 0L) {
            return (-14);
          } else {
            __pu_err___3 = 0L;
            switch (4UL) {
            case 1UL: 
            __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "iq" (cmdbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22932;
            case 2UL: 
            __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "ir" (cmdbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22932;
            case 4UL: 
            __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "ir" (cmdbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22932;
            case 8UL: 
            __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___3): "Zr" (cmdbuffer32.num_cliprects),
                                 "m" (*((struct __large_struct *)(& cmdbuffer->num_cliprects))),
                                 "i" (-14), "0" (__pu_err___3));
            goto ldv_22932;
            default: 
            __put_user_bad();
            }
            ldv_22932: ;
            if (__pu_err___3 != 0L) {
              return (-14);
            } else {
              __pu_err___4 = 0L;
              switch (8UL) {
              case 1UL: 
              __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "iq" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22940;
              case 2UL: 
              __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22940;
              case 4UL: 
              __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "ir" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22940;
              case 8UL: 
              __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___4): "Zr" ((struct drm_clip_rect *)((unsigned long )cmdbuffer32.cliprects)),
                                   "m" (*((struct __large_struct *)(& cmdbuffer->cliprects))),
                                   "i" (-14), "0" (__pu_err___4));
              goto ldv_22940;
              default: 
              __put_user_bad();
              }
              ldv_22940: ;
              if (__pu_err___4 != 0L) {
                return (-14);
              } else {

              }
            }
          }
        }
      }
    }
  }
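  /*
   * The switch/asm blocks above are the CIL expansion of the x86-64
   * __put_user() macro, one per field copied from the 32-bit
   * cmdbuffer32 into the 64-bit *cmdbuffer built in compat user space.
   * 1075864651U is 0x4020644b, i.e. DRM_IOCTL_I915_CMDBUFFER, so the
   * converted argument is handed to the native ioctl path below.
   */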
  tmp___3 = drm_ioctl((file->f_path.dentry)->d_inode, file, 1075864651U, (unsigned long )cmdbuffer);
  return (tmp___3);
}
}
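/*
 * compat_i915_irq_emit: 32-bit compat wrapper for DRM_I915_IRQ_EMIT.
 * The inline asm below is the CIL expansion of access_ok() and
 * __put_user(), and 3221775428U is 0xc0086444, i.e.
 * DRM_IOCTL_I915_IRQ_EMIT.  Before CIL flattening the driver source is
 * roughly the following (a hedged reconstruction, not part of this
 * benchmark):
 *
 *   drm_i915_irq_emit32_t req32;
 *   drm_i915_irq_emit_t __user *request;
 *
 *   if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 *       return -EFAULT;
 *   request = compat_alloc_user_space(sizeof(*request));
 *   if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
 *       __put_user((int __user *)(unsigned long)req32.irq_seq,
 *                  &request->irq_seq))
 *       return -EFAULT;
 *   return drm_ioctl(file->f_path.dentry->d_inode, file,
 *                    DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
 */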
static int compat_i915_irq_emit(struct file *file , unsigned int cmd , unsigned long arg ) 
{ 
  drm_i915_irq_emit32_t req32 ;
  drm_i915_irq_emit_t *request ;
  unsigned long tmp ;
  void *tmp___0 ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp___1 ;
  long tmp___2 ;
  long __pu_err ;
  int tmp___3 ;

  {
  tmp = copy_from_user((void *)(& req32), (void const   *)arg, 4U);
  if (tmp != 0UL) {
    return (-14);
  } else {

  }
  tmp___0 = compat_alloc_user_space(8L);
  request = (drm_i915_irq_emit_t *)tmp___0;
  tmp___1 = current_thread_info();
  __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (request),
            "g" (8L), "rm" (tmp___1->addr_limit.seg));
  tmp___2 = ldv__builtin_expect(flag == 0UL, 1L);
  if (tmp___2 == 0L) {
    return (-14);
  } else {
    __pu_err = 0L;
    switch (8UL) {
    case 1UL: 
    __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "iq" ((int *)((unsigned long )req32.irq_seq)),
                         "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22961;
    case 2UL: 
    __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" ((int *)((unsigned long )req32.irq_seq)),
                         "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22961;
    case 4UL: 
    __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" ((int *)((unsigned long )req32.irq_seq)),
                         "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22961;
    case 8UL: 
    __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "Zr" ((int *)((unsigned long )req32.irq_seq)),
                         "m" (*((struct __large_struct *)(& request->irq_seq))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22961;
    default: 
    __put_user_bad();
    }
    ldv_22961: ;
    if (__pu_err != 0L) {
      return (-14);
    } else {

    }
  }
  tmp___3 = drm_ioctl((file->f_path.dentry)->d_inode, file, 3221775428U, (unsigned long )request);
  return (tmp___3);
}
}
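/*
 * compat_i915_getparam: 32-bit compat wrapper for DRM_I915_GETPARAM.
 * The 8-byte 32-bit request (param plus a 32-bit pointer to the result
 * slot) is rewritten as the 16-byte native drm_i915_getparam_t in
 * compat user space and forwarded via 3222299718U, i.e. 0xc0106446 =
 * DRM_IOCTL_I915_GETPARAM.  Roughly, before CIL flattening (a hedged
 * reconstruction):
 *
 *   if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
 *       return -EFAULT;
 *   request = compat_alloc_user_space(sizeof(*request));
 *   if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) ||
 *       __put_user(req32.param, &request->param) ||
 *       __put_user((int __user *)(unsigned long)req32.value,
 *                  &request->value))
 *       return -EFAULT;
 *   return drm_ioctl(file->f_path.dentry->d_inode, file,
 *                    DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
 */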
static int compat_i915_getparam(struct file *file , unsigned int cmd , unsigned long arg ) 
{ 
  drm_i915_getparam32_t req32 ;
  drm_i915_getparam_t *request ;
  unsigned long tmp ;
  void *tmp___0 ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp___1 ;
  long tmp___2 ;
  long __pu_err ;
  long __pu_err___0 ;
  int tmp___3 ;

  {
  tmp = copy_from_user((void *)(& req32), (void const   *)arg, 8U);
  if (tmp != 0UL) {
    return (-14);
  } else {

  }
  tmp___0 = compat_alloc_user_space(16L);
  request = (drm_i915_getparam_t *)tmp___0;
  tmp___1 = current_thread_info();
  __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (request),
            "g" (16L), "rm" (tmp___1->addr_limit.seg));
  tmp___2 = ldv__builtin_expect(flag == 0UL, 1L);
  if (tmp___2 == 0L) {
    return (-14);
  } else {
    __pu_err = 0L;
    switch (4UL) {
    case 1UL: 
    __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "iq" (req32.param),
                         "m" (*((struct __large_struct *)(& request->param))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22983;
    case 2UL: 
    __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" (req32.param),
                         "m" (*((struct __large_struct *)(& request->param))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22983;
    case 4UL: 
    __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" (req32.param),
                         "m" (*((struct __large_struct *)(& request->param))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22983;
    case 8UL: 
    __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "Zr" (req32.param),
                         "m" (*((struct __large_struct *)(& request->param))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_22983;
    default: 
    __put_user_bad();
    }
    ldv_22983: ;
    if (__pu_err != 0L) {
      return (-14);
    } else {
      __pu_err___0 = 0L;
      switch (8UL) {
      case 1UL: 
      __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "iq" ((int *)((unsigned long )req32.value)),
                           "m" (*((struct __large_struct *)(& request->value))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22991;
      case 2UL: 
      __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" ((int *)((unsigned long )req32.value)),
                           "m" (*((struct __large_struct *)(& request->value))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22991;
      case 4UL: 
      __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" ((int *)((unsigned long )req32.value)),
                           "m" (*((struct __large_struct *)(& request->value))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22991;
      case 8UL: 
      __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "Zr" ((int *)((unsigned long )req32.value)),
                           "m" (*((struct __large_struct *)(& request->value))), "i" (-14),
                           "0" (__pu_err___0));
      goto ldv_22991;
      default: 
      __put_user_bad();
      }
      ldv_22991: ;
      if (__pu_err___0 != 0L) {
        return (-14);
      } else {

      }
    }
  }
  tmp___3 = drm_ioctl((file->f_path.dentry)->d_inode, file, 3222299718U, (unsigned long )request);
  return (tmp___3);
}
}
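/*
 * compat_i915_alloc: 32-bit compat wrapper for DRM_I915_ALLOC.  It
 * copies the 16-byte 32-bit request, rebuilds the 24-byte native
 * drm_i915_mem_alloc_t in compat user space (region, alignment, size,
 * plus the widened region_offset pointer), and forwards it via
 * 3222824008U, i.e. 0xc0186448 = DRM_IOCTL_I915_ALLOC, following the
 * same copy_from_user / access_ok / __put_user pattern as above.
 */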
static int compat_i915_alloc(struct file *file , unsigned int cmd , unsigned long arg ) 
{ 
  drm_i915_mem_alloc32_t req32 ;
  drm_i915_mem_alloc_t *request ;
  unsigned long tmp ;
  void *tmp___0 ;
  unsigned long flag ;
  unsigned long roksum ;
  struct thread_info *tmp___1 ;
  long tmp___2 ;
  long __pu_err ;
  long __pu_err___0 ;
  long __pu_err___1 ;
  long __pu_err___2 ;
  int tmp___3 ;

  {
  tmp = copy_from_user((void *)(& req32), (void const   *)arg, 16U);
  if (tmp != 0UL) {
    return (-14);
  } else {

  }
  tmp___0 = compat_alloc_user_space(24L);
  request = (drm_i915_mem_alloc_t *)tmp___0;
  tmp___1 = current_thread_info();
  __asm__  ("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0": "=&r" (flag), "=r" (roksum): "1" (request),
            "g" (24L), "rm" (tmp___1->addr_limit.seg));
  tmp___2 = ldv__builtin_expect(flag == 0UL, 1L);
  if (tmp___2 == 0L) {
    return (-14);
  } else {
    __pu_err = 0L;
    switch (4UL) {
    case 1UL: 
    __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "iq" (req32.region),
                         "m" (*((struct __large_struct *)(& request->region))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_23015;
    case 2UL: 
    __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" (req32.region),
                         "m" (*((struct __large_struct *)(& request->region))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_23015;
    case 4UL: 
    __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "ir" (req32.region),
                         "m" (*((struct __large_struct *)(& request->region))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_23015;
    case 8UL: 
    __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err): "Zr" (req32.region),
                         "m" (*((struct __large_struct *)(& request->region))), "i" (-14),
                         "0" (__pu_err));
    goto ldv_23015;
    default: 
    __put_user_bad();
    }
    ldv_23015: ;
    if (__pu_err != 0L) {
      return (-14);
    } else {
      __pu_err___0 = 0L;
      switch (4UL) {
      case 1UL: 
      __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "iq" (req32.alignment),
                           "m" (*((struct __large_struct *)(& request->alignment))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_23023;
      case 2UL: 
      __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" (req32.alignment),
                           "m" (*((struct __large_struct *)(& request->alignment))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_23023;
      case 4UL: 
      __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "ir" (req32.alignment),
                           "m" (*((struct __large_struct *)(& request->alignment))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_23023;
      case 8UL: 
      __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___0): "Zr" (req32.alignment),
                           "m" (*((struct __large_struct *)(& request->alignment))),
                           "i" (-14), "0" (__pu_err___0));
      goto ldv_23023;
      default: 
      __put_user_bad();
      }
      ldv_23023: ;
      if (__pu_err___0 != 0L) {
        return (-14);
      } else {
        __pu_err___1 = 0L;
        switch (4UL) {
        case 1UL: 
        __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "iq" (req32.size),
                             "m" (*((struct __large_struct *)(& request->size))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_23031;
        case 2UL: 
        __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "ir" (req32.size),
                             "m" (*((struct __large_struct *)(& request->size))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_23031;
        case 4UL: 
        __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "ir" (req32.size),
                             "m" (*((struct __large_struct *)(& request->size))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_23031;
        case 8UL: 
        __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___1): "Zr" (req32.size),
                             "m" (*((struct __large_struct *)(& request->size))),
                             "i" (-14), "0" (__pu_err___1));
        goto ldv_23031;
        default: 
        __put_user_bad();
        }
        ldv_23031: ;
        if (__pu_err___1 != 0L) {
          return (-14);
        } else {
          __pu_err___2 = 0L;
          switch (8UL) {
          case 1UL: 
          __asm__  volatile   ("1:\tmovb %b1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "iq" ((int *)((unsigned long )req32.region_offset)),
                               "m" (*((struct __large_struct *)(& request->region_offset))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_23039;
          case 2UL: 
          __asm__  volatile   ("1:\tmovw %w1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "ir" ((int *)((unsigned long )req32.region_offset)),
                               "m" (*((struct __large_struct *)(& request->region_offset))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_23039;
          case 4UL: 
          __asm__  volatile   ("1:\tmovl %k1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "ir" ((int *)((unsigned long )req32.region_offset)),
                               "m" (*((struct __large_struct *)(& request->region_offset))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_23039;
          case 8UL: 
          __asm__  volatile   ("1:\tmovq %1,%2\n2:\n.section .fixup,\"ax\"\n3:\tmov %3,%0\n\tjmp 2b\n.previous\n .section __ex_table,\"a\"\n .balign 8 \n .quad 1b,3b\n .previous\n": "=r" (__pu_err___2): "Zr" ((int *)((unsigned long )req32.region_offset)),
                               "m" (*((struct __large_struct *)(& request->region_offset))),
                               "i" (-14), "0" (__pu_err___2));
          goto ldv_23039;
          default: 
          __put_user_bad();
          }
          ldv_23039: ;
          if (__pu_err___2 != 0L) {
            return (-14);
          } else {

          }
        }
      }
    }
  }
  tmp___3 = drm_ioctl((file->f_path.dentry)->d_inode, file, 3222824008U, (unsigned long )request);
  return (tmp___3);
}
}
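/*
 * Compat ioctl dispatch table, indexed by ioctl nr minus
 * DRM_COMMAND_BASE (0x40); slots left at 0 fall through to the native
 * drm_ioctl() in i915_compat_ioctl() below.  Before CIL flattened the
 * designated initializers, the table read roughly as follows (a hedged
 * reconstruction):
 *
 *   drm_ioctl_compat_t *i915_compat_ioctls[] = {
 *       [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,   // slot 3
 *       [DRM_I915_IRQ_EMIT]    = compat_i915_irq_emit,      // slot 4
 *       [DRM_I915_GETPARAM]    = compat_i915_getparam,      // slot 6
 *       [DRM_I915_ALLOC]       = compat_i915_alloc,         // slot 8
 *       [DRM_I915_CMDBUFFER]   = compat_i915_cmdbuffer,     // slot 11
 *   };
 *
 * i915_compat_ioctl() routes nr < 64 (core DRM ioctls) to
 * drm_compat_ioctl(), looks up nr 64..75 in this table, and falls back
 * to drm_ioctl() when no translator is registered, all under the big
 * kernel lock.
 */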
drm_ioctl_compat_t *i915_compat_ioctls[12U]  = 
  {      0,      0,      0,      & compat_i915_batchbuffer, 
        & compat_i915_irq_emit,      0,      & compat_i915_getparam,      0, 
        & compat_i915_alloc,      0,      0,      & compat_i915_cmdbuffer};
long i915_compat_ioctl(struct file *filp , unsigned int cmd , unsigned long arg ) 
{ 
  unsigned int nr ;
  drm_ioctl_compat_t *fn ;
  int ret ;
  long tmp ;

  {
  nr = cmd & 255U;
  fn = 0;
  if (nr <= 63U) {
    tmp = drm_compat_ioctl(filp, cmd, arg);
    return (tmp);
  } else {

  }
  if (nr <= 75U) {
    fn = i915_compat_ioctls[nr - 64U];
  } else {

  }
  lock_kernel();
  if ((unsigned long )fn != (unsigned long )((drm_ioctl_compat_t *)0)) {
    ret = (*fn)(filp, cmd, arg);
  } else {
    ret = drm_ioctl((filp->f_path.dentry)->d_inode, filp, cmd, arg);
  }
  unlock_kernel();
  return ((long )ret);
}
}
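/*
 * LDV instrumentation: the driver's allocation call sites were
 * rerouted to the numbered ldv_* wrappers below, which first run
 * ldv_check_alloc_flags() to compare the GFP flags against the
 * modelled spinlock state (the checked rule: no blocking allocation
 * while a spinlock is held) and then perform the original allocation.
 * Note that the kmem_cache_alloc wrappers discard the allocator's
 * return value and always return NULL.
 */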
unsigned long ldv___get_free_pages_162(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) 
{ 
  unsigned long tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2);
  return (tmp);
}
}
void *ldv_kmem_cache_alloc_164(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
void *ldv_kmem_cache_alloc_168(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) 
{ 


  {
  ldv_check_alloc_flags(ldv_func_arg2);
  kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2);
  return ((void *)0);
}
}
struct page *ldv_alloc_page_vma_172(gfp_t ldv_func_arg1 , struct vm_area_struct *ldv_func_arg2 ,
                                    unsigned long ldv_func_arg3 ) 
{ 
  struct page *tmp ;

  {
  ldv_check_alloc_flags(ldv_func_arg1);
  tmp = alloc_page_vma(ldv_func_arg1, ldv_func_arg2, ldv_func_arg3);
  return (tmp);
}
}
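/*
 * ldv_error() is the verification target: the LDV_ERROR label calls
 * reach_error() and abort(), so the checked property holds exactly
 * when this statement is unreachable.
 */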
__inline static void ldv_error(void) 
{ 


  {
  LDV_ERROR: {reach_error();abort();}
}
}
extern int ldv_undef_int(void) ;
long ldv__builtin_expect(long exp , long c ) 
{ 


  {
  return (exp);
}
}
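/*
 * ldv_spin models the driver's spinlock state as a single flag.
 * ldv_check_alloc_flags() accepts an allocation if no spinlock is held
 * or if the flags equal 32U (GFP_ATOMIC on kernels of this vintage);
 * anything else reaches ldv_error().
 * ldv_check_alloc_flags_and_return_some_page() performs the same check
 * for page allocations and returns an arbitrary page from
 * ldv_some_page(), while ldv_check_alloc_nonatomic() rejects the call
 * whenever a spinlock is held, regardless of flags.
 */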
int ldv_spin  =    LDV_SPIN_UNLOCKED;
void ldv_check_alloc_flags(gfp_t flags ) 
{ 


  {
  if (ldv_spin == LDV_SPIN_UNLOCKED || flags == 32U) {

  } else {
    ldv_error();
  }
  return;
}
}
extern struct page *ldv_some_page(void) ;
struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags ) 
{ 
  struct page *tmp ;

  {
  if (ldv_spin == LDV_SPIN_UNLOCKED || flags == 32U) {

  } else {
    ldv_error();
  }
  tmp = ldv_some_page();
  return (tmp);
}
}
void ldv_check_alloc_nonatomic(void) 
{ 


  {
  if (ldv_spin == LDV_SPIN_UNLOCKED) {

  } else {
    ldv_error();
  }
  return;
}
}
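/*
 * ldv_spin_lock()/ldv_spin_unlock() are the model's hooks for spinlock
 * acquire and release; they only update the modelled ldv_spin flag
 * inspected by the checks above.
 */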
void ldv_spin_lock(void) 
{ 


  {
  ldv_spin = LDV_SPIN_LOCKED;
  return;
}
}
void ldv_spin_unlock(void) 
{ 


  {
  ldv_spin = LDV_SPIN_UNLOCKED;
  return;
}
}
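/*
 * ldv_spin_trylock: nondeterministic trylock model.  ldv_undef_int()
 * yields an arbitrary value, so the verifier explores both outcomes: a
 * failed attempt that returns 0 and leaves ldv_spin unchanged, and a
 * successful one that sets LDV_SPIN_LOCKED and returns 1.
 */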
int ldv_spin_trylock(void) 
{ 
  int is_lock ;

  {
  is_lock = ldv_undef_int();
  if (is_lock) {
    return (0);
  } else {
    ldv_spin = LDV_SPIN_LOCKED;
    return (1);
  }
}
}