extern void abort(void); extern void __assert_fail(const char *, const char *, unsigned int, const char *) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__)); void reach_error() { __assert_fail("0", "drivers--mtd--mtdoops.ko_014.69423d9.43_1a.cil_true-unreach-call.i", 3, "reach_error"); } /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef __builtin_va_list __gnuc_va_list[1U]; typedef __gnuc_va_list va_list[1U]; typedef unsigned int __kernel_mode_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; typedef unsigned long __kernel_size_t; typedef long __kernel_ssize_t; typedef long __kernel_time_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef long long __kernel_loff_t; typedef __kernel_uid_t __kernel_uid32_t; typedef __kernel_gid_t __kernel_gid32_t; typedef unsigned char __u8; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef __kernel_mode_t mode_t; typedef __kernel_pid_t pid_t; typedef __kernel_timer_t timer_t; typedef __kernel_clockid_t clockid_t; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __kernel_clock_t clock_t; typedef unsigned char u_char; typedef unsigned int u_int; typedef unsigned long u_long; typedef __u32 u_int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned int gfp_t; typedef u64 phys_addr_t; typedef phys_addr_t resource_size_t; struct module; struct bug_entry { unsigned long bug_addr ; char const *file ; unsigned short line ; unsigned short flags ; }; struct completion; struct pt_regs; struct pid; struct task_struct; struct mm_struct; typedef void (*ds_ovfl_callback_t)(struct task_struct * ); struct ds_context { unsigned char *ds ; struct task_struct *owner[2U] ; ds_ovfl_callback_t callback[2U] ; void *buffer[2U] ; unsigned int pages[2U] ; unsigned long count ; struct ds_context **this ; struct task_struct *task ; }; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct info { long ___orig_eip ; long ___ebx ; long ___ecx ; long ___edx ; long ___esi ; long ___edi ; long ___ebp ; long ___eax ; long ___ds ; long ___es ; long ___fs ; long ___orig_eax ; long ___eip ; long ___cs ; long ___eflags ; long ___esp ; long ___ss ; long ___vm86_es ; long ___vm86_ds ; long ___vm86_fs ; long ___vm86_gs ; }; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct page; struct __anonstruct_pgd_t_7 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_7 pgd_t; struct __anonstruct_pgprot_t_8 { pgprotval_t pgprot ; }; typedef struct __anonstruct_pgprot_t_8 pgprot_t; struct __anonstruct_ldv_2034_12 { unsigned int a ; unsigned int b ; }; struct __anonstruct_ldv_2049_13 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type 
: 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion_ldv_2050_11 { struct __anonstruct_ldv_2034_12 ldv_2034 ; struct __anonstruct_ldv_2049_13 ldv_2049 ; }; struct desc_struct { union __anonunion_ldv_2050_11 ldv_2050 ; }; struct cpumask { unsigned long bits[1U] ; }; typedef struct cpumask cpumask_t; struct thread_struct; struct raw_spinlock; struct x8664_pda { struct task_struct *pcurrent ; unsigned long data_offset ; unsigned long kernelstack ; unsigned long oldrsp ; int irqcount ; unsigned int cpunumber ; char *irqstackptr ; short nodenumber ; short in_bootmem ; unsigned int __softirq_pending ; unsigned int __nmi_count ; short mmu_state ; short isidle ; struct mm_struct *active_mm ; unsigned int apic_timer_irqs ; unsigned int irq0_irqs ; unsigned int irq_resched_count ; unsigned int irq_call_count ; unsigned int irq_tlb_count ; unsigned int irq_thermal_count ; unsigned int irq_threshold_count ; unsigned int irq_spurious_count ; }; struct exec_domain; struct map_segment; struct exec_domain { char const *name ; void (*handler)(int , struct pt_regs * ) ; unsigned char pers_low ; unsigned char pers_high ; unsigned long *signal_map ; unsigned long *signal_invmap ; struct map_segment *err_map ; struct map_segment *socktype_map ; struct map_segment *sockopt_map ; struct map_segment *af_map ; struct module *module ; struct exec_domain *next ; }; struct i387_fsave_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct_ldv_4633_15 { u64 rip ; u64 rdp ; }; struct __anonstruct_ldv_4639_16 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion_ldv_4640_14 { struct __anonstruct_ldv_4633_15 ldv_4633 ; struct __anonstruct_ldv_4639_16 ldv_4639 ; }; union __anonunion_ldv_4649_17 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct i387_fxsave_struct { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion_ldv_4640_14 ldv_4640 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion_ldv_4649_17 ldv_4649 ; }; struct i387_soft_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct info *info ; u32 entry_eip ; }; struct xsave_hdr_struct { u64 xstate_bv ; u64 reserved1[2U] ; u64 reserved2[5U] ; }; struct xsave_struct { struct i387_fxsave_struct i387 ; struct xsave_hdr_struct xsave_hdr ; }; union thread_xstate { struct i387_fsave_struct fsave ; struct i387_fxsave_struct fxsave ; struct i387_soft_struct soft ; struct xsave_struct xsave ; }; struct kmem_cache; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned long usersp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long ip ; unsigned long fs ; unsigned long gs ; unsigned long debugreg0 ; unsigned long debugreg1 ; unsigned long debugreg2 ; unsigned long debugreg3 ; unsigned long debugreg6 ; unsigned long debugreg7 ; unsigned long cr2 ; unsigned long trap_no ; unsigned long error_code ; union thread_xstate *xstate ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; unsigned long debugctlmsr ; struct ds_context *ds_ctx ; unsigned int bts_ovfl_signal ; }; struct 
__anonstruct_mm_segment_t_18 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_18 mm_segment_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct timespec; struct compat_timespec; struct __anonstruct_ldv_5073_20 { unsigned long arg0 ; unsigned long arg1 ; unsigned long arg2 ; unsigned long arg3 ; }; struct __anonstruct_futex_21 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; }; struct __anonstruct_nanosleep_22 { clockid_t index ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_23 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion_ldv_5095_19 { struct __anonstruct_ldv_5073_20 ldv_5073 ; struct __anonstruct_futex_21 futex ; struct __anonstruct_nanosleep_22 nanosleep ; struct __anonstruct_poll_23 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion_ldv_5095_19 ldv_5095 ; }; struct thread_info { struct task_struct *task ; struct exec_domain *exec_domain ; unsigned long flags ; __u32 status ; __u32 cpu ; int preempt_count ; mm_segment_t addr_limit ; struct restart_block restart_block ; void *sysenter_return ; }; struct raw_spinlock { unsigned int slock ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct_raw_rwlock_t_24 { unsigned int lock ; }; typedef struct __anonstruct_raw_rwlock_t_24 raw_rwlock_t; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[9U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache ; char const *name ; int cpu ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 2 ; unsigned char hardirqs_off : 1 ; }; struct __anonstruct_spinlock_t_25 { raw_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_spinlock_t_25 spinlock_t; struct __anonstruct_rwlock_t_26 { raw_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_26 rwlock_t; struct __anonstruct_atomic_t_27 { int counter ; }; typedef struct __anonstruct_atomic_t_27 atomic_t; struct __anonstruct_atomic64_t_28 { long counter ; }; typedef struct __anonstruct_atomic64_t_28 atomic64_t; typedef atomic64_t atomic_long_t; struct timespec { time_t tv_sec ; long tv_nsec ; }; struct __wait_queue; typedef struct __wait_queue wait_queue_t; struct __wait_queue { unsigned int flags ; 
void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct __anonstruct_nodemask_t_30 { unsigned long bits[1U] ; }; typedef struct __anonstruct_nodemask_t_30 nodemask_t; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct thread_info *owner ; char const *name ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; struct mutex *lock ; void *magic ; }; struct rw_semaphore; struct rw_semaphore { __s32 activity ; spinlock_t wait_lock ; struct list_head wait_list ; struct lockdep_map dep_map ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct file; struct __anonstruct_mm_context_t_78 { void *ldt ; int size ; struct mutex lock ; void *vdso ; }; typedef struct __anonstruct_mm_context_t_78 mm_context_t; struct vm_area_struct; struct key; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct kobject; struct attribute { char const *name ; struct module *owner ; mode_t mode ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct sysfs_dirent; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct sysfs_dirent *sd ; struct kref kref ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops *sysfs_ops ; struct attribute **default_attrs ; }; struct kobj_uevent_env { char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (*filter)(struct kset * , struct kobject * ) ; char const *(*name)(struct kset * , struct kobject * ) ; int (*uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops *uevent_ops ; }; struct marker; typedef void marker_probe_func(void * , void * , char const * , va_list * ); struct marker_probe_closure { marker_probe_func *func ; void *probe_private ; }; struct marker { char const *name ; char const *format ; char state ; char ptype ; void (*call)(struct marker const * , void * , ...) 
; struct marker_probe_closure single ; struct marker_probe_closure *multi ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct tvec_base; struct timer_list { struct list_head entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; struct tvec_base *base ; void *start_site ; char start_comm[16U] ; int start_pid ; }; struct hrtimer; enum hrtimer_restart; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; }; struct kmem_cache_cpu { void **freelist ; struct page *page ; int node ; unsigned int offset ; unsigned int objsize ; unsigned int stat[18U] ; }; struct kmem_cache_node { spinlock_t list_lock ; unsigned long nr_partial ; unsigned long min_partial ; struct list_head partial ; atomic_long_t nr_slabs ; atomic_long_t total_objects ; struct list_head full ; }; struct kmem_cache_order_objects { unsigned long x ; }; struct kmem_cache { unsigned long flags ; int size ; int objsize ; int offset ; struct kmem_cache_order_objects oo ; struct kmem_cache_node local_node ; struct kmem_cache_order_objects max ; struct kmem_cache_order_objects min ; gfp_t allocflags ; int refcount ; void (*ctor)(void * ) ; int inuse ; int align ; char const *name ; struct list_head list ; struct kobject kobj ; int remote_node_defrag_ratio ; struct kmem_cache_node *node[64U] ; struct kmem_cache_cpu *cpu_slab[8U] ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct rcu_head { struct rcu_head *next ; void (*func)(struct rcu_head * ) ; }; struct tracepoint; struct tracepoint { char const *name ; int state ; void **funcs ; }; struct __anonstruct_local_t_89 { atomic_long_t a ; }; typedef struct __anonstruct_local_t_89 local_t; struct mod_arch_specific { }; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; }; struct exception_table_entry; struct module_ref { local_t count ; }; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2 } ; struct module_sect_attrs; struct module_notes_attrs; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; 
void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; void *unwind_info ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; unsigned int num_symtab ; char *strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; void *percpu ; char *args ; struct marker *markers ; unsigned int num_markers ; struct tracepoint *tracepoints ; unsigned int num_tracepoints ; struct list_head modules_which_use_me ; struct task_struct *waiter ; void (*exit)(void) ; struct module_ref ref[8U] ; }; struct tty_struct; struct tty_driver; struct console { char name[16U] ; void (*write)(struct console * , char const * , unsigned int ) ; int (*read)(struct console * , char * , unsigned int ) ; struct tty_driver *(*device)(struct console * , int * ) ; void (*unblank)(void) ; int (*setup)(struct console * , char * ) ; int (*early_setup)(void) ; short flags ; short index ; int cflag ; void *data ; struct console *next ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct rb_node { unsigned long rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct prio_tree_node; struct raw_prio_tree_node { struct prio_tree_node *left ; struct prio_tree_node *right ; struct prio_tree_node *parent ; }; struct prio_tree_node { struct prio_tree_node *left ; struct prio_tree_node *right ; struct prio_tree_node *parent ; unsigned long start ; unsigned long last ; }; struct address_space; typedef atomic_long_t mm_counter_t; struct __anonstruct_ldv_10399_92 { u16 inuse ; u16 objects ; }; union __anonunion_ldv_10400_91 { atomic_t _mapcount ; struct __anonstruct_ldv_10399_92 ldv_10399 ; }; struct __anonstruct_ldv_10405_94 { unsigned long private ; struct address_space *mapping ; }; union __anonunion_ldv_10409_93 { struct __anonstruct_ldv_10405_94 ldv_10405 ; spinlock_t ptl ; struct kmem_cache *slab ; struct page *first_page ; }; union __anonunion_ldv_10413_95 { unsigned long index ; void *freelist ; }; struct page { unsigned long flags ; atomic_t _count ; union __anonunion_ldv_10400_91 ldv_10400 ; union __anonunion_ldv_10409_93 ldv_10409 ; union __anonunion_ldv_10413_95 ldv_10413 ; struct list_head lru ; }; struct __anonstruct_vm_set_97 { struct list_head list ; void *parent ; struct vm_area_struct *head ; }; union __anonunion_shared_96 { struct __anonstruct_vm_set_97 vm_set ; struct raw_prio_tree_node prio_tree_node ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { struct mm_struct *vm_mm ; unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct rb_node vm_rb ; union __anonunion_shared_96 shared ; struct list_head anon_vma_node ; struct anon_vma *anon_vma ; struct vm_operations_struct *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; unsigned long vm_truncate_count ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct kioctx; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; struct vm_area_struct *mmap_cache ; unsigned long (*get_unmapped_area)(struct 
file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; void (*unmap_area)(struct mm_struct * , unsigned long ) ; unsigned long mmap_base ; unsigned long task_size ; unsigned long cached_hole_size ; unsigned long free_area_cache ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; int map_count ; struct rw_semaphore mmap_sem ; spinlock_t page_table_lock ; struct list_head mmlist ; mm_counter_t _file_rss ; mm_counter_t _anon_rss ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long reserved_vm ; unsigned long def_flags ; unsigned long nr_ptes ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[42U] ; cpumask_t cpu_vm_mask ; mm_context_t context ; unsigned int faultstamp ; unsigned int token_priority ; unsigned int last_interval ; unsigned long flags ; struct core_state *core_state ; rwlock_t ioctx_list_lock ; struct kioctx *ioctx_list ; struct task_struct *owner ; struct file *exe_file ; unsigned long num_exe_file_vmas ; struct mmu_notifier_mm *mmu_notifier_mm ; }; typedef unsigned long cputime_t; struct sem_undo_list; struct sem_undo_list { atomic_t refcnt ; spinlock_t lock ; struct list_head list_proc ; }; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct siginfo; struct __anonstruct_sigset_t_98 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_98 sigset_t; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_100 { pid_t _pid ; uid_t _uid ; }; struct __anonstruct__timer_101 { timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_102 { pid_t _pid ; uid_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_103 { pid_t _pid ; uid_t _uid ; int _status ; clock_t _utime ; clock_t _stime ; }; struct __anonstruct__sigfault_104 { void *_addr ; }; struct __anonstruct__sigpoll_105 { long _band ; int _fd ; }; union __anonunion__sifields_99 { int _pad[28U] ; struct __anonstruct__kill_100 _kill ; struct __anonstruct__timer_101 _timer ; struct __anonstruct__rt_102 _rt ; struct __anonstruct__sigchld_103 _sigchld ; struct __anonstruct__sigfault_104 _sigfault ; struct __anonstruct__sigpoll_105 _sigpoll ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_99 _sifields ; }; typedef struct siginfo siginfo_t; struct user_struct; struct sigpending { struct list_head list ; sigset_t signal ; }; struct dentry; struct vfsmount; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct fs_struct { atomic_t count ; rwlock_t lock ; int umask ; struct path root ; struct path pwd ; }; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct rcu_head rcu ; struct upid 
numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct prop_local_single { unsigned long events ; unsigned long period ; int shift ; spinlock_t lock ; }; struct __anonstruct_seccomp_t_108 { int mode ; }; typedef struct __anonstruct_seccomp_t_108 seccomp_t; struct plist_head { struct list_head prio_list ; struct list_head node_list ; spinlock_t *lock ; }; struct rt_mutex_waiter; struct rlimit { unsigned long rlim_cur ; unsigned long rlim_max ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; enum hrtimer_cb_mode { HRTIMER_CB_SOFTIRQ = 0, HRTIMER_CB_IRQSAFE_PERCPU = 1, HRTIMER_CB_IRQSAFE_UNLOCKED = 2 } ; struct hrtimer { struct rb_node node ; ktime_t _expires ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; struct list_head cb_entry ; enum hrtimer_cb_mode cb_mode ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; clockid_t index ; struct rb_root active ; struct rb_node *first ; ktime_t resolution ; ktime_t (*get_time)(void) ; ktime_t softirq_time ; ktime_t offset ; }; struct hrtimer_cpu_base { spinlock_t lock ; struct hrtimer_clock_base clock_base[2U] ; struct list_head cb_pending ; ktime_t expires_next ; int hres_active ; unsigned long nr_events ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct futex_pi_state; struct robust_list_head; struct bio; struct cfs_rq; struct task_group; struct nsproxy; struct kvec { void *iov_base ; size_t iov_len ; }; struct aio_ring_info { unsigned long mmap_base ; unsigned long mmap_size ; struct page **ring_pages ; spinlock_t ring_lock ; long nr_pages ; unsigned int nr ; unsigned int tail ; struct page *internal_pages[8U] ; }; struct kioctx { atomic_t users ; int dead ; struct mm_struct *mm ; unsigned long user_id ; struct kioctx *next ; wait_queue_head_t wait ; spinlock_t ctx_lock ; int reqs_active ; struct list_head active_reqs ; struct list_head run_list ; unsigned int max_reqs ; struct aio_ring_info ring_info ; struct delayed_work wq ; }; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct thread_group_cputime { struct task_cputime *totals ; }; union __anonunion_ldv_11778_110 { pid_t pgrp ; pid_t __pgrp ; }; union __anonunion_ldv_11783_111 { pid_t session ; pid_t __session ; }; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t count ; atomic_t live ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; cputime_t it_prof_expires ; cputime_t it_virt_expires ; cputime_t it_prof_incr ; cputime_t it_virt_incr ; struct 
thread_group_cputime cputime ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; union __anonunion_ldv_11778_110 ldv_11778 ; struct pid *tty_old_pgrp ; union __anonunion_ldv_11783_111 ldv_11783 ; int leader ; struct tty_struct *tty ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; struct task_io_accounting ioac ; struct rlimit rlim[16U] ; struct key *session_keyring ; struct key *process_keyring ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; struct tty_audit_buf *tty_audit_buf ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t files ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t epoll_devs ; atomic_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; uid_t uid ; struct task_group *tg ; struct kobject kobj ; struct work_struct work ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long cpu_time ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; unsigned int bkl_count ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; struct timespec blkio_start ; struct timespec blkio_end ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; struct timespec freepages_start ; struct timespec freepages_end ; u64 freepages_delay ; u32 freepages_count ; }; enum cpu_idle_type { CPU_IDLE = 0, CPU_NOT_IDLE = 1, CPU_NEWLY_IDLE = 2, CPU_MAX_IDLE_TYPES = 3 } ; struct sched_group { struct sched_group *next ; cpumask_t cpumask ; unsigned int __cpu_power ; u32 reciprocal_cpu_power ; }; enum sched_domain_level { SD_LV_NONE = 0, SD_LV_SIBLING = 1, SD_LV_MC = 2, SD_LV_CPU = 3, SD_LV_NODE = 4, SD_LV_ALLNODES = 5, SD_LV_MAX = 6 } ; struct sched_domain { struct sched_domain *parent ; struct sched_domain *child ; struct sched_group *groups ; cpumask_t span ; unsigned long min_interval ; unsigned long max_interval ; unsigned int busy_factor ; unsigned int imbalance_pct ; unsigned int cache_nice_tries ; unsigned int busy_idx ; unsigned int idle_idx ; unsigned int newidle_idx ; unsigned int wake_idx ; unsigned int forkexec_idx ; int flags ; enum sched_domain_level level ; unsigned long last_balance ; unsigned int balance_interval ; unsigned int nr_balance_failed ; u64 last_update ; unsigned int lb_count[3U] ; unsigned int lb_failed[3U] ; unsigned int lb_balanced[3U] ; unsigned int lb_imbalance[3U] ; unsigned int lb_gained[3U] ; unsigned int lb_hot_gained[3U] ; unsigned int lb_nobusyg[3U] ; unsigned int lb_nobusyq[3U] ; unsigned int alb_count ; unsigned int alb_failed ; unsigned int alb_pushed ; unsigned int sbe_count ; unsigned int sbe_balanced ; unsigned int sbe_pushed ; unsigned int sbf_count ; unsigned int sbf_balanced ; unsigned int sbf_pushed ; unsigned int ttwu_wake_remote ; unsigned int ttwu_move_affine ; unsigned int ttwu_move_balance ; char *name ; }; struct io_context; struct group_info { int ngroups ; atomic_t usage ; gid_t small_block[32U] ; int nblocks ; gid_t *blocks[0U] ; }; struct audit_context; struct pipe_inode_info; struct rq; struct 
sched_class { struct sched_class const *next ; void (*enqueue_task)(struct rq * , struct task_struct * , int ) ; void (*dequeue_task)(struct rq * , struct task_struct * , int ) ; void (*yield_task)(struct rq * ) ; void (*check_preempt_curr)(struct rq * , struct task_struct * , int ) ; struct task_struct *(*pick_next_task)(struct rq * ) ; void (*put_prev_task)(struct rq * , struct task_struct * ) ; int (*select_task_rq)(struct task_struct * , int ) ; unsigned long (*load_balance)(struct rq * , int , struct rq * , unsigned long , struct sched_domain * , enum cpu_idle_type , int * , int * ) ; int (*move_one_task)(struct rq * , int , struct rq * , struct sched_domain * , enum cpu_idle_type ) ; void (*pre_schedule)(struct rq * , struct task_struct * ) ; void (*post_schedule)(struct rq * ) ; void (*task_wake_up)(struct rq * , struct task_struct * ) ; void (*set_cpus_allowed)(struct task_struct * , cpumask_t const * ) ; void (*rq_online)(struct rq * ) ; void (*rq_offline)(struct rq * ) ; void (*set_curr_task)(struct rq * ) ; void (*task_tick)(struct rq * , struct task_struct * , int ) ; void (*task_new)(struct rq * , struct task_struct * ) ; void (*switched_from)(struct rq * , struct task_struct * , int ) ; void (*switched_to)(struct rq * , struct task_struct * , int ) ; void (*prio_changed)(struct rq * , struct task_struct * , int , int ) ; void (*moved_group)(struct task_struct * ) ; }; struct load_weight { unsigned long weight ; unsigned long inv_weight ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 last_wakeup ; u64 avg_overlap ; u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_forced2_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned int time_slice ; int nr_cpus_allowed ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct linux_binfmt; struct files_struct; struct css_set; struct compat_robust_list_head; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; int lock_depth ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct hlist_head preempt_notifiers ; unsigned char fpu_counter ; s8 oomkilladj ; unsigned int btrace_seq ; unsigned int policy ; cpumask_t cpus_allowed ; struct sched_info sched_info ; struct list_head tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; struct linux_binfmt *binfmt ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned int personality ; unsigned char did_exec : 1 ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; 
struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; cputime_t prev_utime ; cputime_t prev_stime ; unsigned long nvcsw ; unsigned long nivcsw ; struct timespec start_time ; struct timespec real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; uid_t uid ; uid_t euid ; uid_t suid ; uid_t fsuid ; gid_t gid ; gid_t egid ; gid_t sgid ; gid_t fsgid ; struct group_info *group_info ; kernel_cap_t cap_effective ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_bset ; struct user_struct *user ; unsigned int securebits ; unsigned char jit_keyring ; struct key *request_key_auth ; struct key *thread_keyring ; char comm[16U] ; int link_count ; int total_link_count ; struct sysv_sem sysvsem ; unsigned long last_switch_timestamp ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; void *security ; struct audit_context *audit_context ; uid_t loginuid ; unsigned int sessionid ; seccomp_t seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; spinlock_t pi_lock ; struct plist_head pi_waiters ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; int hardirqs_enabled ; unsigned long hardirq_enable_ip ; unsigned int hardirq_enable_event ; unsigned long hardirq_disable_ip ; unsigned int hardirq_disable_event ; int softirqs_enabled ; unsigned long softirq_disable_ip ; unsigned int softirq_disable_event ; unsigned long softirq_enable_ip ; unsigned int softirq_enable_event ; int hardirq_context ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; void *journal_info ; struct bio *bio_list ; struct bio **bio_tail ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; int cpuset_mems_generation ; int cpuset_mem_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct mempolicy *mempolicy ; short il_next ; atomic_t fs_excl ; struct rcu_head rcu ; struct pipe_inode_info *splice_pipe ; struct task_delay_info *delays ; int make_it_fail ; struct prop_local_single dirties ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; struct list_head *scm_work_list ; }; struct otp_info { uint32_t start ; uint32_t length ; uint32_t locked ; }; struct nand_oobfree { uint32_t offset ; 
uint32_t length ; }; struct nand_ecclayout { uint32_t eccbytes ; uint32_t eccpos[64U] ; uint32_t oobavail ; struct nand_oobfree oobfree[8U] ; }; struct mtd_ecc_stats { uint32_t corrected ; uint32_t failed ; uint32_t badblocks ; uint32_t bbtblocks ; }; struct mtd_info; struct erase_info { struct mtd_info *mtd ; uint64_t addr ; uint64_t len ; uint64_t fail_addr ; u_long time ; u_long retries ; u_int dev ; u_int cell ; void (*callback)(struct erase_info * ) ; u_long priv ; u_char state ; struct erase_info *next ; }; struct mtd_erase_region_info { uint64_t offset ; u_int32_t erasesize ; u_int32_t numblocks ; unsigned long *lockmap ; }; enum ldv_13417 { MTD_OOB_PLACE = 0, MTD_OOB_AUTO = 1, MTD_OOB_RAW = 2 } ; typedef enum ldv_13417 mtd_oob_mode_t; struct mtd_oob_ops { mtd_oob_mode_t mode ; size_t len ; size_t retlen ; size_t ooblen ; size_t oobretlen ; uint32_t ooboffs ; uint8_t *datbuf ; uint8_t *oobbuf ; }; struct mtd_info { u_char type ; u_int32_t flags ; uint64_t size ; u_int32_t erasesize ; u_int32_t writesize ; u_int32_t oobsize ; u_int32_t oobavail ; unsigned int erasesize_shift ; unsigned int writesize_shift ; unsigned int erasesize_mask ; unsigned int writesize_mask ; char const *name ; int index ; struct nand_ecclayout *ecclayout ; int numeraseregions ; struct mtd_erase_region_info *eraseregions ; int (*erase)(struct mtd_info * , struct erase_info * ) ; int (*point)(struct mtd_info * , loff_t , size_t , size_t * , void ** , resource_size_t * ) ; void (*unpoint)(struct mtd_info * , loff_t , size_t ) ; int (*read)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*write)(struct mtd_info * , loff_t , size_t , size_t * , u_char const * ) ; int (*panic_write)(struct mtd_info * , loff_t , size_t , size_t * , u_char const * ) ; int (*read_oob)(struct mtd_info * , loff_t , struct mtd_oob_ops * ) ; int (*write_oob)(struct mtd_info * , loff_t , struct mtd_oob_ops * ) ; int (*get_fact_prot_info)(struct mtd_info * , struct otp_info * , size_t ) ; int (*read_fact_prot_reg)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*get_user_prot_info)(struct mtd_info * , struct otp_info * , size_t ) ; int (*read_user_prot_reg)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*write_user_prot_reg)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*lock_user_prot_reg)(struct mtd_info * , loff_t , size_t ) ; int (*writev)(struct mtd_info * , struct kvec const * , unsigned long , loff_t , size_t * ) ; void (*sync)(struct mtd_info * ) ; int (*lock)(struct mtd_info * , loff_t , uint64_t ) ; int (*unlock)(struct mtd_info * , loff_t , uint64_t ) ; int (*suspend)(struct mtd_info * ) ; void (*resume)(struct mtd_info * ) ; int (*block_isbad)(struct mtd_info * , loff_t ) ; int (*block_markbad)(struct mtd_info * , loff_t ) ; struct notifier_block reboot_notifier ; struct mtd_ecc_stats ecc_stats ; int subpage_sft ; void *priv ; struct module *owner ; int usecount ; int (*get_device)(struct mtd_info * ) ; void (*put_device)(struct mtd_info * ) ; }; struct mtd_notifier { void (*add)(struct mtd_info * ) ; void (*remove)(struct mtd_info * ) ; struct list_head list ; }; struct mtdoops_context { int mtd_index ; struct work_struct work_erase ; struct work_struct work_write ; struct mtd_info *mtd ; int oops_pages ; int nextpage ; int nextcount ; void *oops_buf ; spinlock_t writecount_lock ; int ready ; int writecount ; }; enum __anonenum_97 { LDV_SPIN_UNLOCKED = 0, LDV_SPIN_LOCKED = 1 } ; void *memcpy(void * , void const * , unsigned long ) ; void 
ldv_spin_lock(void) ; void ldv_spin_unlock(void) ; extern int printk(char const * , ...) ; extern int oops_in_progress ; extern void *memset(void * , int , size_t ) ; extern void __bad_pda_field(void) ; extern struct x8664_pda _proxy_pda ; __inline static struct task_struct *get_current(void) { struct task_struct *ret__ ; { switch (8UL) { case 2UL: __asm__ ("movw %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent)); goto ldv_4238; case 4UL: __asm__ ("movl %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent)); goto ldv_4238; case 8UL: __asm__ ("movq %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent)); goto ldv_4238; default: __bad_pda_field(); } ldv_4238: ; return (ret__); } } __inline static unsigned long __xchg(unsigned long x , void volatile *ptr , int size ) { { switch (size) { case 1: __asm__ volatile ("xchgb %b0,%1": "=q" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_4253; case 2: __asm__ volatile ("xchgw %w0,%1": "=r" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_4253; case 4: __asm__ volatile ("xchgl %k0,%1": "=r" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_4253; case 8: __asm__ volatile ("xchgq %0,%1": "=r" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_4253; } ldv_4253: ; return (x); } } __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } __inline static struct thread_info *current_thread_info(void) { struct thread_info *ti ; unsigned long ret__ ; { switch (8UL) { case 2UL: __asm__ ("movw %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack)); goto ldv_5114; case 4UL: __asm__ ("movl %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack)); goto ldv_5114; case 8UL: __asm__ ("movq %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack)); goto ldv_5114; default: __bad_pda_field(); } ldv_5114: ti = (struct thread_info *)(ret__ - 8152UL); return (ti); } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; extern int default_wake_function(wait_queue_t * , unsigned int , int , void * ) ; extern void init_waitqueue_head(wait_queue_head_t * ) ; extern void add_wait_queue(wait_queue_head_t * , wait_queue_t * ) ; extern void remove_wait_queue(wait_queue_head_t * , wait_queue_t * ) ; extern void __wake_up(wait_queue_head_t * , unsigned int , int , void * ) ; extern unsigned long __get_free_pages(gfp_t , unsigned int ) ; unsigned long ldv___get_free_pages_2(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) ; extern void flush_scheduled_work(void) ; extern int schedule_work(struct work_struct * ) ; extern void *kmem_cache_alloc(struct kmem_cache * , gfp_t ) ; void *ldv_kmem_cache_alloc_4(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ; void *ldv_kmem_cache_alloc_8(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) ; void ldv_check_alloc_flags(gfp_t flags ) ; void ldv_check_alloc_nonatomic(void) ; extern void register_console(struct console * ) ; extern int unregister_console(struct console * ) ; extern void *vmalloc(unsigned long ) ; void *ldv_vmalloc_11(unsigned long ldv_func_arg1 ) ; extern void vfree(void const * ) ; extern void schedule(void) ; extern void register_mtd_user(struct mtd_notifier * ) ; extern int unregister_mtd_user(struct mtd_notifier * ) ; static struct mtdoops_context oops_cxt ; static void mtdoops_erase_callback(struct erase_info *done ) { wait_queue_head_t *wait_q ; { wait_q = 
(wait_queue_head_t *)done->priv; __wake_up(wait_q, 3U, 1, 0); return; } } static int mtdoops_erase_block(struct mtd_info *mtd , int offset ) { struct erase_info erase ; wait_queue_t wait ; struct task_struct *tmp ; wait_queue_head_t wait_q ; int ret ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & default_wake_function; wait.task_list.next = 0; wait.task_list.prev = 0; init_waitqueue_head(& wait_q); erase.mtd = mtd; erase.callback = & mtdoops_erase_callback; erase.addr = (uint64_t )offset; erase.len = (uint64_t )mtd->erasesize; erase.priv = (unsigned long )(& wait_q); tmp___0 = get_current(); __xchg(1UL, (void volatile *)(& tmp___0->state), 8); add_wait_queue(& wait_q, & wait); ret = (*(mtd->erase))(mtd, & erase); if (ret != 0) { tmp___1 = get_current(); __xchg(0UL, (void volatile *)(& tmp___1->state), 8); remove_wait_queue(& wait_q, & wait); printk("<4>mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", erase.addr, erase.len, mtd->name); return (ret); } else { } schedule(); remove_wait_queue(& wait_q, & wait); return (0); } } static void mtdoops_inc_counter(struct mtdoops_context *cxt ) { struct mtd_info *mtd ; size_t retlen ; u32 count ; int ret ; { mtd = cxt->mtd; cxt->nextpage = cxt->nextpage + 1; if (cxt->nextpage >= cxt->oops_pages) { cxt->nextpage = 0; } else { } cxt->nextcount = cxt->nextcount + 1; if (cxt->nextcount == -1) { cxt->nextcount = 0; } else { } ret = (*(mtd->read))(mtd, (loff_t )(cxt->nextpage * 4096), 4UL, & retlen, (u_char *)(& count)); if (retlen != 4UL || (ret < 0 && ret != -117)) { printk("<3>mtdoops: Read failure at %d (%td of 4 read), err %d.\n", cxt->nextpage * 4096, retlen, ret); schedule_work(& cxt->work_erase); return; } else { } if (count != 4294967295U) { schedule_work(& cxt->work_erase); return; } else { } printk("<7>mtdoops: Ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); cxt->ready = 1; return; } } static void mtdoops_workfunc_erase(struct work_struct *work ) { struct mtdoops_context *cxt ; struct work_struct const *__mptr ; struct mtd_info *mtd ; int i ; int j ; int ret ; int mod ; { __mptr = (struct work_struct const *)work; cxt = (struct mtdoops_context *)__mptr + 0xfffffffffffffff8UL; mtd = cxt->mtd; i = 0; if ((unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) { return; } else { } mod = (int )((u_int32_t )(cxt->nextpage * 4096) % mtd->erasesize); if (mod != 0) { cxt->nextpage = (int )((u_int32_t )cxt->nextpage + (mtd->erasesize - (u_int32_t )mod) / 4096U); if (cxt->nextpage >= cxt->oops_pages) { cxt->nextpage = 0; } else { } } else { } goto ldv_13778; ldv_13777: ret = (*(mtd->block_isbad))(mtd, (loff_t )(cxt->nextpage * 4096)); if (ret == 0) { goto ldv_13775; } else { } if (ret < 0) { printk("<3>mtdoops: block_isbad failed, aborting.\n"); return; } else { } badblock: printk("<4>mtdoops: Bad block at %08x\n", cxt->nextpage * 4096); i = i + 1; cxt->nextpage = (int )((u_int32_t )cxt->nextpage + mtd->erasesize / 4096U); if (cxt->nextpage >= cxt->oops_pages) { cxt->nextpage = 0; } else { } if ((u_int32_t )i == (u_int32_t )cxt->oops_pages / (mtd->erasesize / 4096U)) { printk("<3>mtdoops: All blocks bad!\n"); return; } else { } ldv_13778: ; if ((unsigned long )mtd->block_isbad != (unsigned long )((int (*)(struct mtd_info * , loff_t ))0)) { goto ldv_13777; } else { } ldv_13775: j = 0; ret = -1; goto ldv_13780; ldv_13779: ret = mtdoops_erase_block(mtd, cxt->nextpage * 4096); j = j + 1; ldv_13780: ; if (j <= 2 && ret < 0) { goto 
ldv_13779; } else { } if (ret >= 0) { printk("<7>mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); cxt->ready = 1; return; } else { } if ((unsigned long )mtd->block_markbad != (unsigned long )((int (*)(struct mtd_info * , loff_t ))0) && ret == -5) { ret = (*(mtd->block_markbad))(mtd, (loff_t )(cxt->nextpage * 4096)); if (ret < 0) { printk("<3>mtdoops: block_markbad failed, aborting.\n"); return; } else { } } else { } goto badblock; } } static void mtdoops_write(struct mtdoops_context *cxt , int panic___0 ) { struct mtd_info *mtd ; size_t retlen ; int ret ; { mtd = cxt->mtd; if (cxt->writecount <= 4095) { memset(cxt->oops_buf + (unsigned long )cxt->writecount, 255, (size_t )(4096 - cxt->writecount)); } else { } if (panic___0 != 0) { ret = (*(mtd->panic_write))(mtd, (loff_t )(cxt->nextpage * 4096), 4096UL, & retlen, (u_char const *)cxt->oops_buf); } else { ret = (*(mtd->write))(mtd, (loff_t )(cxt->nextpage * 4096), 4096UL, & retlen, (u_char const *)cxt->oops_buf); } cxt->writecount = 0; if (retlen != 4096UL || ret < 0) { printk("<3>mtdoops: Write failure at %d (%td of %d written), err %d.\n", cxt->nextpage * 4096, retlen, 4096, ret); } else { } mtdoops_inc_counter(cxt); return; } } static void mtdoops_workfunc_write(struct work_struct *work ) { struct mtdoops_context *cxt ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; cxt = (struct mtdoops_context *)__mptr + 0xffffffffffffffb8UL; mtdoops_write(cxt, 0); return; } } static void find_next_position(struct mtdoops_context *cxt ) { struct mtd_info *mtd ; int ret ; int page ; int maxpos ; u32 count[2U] ; u32 maxcount ; size_t retlen ; { mtd = cxt->mtd; maxpos = 0; maxcount = 4294967295U; page = 0; goto ldv_13807; ldv_13806: ret = (*(mtd->read))(mtd, (loff_t )(page * 4096), 8UL, & retlen, (u_char *)(& count)); if (retlen != 8UL || (ret < 0 && ret != -117)) { printk("<3>mtdoops: Read failure at %d (%td of 8 read), err %d.\n", page * 4096, retlen, ret); goto ldv_13805; } else { } if (count[1] != 1560304896U) { goto ldv_13805; } else { } if (count[0] == 4294967295U) { goto ldv_13805; } else { } if (maxcount == 4294967295U) { maxcount = count[0]; maxpos = page; } else if (count[0] <= 1073741823U && maxcount > 3221225472U) { maxcount = count[0]; maxpos = page; } else if (count[0] > maxcount && count[0] <= 3221225471U) { maxcount = count[0]; maxpos = page; } else if ((count[0] > maxcount && count[0] > 3221225472U) && maxcount > 2147483648U) { maxcount = count[0]; maxpos = page; } else { } ldv_13805: page = page + 1; ldv_13807: ; if (cxt->oops_pages > page) { goto ldv_13806; } else { } if (maxcount == 4294967295U) { cxt->nextpage = 0; cxt->nextcount = 1; schedule_work(& cxt->work_erase); return; } else { } cxt->nextpage = maxpos; cxt->nextcount = (int )maxcount; mtdoops_inc_counter(cxt); return; } } static void mtdoops_notify_add(struct mtd_info *mtd ) { struct mtdoops_context *cxt ; { cxt = & oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) { return; } else { } if (mtd->size < (uint64_t )(mtd->erasesize * 2U)) { printk("<3>MTD partition %d not big enough for mtdoops\n", mtd->index); return; } else { } if (mtd->erasesize <= 4095U) { printk("<3>Eraseblock size of MTD partition %d too small\n", mtd->index); return; } else { } cxt->mtd = mtd; if (mtd->size > 2147483647ULL) { cxt->oops_pages = 524287; } else { cxt->oops_pages = (int )mtd->size / 4096; } find_next_position(cxt); printk("<6>mtdoops: Attached to MTD device %d\n", mtd->index); return; } } static void mtdoops_notify_remove(struct mtd_info 
*mtd ) { struct mtdoops_context *cxt ; { cxt = & oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) { return; } else { } cxt->mtd = 0; flush_scheduled_work(); return; } } static void mtdoops_console_sync(void) { struct mtdoops_context *cxt ; struct mtd_info *mtd ; struct thread_info *tmp ; { cxt = & oops_cxt; mtd = cxt->mtd; if ((cxt->ready == 0 || (unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) || cxt->writecount == 0) { return; } else { } ldv_spin_lock(); if (cxt->ready == 0) { ldv_spin_unlock(); return; } else { } cxt->ready = 0; ldv_spin_unlock(); if ((unsigned long )mtd->panic_write != (unsigned long )((int (*)(struct mtd_info * , loff_t , size_t , size_t * , u_char const * ))0)) { tmp = current_thread_info(); if (((unsigned long )tmp->preempt_count & 268435200UL) != 0UL) { mtdoops_write(cxt, 1); } else { schedule_work(& cxt->work_write); } } else { schedule_work(& cxt->work_write); } return; } } static void mtdoops_console_write(struct console *co , char const *s , unsigned int count ) { struct mtdoops_context *cxt ; struct mtd_info *mtd ; u32 *stamp ; u32 *tmp ; size_t __len ; void *__ret ; { cxt = (struct mtdoops_context *)co->data; mtd = cxt->mtd; if (oops_in_progress == 0) { mtdoops_console_sync(); return; } else { } if (cxt->ready == 0 || (unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) { return; } else { } ldv_spin_lock(); if (cxt->ready == 0) { return; } else { } if (cxt->writecount == 0) { stamp = (u32 *)cxt->oops_buf; tmp = stamp; stamp = stamp + 1; *tmp = (u32 )cxt->nextcount; *stamp = 1560304896U; cxt->writecount = 8; } else { } if ((unsigned int )cxt->writecount + count > 4096U) { count = (unsigned int )(4096 - cxt->writecount); } else { } __len = (size_t )count; __ret = memcpy(cxt->oops_buf + (unsigned long )cxt->writecount, (void const *)s, __len); cxt->writecount = (int )((unsigned int )cxt->writecount + count); ldv_spin_unlock(); if (cxt->writecount == 4096) { mtdoops_console_sync(); } else { } return; } } static int mtdoops_console_setup(struct console *co , char *options ) { struct mtdoops_context *cxt ; { cxt = (struct mtdoops_context *)co->data; if (cxt->mtd_index != -1) { return (-16); } else { } if ((int )co->index == -1) { return (-22); } else { } cxt->mtd_index = (int )co->index; return (0); } } static struct mtd_notifier mtdoops_notifier = {& mtdoops_notify_add, & mtdoops_notify_remove, {0, 0}}; static struct console mtdoops_console = {{'t', 't', 'y', 'M', 'T', 'D', '\000'}, & mtdoops_console_write, 0, 0, & mtdoops_console_sync, & mtdoops_console_setup, 0, (short)0, -1, 0, (void *)(& oops_cxt), 0}; static int mtdoops_console_init(void) { struct mtdoops_context *cxt ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_1 ; { cxt = & oops_cxt; cxt->mtd_index = -1; cxt->oops_buf = ldv_vmalloc_11(4096UL); if ((unsigned long )cxt->oops_buf == (unsigned long )((void *)0)) { printk("<3>Failed to allocate mtdoops buffer workspace\n"); return (-12); } else { } __constr_expr_0.counter = 0L; cxt->work_erase.data = __constr_expr_0; lockdep_init_map(& cxt->work_erase.lockdep_map, "&cxt->work_erase", & __key, 0); INIT_LIST_HEAD(& cxt->work_erase.entry); cxt->work_erase.func = & mtdoops_workfunc_erase; __constr_expr_1.counter = 0L; cxt->work_write.data = __constr_expr_1; lockdep_init_map(& cxt->work_write.lockdep_map, "&cxt->work_write", & __key___0, 0); INIT_LIST_HEAD(& cxt->work_write.entry); cxt->work_write.func = & mtdoops_workfunc_write; 
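/*
 * With the oops buffer allocated and both work items initialised, the init path finishes by
 * registering the mtdoops console and the MTD user notifier.  The main() function further
 * down serves as the generated environment model: it calls mtdoops_console_init(), repeatedly
 * invokes the notifier and console callbacks under nondeterministic choices, and finally runs
 * mtdoops_console_exit() followed by ldv_check_final_state().
 */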
register_console(& mtdoops_console); register_mtd_user(& mtdoops_notifier); return (0); } } static void mtdoops_console_exit(void) { struct mtdoops_context *cxt ; { cxt = & oops_cxt; unregister_mtd_user(& mtdoops_notifier); unregister_console(& mtdoops_console); vfree((void const *)cxt->oops_buf); return; } } extern void ldv_check_final_state(void) ; extern void ldv_initialize(void) ; extern void ldv_handler_precall(void) ; extern int nondet_int(void) ; int LDV_IN_INTERRUPT ; int main(void) { struct mtd_info *var_group1 ; struct console *var_group2 ; char const *var_mtdoops_console_write_10_p1 ; unsigned int var_mtdoops_console_write_10_p2 ; char *var_mtdoops_console_setup_11_p1 ; int ldv_s_mtdoops_notifier_mtd_notifier ; int tmp ; int tmp___0 ; int tmp___1 ; { ldv_s_mtdoops_notifier_mtd_notifier = 0; LDV_IN_INTERRUPT = 1; ldv_initialize(); ldv_handler_precall(); tmp = mtdoops_console_init(); if (tmp != 0) { goto ldv_final; } else { } goto ldv_13898; ldv_13897: tmp___0 = nondet_int(); switch (tmp___0) { case 0: ; if (ldv_s_mtdoops_notifier_mtd_notifier == 0) { ldv_handler_precall(); mtdoops_notify_remove(var_group1); ldv_s_mtdoops_notifier_mtd_notifier = 0; } else { } goto ldv_13891; case 1: ldv_handler_precall(); mtdoops_notify_add(var_group1); goto ldv_13891; case 2: ldv_handler_precall(); mtdoops_console_write(var_group2, var_mtdoops_console_write_10_p1, var_mtdoops_console_write_10_p2); goto ldv_13891; case 3: ldv_handler_precall(); mtdoops_console_setup(var_group2, var_mtdoops_console_setup_11_p1); goto ldv_13891; case 4: ldv_handler_precall(); mtdoops_console_sync(); goto ldv_13891; default: ; goto ldv_13891; } ldv_13891: ; ldv_13898: tmp___1 = nondet_int(); if (tmp___1 != 0 || ldv_s_mtdoops_notifier_mtd_notifier != 0) { goto ldv_13897; } else { } ldv_handler_precall(); mtdoops_console_exit(); ldv_final: ldv_check_final_state(); return 0; } } unsigned long ldv___get_free_pages_2(gfp_t ldv_func_arg1 , unsigned int ldv_func_arg2 ) { unsigned long tmp ; { ldv_check_alloc_flags(ldv_func_arg1); tmp = __get_free_pages(ldv_func_arg1, ldv_func_arg2); return (tmp); } } void *ldv_kmem_cache_alloc_4(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) { { ldv_check_alloc_flags(ldv_func_arg2); kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2); return ((void *)0); } } void *ldv_kmem_cache_alloc_8(struct kmem_cache *ldv_func_arg1 , gfp_t ldv_func_arg2 ) { { ldv_check_alloc_flags(ldv_func_arg2); kmem_cache_alloc(ldv_func_arg1, ldv_func_arg2); return ((void *)0); } } void *ldv_vmalloc_11(unsigned long ldv_func_arg1 ) { { ldv_check_alloc_nonatomic(); vmalloc(ldv_func_arg1); return ((void *)0); } } long ldv__builtin_expect(long exp , long c ) ; __inline static void ldv_error(void) { { LDV_ERROR: {reach_error();abort();} } } extern int ldv_undef_int(void) ; long ldv__builtin_expect(long exp , long c ) { { return (exp); } } int ldv_spin = LDV_SPIN_UNLOCKED; void ldv_check_alloc_flags(gfp_t flags ) { { if (ldv_spin == LDV_SPIN_UNLOCKED || flags == 32U) { } else { ldv_error(); } return; } } extern struct page *ldv_some_page(void) ; struct page *ldv_check_alloc_flags_and_return_some_page(gfp_t flags ) { struct page *tmp ; { if (ldv_spin == LDV_SPIN_UNLOCKED || flags == 32U) { } else { ldv_error(); } tmp = ldv_some_page(); return (tmp); } } void ldv_check_alloc_nonatomic(void) { { if (ldv_spin == LDV_SPIN_UNLOCKED) { } else { ldv_error(); } return; } } void ldv_spin_lock(void) { { ldv_spin = LDV_SPIN_LOCKED; return; } } void ldv_spin_unlock(void) { { ldv_spin = LDV_SPIN_UNLOCKED; return; } } int 
ldv_spin_trylock(void) { int is_lock ; { is_lock = ldv_undef_int(); if (is_lock) { return (0); } else { ldv_spin = LDV_SPIN_LOCKED; return (1); } } }
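/*
 * Reachability property of this task: reach_error() (and through it __assert_fail()) is
 * called only from ldv_error().  ldv_error() fires when ldv_check_alloc_flags() or
 * ldv_check_alloc_flags_and_return_some_page() sees an allocation while the spinlock model
 * is locked with flags other than 32U (GFP_ATOMIC in the kernel headers this file was
 * generated against), or when ldv_check_alloc_nonatomic() is reached with the spinlock model
 * locked.  The "_true-unreach-call" suffix in the file name recorded by reach_error()
 * indicates the call is expected to be unreachable.
 */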