extern void abort(void); extern void __assert_fail(const char *, const char *, unsigned int, const char *) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__)); void reach_error() { __assert_fail("0", "drivers--mtd--mtdoops.ko_010.7903cba.32_7a.cil_true-unreach-call.i", 3, "reach_error"); } /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef __builtin_va_list __gnuc_va_list[1U]; typedef __gnuc_va_list va_list[1U]; typedef unsigned int __kernel_mode_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; typedef unsigned long __kernel_size_t; typedef long __kernel_ssize_t; typedef long __kernel_time_t; typedef long __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef long long __kernel_loff_t; typedef __kernel_uid_t __kernel_uid32_t; typedef __kernel_gid_t __kernel_gid32_t; typedef unsigned char __u8; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef __kernel_mode_t mode_t; typedef __kernel_pid_t pid_t; typedef __kernel_timer_t timer_t; typedef __kernel_clockid_t clockid_t; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __kernel_clock_t clock_t; typedef unsigned char u_char; typedef unsigned int u_int; typedef unsigned long u_long; typedef __u32 u_int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef unsigned int gfp_t; struct module; struct bug_entry { unsigned long bug_addr ; char const *file ; unsigned short line ; unsigned short flags ; }; struct completion; struct pt_regs; struct pid; struct task_struct; struct mm_struct; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct info { long ___orig_eip ; long ___ebx ; long ___ecx ; long ___edx ; long ___esi ; long ___edi ; long ___ebp ; long ___eax ; long ___ds ; long ___es ; long ___fs ; long ___orig_eax ; long ___eip ; long ___cs ; long ___eflags ; long ___esp ; long ___ss ; long ___vm86_es ; long ___vm86_ds ; long ___vm86_fs ; long ___vm86_gs ; }; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct page; struct __anonstruct_pgd_t_6 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_6 pgd_t; struct __anonstruct_pgprot_t_7 { pgprotval_t pgprot ; }; typedef struct __anonstruct_pgprot_t_7 pgprot_t; struct __anonstruct_ldv_1863_11 { unsigned int a ; unsigned int b ; }; struct __anonstruct_ldv_1878_12 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion_ldv_1879_10 { struct __anonstruct_ldv_1863_11 ldv_1863 ; struct __anonstruct_ldv_1878_12 ldv_1878 ; }; struct desc_struct { union 
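/* The surrounding declarations (from the typedefs above down to struct mtd_info much
 * further below) are kernel-header material that CIL inlined so the translation unit
 * is self-contained: x86-64 descriptor/PDA/FPU layouts, thread_info, locking and
 * lockdep structures.  Only a handful of these types (work_struct, spinlock_t, wait
 * queues, struct mtd_info, struct console) are actually used by the mtdoops driver
 * functions near the end of the file. */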
__anonunion_ldv_1879_10 ldv_1879 ; }; struct __anonstruct_cpumask_t_13 { unsigned long bits[1U] ; }; typedef struct __anonstruct_cpumask_t_13 cpumask_t; struct thread_struct; struct x8664_pda { struct task_struct *pcurrent ; unsigned long data_offset ; unsigned long kernelstack ; unsigned long oldrsp ; int irqcount ; unsigned int cpunumber ; char *irqstackptr ; unsigned int __softirq_pending ; unsigned int __nmi_count ; short mmu_state ; short isidle ; struct mm_struct *active_mm ; unsigned int apic_timer_irqs ; unsigned int irq0_irqs ; unsigned int irq_resched_count ; unsigned int irq_call_count ; unsigned int irq_tlb_count ; unsigned int irq_thermal_count ; unsigned int irq_threshold_count ; unsigned int irq_spurious_count ; }; struct exec_domain; struct map_segment; struct exec_domain { char const *name ; void (*handler)(int , struct pt_regs * ) ; unsigned char pers_low ; unsigned char pers_high ; unsigned long *signal_map ; unsigned long *signal_invmap ; struct map_segment *err_map ; struct map_segment *socktype_map ; struct map_segment *sockopt_map ; struct map_segment *af_map ; struct module *module ; struct exec_domain *next ; }; struct i387_fsave_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct_ldv_3957_15 { u64 rip ; u64 rdp ; }; struct __anonstruct_ldv_3963_16 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion_ldv_3964_14 { struct __anonstruct_ldv_3957_15 ldv_3957 ; struct __anonstruct_ldv_3963_16 ldv_3963 ; }; struct i387_fxsave_struct { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion_ldv_3964_14 ldv_3964 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[24U] ; }; struct i387_soft_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct info *info ; u32 entry_eip ; }; union thread_xstate { struct i387_fsave_struct fsave ; struct i387_fxsave_struct fxsave ; struct i387_soft_struct soft ; }; struct kmem_cache; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned long usersp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long ip ; unsigned long fs ; unsigned long gs ; unsigned long debugreg0 ; unsigned long debugreg1 ; unsigned long debugreg2 ; unsigned long debugreg3 ; unsigned long debugreg6 ; unsigned long debugreg7 ; unsigned long cr2 ; unsigned long trap_no ; unsigned long error_code ; union thread_xstate *xstate ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; unsigned long debugctlmsr ; unsigned long ds_area_msr ; }; struct __anonstruct_mm_segment_t_17 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_17 mm_segment_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct __anonstruct_raw_spinlock_t_18 { unsigned int slock ; }; typedef struct __anonstruct_raw_spinlock_t_18 raw_spinlock_t; struct __anonstruct_raw_rwlock_t_19 { unsigned int lock ; }; typedef struct __anonstruct_raw_rwlock_t_19 raw_rwlock_t; struct lockdep_map; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; }; struct 
lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned long usage_mask ; struct stack_trace usage_traces[9U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache ; char const *name ; int cpu ; }; struct held_lock { u64 prev_chain_key ; struct lock_class *class ; unsigned long acquire_ip ; struct lockdep_map *instance ; u64 waittime_stamp ; u64 holdtime_stamp ; int irq_context ; int trylock ; int read ; int check ; int hardirqs_off ; }; struct __anonstruct_spinlock_t_20 { raw_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_spinlock_t_20 spinlock_t; struct __anonstruct_rwlock_t_21 { raw_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_21 rwlock_t; struct __anonstruct_atomic_t_22 { int counter ; }; typedef struct __anonstruct_atomic_t_22 atomic_t; struct __anonstruct_atomic64_t_23 { long counter ; }; typedef struct __anonstruct_atomic64_t_23 atomic64_t; typedef atomic64_t atomic_long_t; struct thread_info; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct thread_info *owner ; char const *name ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; struct mutex *lock ; void *magic ; }; struct timespec; struct compat_timespec; struct __anonstruct_ldv_4819_25 { unsigned long arg0 ; unsigned long arg1 ; unsigned long arg2 ; unsigned long arg3 ; }; struct __anonstruct_futex_26 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; }; struct __anonstruct_nanosleep_27 { clockid_t index ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; union __anonunion_ldv_4833_24 { struct __anonstruct_ldv_4819_25 ldv_4819 ; struct __anonstruct_futex_26 futex ; struct __anonstruct_nanosleep_27 nanosleep ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion_ldv_4833_24 ldv_4833 ; }; struct thread_info { struct task_struct *task ; struct exec_domain *exec_domain ; __u32 flags ; __u32 status ; __u32 cpu ; int preempt_count ; mm_segment_t addr_limit ; struct restart_block restart_block ; void *sysenter_return ; }; struct timespec { time_t tv_sec ; long tv_nsec ; }; struct key; struct file; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct kobject; struct attribute { char const *name ; struct module *owner ; mode_t mode ; }; struct attribute_group { char const *name ; int (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; }; struct vm_area_struct; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; }; struct kref { atomic_t refcount ; }; struct __wait_queue; typedef struct __wait_queue 
wait_queue_t; struct __wait_queue { unsigned int flags ; void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct kset; struct kobj_type; struct sysfs_dirent; struct kobject { char const *name ; struct kref kref ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct sysfs_dirent *sd ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops *sysfs_ops ; struct attribute **default_attrs ; }; struct kobj_uevent_env { char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (*filter)(struct kset * , struct kobject * ) ; char const *(*name)(struct kset * , struct kobject * ) ; int (*uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops *uevent_ops ; }; struct marker; typedef void marker_probe_func(void * , void * , char const * , va_list * ); struct marker_probe_closure { marker_probe_func *func ; void *probe_private ; }; struct marker { char const *name ; char const *format ; char state ; char ptype ; void (*call)(struct marker const * , void * , char const * , ...) ; struct marker_probe_closure single ; struct marker_probe_closure *multi ; }; struct __anonstruct_nodemask_t_34 { unsigned long bits[1U] ; }; typedef struct __anonstruct_nodemask_t_34 nodemask_t; struct rw_semaphore; struct rw_semaphore { __s32 activity ; spinlock_t wait_lock ; struct list_head wait_list ; struct lockdep_map dep_map ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct tvec_base; struct timer_list { struct list_head entry ; unsigned long expires ; void (*function)(unsigned long ) ; unsigned long data ; struct tvec_base *base ; void *start_site ; char start_comm[16U] ; int start_pid ; }; struct hrtimer; enum hrtimer_restart; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; }; struct kmem_cache_cpu { void **freelist ; struct page *page ; int node ; unsigned int offset ; unsigned int objsize ; unsigned int stat[17U] ; }; struct kmem_cache_node { spinlock_t list_lock ; unsigned long nr_partial ; struct list_head partial ; atomic_long_t nr_slabs ; struct list_head full ; }; struct kmem_cache { unsigned long flags ; int size ; int objsize ; int offset ; int order ; struct kmem_cache_node local_node ; int objects ; gfp_t allocflags ; int refcount ; void (*ctor)(struct kmem_cache * , void * ) ; int inuse ; int align ; char const *name ; struct list_head list ; struct kobject kobj ; int remote_node_defrag_ratio ; struct kmem_cache_node *node[64U] ; struct kmem_cache_cpu *cpu_slab[8U] ; }; struct __anonstruct_local_t_86 { atomic_long_t a ; }; typedef struct __anonstruct_local_t_86 local_t; struct mod_arch_specific { }; struct kernel_symbol { unsigned long value ; char const *name ; }; struct module_attribute { 
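/* sysfs attribute callbacks for modules; struct module itself and struct console
 * (the type of mtdoops_console, registered via register_console) are declared next */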
struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; }; struct exception_table_entry; struct module_ref { local_t count ; }; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2 } ; struct module_sect_attr { struct module_attribute mattr ; char *name ; unsigned long address ; }; struct module_sect_attrs { struct attribute_group grp ; int nsections ; struct module_sect_attr attrs[0U] ; }; struct module_param_attrs; struct module_notes_attrs; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_param_attrs *param_attrs ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned int num_syms ; unsigned long const *crcs ; struct kernel_symbol const *gpl_syms ; unsigned int num_gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned int num_unused_syms ; unsigned long const *unused_crcs ; struct kernel_symbol const *unused_gpl_syms ; unsigned int num_unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; struct kernel_symbol const *gpl_future_syms ; unsigned int num_gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_exentries ; struct exception_table_entry const *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned long init_size ; unsigned long core_size ; unsigned long init_text_size ; unsigned long core_text_size ; void *unwind_info ; struct mod_arch_specific arch ; unsigned int taints ; struct list_head bug_list ; struct bug_entry *bug_table ; unsigned int num_bugs ; struct module_ref ref[8U] ; struct list_head modules_which_use_me ; struct task_struct *waiter ; void (*exit)(void) ; Elf64_Sym *symtab ; unsigned long num_symtab ; char *strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; void *percpu ; char *args ; struct marker *markers ; unsigned int num_markers ; }; struct tty_struct; struct tty_driver; struct console { char name[16U] ; void (*write)(struct console * , char const * , unsigned int ) ; int (*read)(struct console * , char * , unsigned int ) ; struct tty_driver *(*device)(struct console * , int * ) ; void (*unblank)(void) ; int (*setup)(struct console * , char * ) ; int (*early_setup)(void) ; short flags ; short index ; int cflag ; void *data ; struct console *next ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct rb_node { unsigned long rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct prio_tree_node; struct raw_prio_tree_node { struct prio_tree_node *left ; struct prio_tree_node *right ; struct prio_tree_node *parent ; }; struct prio_tree_node { struct prio_tree_node *left ; struct prio_tree_node *right ; struct prio_tree_node *parent ; unsigned long start ; unsigned long last ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct __anonstruct_mm_context_t_88 { void *ldt ; rwlock_t ldtlock ; int size ; struct mutex lock ; void *vdso ; }; typedef 
struct __anonstruct_mm_context_t_88 mm_context_t; struct address_space; typedef atomic_long_t mm_counter_t; union __anonunion_ldv_8981_89 { atomic_t _mapcount ; unsigned int inuse ; }; struct __anonstruct_ldv_8986_91 { unsigned long private ; struct address_space *mapping ; }; union __anonunion_ldv_8990_90 { struct __anonstruct_ldv_8986_91 ldv_8986 ; spinlock_t ptl ; struct kmem_cache *slab ; struct page *first_page ; }; union __anonunion_ldv_8994_92 { unsigned long index ; void *freelist ; }; struct page { unsigned long flags ; atomic_t _count ; union __anonunion_ldv_8981_89 ldv_8981 ; union __anonunion_ldv_8990_90 ldv_8990 ; union __anonunion_ldv_8994_92 ldv_8994 ; struct list_head lru ; unsigned long page_cgroup ; }; struct __anonstruct_vm_set_94 { struct list_head list ; void *parent ; struct vm_area_struct *head ; }; union __anonunion_shared_93 { struct __anonstruct_vm_set_94 vm_set ; struct raw_prio_tree_node prio_tree_node ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { struct mm_struct *vm_mm ; unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct rb_node vm_rb ; union __anonunion_shared_93 shared ; struct list_head anon_vma_node ; struct anon_vma *anon_vma ; struct vm_operations_struct *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; unsigned long vm_truncate_count ; struct mempolicy *vm_policy ; }; struct kioctx; struct mem_cgroup; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; struct vm_area_struct *mmap_cache ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; void (*unmap_area)(struct mm_struct * , unsigned long ) ; unsigned long mmap_base ; unsigned long task_size ; unsigned long cached_hole_size ; unsigned long free_area_cache ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; int map_count ; struct rw_semaphore mmap_sem ; spinlock_t page_table_lock ; struct list_head mmlist ; mm_counter_t _file_rss ; mm_counter_t _anon_rss ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long reserved_vm ; unsigned long def_flags ; unsigned long nr_ptes ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[38U] ; cpumask_t cpu_vm_mask ; mm_context_t context ; unsigned int faultstamp ; unsigned int token_priority ; unsigned int last_interval ; unsigned long flags ; int core_waiters ; struct completion *core_startup_done ; struct completion core_done ; rwlock_t ioctx_list_lock ; struct kioctx *ioctx_list ; struct mem_cgroup *mem_cgroup ; }; typedef unsigned long cputime_t; struct sem_undo; struct sem_undo { struct sem_undo *proc_next ; struct sem_undo *id_next ; int semid ; short *semadj ; }; struct sem_undo_list { atomic_t refcnt ; spinlock_t lock ; struct sem_undo *proc_list ; }; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct siginfo; struct __anonstruct_sigset_t_95 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_95 sigset_t; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void 
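/* Signal-handling, pid, hrtimer and scheduler bookkeeping types follow; mtdoops does
 * not use them directly, they are needed only to complete struct signal_struct and
 * struct task_struct. */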
__restorefn_t(void); typedef __restorefn_t *__sigrestore_t; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_97 { pid_t _pid ; uid_t _uid ; }; struct __anonstruct__timer_98 { timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_99 { pid_t _pid ; uid_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_100 { pid_t _pid ; uid_t _uid ; int _status ; clock_t _utime ; clock_t _stime ; }; struct __anonstruct__sigfault_101 { void *_addr ; }; struct __anonstruct__sigpoll_102 { long _band ; int _fd ; }; union __anonunion__sifields_96 { int _pad[28U] ; struct __anonstruct__kill_97 _kill ; struct __anonstruct__timer_98 _timer ; struct __anonstruct__rt_99 _rt ; struct __anonstruct__sigchld_100 _sigchld ; struct __anonstruct__sigfault_101 _sigfault ; struct __anonstruct__sigpoll_102 _sigpoll ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_96 _sifields ; }; typedef struct siginfo siginfo_t; struct user_struct; struct sigpending { struct list_head list ; sigset_t signal ; }; struct dentry; struct vfsmount; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct fs_struct { atomic_t count ; rwlock_t lock ; int umask ; struct path root ; struct path pwd ; struct path altroot ; }; struct rcu_head { struct rcu_head *next ; void (*func)(struct rcu_head * ) ; }; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; struct hlist_head tasks[3U] ; struct rcu_head rcu ; int level ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct prop_local_single { unsigned long events ; int shift ; unsigned long period ; spinlock_t lock ; }; struct __anonstruct_seccomp_t_105 { int mode ; }; typedef struct __anonstruct_seccomp_t_105 seccomp_t; struct plist_head { struct list_head prio_list ; struct list_head node_list ; spinlock_t *lock ; }; struct rt_mutex_waiter; struct rlimit { unsigned long rlim_cur ; unsigned long rlim_max ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; enum hrtimer_cb_mode { HRTIMER_CB_SOFTIRQ = 0, HRTIMER_CB_IRQSAFE = 1, HRTIMER_CB_IRQSAFE_NO_RESTART = 2, HRTIMER_CB_IRQSAFE_NO_SOFTIRQ = 3 } ; struct hrtimer { struct rb_node node ; ktime_t expires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; enum hrtimer_cb_mode cb_mode ; struct list_head cb_entry ; void *start_site ; char start_comm[16U] ; int start_pid ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; clockid_t index ; struct rb_root active ; struct rb_node *first ; ktime_t resolution ; ktime_t (*get_time)(void) ; ktime_t (*get_softirq_time)(void) ; ktime_t softirq_time ; ktime_t offset ; int (*reprogram)(struct hrtimer * , struct hrtimer_clock_base * , ktime_t ) ; }; struct hrtimer_cpu_base { spinlock_t lock ; struct hrtimer_clock_base clock_base[2U] ; struct list_head cb_pending ; ktime_t expires_next ; int hres_active ; unsigned long nr_events ; }; struct task_io_accounting { u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count 
; unsigned long time ; unsigned long max ; }; struct futex_pi_state; struct robust_list_head; struct bio; struct cfs_rq; struct task_group; struct nsproxy; struct kvec { void *iov_base ; size_t iov_len ; }; struct aio_ring_info { unsigned long mmap_base ; unsigned long mmap_size ; struct page **ring_pages ; spinlock_t ring_lock ; long nr_pages ; unsigned int nr ; unsigned int tail ; struct page *internal_pages[8U] ; }; struct kioctx { atomic_t users ; int dead ; struct mm_struct *mm ; unsigned long user_id ; struct kioctx *next ; wait_queue_head_t wait ; spinlock_t ctx_lock ; int reqs_active ; struct list_head active_reqs ; struct list_head run_list ; unsigned int max_reqs ; struct aio_ring_info ring_info ; struct delayed_work wq ; }; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; union __anonunion_ldv_10350_107 { pid_t pgrp ; pid_t __pgrp ; }; union __anonunion_ldv_10355_108 { pid_t session ; pid_t __session ; }; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t count ; atomic_t live ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; struct task_struct *group_exit_task ; int notify_count ; int group_stop_count ; unsigned int flags ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; cputime_t it_prof_expires ; cputime_t it_virt_expires ; cputime_t it_prof_incr ; cputime_t it_virt_incr ; union __anonunion_ldv_10350_107 ldv_10350 ; struct pid *tty_old_pgrp ; union __anonunion_ldv_10355_108 ldv_10355 ; int leader ; struct tty_struct *tty ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct list_head cpu_timers[3U] ; struct key *session_keyring ; struct key *process_keyring ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; struct tty_audit_buf *tty_audit_buf ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t files ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; uid_t uid ; struct task_group *tg ; struct kobject kobj ; struct work_struct work ; }; struct backing_dev_info; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long cpu_time ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; unsigned int bkl_count ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; struct timespec blkio_start ; struct timespec blkio_end ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; }; enum cpu_idle_type { CPU_IDLE = 0, CPU_NOT_IDLE = 1, CPU_NEWLY_IDLE = 2, CPU_MAX_IDLE_TYPES = 3 } ; struct sched_group { struct sched_group *next ; cpumask_t cpumask ; unsigned 
int __cpu_power ; u32 reciprocal_cpu_power ; }; enum sched_domain_level { SD_LV_NONE = 0, SD_LV_SIBLING = 1, SD_LV_MC = 2, SD_LV_CPU = 3, SD_LV_NODE = 4, SD_LV_ALLNODES = 5, SD_LV_MAX = 6 } ; struct sched_domain { struct sched_domain *parent ; struct sched_domain *child ; struct sched_group *groups ; cpumask_t span ; int first_cpu ; unsigned long min_interval ; unsigned long max_interval ; unsigned int busy_factor ; unsigned int imbalance_pct ; unsigned int cache_nice_tries ; unsigned int busy_idx ; unsigned int idle_idx ; unsigned int newidle_idx ; unsigned int wake_idx ; unsigned int forkexec_idx ; int flags ; enum sched_domain_level level ; unsigned long last_balance ; unsigned int balance_interval ; unsigned int nr_balance_failed ; unsigned int lb_count[3U] ; unsigned int lb_failed[3U] ; unsigned int lb_balanced[3U] ; unsigned int lb_imbalance[3U] ; unsigned int lb_gained[3U] ; unsigned int lb_hot_gained[3U] ; unsigned int lb_nobusyg[3U] ; unsigned int lb_nobusyq[3U] ; unsigned int alb_count ; unsigned int alb_failed ; unsigned int alb_pushed ; unsigned int sbe_count ; unsigned int sbe_balanced ; unsigned int sbe_pushed ; unsigned int sbf_count ; unsigned int sbf_balanced ; unsigned int sbf_pushed ; unsigned int ttwu_wake_remote ; unsigned int ttwu_move_affine ; unsigned int ttwu_move_balance ; }; struct io_context; struct group_info { int ngroups ; atomic_t usage ; gid_t small_block[32U] ; int nblocks ; gid_t *blocks[0U] ; }; struct audit_context; struct pipe_inode_info; struct rq; struct sched_class { struct sched_class const *next ; void (*enqueue_task)(struct rq * , struct task_struct * , int ) ; void (*dequeue_task)(struct rq * , struct task_struct * , int ) ; void (*yield_task)(struct rq * ) ; int (*select_task_rq)(struct task_struct * , int ) ; void (*check_preempt_curr)(struct rq * , struct task_struct * ) ; struct task_struct *(*pick_next_task)(struct rq * ) ; void (*put_prev_task)(struct rq * , struct task_struct * ) ; unsigned long (*load_balance)(struct rq * , int , struct rq * , unsigned long , struct sched_domain * , enum cpu_idle_type , int * , int * ) ; int (*move_one_task)(struct rq * , int , struct rq * , struct sched_domain * , enum cpu_idle_type ) ; void (*pre_schedule)(struct rq * , struct task_struct * ) ; void (*post_schedule)(struct rq * ) ; void (*task_wake_up)(struct rq * , struct task_struct * ) ; void (*set_curr_task)(struct rq * ) ; void (*task_tick)(struct rq * , struct task_struct * , int ) ; void (*task_new)(struct rq * , struct task_struct * ) ; void (*set_cpus_allowed)(struct task_struct * , cpumask_t const * ) ; void (*join_domain)(struct rq * ) ; void (*leave_domain)(struct rq * ) ; void (*switched_from)(struct rq * , struct task_struct * , int ) ; void (*switched_to)(struct rq * , struct task_struct * , int ) ; void (*prio_changed)(struct rq * , struct task_struct * , int , int ) ; void (*moved_group)(struct task_struct * ) ; }; struct load_weight { unsigned long weight ; unsigned long inv_weight ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 last_wakeup ; u64 avg_overlap ; u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 
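/* remaining scheduler statistics; struct task_struct, whose ->state field is the only
 * field the driver touches directly (via __xchg in mtdoops_erase_block), follows */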
nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_forced2_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned int time_slice ; unsigned long timeout ; int nr_cpus_allowed ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct linux_binfmt; struct files_struct; struct css_set; struct compat_robust_list_head; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; int lock_depth ; int prio ; int static_prio ; int normal_prio ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct hlist_head preempt_notifiers ; unsigned char fpu_counter ; s8 oomkilladj ; unsigned int policy ; cpumask_t cpus_allowed ; struct sched_info sched_info ; struct list_head tasks ; struct list_head ptrace_children ; struct list_head ptrace_list ; struct mm_struct *mm ; struct mm_struct *active_mm ; struct linux_binfmt *binfmt ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned int personality ; unsigned char did_exec : 1 ; pid_t pid ; pid_t tgid ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct pid_link pids[3U] ; struct list_head thread_group ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; unsigned int rt_priority ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; cputime_t prev_utime ; cputime_t prev_stime ; unsigned long nvcsw ; unsigned long nivcsw ; struct timespec start_time ; struct timespec real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; cputime_t it_prof_expires ; cputime_t it_virt_expires ; unsigned long long it_sched_expires ; struct list_head cpu_timers[3U] ; uid_t uid ; uid_t euid ; uid_t suid ; uid_t fsuid ; gid_t gid ; gid_t egid ; gid_t sgid ; gid_t fsgid ; struct group_info *group_info ; kernel_cap_t cap_effective ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_bset ; unsigned char keep_capabilities : 1 ; struct user_struct *user ; struct key *request_key_auth ; struct key *thread_keyring ; unsigned char jit_keyring ; char comm[16U] ; int link_count ; int total_link_count ; struct sysv_sem sysvsem ; unsigned long last_switch_timestamp ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; void *security ; struct audit_context *audit_context ; uid_t loginuid ; unsigned int sessionid ; seccomp_t seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; spinlock_t pi_lock ; struct plist_head pi_waiters ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; int hardirqs_enabled ; unsigned long 
hardirq_enable_ip ; unsigned int hardirq_enable_event ; unsigned long hardirq_disable_ip ; unsigned int hardirq_disable_event ; int softirqs_enabled ; unsigned long softirq_disable_ip ; unsigned int softirq_disable_event ; unsigned long softirq_enable_ip ; unsigned int softirq_enable_event ; int hardirq_context ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; struct held_lock held_locks[48U] ; unsigned int lockdep_recursion ; void *journal_info ; struct bio *bio_list ; struct bio **bio_tail ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_stimexpd ; struct mempolicy *mempolicy ; short il_next ; nodemask_t mems_allowed ; int cpuset_mems_generation ; int cpuset_mem_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; atomic_t fs_excl ; struct rcu_head rcu ; struct pipe_inode_info *splice_pipe ; struct task_delay_info *delays ; int make_it_fail ; struct prop_local_single dirties ; int latency_record_count ; struct latency_record latency_record[32U] ; }; struct otp_info { uint32_t start ; uint32_t length ; uint32_t locked ; }; struct nand_oobfree { uint32_t offset ; uint32_t length ; }; struct nand_ecclayout { uint32_t eccbytes ; uint32_t eccpos[64U] ; uint32_t oobavail ; struct nand_oobfree oobfree[8U] ; }; struct mtd_ecc_stats { uint32_t corrected ; uint32_t failed ; uint32_t badblocks ; uint32_t bbtblocks ; }; struct mtd_info; struct erase_info { struct mtd_info *mtd ; u_int32_t addr ; u_int32_t len ; u_int32_t fail_addr ; u_long time ; u_long retries ; u_int dev ; u_int cell ; void (*callback)(struct erase_info * ) ; u_long priv ; u_char state ; struct erase_info *next ; }; struct mtd_erase_region_info { u_int32_t offset ; u_int32_t erasesize ; u_int32_t numblocks ; unsigned long *lockmap ; }; enum ldv_10882 { MTD_OOB_PLACE = 0, MTD_OOB_AUTO = 1, MTD_OOB_RAW = 2 } ; typedef enum ldv_10882 mtd_oob_mode_t; struct mtd_oob_ops { mtd_oob_mode_t mode ; size_t len ; size_t retlen ; size_t ooblen ; size_t oobretlen ; uint32_t ooboffs ; uint8_t *datbuf ; uint8_t *oobbuf ; }; struct mtd_info { u_char type ; u_int32_t flags ; u_int32_t size ; u_int32_t erasesize ; u_int32_t writesize ; u_int32_t oobsize ; u_int32_t oobavail ; char *name ; int index ; struct nand_ecclayout *ecclayout ; int numeraseregions ; struct mtd_erase_region_info *eraseregions ; int (*erase)(struct mtd_info * , struct erase_info * ) ; int (*point)(struct mtd_info * , loff_t , size_t , size_t * , u_char ** ) ; void (*unpoint)(struct mtd_info * , u_char * , loff_t , size_t ) ; int (*read)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*write)(struct mtd_info * , loff_t , size_t , size_t * , u_char const * ) ; int (*panic_write)(struct mtd_info * , loff_t , size_t , size_t * , u_char const * ) ; int (*read_oob)(struct mtd_info * , loff_t , struct mtd_oob_ops * ) ; int (*write_oob)(struct mtd_info * , loff_t , struct mtd_oob_ops * ) ; int (*get_fact_prot_info)(struct mtd_info * , struct otp_info * , size_t ) ; int (*read_fact_prot_reg)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*get_user_prot_info)(struct mtd_info * , struct otp_info * , 
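/* remaining struct mtd_info operations: OTP register access, vectored writes,
 * sync/lock/unlock, suspend/resume, and the bad-block hooks (block_isbad /
 * block_markbad) that mtdoops_workfunc_erase relies on */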
size_t ) ; int (*read_user_prot_reg)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*write_user_prot_reg)(struct mtd_info * , loff_t , size_t , size_t * , u_char * ) ; int (*lock_user_prot_reg)(struct mtd_info * , loff_t , size_t ) ; int (*writev)(struct mtd_info * , struct kvec const * , unsigned long , loff_t , size_t * ) ; void (*sync)(struct mtd_info * ) ; int (*lock)(struct mtd_info * , loff_t , size_t ) ; int (*unlock)(struct mtd_info * , loff_t , size_t ) ; int (*suspend)(struct mtd_info * ) ; void (*resume)(struct mtd_info * ) ; int (*block_isbad)(struct mtd_info * , loff_t ) ; int (*block_markbad)(struct mtd_info * , loff_t ) ; struct notifier_block reboot_notifier ; struct mtd_ecc_stats ecc_stats ; int subpage_sft ; void *priv ; struct module *owner ; int usecount ; int (*get_device)(struct mtd_info * ) ; void (*put_device)(struct mtd_info * ) ; }; struct mtd_notifier { void (*add)(struct mtd_info * ) ; void (*remove)(struct mtd_info * ) ; struct list_head list ; }; struct mtdoops_context { int mtd_index ; struct work_struct work_erase ; struct work_struct work_write ; struct mtd_info *mtd ; int oops_pages ; int nextpage ; int nextcount ; void *oops_buf ; spinlock_t writecount_lock ; int ready ; int writecount ; }; void *memcpy(void * , void const * , unsigned long ) ; extern int printk(char const * , ...) ; extern int oops_in_progress ; extern void *memset(void * , int , size_t ) ; extern void __bad_pda_field(void) ; extern struct x8664_pda _proxy_pda ; __inline static struct task_struct *get_current(void) { struct task_struct *t ; struct task_struct *ret__ ; { switch (8UL) { case 2UL: __asm__ ("movw %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent)); goto ldv_3579; case 4UL: __asm__ ("movl %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent)); goto ldv_3579; case 8UL: __asm__ ("movq %%gs:%c1,%0": "=r" (ret__): "i" (0UL), "m" (_proxy_pda.pcurrent)); goto ldv_3579; default: __bad_pda_field(); } ldv_3579: t = ret__; return (t); } } __inline static unsigned long __xchg(unsigned long x , void volatile *ptr , int size ) { { switch (size) { case 1: __asm__ volatile ("xchgb %b0,%1": "=q" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_3594; case 2: __asm__ volatile ("xchgw %w0,%1": "=r" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_3594; case 4: __asm__ volatile ("xchgl %k0,%1": "=r" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_3594; case 8: __asm__ volatile ("xchgq %0,%1": "=r" (x): "m" (*((long volatile *)ptr)), "0" (x): "memory"); goto ldv_3594; } ldv_3594: ; return (x); } } __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; __inline static struct thread_info *current_thread_info(void) { struct thread_info *ti ; unsigned long ret__ ; { switch (8UL) { case 2UL: __asm__ ("movw %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack)); goto ldv_4851; case 4UL: __asm__ ("movl %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack)); goto ldv_4851; case 8UL: __asm__ ("movq %%gs:%c1,%0": "=r" (ret__): "i" (16UL), "m" (_proxy_pda.kernelstack)); goto ldv_4851; default: __bad_pda_field(); } ldv_4851: ti = (struct thread_info *)(ret__ - 8152UL); return (ti); } } extern unsigned long _spin_lock_irqsave(spinlock_t * ) ; extern void _spin_unlock_irqrestore(spinlock_t * , unsigned long ) ; extern int 
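/* Kernel API externs used by the driver (wait queues, work queues, console and MTD
 * registration, vmalloc), followed by the mtdoops driver itself, starting with the
 * shared static context oops_cxt. */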
default_wake_function(wait_queue_t * , unsigned int , int , void * ) ; extern void init_waitqueue_head(wait_queue_head_t * ) ; extern void add_wait_queue(wait_queue_head_t * , wait_queue_t * ) ; extern void remove_wait_queue(wait_queue_head_t * , wait_queue_t * ) ; extern void __wake_up(wait_queue_head_t * , unsigned int , int , void * ) ; extern void flush_scheduled_work(void) ; extern int schedule_work(struct work_struct * ) ; extern void register_console(struct console * ) ; extern int unregister_console(struct console * ) ; extern void *vmalloc(unsigned long ) ; extern void vfree(void const * ) ; extern void schedule(void) ; extern void register_mtd_user(struct mtd_notifier * ) ; extern int unregister_mtd_user(struct mtd_notifier * ) ; static struct mtdoops_context oops_cxt ; static void mtdoops_erase_callback(struct erase_info *done ) { wait_queue_head_t *wait_q ; { wait_q = (wait_queue_head_t *)done->priv; __wake_up(wait_q, 3U, 1, 0); return; } } static int mtdoops_erase_block(struct mtd_info *mtd , int offset ) { struct erase_info erase ; wait_queue_t wait ; struct task_struct *tmp ; wait_queue_head_t wait_q ; int ret ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & default_wake_function; wait.task_list.next = 0; wait.task_list.prev = 0; init_waitqueue_head(& wait_q); erase.mtd = mtd; erase.callback = & mtdoops_erase_callback; erase.addr = (u_int32_t )offset; erase.len = mtd->erasesize; erase.priv = (unsigned long )(& wait_q); tmp___0 = get_current(); __xchg(1UL, (void volatile *)(& tmp___0->state), 8); add_wait_queue(& wait_q, & wait); ret = (*(mtd->erase))(mtd, & erase); if (ret != 0) { tmp___1 = get_current(); __xchg(0UL, (void volatile *)(& tmp___1->state), 8); remove_wait_queue(& wait_q, & wait); printk("<4>mtdoops: erase of region [0x%x, 0x%x] on \"%s\" failed\n", erase.addr, erase.len, mtd->name); return (ret); } else { } schedule(); remove_wait_queue(& wait_q, & wait); return (0); } } static void mtdoops_inc_counter(struct mtdoops_context *cxt ) { struct mtd_info *mtd ; size_t retlen ; u32 count ; int ret ; { mtd = cxt->mtd; cxt->nextpage = cxt->nextpage + 1; if (cxt->nextpage > cxt->oops_pages) { cxt->nextpage = 0; } else { } cxt->nextcount = cxt->nextcount + 1; if (cxt->nextcount == -1) { cxt->nextcount = 0; } else { } ret = (*(mtd->read))(mtd, (loff_t )(cxt->nextpage * 4096), 4UL, & retlen, (u_char *)(& count)); if (retlen != 4UL || (ret < 0 && ret != -117)) { printk("<3>mtdoops: Read failure at %d (%td of 4 read), err %d.\n", cxt->nextpage * 4096, retlen, ret); schedule_work(& cxt->work_erase); return; } else { } if (count != 4294967295U) { schedule_work(& cxt->work_erase); return; } else { } printk("<7>mtdoops: Ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount); cxt->ready = 1; return; } } static void mtdoops_workfunc_erase(struct work_struct *work ) { struct mtdoops_context *cxt ; struct work_struct const *__mptr ; struct mtd_info *mtd ; int i ; int j ; int ret ; int mod ; { __mptr = (struct work_struct const *)work; cxt = (struct mtdoops_context *)__mptr + 0xfffffffffffffff8UL; mtd = cxt->mtd; i = 0; if ((unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) { return; } else { } mod = (int )((u_int32_t )(cxt->nextpage * 4096) % mtd->erasesize); if (mod != 0) { cxt->nextpage = (int )((u_int32_t )cxt->nextpage + (mtd->erasesize - (u_int32_t )mod) / 4096U); if (cxt->nextpage > cxt->oops_pages) { cxt->nextpage = 0; } else { } } else { } goto ldv_12174; ldv_12173: 
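/* Body of the CIL-expanded "while (mtd->block_isbad)" scan: probe the eraseblock that
 * holds cxt->nextpage.  A result of 0 means the block is usable (exit via ldv_12171
 * and try to erase it), a negative value is a probe failure (abort), and a positive
 * value marks the block bad, so nextpage advances by one eraseblock, wraps around,
 * and the loop gives up with "All blocks bad!" once every block has been rejected. */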
ret = (*(mtd->block_isbad))(mtd, (loff_t )(cxt->nextpage * 4096)); if (ret == 0) { goto ldv_12171; } else { } if (ret < 0) { printk("<3>mtdoops: block_isbad failed, aborting.\n"); return; } else { } badblock: printk("<4>mtdoops: Bad block at %08x\n", cxt->nextpage * 4096); i = i + 1; cxt->nextpage = (int )((u_int32_t )cxt->nextpage + mtd->erasesize / 4096U); if (cxt->nextpage > cxt->oops_pages) { cxt->nextpage = 0; } else { } if ((u_int32_t )i == (u_int32_t )cxt->oops_pages / (mtd->erasesize / 4096U)) { printk("<3>mtdoops: All blocks bad!\n"); return; } else { } ldv_12174: ; if ((unsigned long )mtd->block_isbad != (unsigned long )((int (*)(struct mtd_info * , loff_t ))0)) { goto ldv_12173; } else { } ldv_12171: j = 0; ret = -1; goto ldv_12176; ldv_12175: ret = mtdoops_erase_block(mtd, cxt->nextpage * 4096); j = j + 1; ldv_12176: ; if (j <= 2 && ret < 0) { goto ldv_12175; } else { } if (ret >= 0) { printk("<7>mtdoops: Ready %d, %d \n", cxt->nextpage, cxt->nextcount); cxt->ready = 1; return; } else { } if ((unsigned long )mtd->block_markbad != (unsigned long )((int (*)(struct mtd_info * , loff_t ))0) && ret == -5) { ret = (*(mtd->block_markbad))(mtd, (loff_t )(cxt->nextpage * 4096)); if (ret < 0) { printk("<3>mtdoops: block_markbad failed, aborting.\n"); return; } else { } } else { } goto badblock; } } static void mtdoops_write(struct mtdoops_context *cxt , int panic___0 ) { struct mtd_info *mtd ; size_t retlen ; int ret ; { mtd = cxt->mtd; if (cxt->writecount <= 4095) { memset(cxt->oops_buf + (unsigned long )cxt->writecount, 255, (size_t )(4096 - cxt->writecount)); } else { } if (panic___0 != 0) { ret = (*(mtd->panic_write))(mtd, (loff_t )(cxt->nextpage * 4096), 4096UL, & retlen, (u_char const *)cxt->oops_buf); } else { ret = (*(mtd->write))(mtd, (loff_t )(cxt->nextpage * 4096), 4096UL, & retlen, (u_char const *)cxt->oops_buf); } cxt->writecount = 0; if (retlen != 4096UL || ret < 0) { printk("<3>mtdoops: Write failure at %d (%td of %d written), err %d.\n", cxt->nextpage * 4096, retlen, 4096, ret); } else { } mtdoops_inc_counter(cxt); return; } } static void mtdoops_workfunc_write(struct work_struct *work ) { struct mtdoops_context *cxt ; struct work_struct const *__mptr ; { __mptr = (struct work_struct const *)work; cxt = (struct mtdoops_context *)__mptr + 0xffffffffffffffb8UL; mtdoops_write(cxt, 0); return; } } static void find_next_position(struct mtdoops_context *cxt ) { struct mtd_info *mtd ; int ret ; int page ; int maxpos ; u32 count ; u32 maxcount ; size_t retlen ; { mtd = cxt->mtd; maxpos = 0; maxcount = 4294967295U; page = 0; goto ldv_12203; ldv_12202: ret = (*(mtd->read))(mtd, (loff_t )(page * 4096), 4UL, & retlen, (u_char *)(& count)); if (retlen != 4UL || (ret < 0 && ret != -117)) { printk("<3>mtdoops: Read failure at %d (%td of 4 read), err %d.\n", page * 4096, retlen, ret); goto ldv_12201; } else { } if (count == 4294967295U) { goto ldv_12201; } else { } if (maxcount == 4294967295U) { maxcount = count; maxpos = page; } else if (count <= 1073741823U && maxcount > 3221225472U) { maxcount = count; maxpos = page; } else if (count > maxcount && count <= 3221225471U) { maxcount = count; maxpos = page; } else if ((count > maxcount && count > 3221225472U) && maxcount > 2147483648U) { maxcount = count; maxpos = page; } else { } ldv_12201: page = page + 1; ldv_12203: ; if (cxt->oops_pages > page) { goto ldv_12202; } else { } if (maxcount == 4294967295U) { cxt->nextpage = 0; cxt->nextcount = 1; cxt->ready = 1; printk("<7>mtdoops: Ready %d, %d (first init)\n", cxt->nextpage, 
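/* no usable record was found on any page: the flash is treated as freshly erased,
 * so logging starts at page 0 with counter 1 */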
cxt->nextcount); return; } else { } cxt->nextpage = maxpos; cxt->nextcount = (int )maxcount; mtdoops_inc_counter(cxt); return; } } static void mtdoops_notify_add(struct mtd_info *mtd ) { struct mtdoops_context *cxt ; { cxt = & oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) { return; } else { } if (mtd->size < mtd->erasesize * 2U) { printk("<3>MTD partition %d not big enough for mtdoops\n", mtd->index); return; } else { } if (mtd->erasesize <= 4095U) { printk("<3>Eraseblock size of MTD partition %d too small\n", mtd->index); return; } else { } cxt->mtd = mtd; cxt->oops_pages = (int )(mtd->size / 4096U); find_next_position(cxt); printk("<6>mtdoops: Attached to MTD device %d\n", mtd->index); return; } } static void mtdoops_notify_remove(struct mtd_info *mtd ) { struct mtdoops_context *cxt ; { cxt = & oops_cxt; if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) { return; } else { } cxt->mtd = 0; flush_scheduled_work(); return; } } static void mtdoops_console_sync(void) { struct mtdoops_context *cxt ; struct mtd_info *mtd ; unsigned long flags ; struct thread_info *tmp ; { cxt = & oops_cxt; mtd = cxt->mtd; if ((cxt->ready == 0 || (unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) || cxt->writecount == 0) { return; } else { } flags = _spin_lock_irqsave(& cxt->writecount_lock); if (cxt->ready == 0) { _spin_unlock_irqrestore(& cxt->writecount_lock, flags); return; } else { } cxt->ready = 0; _spin_unlock_irqrestore(& cxt->writecount_lock, flags); if ((unsigned long )mtd->panic_write != (unsigned long )((int (*)(struct mtd_info * , loff_t , size_t , size_t * , u_char const * ))0)) { tmp = current_thread_info(); if (((unsigned long )tmp->preempt_count & 268435200UL) != 0UL) { mtdoops_write(cxt, 1); } else { schedule_work(& cxt->work_write); } } else { schedule_work(& cxt->work_write); } return; } } static void mtdoops_console_write(struct console *co , char const *s , unsigned int count ) { struct mtdoops_context *cxt ; struct mtd_info *mtd ; unsigned long flags ; u32 *stamp ; size_t __len ; void *__ret ; { cxt = (struct mtdoops_context *)co->data; mtd = cxt->mtd; if (oops_in_progress == 0) { mtdoops_console_sync(); return; } else { } if (cxt->ready == 0 || (unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) { return; } else { } flags = _spin_lock_irqsave(& cxt->writecount_lock); if (cxt->ready == 0) { return; } else { } if (cxt->writecount == 0) { stamp = (u32 *)cxt->oops_buf; *stamp = (u32 )cxt->nextcount; cxt->writecount = 4; } else { } if ((unsigned int )cxt->writecount + count > 4096U) { count = (unsigned int )(4096 - cxt->writecount); } else { } __len = (size_t )count; __ret = memcpy(cxt->oops_buf + (unsigned long )cxt->writecount, (void const *)s, __len); cxt->writecount = (int )((unsigned int )cxt->writecount + count); _spin_unlock_irqrestore(& cxt->writecount_lock, flags); if (cxt->writecount == 4096) { mtdoops_console_sync(); } else { } return; } } static int mtdoops_console_setup(struct console *co , char *options ) { struct mtdoops_context *cxt ; { cxt = (struct mtdoops_context *)co->data; if (cxt->mtd_index != -1) { return (-16); } else { } if ((int )co->index == -1) { return (-22); } else { } cxt->mtd_index = (int )co->index; return (0); } } static struct mtd_notifier mtdoops_notifier = {& mtdoops_notify_add, & mtdoops_notify_remove, {0, 0}}; static struct console mtdoops_console = {{'t', 't', 'y', 'M', 'T', 'D', '\000'}, & mtdoops_console_write, 0, 0, & mtdoops_console_sync, & mtdoops_console_setup, 0, (short)0, -1, 0, (void *)(& 
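/* .data: the console callbacks recover the shared mtdoops_context from this pointer
 * (see the co->data casts in mtdoops_console_write and mtdoops_console_setup) */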
oops_cxt), 0}; static int mtdoops_console_init(void) { struct mtdoops_context *cxt ; struct lock_class_key __key ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_1 ; { cxt = & oops_cxt; cxt->mtd_index = -1; cxt->oops_buf = vmalloc(4096UL); if ((unsigned long )cxt->oops_buf == (unsigned long )((void *)0)) { printk("<3>Failed to allocate mtdoops buffer workspace\n"); return (-12); } else { } __constr_expr_0.counter = 0L; cxt->work_erase.data = __constr_expr_0; lockdep_init_map(& cxt->work_erase.lockdep_map, "&cxt->work_erase", & __key, 0); INIT_LIST_HEAD(& cxt->work_erase.entry); cxt->work_erase.func = & mtdoops_workfunc_erase; __constr_expr_1.counter = 0L; cxt->work_write.data = __constr_expr_1; lockdep_init_map(& cxt->work_write.lockdep_map, "&cxt->work_write", & __key___0, 0); INIT_LIST_HEAD(& cxt->work_write.entry); cxt->work_write.func = & mtdoops_workfunc_write; register_console(& mtdoops_console); register_mtd_user(& mtdoops_notifier); return (0); } } static void mtdoops_console_exit(void) { struct mtdoops_context *cxt ; { cxt = & oops_cxt; unregister_mtd_user(& mtdoops_notifier); unregister_console(& mtdoops_console); vfree((void const *)cxt->oops_buf); return; } } void ldv_check_final_state(void) ; void ldv_initialize(void) ; extern void ldv_handler_precall(void) ; extern int nondet_int(void) ; int LDV_IN_INTERRUPT ; int main(void) { struct mtd_info *var_group1 ; struct console *var_group2 ; char const *var_mtdoops_console_write_10_p1 ; unsigned int var_mtdoops_console_write_10_p2 ; char *var_mtdoops_console_setup_11_p1 ; int ldv_s_mtdoops_notifier_mtd_notifier ; int tmp ; int tmp___0 ; int tmp___1 ; { ldv_s_mtdoops_notifier_mtd_notifier = 0; LDV_IN_INTERRUPT = 1; ldv_initialize(); ldv_handler_precall(); tmp = mtdoops_console_init(); if (tmp != 0) { goto ldv_final; } else { } goto ldv_12294; ldv_12293: tmp___0 = nondet_int(); switch (tmp___0) { case 0: ; if (ldv_s_mtdoops_notifier_mtd_notifier == 0) { ldv_handler_precall(); mtdoops_notify_remove(var_group1); ldv_s_mtdoops_notifier_mtd_notifier = 0; } else { } goto ldv_12287; case 1: ldv_handler_precall(); mtdoops_notify_add(var_group1); goto ldv_12287; case 2: ldv_handler_precall(); mtdoops_console_write(var_group2, var_mtdoops_console_write_10_p1, var_mtdoops_console_write_10_p2); goto ldv_12287; case 3: ldv_handler_precall(); mtdoops_console_setup(var_group2, var_mtdoops_console_setup_11_p1); goto ldv_12287; case 4: ldv_handler_precall(); mtdoops_console_sync(); goto ldv_12287; default: ; goto ldv_12287; } ldv_12287: ; ldv_12294: tmp___1 = nondet_int(); if (tmp___1 != 0 || ldv_s_mtdoops_notifier_mtd_notifier != 0) { goto ldv_12293; } else { } ldv_handler_precall(); mtdoops_console_exit(); ldv_final: ldv_check_final_state(); return 0; } } long ldv__builtin_expect(long exp , long c ) ; long ldv__builtin_expect(long exp , long c ) { { return (exp); } } void ldv_initialize(void) { { return; } } void ldv_check_final_state(void) { { return; } }
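/* LDV verification harness notes: main() runs mtdoops_console_init(), then loops on
 * nondet_int() to fire the registered callbacks (MTD add/remove notifiers, console
 * write/setup/sync) in an arbitrary order and count, and finally calls
 * mtdoops_console_exit() followed by ldv_check_final_state().  The ldv_* and
 * nondet_int functions are verifier stubs; reach_error() is the target of the
 * unreach-call property and is not called anywhere in this file.
 *
 * A minimal sketch of one callback sequence the harness can drive (names as they
 * appear in this file; arguments are illustrative, not part of the original driver):
 *
 *     mtdoops_console_init();                  // allocates oops_buf, registers console + notifier
 *     mtdoops_notify_add(mtd);                 // nondet case 1: attach an MTD partition
 *     mtdoops_console_write(&mtdoops_console,
 *                           "Oops: ...", 9);   // case 2: append to oops_buf under writecount_lock
 *     mtdoops_console_sync();                  // case 4: flush via work_write or panic_write
 *     mtdoops_notify_remove(mtd);              // case 0: detach and flush scheduled work
 *     mtdoops_console_exit();                  // unregisters and frees oops_buf
 */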