extern void abort(void);
extern void __assert_fail(const char *, const char *, unsigned int, const char *) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__));
void reach_error() { __assert_fail("0", "drivers--mtd--mtdoops.ko_035.7086c19.39_7a.cil_true-unreach-call.i", 3, "reach_error"); }
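/* Verification harness: reach_error() above is the error location; the
   property checked for this benchmark ("unreach-call" in the file name) is
   that no execution of the driver model can reach it. */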

/* Generated by CIL v. 1.5.1 */
/* print_CIL_Input is false */
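/* The long preamble below is the flattened set of kernel typedefs and
   structure definitions that the preprocessed mtdoops driver refers to;
   CIL emits them verbatim, so they mirror the kernel headers the benchmark
   was generated from rather than hand-written driver code. */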

typedef unsigned char __u8;
typedef unsigned short __u16;
typedef int __s32;
typedef unsigned int __u32;
typedef unsigned long long __u64;
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
typedef long long s64;
typedef unsigned long long u64;
typedef unsigned int __kernel_mode_t;
typedef int __kernel_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_time_t;
typedef long __kernel_clock_t;
typedef int __kernel_timer_t;
typedef int __kernel_clockid_t;
typedef long long __kernel_loff_t;
typedef __kernel_uid_t __kernel_uid32_t;
typedef __kernel_gid_t __kernel_gid32_t;
typedef __u32 __kernel_dev_t;
typedef __kernel_dev_t dev_t;
typedef __kernel_mode_t mode_t;
typedef __kernel_pid_t pid_t;
typedef __kernel_clockid_t clockid_t;
typedef _Bool bool;
typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_loff_t loff_t;
typedef __kernel_size_t size_t;
typedef __kernel_ssize_t ssize_t;
typedef __kernel_time_t time_t;
typedef unsigned char u_char;
typedef unsigned long u_long;
typedef __s32 int32_t;
typedef __u8 uint8_t;
typedef __u32 uint32_t;
typedef __u64 uint64_t;
typedef unsigned int gfp_t;
typedef u64 phys_addr_t;
typedef phys_addr_t resource_size_t;
struct __anonstruct_atomic_t_6 {
   int counter ;
};
typedef struct __anonstruct_atomic_t_6 atomic_t;
struct __anonstruct_atomic64_t_7 {
   long counter ;
};
typedef struct __anonstruct_atomic64_t_7 atomic64_t;
struct list_head {
   struct list_head *next ;
   struct list_head *prev ;
};
struct hlist_node;
struct hlist_head {
   struct hlist_node *first ;
};
struct hlist_node {
   struct hlist_node *next ;
   struct hlist_node **pprev ;
};
struct rcu_head {
   struct rcu_head *next ;
   void (*func)(struct rcu_head * ) ;
};
struct module;
typedef void (*ctor_fn_t)(void);
struct device;
struct bug_entry {
   int bug_addr_disp ;
   int file_disp ;
   unsigned short line ;
   unsigned short flags ;
};
struct completion;
struct pt_regs;
struct pid;
struct timespec;
struct page;
struct task_struct;
struct mm_struct;
struct pt_regs {
   unsigned long r15 ;
   unsigned long r14 ;
   unsigned long r13 ;
   unsigned long r12 ;
   unsigned long bp ;
   unsigned long bx ;
   unsigned long r11 ;
   unsigned long r10 ;
   unsigned long r9 ;
   unsigned long r8 ;
   unsigned long ax ;
   unsigned long cx ;
   unsigned long dx ;
   unsigned long si ;
   unsigned long di ;
   unsigned long orig_ax ;
   unsigned long ip ;
   unsigned long cs ;
   unsigned long flags ;
   unsigned long sp ;
   unsigned long ss ;
};
struct __anonstruct_ldv_2036_13 {
   unsigned int a ;
   unsigned int b ;
};
struct __anonstruct_ldv_2051_14 {
   u16 limit0 ;
   u16 base0 ;
   unsigned char base1 ;
   unsigned char type : 4 ;
   unsigned char s : 1 ;
   unsigned char dpl : 2 ;
   unsigned char p : 1 ;
   unsigned char limit : 4 ;
   unsigned char avl : 1 ;
   unsigned char l : 1 ;
   unsigned char d : 1 ;
   unsigned char g : 1 ;
   unsigned char base2 ;
};
union __anonunion_ldv_2052_12 {
   struct __anonstruct_ldv_2036_13 ldv_2036 ;
   struct __anonstruct_ldv_2051_14 ldv_2051 ;
};
struct desc_struct {
   union __anonunion_ldv_2052_12 ldv_2052 ;
};
typedef unsigned long pgdval_t;
typedef unsigned long pgprotval_t;
struct pgprot {
   pgprotval_t pgprot ;
};
typedef struct pgprot pgprot_t;
struct __anonstruct_pgd_t_16 {
   pgdval_t pgd ;
};
typedef struct __anonstruct_pgd_t_16 pgd_t;
typedef struct page *pgtable_t;
struct file;
struct thread_struct;
struct cpumask;
struct arch_spinlock;
struct kernel_vm86_regs {
   struct pt_regs pt ;
   unsigned short es ;
   unsigned short __esh ;
   unsigned short ds ;
   unsigned short __dsh ;
   unsigned short fs ;
   unsigned short __fsh ;
   unsigned short gs ;
   unsigned short __gsh ;
};
union __anonunion_ldv_2684_19 {
   struct pt_regs *regs ;
   struct kernel_vm86_regs *vm86 ;
};
struct math_emu_info {
   long ___orig_eip ;
   union __anonunion_ldv_2684_19 ldv_2684 ;
};
struct cpumask {
   unsigned long bits[64U] ;
};
typedef struct cpumask cpumask_t;
typedef struct cpumask *cpumask_var_t;
struct jump_label_key;
struct i387_fsave_struct {
   u32 cwd ;
   u32 swd ;
   u32 twd ;
   u32 fip ;
   u32 fcs ;
   u32 foo ;
   u32 fos ;
   u32 st_space[20U] ;
   u32 status ;
};
struct __anonstruct_ldv_5051_24 {
   u64 rip ;
   u64 rdp ;
};
struct __anonstruct_ldv_5057_25 {
   u32 fip ;
   u32 fcs ;
   u32 foo ;
   u32 fos ;
};
union __anonunion_ldv_5058_23 {
   struct __anonstruct_ldv_5051_24 ldv_5051 ;
   struct __anonstruct_ldv_5057_25 ldv_5057 ;
};
union __anonunion_ldv_5067_26 {
   u32 padding1[12U] ;
   u32 sw_reserved[12U] ;
};
struct i387_fxsave_struct {
   u16 cwd ;
   u16 swd ;
   u16 twd ;
   u16 fop ;
   union __anonunion_ldv_5058_23 ldv_5058 ;
   u32 mxcsr ;
   u32 mxcsr_mask ;
   u32 st_space[32U] ;
   u32 xmm_space[64U] ;
   u32 padding[12U] ;
   union __anonunion_ldv_5067_26 ldv_5067 ;
};
struct i387_soft_struct {
   u32 cwd ;
   u32 swd ;
   u32 twd ;
   u32 fip ;
   u32 fcs ;
   u32 foo ;
   u32 fos ;
   u32 st_space[20U] ;
   u8 ftop ;
   u8 changed ;
   u8 lookahead ;
   u8 no_update ;
   u8 rm ;
   u8 alimit ;
   struct math_emu_info *info ;
   u32 entry_eip ;
};
struct ymmh_struct {
   u32 ymmh_space[64U] ;
};
struct xsave_hdr_struct {
   u64 xstate_bv ;
   u64 reserved1[2U] ;
   u64 reserved2[5U] ;
};
struct xsave_struct {
   struct i387_fxsave_struct i387 ;
   struct xsave_hdr_struct xsave_hdr ;
   struct ymmh_struct ymmh ;
};
union thread_xstate {
   struct i387_fsave_struct fsave ;
   struct i387_fxsave_struct fxsave ;
   struct i387_soft_struct soft ;
   struct xsave_struct xsave ;
};
struct fpu {
   union thread_xstate *state ;
};
struct kmem_cache;
struct perf_event;
struct thread_struct {
   struct desc_struct tls_array[3U] ;
   unsigned long sp0 ;
   unsigned long sp ;
   unsigned long usersp ;
   unsigned short es ;
   unsigned short ds ;
   unsigned short fsindex ;
   unsigned short gsindex ;
   unsigned long fs ;
   unsigned long gs ;
   struct perf_event *ptrace_bps[4U] ;
   unsigned long debugreg6 ;
   unsigned long ptrace_dr7 ;
   unsigned long cr2 ;
   unsigned long trap_no ;
   unsigned long error_code ;
   struct fpu fpu ;
   unsigned long *io_bitmap_ptr ;
   unsigned long iopl ;
   unsigned int io_bitmap_max ;
};
typedef atomic64_t atomic_long_t;
typedef u16 __ticket_t;
typedef u32 __ticketpair_t;
struct __raw_tickets {
   __ticket_t head ;
   __ticket_t tail ;
};
union __anonunion_ldv_5909_29 {
   __ticketpair_t head_tail ;
   struct __raw_tickets tickets ;
};
struct arch_spinlock {
   union __anonunion_ldv_5909_29 ldv_5909 ;
};
typedef struct arch_spinlock arch_spinlock_t;
struct lockdep_map;
struct stack_trace {
   unsigned int nr_entries ;
   unsigned int max_entries ;
   unsigned long *entries ;
   int skip ;
};
struct lockdep_subclass_key {
   char __one_byte ;
};
struct lock_class_key {
   struct lockdep_subclass_key subkeys[8U] ;
};
struct lock_class {
   struct list_head hash_entry ;
   struct list_head lock_entry ;
   struct lockdep_subclass_key *key ;
   unsigned int subclass ;
   unsigned int dep_gen_id ;
   unsigned long usage_mask ;
   struct stack_trace usage_traces[13U] ;
   struct list_head locks_after ;
   struct list_head locks_before ;
   unsigned int version ;
   unsigned long ops ;
   char const   *name ;
   int name_version ;
   unsigned long contention_point[4U] ;
   unsigned long contending_point[4U] ;
};
struct lockdep_map {
   struct lock_class_key *key ;
   struct lock_class *class_cache[2U] ;
   char const   *name ;
   int cpu ;
   unsigned long ip ;
};
struct held_lock {
   u64 prev_chain_key ;
   unsigned long acquire_ip ;
   struct lockdep_map *instance ;
   struct lockdep_map *nest_lock ;
   u64 waittime_stamp ;
   u64 holdtime_stamp ;
   unsigned short class_idx : 13 ;
   unsigned char irq_context : 2 ;
   unsigned char trylock : 1 ;
   unsigned char read : 2 ;
   unsigned char check : 2 ;
   unsigned char hardirqs_off : 1 ;
   unsigned short references : 11 ;
};
struct raw_spinlock {
   arch_spinlock_t raw_lock ;
   unsigned int magic ;
   unsigned int owner_cpu ;
   void *owner ;
   struct lockdep_map dep_map ;
};
typedef struct raw_spinlock raw_spinlock_t;
struct __anonstruct_ldv_6128_33 {
   u8 __padding[24U] ;
   struct lockdep_map dep_map ;
};
union __anonunion_ldv_6129_32 {
   struct raw_spinlock rlock ;
   struct __anonstruct_ldv_6128_33 ldv_6128 ;
};
struct spinlock {
   union __anonunion_ldv_6129_32 ldv_6129 ;
};
typedef struct spinlock spinlock_t;
struct timespec {
   __kernel_time_t tv_sec ;
   long tv_nsec ;
};
struct __wait_queue;
typedef struct __wait_queue wait_queue_t;
struct __wait_queue {
   unsigned int flags ;
   void *private ;
   int (*func)(wait_queue_t * , unsigned int  , int  , void * ) ;
   struct list_head task_list ;
};
struct __wait_queue_head {
   spinlock_t lock ;
   struct list_head task_list ;
};
typedef struct __wait_queue_head wait_queue_head_t;
struct __anonstruct_nodemask_t_36 {
   unsigned long bits[16U] ;
};
typedef struct __anonstruct_nodemask_t_36 nodemask_t;
struct mutex {
   atomic_t count ;
   spinlock_t wait_lock ;
   struct list_head wait_list ;
   struct task_struct *owner ;
   char const   *name ;
   void *magic ;
   struct lockdep_map dep_map ;
};
struct mutex_waiter {
   struct list_head list ;
   struct task_struct *task ;
   void *magic ;
};
struct rw_semaphore;
struct rw_semaphore {
   long count ;
   raw_spinlock_t wait_lock ;
   struct list_head wait_list ;
   struct lockdep_map dep_map ;
};
struct notifier_block {
   int (*notifier_call)(struct notifier_block * , unsigned long  , void * ) ;
   struct notifier_block *next ;
   int priority ;
};
union ktime {
   s64 tv64 ;
};
typedef union ktime ktime_t;
struct tvec_base;
struct timer_list {
   struct list_head entry ;
   unsigned long expires ;
   struct tvec_base *base ;
   void (*function)(unsigned long  ) ;
   unsigned long data ;
   int slack ;
   int start_pid ;
   void *start_site ;
   char start_comm[16U] ;
   struct lockdep_map lockdep_map ;
};
struct hrtimer;
enum hrtimer_restart;
struct work_struct;
struct work_struct {
   atomic_long_t data ;
   struct list_head entry ;
   void (*func)(struct work_struct * ) ;
   struct lockdep_map lockdep_map ;
};
struct completion {
   unsigned int done ;
   wait_queue_head_t wait ;
};
struct pm_message {
   int event ;
};
typedef struct pm_message pm_message_t;
struct dev_pm_ops {
   int (*prepare)(struct device * ) ;
   void (*complete)(struct device * ) ;
   int (*suspend)(struct device * ) ;
   int (*resume)(struct device * ) ;
   int (*freeze)(struct device * ) ;
   int (*thaw)(struct device * ) ;
   int (*poweroff)(struct device * ) ;
   int (*restore)(struct device * ) ;
   int (*suspend_noirq)(struct device * ) ;
   int (*resume_noirq)(struct device * ) ;
   int (*freeze_noirq)(struct device * ) ;
   int (*thaw_noirq)(struct device * ) ;
   int (*poweroff_noirq)(struct device * ) ;
   int (*restore_noirq)(struct device * ) ;
   int (*runtime_suspend)(struct device * ) ;
   int (*runtime_resume)(struct device * ) ;
   int (*runtime_idle)(struct device * ) ;
};
enum rpm_status {
    RPM_ACTIVE = 0,
    RPM_RESUMING = 1,
    RPM_SUSPENDED = 2,
    RPM_SUSPENDING = 3
} ;
enum rpm_request {
    RPM_REQ_NONE = 0,
    RPM_REQ_IDLE = 1,
    RPM_REQ_SUSPEND = 2,
    RPM_REQ_AUTOSUSPEND = 3,
    RPM_REQ_RESUME = 4
} ;
struct wakeup_source;
struct pm_subsys_data {
   spinlock_t lock ;
   unsigned int refcount ;
};
struct pm_qos_constraints;
struct dev_pm_info {
   pm_message_t power_state ;
   unsigned char can_wakeup : 1 ;
   unsigned char async_suspend : 1 ;
   bool is_prepared ;
   bool is_suspended ;
   bool ignore_children ;
   spinlock_t lock ;
   struct list_head entry ;
   struct completion completion ;
   struct wakeup_source *wakeup ;
   bool wakeup_path ;
   struct timer_list suspend_timer ;
   unsigned long timer_expires ;
   struct work_struct work ;
   wait_queue_head_t wait_queue ;
   atomic_t usage_count ;
   atomic_t child_count ;
   unsigned char disable_depth : 3 ;
   unsigned char idle_notification : 1 ;
   unsigned char request_pending : 1 ;
   unsigned char deferred_resume : 1 ;
   unsigned char run_wake : 1 ;
   unsigned char runtime_auto : 1 ;
   unsigned char no_callbacks : 1 ;
   unsigned char irq_safe : 1 ;
   unsigned char use_autosuspend : 1 ;
   unsigned char timer_autosuspends : 1 ;
   enum rpm_request request ;
   enum rpm_status runtime_status ;
   int runtime_error ;
   int autosuspend_delay ;
   unsigned long last_busy ;
   unsigned long active_jiffies ;
   unsigned long suspended_jiffies ;
   unsigned long accounting_timestamp ;
   struct pm_subsys_data *subsys_data ;
   struct pm_qos_constraints *constraints ;
};
struct dev_pm_domain {
   struct dev_pm_ops ops ;
};
struct __anonstruct_mm_context_t_101 {
   void *ldt ;
   int size ;
   unsigned short ia32_compat ;
   struct mutex lock ;
   void *vdso ;
};
typedef struct __anonstruct_mm_context_t_101 mm_context_t;
struct vm_area_struct;
struct nsproxy;
struct cred;
typedef __u64 Elf64_Addr;
typedef __u16 Elf64_Half;
typedef __u32 Elf64_Word;
typedef __u64 Elf64_Xword;
struct elf64_sym {
   Elf64_Word st_name ;
   unsigned char st_info ;
   unsigned char st_other ;
   Elf64_Half st_shndx ;
   Elf64_Addr st_value ;
   Elf64_Xword st_size ;
};
typedef struct elf64_sym Elf64_Sym;
struct sock;
struct kobject;
enum kobj_ns_type {
    KOBJ_NS_TYPE_NONE = 0,
    KOBJ_NS_TYPE_NET = 1,
    KOBJ_NS_TYPES = 2
} ;
struct kobj_ns_type_operations {
   enum kobj_ns_type type ;
   void *(*grab_current_ns)(void) ;
   void const   *(*netlink_ns)(struct sock * ) ;
   void const   *(*initial_ns)(void) ;
   void (*drop_ns)(void * ) ;
};
struct attribute {
   char const   *name ;
   mode_t mode ;
   struct lock_class_key *key ;
   struct lock_class_key skey ;
};
struct attribute_group {
   char const   *name ;
   mode_t (*is_visible)(struct kobject * , struct attribute * , int  ) ;
   struct attribute **attrs ;
};
struct bin_attribute {
   struct attribute attr ;
   size_t size ;
   void *private ;
   ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * ,
                   loff_t  , size_t  ) ;
   ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * ,
                    loff_t  , size_t  ) ;
   int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ;
};
struct sysfs_ops {
   ssize_t (*show)(struct kobject * , struct attribute * , char * ) ;
   ssize_t (*store)(struct kobject * , struct attribute * , char const   * , size_t  ) ;
   void const   *(*namespace)(struct kobject * , struct attribute  const  * ) ;
};
struct sysfs_dirent;
struct kref {
   atomic_t refcount ;
};
struct kset;
struct kobj_type;
struct kobject {
   char const   *name ;
   struct list_head entry ;
   struct kobject *parent ;
   struct kset *kset ;
   struct kobj_type *ktype ;
   struct sysfs_dirent *sd ;
   struct kref kref ;
   unsigned char state_initialized : 1 ;
   unsigned char state_in_sysfs : 1 ;
   unsigned char state_add_uevent_sent : 1 ;
   unsigned char state_remove_uevent_sent : 1 ;
   unsigned char uevent_suppress : 1 ;
};
struct kobj_type {
   void (*release)(struct kobject * ) ;
   struct sysfs_ops  const  *sysfs_ops ;
   struct attribute **default_attrs ;
   struct kobj_ns_type_operations  const  *(*child_ns_type)(struct kobject * ) ;
   void const   *(*namespace)(struct kobject * ) ;
};
struct kobj_uevent_env {
   char *envp[32U] ;
   int envp_idx ;
   char buf[2048U] ;
   int buflen ;
};
struct kset_uevent_ops {
   int (* const  filter)(struct kset * , struct kobject * ) ;
   char const   *(* const  name)(struct kset * , struct kobject * ) ;
   int (* const  uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ;
};
struct kset {
   struct list_head list ;
   spinlock_t list_lock ;
   struct kobject kobj ;
   struct kset_uevent_ops  const  *uevent_ops ;
};
struct kernel_param;
struct kernel_param_ops {
   int (*set)(char const   * , struct kernel_param  const  * ) ;
   int (*get)(char * , struct kernel_param  const  * ) ;
   void (*free)(void * ) ;
};
struct kparam_string;
struct kparam_array;
union __anonunion_ldv_13265_134 {
   void *arg ;
   struct kparam_string  const  *str ;
   struct kparam_array  const  *arr ;
};
struct kernel_param {
   char const   *name ;
   struct kernel_param_ops  const  *ops ;
   u16 perm ;
   u16 flags ;
   union __anonunion_ldv_13265_134 ldv_13265 ;
};
struct kparam_string {
   unsigned int maxlen ;
   char *string ;
};
struct kparam_array {
   unsigned int max ;
   unsigned int elemsize ;
   unsigned int *num ;
   struct kernel_param_ops  const  *ops ;
   void *elem ;
};
struct jump_label_key {
   atomic_t enabled ;
};
struct tracepoint;
struct tracepoint_func {
   void *func ;
   void *data ;
};
struct tracepoint {
   char const   *name ;
   struct jump_label_key key ;
   void (*regfunc)(void) ;
   void (*unregfunc)(void) ;
   struct tracepoint_func *funcs ;
};
struct kernel_symbol {
   unsigned long value ;
   char const   *name ;
};
struct mod_arch_specific {

};
struct module_param_attrs;
struct module_kobject {
   struct kobject kobj ;
   struct module *mod ;
   struct kobject *drivers_dir ;
   struct module_param_attrs *mp ;
};
struct module_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ;
   ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const   * ,
                    size_t  ) ;
   void (*setup)(struct module * , char const   * ) ;
   int (*test)(struct module * ) ;
   void (*free)(struct module * ) ;
};
struct exception_table_entry;
enum module_state {
    MODULE_STATE_LIVE = 0,
    MODULE_STATE_COMING = 1,
    MODULE_STATE_GOING = 2
} ;
struct module_ref {
   unsigned int incs ;
   unsigned int decs ;
};
struct module_sect_attrs;
struct module_notes_attrs;
struct ftrace_event_call;
struct module {
   enum module_state state ;
   struct list_head list ;
   char name[56U] ;
   struct module_kobject mkobj ;
   struct module_attribute *modinfo_attrs ;
   char const   *version ;
   char const   *srcversion ;
   struct kobject *holders_dir ;
   struct kernel_symbol  const  *syms ;
   unsigned long const   *crcs ;
   unsigned int num_syms ;
   struct kernel_param *kp ;
   unsigned int num_kp ;
   unsigned int num_gpl_syms ;
   struct kernel_symbol  const  *gpl_syms ;
   unsigned long const   *gpl_crcs ;
   struct kernel_symbol  const  *unused_syms ;
   unsigned long const   *unused_crcs ;
   unsigned int num_unused_syms ;
   unsigned int num_unused_gpl_syms ;
   struct kernel_symbol  const  *unused_gpl_syms ;
   unsigned long const   *unused_gpl_crcs ;
   struct kernel_symbol  const  *gpl_future_syms ;
   unsigned long const   *gpl_future_crcs ;
   unsigned int num_gpl_future_syms ;
   unsigned int num_exentries ;
   struct exception_table_entry *extable ;
   int (*init)(void) ;
   void *module_init ;
   void *module_core ;
   unsigned int init_size ;
   unsigned int core_size ;
   unsigned int init_text_size ;
   unsigned int core_text_size ;
   unsigned int init_ro_size ;
   unsigned int core_ro_size ;
   struct mod_arch_specific arch ;
   unsigned int taints ;
   unsigned int num_bugs ;
   struct list_head bug_list ;
   struct bug_entry *bug_table ;
   Elf64_Sym *symtab ;
   Elf64_Sym *core_symtab ;
   unsigned int num_symtab ;
   unsigned int core_num_syms ;
   char *strtab ;
   char *core_strtab ;
   struct module_sect_attrs *sect_attrs ;
   struct module_notes_attrs *notes_attrs ;
   char *args ;
   void *percpu ;
   unsigned int percpu_size ;
   unsigned int num_tracepoints ;
   struct tracepoint * const  *tracepoints_ptrs ;
   unsigned int num_trace_bprintk_fmt ;
   char const   **trace_bprintk_fmt_start ;
   struct ftrace_event_call **trace_events ;
   unsigned int num_trace_events ;
   struct list_head source_list ;
   struct list_head target_list ;
   struct task_struct *waiter ;
   void (*exit)(void) ;
   struct module_ref *refptr ;
   ctor_fn_t (**ctors)(void) ;
   unsigned int num_ctors ;
};
struct tty_struct;
struct kernel_cap_struct {
   __u32 cap[2U] ;
};
typedef struct kernel_cap_struct kernel_cap_t;
struct user_namespace;
struct rb_node {
   unsigned long rb_parent_color ;
   struct rb_node *rb_right ;
   struct rb_node *rb_left ;
};
struct rb_root {
   struct rb_node *rb_node ;
};
struct prio_tree_node;
struct raw_prio_tree_node {
   struct prio_tree_node *left ;
   struct prio_tree_node *right ;
   struct prio_tree_node *parent ;
};
struct prio_tree_node {
   struct prio_tree_node *left ;
   struct prio_tree_node *right ;
   struct prio_tree_node *parent ;
   unsigned long start ;
   unsigned long last ;
};
struct address_space;
union __anonunion_ldv_14450_137 {
   unsigned long index ;
   void *freelist ;
};
struct __anonstruct_ldv_14460_141 {
   unsigned short inuse ;
   unsigned short objects : 15 ;
   unsigned char frozen : 1 ;
};
union __anonunion_ldv_14461_140 {
   atomic_t _mapcount ;
   struct __anonstruct_ldv_14460_141 ldv_14460 ;
};
struct __anonstruct_ldv_14463_139 {
   union __anonunion_ldv_14461_140 ldv_14461 ;
   atomic_t _count ;
};
union __anonunion_ldv_14464_138 {
   unsigned long counters ;
   struct __anonstruct_ldv_14463_139 ldv_14463 ;
};
struct __anonstruct_ldv_14465_136 {
   union __anonunion_ldv_14450_137 ldv_14450 ;
   union __anonunion_ldv_14464_138 ldv_14464 ;
};
struct __anonstruct_ldv_14472_143 {
   struct page *next ;
   int pages ;
   int pobjects ;
};
union __anonunion_ldv_14473_142 {
   struct list_head lru ;
   struct __anonstruct_ldv_14472_143 ldv_14472 ;
};
union __anonunion_ldv_14478_144 {
   unsigned long private ;
   struct kmem_cache *slab ;
   struct page *first_page ;
};
struct page {
   unsigned long flags ;
   struct address_space *mapping ;
   struct __anonstruct_ldv_14465_136 ldv_14465 ;
   union __anonunion_ldv_14473_142 ldv_14473 ;
   union __anonunion_ldv_14478_144 ldv_14478 ;
};
struct __anonstruct_vm_set_146 {
   struct list_head list ;
   void *parent ;
   struct vm_area_struct *head ;
};
union __anonunion_shared_145 {
   struct __anonstruct_vm_set_146 vm_set ;
   struct raw_prio_tree_node prio_tree_node ;
};
struct anon_vma;
struct vm_operations_struct;
struct mempolicy;
struct vm_area_struct {
   struct mm_struct *vm_mm ;
   unsigned long vm_start ;
   unsigned long vm_end ;
   struct vm_area_struct *vm_next ;
   struct vm_area_struct *vm_prev ;
   pgprot_t vm_page_prot ;
   unsigned long vm_flags ;
   struct rb_node vm_rb ;
   union __anonunion_shared_145 shared ;
   struct list_head anon_vma_chain ;
   struct anon_vma *anon_vma ;
   struct vm_operations_struct  const  *vm_ops ;
   unsigned long vm_pgoff ;
   struct file *vm_file ;
   void *vm_private_data ;
   struct mempolicy *vm_policy ;
};
struct core_thread {
   struct task_struct *task ;
   struct core_thread *next ;
};
struct core_state {
   atomic_t nr_threads ;
   struct core_thread dumper ;
   struct completion startup ;
};
struct mm_rss_stat {
   atomic_long_t count[3U] ;
};
struct linux_binfmt;
struct mmu_notifier_mm;
struct mm_struct {
   struct vm_area_struct *mmap ;
   struct rb_root mm_rb ;
   struct vm_area_struct *mmap_cache ;
   unsigned long (*get_unmapped_area)(struct file * , unsigned long  , unsigned long  ,
                                      unsigned long  , unsigned long  ) ;
   void (*unmap_area)(struct mm_struct * , unsigned long  ) ;
   unsigned long mmap_base ;
   unsigned long task_size ;
   unsigned long cached_hole_size ;
   unsigned long free_area_cache ;
   pgd_t *pgd ;
   atomic_t mm_users ;
   atomic_t mm_count ;
   int map_count ;
   spinlock_t page_table_lock ;
   struct rw_semaphore mmap_sem ;
   struct list_head mmlist ;
   unsigned long hiwater_rss ;
   unsigned long hiwater_vm ;
   unsigned long total_vm ;
   unsigned long locked_vm ;
   unsigned long pinned_vm ;
   unsigned long shared_vm ;
   unsigned long exec_vm ;
   unsigned long stack_vm ;
   unsigned long reserved_vm ;
   unsigned long def_flags ;
   unsigned long nr_ptes ;
   unsigned long start_code ;
   unsigned long end_code ;
   unsigned long start_data ;
   unsigned long end_data ;
   unsigned long start_brk ;
   unsigned long brk ;
   unsigned long start_stack ;
   unsigned long arg_start ;
   unsigned long arg_end ;
   unsigned long env_start ;
   unsigned long env_end ;
   unsigned long saved_auxv[44U] ;
   struct mm_rss_stat rss_stat ;
   struct linux_binfmt *binfmt ;
   cpumask_var_t cpu_vm_mask_var ;
   mm_context_t context ;
   unsigned int faultstamp ;
   unsigned int token_priority ;
   unsigned int last_interval ;
   unsigned long flags ;
   struct core_state *core_state ;
   spinlock_t ioctx_lock ;
   struct hlist_head ioctx_list ;
   struct task_struct *owner ;
   struct file *exe_file ;
   unsigned long num_exe_file_vmas ;
   struct mmu_notifier_mm *mmu_notifier_mm ;
   pgtable_t pmd_huge_pte ;
   struct cpumask cpumask_allocation ;
};
typedef unsigned long cputime_t;
struct sem_undo_list;
struct sysv_sem {
   struct sem_undo_list *undo_list ;
};
struct siginfo;
struct __anonstruct_sigset_t_147 {
   unsigned long sig[1U] ;
};
typedef struct __anonstruct_sigset_t_147 sigset_t;
typedef void __signalfn_t(int  );
typedef __signalfn_t *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t *__sigrestore_t;
struct sigaction {
   __sighandler_t sa_handler ;
   unsigned long sa_flags ;
   __sigrestore_t sa_restorer ;
   sigset_t sa_mask ;
};
struct k_sigaction {
   struct sigaction sa ;
};
union sigval {
   int sival_int ;
   void *sival_ptr ;
};
typedef union sigval sigval_t;
struct __anonstruct__kill_149 {
   __kernel_pid_t _pid ;
   __kernel_uid32_t _uid ;
};
struct __anonstruct__timer_150 {
   __kernel_timer_t _tid ;
   int _overrun ;
   char _pad[0U] ;
   sigval_t _sigval ;
   int _sys_private ;
};
struct __anonstruct__rt_151 {
   __kernel_pid_t _pid ;
   __kernel_uid32_t _uid ;
   sigval_t _sigval ;
};
struct __anonstruct__sigchld_152 {
   __kernel_pid_t _pid ;
   __kernel_uid32_t _uid ;
   int _status ;
   __kernel_clock_t _utime ;
   __kernel_clock_t _stime ;
};
struct __anonstruct__sigfault_153 {
   void *_addr ;
   short _addr_lsb ;
};
struct __anonstruct__sigpoll_154 {
   long _band ;
   int _fd ;
};
union __anonunion__sifields_148 {
   int _pad[28U] ;
   struct __anonstruct__kill_149 _kill ;
   struct __anonstruct__timer_150 _timer ;
   struct __anonstruct__rt_151 _rt ;
   struct __anonstruct__sigchld_152 _sigchld ;
   struct __anonstruct__sigfault_153 _sigfault ;
   struct __anonstruct__sigpoll_154 _sigpoll ;
};
struct siginfo {
   int si_signo ;
   int si_errno ;
   int si_code ;
   union __anonunion__sifields_148 _sifields ;
};
typedef struct siginfo siginfo_t;
struct user_struct;
struct sigpending {
   struct list_head list ;
   sigset_t signal ;
};
struct pid_namespace;
struct upid {
   int nr ;
   struct pid_namespace *ns ;
   struct hlist_node pid_chain ;
};
struct pid {
   atomic_t count ;
   unsigned int level ;
   struct hlist_head tasks[3U] ;
   struct rcu_head rcu ;
   struct upid numbers[1U] ;
};
struct pid_link {
   struct hlist_node node ;
   struct pid *pid ;
};
struct __anonstruct_seccomp_t_157 {
   int mode ;
};
typedef struct __anonstruct_seccomp_t_157 seccomp_t;
struct plist_head {
   struct list_head node_list ;
};
struct plist_node {
   int prio ;
   struct list_head prio_list ;
   struct list_head node_list ;
};
struct rt_mutex_waiter;
struct rlimit {
   unsigned long rlim_cur ;
   unsigned long rlim_max ;
};
struct timerqueue_node {
   struct rb_node node ;
   ktime_t expires ;
};
struct timerqueue_head {
   struct rb_root head ;
   struct timerqueue_node *next ;
};
struct hrtimer_clock_base;
struct hrtimer_cpu_base;
enum hrtimer_restart {
    HRTIMER_NORESTART = 0,
    HRTIMER_RESTART = 1
} ;
struct hrtimer {
   struct timerqueue_node node ;
   ktime_t _softexpires ;
   enum hrtimer_restart (*function)(struct hrtimer * ) ;
   struct hrtimer_clock_base *base ;
   unsigned long state ;
   int start_pid ;
   void *start_site ;
   char start_comm[16U] ;
};
struct hrtimer_clock_base {
   struct hrtimer_cpu_base *cpu_base ;
   int index ;
   clockid_t clockid ;
   struct timerqueue_head active ;
   ktime_t resolution ;
   ktime_t (*get_time)(void) ;
   ktime_t softirq_time ;
   ktime_t offset ;
};
struct hrtimer_cpu_base {
   raw_spinlock_t lock ;
   unsigned long active_bases ;
   ktime_t expires_next ;
   int hres_active ;
   int hang_detected ;
   unsigned long nr_events ;
   unsigned long nr_retries ;
   unsigned long nr_hangs ;
   ktime_t max_hang_time ;
   struct hrtimer_clock_base clock_base[3U] ;
};
struct task_io_accounting {
   u64 rchar ;
   u64 wchar ;
   u64 syscr ;
   u64 syscw ;
   u64 read_bytes ;
   u64 write_bytes ;
   u64 cancelled_write_bytes ;
};
struct latency_record {
   unsigned long backtrace[12U] ;
   unsigned int count ;
   unsigned long time ;
   unsigned long max ;
};
typedef int32_t key_serial_t;
typedef uint32_t key_perm_t;
struct key;
struct signal_struct;
struct key_type;
struct keyring_list;
struct key_user;
union __anonunion_ldv_15710_158 {
   time_t expiry ;
   time_t revoked_at ;
};
union __anonunion_type_data_159 {
   struct list_head link ;
   unsigned long x[2U] ;
   void *p[2U] ;
   int reject_error ;
};
union __anonunion_payload_160 {
   unsigned long value ;
   void *rcudata ;
   void *data ;
   struct keyring_list *subscriptions ;
};
struct key {
   atomic_t usage ;
   key_serial_t serial ;
   struct rb_node serial_node ;
   struct key_type *type ;
   struct rw_semaphore sem ;
   struct key_user *user ;
   void *security ;
   union __anonunion_ldv_15710_158 ldv_15710 ;
   uid_t uid ;
   gid_t gid ;
   key_perm_t perm ;
   unsigned short quotalen ;
   unsigned short datalen ;
   unsigned long flags ;
   char *description ;
   union __anonunion_type_data_159 type_data ;
   union __anonunion_payload_160 payload ;
};
struct audit_context;
struct group_info {
   atomic_t usage ;
   int ngroups ;
   int nblocks ;
   gid_t small_block[32U] ;
   gid_t *blocks[0U] ;
};
struct thread_group_cred {
   atomic_t usage ;
   pid_t tgid ;
   spinlock_t lock ;
   struct key *session_keyring ;
   struct key *process_keyring ;
   struct rcu_head rcu ;
};
struct cred {
   atomic_t usage ;
   atomic_t subscribers ;
   void *put_addr ;
   unsigned int magic ;
   uid_t uid ;
   gid_t gid ;
   uid_t suid ;
   gid_t sgid ;
   uid_t euid ;
   gid_t egid ;
   uid_t fsuid ;
   gid_t fsgid ;
   unsigned int securebits ;
   kernel_cap_t cap_inheritable ;
   kernel_cap_t cap_permitted ;
   kernel_cap_t cap_effective ;
   kernel_cap_t cap_bset ;
   unsigned char jit_keyring ;
   struct key *thread_keyring ;
   struct key *request_key_auth ;
   struct thread_group_cred *tgcred ;
   void *security ;
   struct user_struct *user ;
   struct user_namespace *user_ns ;
   struct group_info *group_info ;
   struct rcu_head rcu ;
};
struct llist_node;
struct llist_node {
   struct llist_node *next ;
};
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct cfs_rq;
struct kvec {
   void *iov_base ;
   size_t iov_len ;
};
struct sighand_struct {
   atomic_t count ;
   struct k_sigaction action[64U] ;
   spinlock_t siglock ;
   wait_queue_head_t signalfd_wqh ;
};
struct pacct_struct {
   int ac_flag ;
   long ac_exitcode ;
   unsigned long ac_mem ;
   cputime_t ac_utime ;
   cputime_t ac_stime ;
   unsigned long ac_minflt ;
   unsigned long ac_majflt ;
};
struct cpu_itimer {
   cputime_t expires ;
   cputime_t incr ;
   u32 error ;
   u32 incr_error ;
};
struct task_cputime {
   cputime_t utime ;
   cputime_t stime ;
   unsigned long long sum_exec_runtime ;
};
struct thread_group_cputimer {
   struct task_cputime cputime ;
   int running ;
   raw_spinlock_t lock ;
};
struct autogroup;
struct taskstats;
struct tty_audit_buf;
struct signal_struct {
   atomic_t sigcnt ;
   atomic_t live ;
   int nr_threads ;
   wait_queue_head_t wait_chldexit ;
   struct task_struct *curr_target ;
   struct sigpending shared_pending ;
   int group_exit_code ;
   int notify_count ;
   struct task_struct *group_exit_task ;
   int group_stop_count ;
   unsigned int flags ;
   struct list_head posix_timers ;
   struct hrtimer real_timer ;
   struct pid *leader_pid ;
   ktime_t it_real_incr ;
   struct cpu_itimer it[2U] ;
   struct thread_group_cputimer cputimer ;
   struct task_cputime cputime_expires ;
   struct list_head cpu_timers[3U] ;
   struct pid *tty_old_pgrp ;
   int leader ;
   struct tty_struct *tty ;
   struct autogroup *autogroup ;
   cputime_t utime ;
   cputime_t stime ;
   cputime_t cutime ;
   cputime_t cstime ;
   cputime_t gtime ;
   cputime_t cgtime ;
   cputime_t prev_utime ;
   cputime_t prev_stime ;
   unsigned long nvcsw ;
   unsigned long nivcsw ;
   unsigned long cnvcsw ;
   unsigned long cnivcsw ;
   unsigned long min_flt ;
   unsigned long maj_flt ;
   unsigned long cmin_flt ;
   unsigned long cmaj_flt ;
   unsigned long inblock ;
   unsigned long oublock ;
   unsigned long cinblock ;
   unsigned long coublock ;
   unsigned long maxrss ;
   unsigned long cmaxrss ;
   struct task_io_accounting ioac ;
   unsigned long long sum_sched_runtime ;
   struct rlimit rlim[16U] ;
   struct pacct_struct pacct ;
   struct taskstats *stats ;
   unsigned int audit_tty ;
   struct tty_audit_buf *tty_audit_buf ;
   struct rw_semaphore threadgroup_fork_lock ;
   int oom_adj ;
   int oom_score_adj ;
   int oom_score_adj_min ;
   struct mutex cred_guard_mutex ;
};
struct user_struct {
   atomic_t __count ;
   atomic_t processes ;
   atomic_t files ;
   atomic_t sigpending ;
   atomic_t inotify_watches ;
   atomic_t inotify_devs ;
   atomic_t fanotify_listeners ;
   atomic_long_t epoll_watches ;
   unsigned long mq_bytes ;
   unsigned long locked_shm ;
   struct key *uid_keyring ;
   struct key *session_keyring ;
   struct hlist_node uidhash_node ;
   uid_t uid ;
   struct user_namespace *user_ns ;
   atomic_long_t locked_vm ;
};
struct backing_dev_info;
struct reclaim_state;
struct sched_info {
   unsigned long pcount ;
   unsigned long long run_delay ;
   unsigned long long last_arrival ;
   unsigned long long last_queued ;
};
struct task_delay_info {
   spinlock_t lock ;
   unsigned int flags ;
   struct timespec blkio_start ;
   struct timespec blkio_end ;
   u64 blkio_delay ;
   u64 swapin_delay ;
   u32 blkio_count ;
   u32 swapin_count ;
   struct timespec freepages_start ;
   struct timespec freepages_end ;
   u64 freepages_delay ;
   u32 freepages_count ;
};
struct io_context;
struct pipe_inode_info;
struct rq;
struct sched_class {
   struct sched_class  const  *next ;
   void (*enqueue_task)(struct rq * , struct task_struct * , int  ) ;
   void (*dequeue_task)(struct rq * , struct task_struct * , int  ) ;
   void (*yield_task)(struct rq * ) ;
   bool (*yield_to_task)(struct rq * , struct task_struct * , bool  ) ;
   void (*check_preempt_curr)(struct rq * , struct task_struct * , int  ) ;
   struct task_struct *(*pick_next_task)(struct rq * ) ;
   void (*put_prev_task)(struct rq * , struct task_struct * ) ;
   int (*select_task_rq)(struct task_struct * , int  , int  ) ;
   void (*pre_schedule)(struct rq * , struct task_struct * ) ;
   void (*post_schedule)(struct rq * ) ;
   void (*task_waking)(struct task_struct * ) ;
   void (*task_woken)(struct rq * , struct task_struct * ) ;
   void (*set_cpus_allowed)(struct task_struct * , struct cpumask  const  * ) ;
   void (*rq_online)(struct rq * ) ;
   void (*rq_offline)(struct rq * ) ;
   void (*set_curr_task)(struct rq * ) ;
   void (*task_tick)(struct rq * , struct task_struct * , int  ) ;
   void (*task_fork)(struct task_struct * ) ;
   void (*switched_from)(struct rq * , struct task_struct * ) ;
   void (*switched_to)(struct rq * , struct task_struct * ) ;
   void (*prio_changed)(struct rq * , struct task_struct * , int  ) ;
   unsigned int (*get_rr_interval)(struct rq * , struct task_struct * ) ;
   void (*task_move_group)(struct task_struct * , int  ) ;
};
struct load_weight {
   unsigned long weight ;
   unsigned long inv_weight ;
};
struct sched_statistics {
   u64 wait_start ;
   u64 wait_max ;
   u64 wait_count ;
   u64 wait_sum ;
   u64 iowait_count ;
   u64 iowait_sum ;
   u64 sleep_start ;
   u64 sleep_max ;
   s64 sum_sleep_runtime ;
   u64 block_start ;
   u64 block_max ;
   u64 exec_max ;
   u64 slice_max ;
   u64 nr_migrations_cold ;
   u64 nr_failed_migrations_affine ;
   u64 nr_failed_migrations_running ;
   u64 nr_failed_migrations_hot ;
   u64 nr_forced_migrations ;
   u64 nr_wakeups ;
   u64 nr_wakeups_sync ;
   u64 nr_wakeups_migrate ;
   u64 nr_wakeups_local ;
   u64 nr_wakeups_remote ;
   u64 nr_wakeups_affine ;
   u64 nr_wakeups_affine_attempts ;
   u64 nr_wakeups_passive ;
   u64 nr_wakeups_idle ;
};
struct sched_entity {
   struct load_weight load ;
   struct rb_node run_node ;
   struct list_head group_node ;
   unsigned int on_rq ;
   u64 exec_start ;
   u64 sum_exec_runtime ;
   u64 vruntime ;
   u64 prev_sum_exec_runtime ;
   u64 nr_migrations ;
   struct sched_statistics statistics ;
   struct sched_entity *parent ;
   struct cfs_rq *cfs_rq ;
   struct cfs_rq *my_q ;
};
struct rt_rq;
struct sched_rt_entity {
   struct list_head run_list ;
   unsigned long timeout ;
   unsigned int time_slice ;
   int nr_cpus_allowed ;
   struct sched_rt_entity *back ;
   struct sched_rt_entity *parent ;
   struct rt_rq *rt_rq ;
   struct rt_rq *my_q ;
};
struct mem_cgroup;
struct memcg_batch_info {
   int do_batch ;
   struct mem_cgroup *memcg ;
   unsigned long nr_pages ;
   unsigned long memsw_nr_pages ;
};
struct files_struct;
struct irqaction;
struct css_set;
struct compat_robust_list_head;
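/* struct task_struct: the full process descriptor.  It is reproduced here
   because the driver code below uses get_current(), wait queue entries and
   work items, all of which refer to it. */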
struct task_struct {
   long volatile   state ;
   void *stack ;
   atomic_t usage ;
   unsigned int flags ;
   unsigned int ptrace ;
   struct llist_node wake_entry ;
   int on_cpu ;
   int on_rq ;
   int prio ;
   int static_prio ;
   int normal_prio ;
   unsigned int rt_priority ;
   struct sched_class  const  *sched_class ;
   struct sched_entity se ;
   struct sched_rt_entity rt ;
   struct hlist_head preempt_notifiers ;
   unsigned char fpu_counter ;
   unsigned int policy ;
   cpumask_t cpus_allowed ;
   struct sched_info sched_info ;
   struct list_head tasks ;
   struct plist_node pushable_tasks ;
   struct mm_struct *mm ;
   struct mm_struct *active_mm ;
   unsigned char brk_randomized : 1 ;
   int exit_state ;
   int exit_code ;
   int exit_signal ;
   int pdeath_signal ;
   unsigned int jobctl ;
   unsigned int personality ;
   unsigned char did_exec : 1 ;
   unsigned char in_execve : 1 ;
   unsigned char in_iowait : 1 ;
   unsigned char sched_reset_on_fork : 1 ;
   unsigned char sched_contributes_to_load : 1 ;
   pid_t pid ;
   pid_t tgid ;
   unsigned long stack_canary ;
   struct task_struct *real_parent ;
   struct task_struct *parent ;
   struct list_head children ;
   struct list_head sibling ;
   struct task_struct *group_leader ;
   struct list_head ptraced ;
   struct list_head ptrace_entry ;
   struct pid_link pids[3U] ;
   struct list_head thread_group ;
   struct completion *vfork_done ;
   int *set_child_tid ;
   int *clear_child_tid ;
   cputime_t utime ;
   cputime_t stime ;
   cputime_t utimescaled ;
   cputime_t stimescaled ;
   cputime_t gtime ;
   cputime_t prev_utime ;
   cputime_t prev_stime ;
   unsigned long nvcsw ;
   unsigned long nivcsw ;
   struct timespec start_time ;
   struct timespec real_start_time ;
   unsigned long min_flt ;
   unsigned long maj_flt ;
   struct task_cputime cputime_expires ;
   struct list_head cpu_timers[3U] ;
   struct cred  const  *real_cred ;
   struct cred  const  *cred ;
   struct cred *replacement_session_keyring ;
   char comm[16U] ;
   int link_count ;
   int total_link_count ;
   struct sysv_sem sysvsem ;
   unsigned long last_switch_count ;
   struct thread_struct thread ;
   struct fs_struct *fs ;
   struct files_struct *files ;
   struct nsproxy *nsproxy ;
   struct signal_struct *signal ;
   struct sighand_struct *sighand ;
   sigset_t blocked ;
   sigset_t real_blocked ;
   sigset_t saved_sigmask ;
   struct sigpending pending ;
   unsigned long sas_ss_sp ;
   size_t sas_ss_size ;
   int (*notifier)(void * ) ;
   void *notifier_data ;
   sigset_t *notifier_mask ;
   struct audit_context *audit_context ;
   uid_t loginuid ;
   unsigned int sessionid ;
   seccomp_t seccomp ;
   u32 parent_exec_id ;
   u32 self_exec_id ;
   spinlock_t alloc_lock ;
   struct irqaction *irqaction ;
   raw_spinlock_t pi_lock ;
   struct plist_head pi_waiters ;
   struct rt_mutex_waiter *pi_blocked_on ;
   struct mutex_waiter *blocked_on ;
   unsigned int irq_events ;
   unsigned long hardirq_enable_ip ;
   unsigned long hardirq_disable_ip ;
   unsigned int hardirq_enable_event ;
   unsigned int hardirq_disable_event ;
   int hardirqs_enabled ;
   int hardirq_context ;
   unsigned long softirq_disable_ip ;
   unsigned long softirq_enable_ip ;
   unsigned int softirq_disable_event ;
   unsigned int softirq_enable_event ;
   int softirqs_enabled ;
   int softirq_context ;
   u64 curr_chain_key ;
   int lockdep_depth ;
   unsigned int lockdep_recursion ;
   struct held_lock held_locks[48U] ;
   gfp_t lockdep_reclaim_gfp ;
   void *journal_info ;
   struct bio_list *bio_list ;
   struct blk_plug *plug ;
   struct reclaim_state *reclaim_state ;
   struct backing_dev_info *backing_dev_info ;
   struct io_context *io_context ;
   unsigned long ptrace_message ;
   siginfo_t *last_siginfo ;
   struct task_io_accounting ioac ;
   u64 acct_rss_mem1 ;
   u64 acct_vm_mem1 ;
   cputime_t acct_timexpd ;
   nodemask_t mems_allowed ;
   int mems_allowed_change_disable ;
   int cpuset_mem_spread_rotor ;
   int cpuset_slab_spread_rotor ;
   struct css_set *cgroups ;
   struct list_head cg_list ;
   struct robust_list_head *robust_list ;
   struct compat_robust_list_head *compat_robust_list ;
   struct list_head pi_state_list ;
   struct futex_pi_state *pi_state_cache ;
   struct perf_event_context *perf_event_ctxp[2U] ;
   struct mutex perf_event_mutex ;
   struct list_head perf_event_list ;
   struct mempolicy *mempolicy ;
   short il_next ;
   short pref_node_fork ;
   struct rcu_head rcu ;
   struct pipe_inode_info *splice_pipe ;
   struct task_delay_info *delays ;
   int make_it_fail ;
   int nr_dirtied ;
   int nr_dirtied_pause ;
   int latency_record_count ;
   struct latency_record latency_record[32U] ;
   unsigned long timer_slack_ns ;
   unsigned long default_timer_slack_ns ;
   struct list_head *scm_work_list ;
   unsigned long trace ;
   unsigned long trace_recursion ;
   struct memcg_batch_info memcg_batch ;
   atomic_t ptrace_bp_refcnt ;
};
enum irqreturn {
    IRQ_NONE = 0,
    IRQ_HANDLED = 1,
    IRQ_WAKE_THREAD = 2
} ;
typedef enum irqreturn irqreturn_t;
struct proc_dir_entry;
struct exception_table_entry {
   unsigned long insn ;
   unsigned long fixup ;
};
struct irqaction {
   irqreturn_t (*handler)(int  , void * ) ;
   unsigned long flags ;
   void *dev_id ;
   void *percpu_dev_id ;
   struct irqaction *next ;
   int irq ;
   irqreturn_t (*thread_fn)(int  , void * ) ;
   struct task_struct *thread ;
   unsigned long thread_flags ;
   unsigned long thread_mask ;
   char const   *name ;
   struct proc_dir_entry *dir ;
};
struct klist_node;
struct klist_node {
   void *n_klist ;
   struct list_head n_node ;
   struct kref n_ref ;
};
struct dma_map_ops;
struct dev_archdata {
   void *acpi_handle ;
   struct dma_map_ops *dma_ops ;
   void *iommu ;
};
struct device_private;
struct device_driver;
struct driver_private;
struct class;
struct subsys_private;
struct bus_type;
struct device_node;
struct iommu_ops;
struct bus_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct bus_type * , char * ) ;
   ssize_t (*store)(struct bus_type * , char const   * , size_t  ) ;
};
struct device_attribute;
struct driver_attribute;
struct bus_type {
   char const   *name ;
   struct bus_attribute *bus_attrs ;
   struct device_attribute *dev_attrs ;
   struct driver_attribute *drv_attrs ;
   int (*match)(struct device * , struct device_driver * ) ;
   int (*uevent)(struct device * , struct kobj_uevent_env * ) ;
   int (*probe)(struct device * ) ;
   int (*remove)(struct device * ) ;
   void (*shutdown)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*resume)(struct device * ) ;
   struct dev_pm_ops  const  *pm ;
   struct iommu_ops *iommu_ops ;
   struct subsys_private *p ;
};
struct of_device_id;
struct device_driver {
   char const   *name ;
   struct bus_type *bus ;
   struct module *owner ;
   char const   *mod_name ;
   bool suppress_bind_attrs ;
   struct of_device_id  const  *of_match_table ;
   int (*probe)(struct device * ) ;
   int (*remove)(struct device * ) ;
   void (*shutdown)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*resume)(struct device * ) ;
   struct attribute_group  const  **groups ;
   struct dev_pm_ops  const  *pm ;
   struct driver_private *p ;
};
struct driver_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct device_driver * , char * ) ;
   ssize_t (*store)(struct device_driver * , char const   * , size_t  ) ;
};
struct class_attribute;
struct class {
   char const   *name ;
   struct module *owner ;
   struct class_attribute *class_attrs ;
   struct device_attribute *dev_attrs ;
   struct bin_attribute *dev_bin_attrs ;
   struct kobject *dev_kobj ;
   int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ;
   char *(*devnode)(struct device * , mode_t * ) ;
   void (*class_release)(struct class * ) ;
   void (*dev_release)(struct device * ) ;
   int (*suspend)(struct device * , pm_message_t  ) ;
   int (*resume)(struct device * ) ;
   struct kobj_ns_type_operations  const  *ns_type ;
   void const   *(*namespace)(struct device * ) ;
   struct dev_pm_ops  const  *pm ;
   struct subsys_private *p ;
};
struct device_type;
struct class_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct class * , struct class_attribute * , char * ) ;
   ssize_t (*store)(struct class * , struct class_attribute * , char const   * , size_t  ) ;
   void const   *(*namespace)(struct class * , struct class_attribute  const  * ) ;
};
struct device_type {
   char const   *name ;
   struct attribute_group  const  **groups ;
   int (*uevent)(struct device * , struct kobj_uevent_env * ) ;
   char *(*devnode)(struct device * , mode_t * ) ;
   void (*release)(struct device * ) ;
   struct dev_pm_ops  const  *pm ;
};
struct device_attribute {
   struct attribute attr ;
   ssize_t (*show)(struct device * , struct device_attribute * , char * ) ;
   ssize_t (*store)(struct device * , struct device_attribute * , char const   * ,
                    size_t  ) ;
};
struct device_dma_parameters {
   unsigned int max_segment_size ;
   unsigned long segment_boundary_mask ;
};
struct dma_coherent_mem;
struct device {
   struct device *parent ;
   struct device_private *p ;
   struct kobject kobj ;
   char const   *init_name ;
   struct device_type  const  *type ;
   struct mutex mutex ;
   struct bus_type *bus ;
   struct device_driver *driver ;
   void *platform_data ;
   struct dev_pm_info power ;
   struct dev_pm_domain *pm_domain ;
   int numa_node ;
   u64 *dma_mask ;
   u64 coherent_dma_mask ;
   struct device_dma_parameters *dma_parms ;
   struct list_head dma_pools ;
   struct dma_coherent_mem *dma_mem ;
   struct dev_archdata archdata ;
   struct device_node *of_node ;
   dev_t devt ;
   spinlock_t devres_lock ;
   struct list_head devres_head ;
   struct klist_node knode_class ;
   struct class *class ;
   struct attribute_group  const  **groups ;
   void (*release)(struct device * ) ;
};
struct wakeup_source {
   char *name ;
   struct list_head entry ;
   spinlock_t lock ;
   struct timer_list timer ;
   unsigned long timer_expires ;
   ktime_t total_time ;
   ktime_t max_time ;
   ktime_t last_time ;
   unsigned long event_count ;
   unsigned long active_count ;
   unsigned long relax_count ;
   unsigned long hit_count ;
   unsigned char active : 1 ;
};
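/* MTD subsystem definitions start here.  struct mtd_info is the per-device
   descriptor; in this kernel version its operations (erase, read, write,
   panic_write, block_isbad, ...) are plain function pointers that the
   inline wrappers further below call directly. */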
struct otp_info {
   __u32 start ;
   __u32 length ;
   __u32 locked ;
};
struct nand_oobfree {
   __u32 offset ;
   __u32 length ;
};
struct mtd_ecc_stats {
   __u32 corrected ;
   __u32 failed ;
   __u32 badblocks ;
   __u32 bbtblocks ;
};
struct mtd_info;
struct erase_info {
   struct mtd_info *mtd ;
   uint64_t addr ;
   uint64_t len ;
   uint64_t fail_addr ;
   u_long time ;
   u_long retries ;
   unsigned int dev ;
   unsigned int cell ;
   void (*callback)(struct erase_info * ) ;
   u_long priv ;
   u_char state ;
   struct erase_info *next ;
};
struct mtd_erase_region_info {
   uint64_t offset ;
   uint32_t erasesize ;
   uint32_t numblocks ;
   unsigned long *lockmap ;
};
struct mtd_oob_ops {
   unsigned int mode ;
   size_t len ;
   size_t retlen ;
   size_t ooblen ;
   size_t oobretlen ;
   uint32_t ooboffs ;
   uint8_t *datbuf ;
   uint8_t *oobbuf ;
};
struct nand_ecclayout {
   __u32 eccbytes ;
   __u32 eccpos[448U] ;
   __u32 oobavail ;
   struct nand_oobfree oobfree[32U] ;
};
struct mtd_info {
   u_char type ;
   uint32_t flags ;
   uint64_t size ;
   uint32_t erasesize ;
   uint32_t writesize ;
   uint32_t writebufsize ;
   uint32_t oobsize ;
   uint32_t oobavail ;
   unsigned int erasesize_shift ;
   unsigned int writesize_shift ;
   unsigned int erasesize_mask ;
   unsigned int writesize_mask ;
   char const   *name ;
   int index ;
   struct nand_ecclayout *ecclayout ;
   int numeraseregions ;
   struct mtd_erase_region_info *eraseregions ;
   int (*erase)(struct mtd_info * , struct erase_info * ) ;
   int (*point)(struct mtd_info * , loff_t  , size_t  , size_t * , void ** , resource_size_t * ) ;
   void (*unpoint)(struct mtd_info * , loff_t  , size_t  ) ;
   unsigned long (*get_unmapped_area)(struct mtd_info * , unsigned long  , unsigned long  ,
                                      unsigned long  ) ;
   int (*read)(struct mtd_info * , loff_t  , size_t  , size_t * , u_char * ) ;
   int (*write)(struct mtd_info * , loff_t  , size_t  , size_t * , u_char const   * ) ;
   int (*panic_write)(struct mtd_info * , loff_t  , size_t  , size_t * , u_char const   * ) ;
   int (*read_oob)(struct mtd_info * , loff_t  , struct mtd_oob_ops * ) ;
   int (*write_oob)(struct mtd_info * , loff_t  , struct mtd_oob_ops * ) ;
   int (*get_fact_prot_info)(struct mtd_info * , struct otp_info * , size_t  ) ;
   int (*read_fact_prot_reg)(struct mtd_info * , loff_t  , size_t  , size_t * , u_char * ) ;
   int (*get_user_prot_info)(struct mtd_info * , struct otp_info * , size_t  ) ;
   int (*read_user_prot_reg)(struct mtd_info * , loff_t  , size_t  , size_t * , u_char * ) ;
   int (*write_user_prot_reg)(struct mtd_info * , loff_t  , size_t  , size_t * , u_char * ) ;
   int (*lock_user_prot_reg)(struct mtd_info * , loff_t  , size_t  ) ;
   int (*writev)(struct mtd_info * , struct kvec  const  * , unsigned long  , loff_t  ,
                 size_t * ) ;
   void (*sync)(struct mtd_info * ) ;
   int (*lock)(struct mtd_info * , loff_t  , uint64_t  ) ;
   int (*unlock)(struct mtd_info * , loff_t  , uint64_t  ) ;
   int (*is_locked)(struct mtd_info * , loff_t  , uint64_t  ) ;
   int (*block_isbad)(struct mtd_info * , loff_t  ) ;
   int (*suspend)(struct mtd_info * ) ;
   void (*resume)(struct mtd_info * ) ;
   struct backing_dev_info *backing_dev_info ;
   int (*block_markbad)(struct mtd_info * , loff_t  ) ;
   struct notifier_block reboot_notifier ;
   struct mtd_ecc_stats ecc_stats ;
   int subpage_sft ;
   void *priv ;
   struct module *owner ;
   struct device dev ;
   int usecount ;
   int (*get_device)(struct mtd_info * ) ;
   void (*put_device)(struct mtd_info * ) ;
};
struct mtd_notifier {
   void (*add)(struct mtd_info * ) ;
   void (*remove)(struct mtd_info * ) ;
   struct list_head list ;
};
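/* kmsg dumper interface: a registered struct kmsg_dumper has its dump()
   callback invoked for the events in enum kmsg_dump_reason (oops, panic,
   reboot, ...); the callback receives two chunks of the kernel log buffer
   as (pointer, length) pairs. */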
enum kmsg_dump_reason {
    KMSG_DUMP_OOPS = 0,
    KMSG_DUMP_PANIC = 1,
    KMSG_DUMP_KEXEC = 2,
    KMSG_DUMP_RESTART = 3,
    KMSG_DUMP_HALT = 4,
    KMSG_DUMP_POWEROFF = 5,
    KMSG_DUMP_EMERG = 6
} ;
struct kmsg_dumper {
   void (*dump)(struct kmsg_dumper * , enum kmsg_dump_reason  , char const   * , unsigned long  ,
                char const   * , unsigned long  ) ;
   struct list_head list ;
   int registered ;
};
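/* Driver state.  mtdoops logs the tail of the kernel log to an MTD partition
   on oops/panic: 'dump' is the registered kmsg dumper, 'mtd' and 'oops_pages'
   describe the target device in record-sized units, 'nextpage'/'nextcount'
   track where the next record goes, 'oops_page_used' is a one-bit-per-page
   usage bitmap, and 'oops_buf' is a preallocated record buffer. */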
struct mtdoops_context {
   struct kmsg_dumper dump ;
   int mtd_index ;
   struct work_struct work_erase ;
   struct work_struct work_write ;
   struct mtd_info *mtd ;
   int oops_pages ;
   int nextpage ;
   int nextcount ;
   unsigned long *oops_page_used ;
   void *oops_buf ;
};
void *memcpy(void * , void const   * , unsigned long  ) ;
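/* CIL-expanded copies of the x86 bit operations; the driver uses them on the
   oops_page_used bitmap. */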
__inline static void set_bit(unsigned int nr , unsigned long volatile   *addr ) 
{ 


  {
  __asm__  volatile   (".section .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.previous\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile   *)addr)): "Ir" (nr): "memory");
  return;
}
}
__inline static void clear_bit(int nr , unsigned long volatile   *addr ) 
{ 


  {
  __asm__  volatile   (".section .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.previous\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile   *)addr)): "Ir" (nr));
  return;
}
}
__inline static int variable_test_bit(int nr , unsigned long const volatile   *addr ) 
{ 
  int oldbit ;

  {
  __asm__  volatile   ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)),
                       "Ir" (nr));
  return (oldbit);
}
}
extern int printk(char const   *  , ...) ;
extern unsigned long simple_strtoul(char const   * , char ** , unsigned int  ) ;
extern void __bad_percpu_size(void) ;
extern struct task_struct *current_task ;
__inline static struct task_struct *get_current(void) 
{ 
  struct task_struct *pfo_ret__ ;

  {
  switch (8UL) {
  case 1UL: 
  __asm__  ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task));
  goto ldv_2778;
  case 2UL: 
  __asm__  ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task));
  goto ldv_2778;
  case 4UL: 
  __asm__  ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task));
  goto ldv_2778;
  case 8UL: 
  __asm__  ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task));
  goto ldv_2778;
  default: 
  __bad_percpu_size();
  }
  ldv_2778: ;
  return (pfo_ret__);
}
}
extern void __xchg_wrong_size(void) ;
extern void *memset(void * , int  , size_t  ) ;
extern size_t strlen(char const   * ) ;
extern int strcmp(char const   * , char const   * ) ;
__inline static u64 div_u64_rem(u64 dividend , u32 divisor , u32 *remainder ) 
{ 


  {
  *remainder = (u32 )(dividend % (u64 )divisor);
  return (dividend / (u64 )divisor);
}
}
__inline static u64 div_u64(u64 dividend , u32 divisor ) 
{ 
  u32 remainder ;
  u64 tmp ;

  {
  tmp = div_u64_rem(dividend, divisor, & remainder);
  return (tmp);
}
}
__inline static void INIT_LIST_HEAD(struct list_head *list ) 
{ 


  {
  list->next = list;
  list->prev = list;
  return;
}
}
extern void lockdep_init_map(struct lockdep_map * , char const   * , struct lock_class_key * ,
                             int  ) ;
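/* The ldv_* and __ldv_* declarations appear to be stubs inserted by the LDV
   (Linux Driver Verification) toolchain to model spinlock usage; they stand
   in for the spin_lock calls made elsewhere in the original driver. */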
extern void __ldv_spin_lock(spinlock_t * ) ;
void ldv___ldv_spin_lock_4(spinlock_t *ldv_func_arg1 ) ;
void ldv___ldv_spin_lock_6(spinlock_t *ldv_func_arg1 ) ;
void ldv_spin_lock_node_size_lock_of_pglist_data(void) ;
void ldv_spin_lock_siglock_of_sighand_struct(void) ;
extern int default_wake_function(wait_queue_t * , unsigned int  , int  , void * ) ;
extern void __init_waitqueue_head(wait_queue_head_t * , struct lock_class_key * ) ;
extern void add_wait_queue(wait_queue_head_t * , wait_queue_t * ) ;
extern void remove_wait_queue(wait_queue_head_t * , wait_queue_t * ) ;
extern void __wake_up(wait_queue_head_t * , unsigned int  , int  , void * ) ;
extern void __init_work(struct work_struct * , int  ) ;
extern int schedule_work(struct work_struct * ) ;
extern bool flush_work_sync(struct work_struct * ) ;
extern void *vmalloc(unsigned long  ) ;
extern void vfree(void const   * ) ;
extern void schedule(void) ;
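/* Thin inline wrappers that dispatch through the corresponding mtd_info
   method pointers. */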
__inline static int mtd_erase(struct mtd_info *mtd , struct erase_info *instr ) 
{ 
  int tmp ;

  {
  tmp = (*(mtd->erase))(mtd, instr);
  return (tmp);
}
}
__inline static int mtd_read(struct mtd_info *mtd , loff_t from , size_t len , size_t *retlen ,
                             u_char *buf ) 
{ 
  int tmp ;

  {
  tmp = (*(mtd->read))(mtd, from, len, retlen, buf);
  return (tmp);
}
}
__inline static int mtd_write(struct mtd_info *mtd , loff_t to , size_t len , size_t *retlen ,
                              u_char const   *buf ) 
{ 
  int tmp ;

  {
  tmp = (*(mtd->write))(mtd, to, len, retlen, buf);
  return (tmp);
}
}
__inline static int mtd_panic_write(struct mtd_info *mtd , loff_t to , size_t len ,
                                    size_t *retlen , u_char const   *buf ) 
{ 
  int tmp ;

  {
  tmp = (*(mtd->panic_write))(mtd, to, len, retlen, buf);
  return (tmp);
}
}
__inline static int mtd_block_isbad(struct mtd_info *mtd , loff_t ofs ) 
{ 
  int tmp ;

  {
  tmp = (*(mtd->block_isbad))(mtd, ofs);
  return (tmp);
}
}
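/* mtd_div_by_eb(): divide a size by the eraseblock size, using the shift
 * fast path when the eraseblock size is a power of two
 * (erasesize_shift != 0). */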
__inline static uint32_t mtd_div_by_eb(uint64_t sz , struct mtd_info *mtd ) 
{ 
  uint32_t __base ;
  uint32_t __rem ;

  {
  if (mtd->erasesize_shift != 0U) {
    return ((uint32_t )(sz >> (int )mtd->erasesize_shift));
  } else {

  }
  __base = mtd->erasesize;
  __rem = (uint32_t )(sz % (uint64_t )__base);
  sz = sz / (uint64_t )__base;
  return ((uint32_t )sz);
}
}
extern void register_mtd_user(struct mtd_notifier * ) ;
extern int unregister_mtd_user(struct mtd_notifier * ) ;
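/* mtd_is_bitflip(): true for -117 (-EUCLEAN), i.e. a corrected bitflip. */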
__inline static int mtd_is_bitflip(int err ) 
{ 


  {
  return (err == -117);
}
}
extern int kmsg_dump_register(struct kmsg_dumper * ) ;
extern int kmsg_dump_unregister(struct kmsg_dumper * ) ;
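/* Module parameters (CIL-expanded): record_size is the size of one oops
 * record (default 4096 bytes), mtddev names the MTD device or partition to
 * attach to, and dump_oops == 0 suppresses dumps for reason 0 (oops) in
 * mtdoops_do_dump(). */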
static unsigned long record_size  =    4096UL;
static char mtddev[80U]  ;
static int dump_oops  =    1;
static struct mtdoops_context oops_cxt  ;
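/* oops_page_used bitmap helpers: one bit per record page, tracking which
 * pages of the MTD partition currently hold data. */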
static void mark_page_used(struct mtdoops_context *cxt , int page ) 
{ 


  {
  set_bit((unsigned int )page, (unsigned long volatile   *)cxt->oops_page_used);
  return;
}
}
static void mark_page_unused(struct mtdoops_context *cxt , int page ) 
{ 


  {
  clear_bit(page, (unsigned long volatile   *)cxt->oops_page_used);
  return;
}
}
static int page_is_used(struct mtdoops_context *cxt , int page ) 
{ 
  int tmp ;

  {
  tmp = variable_test_bit(page, (unsigned long const volatile   *)cxt->oops_page_used);
  return (tmp);
}
}
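/* Erase completion callback: wakes the waitqueue stashed in erase->priv. */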
static void mtdoops_erase_callback(struct erase_info *done ) 
{ 
  wait_queue_head_t *wait_q ;

  {
  wait_q = (wait_queue_head_t *)done->priv;
  __wake_up(wait_q, 3U, 1, 0);
  return;
}
}
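/* mtdoops_erase_block(): synchronously erase the eraseblock containing
 * 'offset'.  The erase is submitted via mtd_erase() and the caller sleeps
 * on a local waitqueue until mtdoops_erase_callback() fires; on success
 * every record page in the block is marked unused. */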
static int mtdoops_erase_block(struct mtdoops_context *cxt , int offset ) 
{ 
  struct mtd_info *mtd ;
  u32 start_page_offset ;
  uint32_t tmp ;
  u32 start_page ;
  u32 erase_pages ;
  struct erase_info erase ;
  wait_queue_t wait ;
  struct task_struct *tmp___0 ;
  wait_queue_head_t wait_q ;
  int ret ;
  int page ;
  struct lock_class_key __key ;
  long volatile   __x ;
  u8 volatile   *__ptr ;
  struct task_struct *tmp___1 ;
  u16 volatile   *__ptr___0 ;
  struct task_struct *tmp___2 ;
  u32 volatile   *__ptr___1 ;
  struct task_struct *tmp___3 ;
  u64 volatile   *__ptr___2 ;
  struct task_struct *tmp___4 ;
  long volatile   __x___0 ;
  u8 volatile   *__ptr___3 ;
  struct task_struct *tmp___5 ;
  u16 volatile   *__ptr___4 ;
  struct task_struct *tmp___6 ;
  u32 volatile   *__ptr___5 ;
  struct task_struct *tmp___7 ;
  u64 volatile   *__ptr___6 ;
  struct task_struct *tmp___8 ;

  {
  mtd = cxt->mtd;
  tmp = mtd_div_by_eb((uint64_t )offset, mtd);
  start_page_offset = tmp * mtd->erasesize;
  start_page = (u32 )((unsigned long )start_page_offset / record_size);
  erase_pages = (u32 )((unsigned long )mtd->erasesize / record_size);
  tmp___0 = get_current();
  wait.flags = 0U;
  wait.private = (void *)tmp___0;
  wait.func = & default_wake_function;
  wait.task_list.next = 0;
  wait.task_list.prev = 0;
  __init_waitqueue_head(& wait_q, & __key);
  erase.mtd = mtd;
  erase.callback = & mtdoops_erase_callback;
  erase.addr = (uint64_t )offset;
  erase.len = (uint64_t )mtd->erasesize;
  erase.priv = (unsigned long )(& wait_q);
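  /* Expanded set_current_state(TASK_INTERRUPTIBLE): atomically exchange
     current->state with 1; the switch on 8UL picks the 64-bit xchg. */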
  __x = 1L;
  switch (8UL) {
  case 1UL: 
  tmp___1 = get_current();
  __ptr = (u8 volatile   *)(& tmp___1->state);
  __asm__  volatile   ("xchgb %0,%1": "=q" (__x), "+m" (*__ptr): "0" (__x): "memory");
  goto ldv_20340;
  case 2UL: 
  tmp___2 = get_current();
  __ptr___0 = (u16 volatile   *)(& tmp___2->state);
  __asm__  volatile   ("xchgw %0,%1": "=r" (__x), "+m" (*__ptr___0): "0" (__x): "memory");
  goto ldv_20340;
  case 4UL: 
  tmp___3 = get_current();
  __ptr___1 = (u32 volatile   *)(& tmp___3->state);
  __asm__  volatile   ("xchgl %0,%1": "=r" (__x), "+m" (*__ptr___1): "0" (__x): "memory");
  goto ldv_20340;
  case 8UL: 
  tmp___4 = get_current();
  __ptr___2 = (u64 volatile   *)(& tmp___4->state);
  __asm__  volatile   ("xchgq %0,%1": "=r" (__x), "+m" (*__ptr___2): "0" (__x): "memory");
  goto ldv_20340;
  default: 
  __xchg_wrong_size();
  }
  ldv_20340: 
  add_wait_queue(& wait_q, & wait);
  ret = mtd_erase(mtd, & erase);
  if (ret != 0) {
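    /* Erase submission failed: expanded set_current_state(TASK_RUNNING),
       i.e. exchange current->state with 0, before bailing out. */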
    __x___0 = 0L;
    switch (8UL) {
    case 1UL: 
    tmp___5 = get_current();
    __ptr___3 = (u8 volatile   *)(& tmp___5->state);
    __asm__  volatile   ("xchgb %0,%1": "=q" (__x___0), "+m" (*__ptr___3): "0" (__x___0): "memory");
    goto ldv_20352;
    case 2UL: 
    tmp___6 = get_current();
    __ptr___4 = (u16 volatile   *)(& tmp___6->state);
    __asm__  volatile   ("xchgw %0,%1": "=r" (__x___0), "+m" (*__ptr___4): "0" (__x___0): "memory");
    goto ldv_20352;
    case 4UL: 
    tmp___7 = get_current();
    __ptr___5 = (u32 volatile   *)(& tmp___7->state);
    __asm__  volatile   ("xchgl %0,%1": "=r" (__x___0), "+m" (*__ptr___5): "0" (__x___0): "memory");
    goto ldv_20352;
    case 8UL: 
    tmp___8 = get_current();
    __ptr___6 = (u64 volatile   *)(& tmp___8->state);
    __asm__  volatile   ("xchgq %0,%1": "=r" (__x___0), "+m" (*__ptr___6): "0" (__x___0): "memory");
    goto ldv_20352;
    default: 
    __xchg_wrong_size();
    }
    ldv_20352: 
    remove_wait_queue(& wait_q, & wait);
    printk("<4>mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n", erase.addr,
           erase.len, (char *)(& mtddev));
    return (ret);
  } else {

  }
  schedule();
  remove_wait_queue(& wait_q, & wait);
  page = (int )start_page;
  goto ldv_20362;
  ldv_20361: 
  mark_page_unused(cxt, page);
  page = page + 1;
  ldv_20362: ;
  if ((u32 )page < start_page + erase_pages) {
    goto ldv_20361;
  } else {

  }

  return (0);
}
}
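/* mtdoops_inc_counter(): advance to the next record page (wrapping at
 * oops_pages) and bump the sequence counter; if the new page still holds
 * data, schedule the erase work item. */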
static void mtdoops_inc_counter(struct mtdoops_context *cxt ) 
{ 
  int tmp ;

  {
  cxt->nextpage = cxt->nextpage + 1;
  if (cxt->nextpage >= cxt->oops_pages) {
    cxt->nextpage = 0;
  } else {

  }
  cxt->nextcount = cxt->nextcount + 1;
  if (cxt->nextcount == -1) {
    cxt->nextcount = 0;
  } else {

  }
  tmp = page_is_used(cxt, cxt->nextpage);
  if (tmp != 0) {
    schedule_work(& cxt->work_erase);
    return;
  } else {

  }
  printk("<7>mtdoops: ready %d, %d (no erase)\n", cxt->nextpage, cxt->nextcount);
  return;
}
}
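/* Erase work item: align nextpage to an eraseblock boundary, skip blocks
 * reported bad by block_isbad(), retry the erase up to three times, and on
 * a persistent -EIO (-5) mark the block bad and move on.  Gives up if every
 * block in the partition is bad. */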
static void mtdoops_workfunc_erase(struct work_struct *work ) 
{ 
  struct mtdoops_context *cxt ;
  struct work_struct  const  *__mptr ;
  struct mtd_info *mtd ;
  int i ;
  int j ;
  int ret ;
  int mod ;

  {
  __mptr = (struct work_struct  const  *)work;
  cxt = (struct mtdoops_context *)__mptr + 0xffffffffffffffd8UL;
  mtd = cxt->mtd;
  i = 0;
  if ((unsigned long )mtd == (unsigned long )((struct mtd_info *)0)) {
    return;
  } else {

  }
  mod = (int )(((unsigned long )cxt->nextpage * record_size) % (unsigned long )mtd->erasesize);
  if (mod != 0) {
    cxt->nextpage = (int )((unsigned int )cxt->nextpage + (unsigned int )((unsigned long )(mtd->erasesize - (uint32_t )mod) / record_size));
    if (cxt->nextpage >= cxt->oops_pages) {
      cxt->nextpage = 0;
    } else {

    }
  } else {

  }
  goto ldv_20381;
  ldv_20380: 
  ret = mtd_block_isbad(mtd, (loff_t )((unsigned long )cxt->nextpage * record_size));
  if (ret == 0) {
    goto ldv_20378;
  } else {

  }
  if (ret < 0) {
    printk("<3>mtdoops: block_isbad failed, aborting\n");
    return;
  } else {

  }
  badblock: 
  printk("<4>mtdoops: bad block at %08lx\n", (unsigned long )cxt->nextpage * record_size);
  i = i + 1;
  cxt->nextpage = (int )((unsigned int )cxt->nextpage + (unsigned int )((unsigned long )mtd->erasesize / record_size));
  if (cxt->nextpage >= cxt->oops_pages) {
    cxt->nextpage = 0;
  } else {

  }
  if ((unsigned long )i == (unsigned long )cxt->oops_pages / ((unsigned long )mtd->erasesize / record_size)) {
    printk("<3>mtdoops: all blocks bad!\n");
    return;
  } else {

  }
  ldv_20381: ;
  if ((unsigned long )mtd->block_isbad != (unsigned long )((int (*)(struct mtd_info * ,
                                                                    loff_t  ))0)) {
    goto ldv_20380;
  } else {

  }
  ldv_20378: 
  j = 0;
  ret = -1;
  goto ldv_20383;
  ldv_20382: 
  ret = mtdoops_erase_block(cxt, (int )((unsigned int )((unsigned long )cxt->nextpage) * (unsigned int )record_size));
  j = j + 1;
  ldv_20383: ;
  if (j <= 2 && ret < 0) {
    goto ldv_20382;
  } else {

  }

  if (ret >= 0) {
    printk("<7>mtdoops: ready %d, %d\n", cxt->nextpage, cxt->nextcount);
    return;
  } else {

  }
  if ((unsigned long )mtd->block_markbad != (unsigned long )((int (*)(struct mtd_info * ,
                                                                      loff_t  ))0) && ret == -5) {
    ret = (*(mtd->block_markbad))(mtd, (loff_t )((unsigned long )cxt->nextpage * record_size));
    if (ret < 0) {
      printk("<3>mtdoops: block_markbad failed, aborting\n");
      return;
    } else {

    }
  } else {

  }
  goto badblock;
}
}
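/* mtdoops_write(): stamp the 8-byte record header (sequence counter plus
 * the 0x5d005d00 record magic) and write oops_buf to the current record
 * page, using panic_write() in panic context and the regular write path
 * otherwise; afterwards the buffer is refilled with 0xff and the counter
 * is advanced. */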
static void mtdoops_write(struct mtdoops_context *cxt , int panic___0 ) 
{ 
  struct mtd_info *mtd ;
  size_t retlen ;
  u32 *hdr ;
  int ret ;

  {
  mtd = cxt->mtd;
  hdr = (u32 *)cxt->oops_buf;
  *hdr = (u32 )cxt->nextcount;
  *(hdr + 1UL) = 1560304896U;
  if (panic___0 != 0) {
    ret = mtd_panic_write(mtd, (loff_t )((unsigned long )cxt->nextpage * record_size),
                          record_size, & retlen, (u_char const   *)cxt->oops_buf);
  } else {
    ret = mtd_write(mtd, (loff_t )((unsigned long )cxt->nextpage * record_size), record_size,
                    & retlen, (u_char const   *)cxt->oops_buf);
  }
  if (retlen != record_size || ret < 0) {
    printk("<3>mtdoops: write failure at %ld (%td of %ld written), error %d\n", (unsigned long )cxt->nextpage * record_size,
           retlen, record_size, ret);
  } else {

  }
  mark_page_used(cxt, cxt->nextpage);
  memset(cxt->oops_buf, 255, record_size);
  mtdoops_inc_counter(cxt);
  return;
}
}
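/* Write work item: container_of back to the context, then a non-panic
 * write of the pending record. */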
static void mtdoops_workfunc_write(struct work_struct *work ) 
{ 
  struct mtdoops_context *cxt ;
  struct work_struct  const  *__mptr ;

  {
  __mptr = (struct work_struct  const  *)work;
  cxt = (struct mtdoops_context *)__mptr + 0xffffffffffffff88UL;
  mtdoops_write(cxt, 0);
  return;
}
}
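/* find_next_position(): scan every record page at attach time, read the
 * 8-byte header, rebuild the used-page bitmap, and pick the page holding
 * the highest sequence counter (with wrap-around handling of the 32-bit
 * counter); a counter of 0xffffffff means the page is empty. */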
static void find_next_position(struct mtdoops_context *cxt ) 
{ 
  struct mtd_info *mtd ;
  int ret ;
  int page ;
  int maxpos ;
  u32 count[2U] ;
  u32 maxcount ;
  size_t retlen ;
  int tmp ;
  int tmp___0 ;

  {
  mtd = cxt->mtd;
  maxpos = 0;
  maxcount = 4294967295U;
  page = 0;
  goto ldv_20411;
  ldv_20410: ;
  if ((unsigned long )mtd->block_isbad != (unsigned long )((int (*)(struct mtd_info * ,
                                                                    loff_t  ))0)) {
    tmp = mtd_block_isbad(mtd, (loff_t )((unsigned long )page * record_size));
    if (tmp != 0) {
      goto ldv_20409;
    } else {

    }
  } else {

  }
  mark_page_used(cxt, page);
  ret = mtd_read(mtd, (loff_t )((unsigned long )page * record_size), 8UL, & retlen,
                 (u_char *)(& count));
  if (retlen != 8UL) {
    printk("<3>mtdoops: read failure at %ld (%td of %d read), err %d\n", (unsigned long )page * record_size,
           retlen, 8, ret);
    goto ldv_20409;
  } else
  if (ret < 0) {
    tmp___0 = mtd_is_bitflip(ret);
    if (tmp___0 == 0) {
      printk("<3>mtdoops: read failure at %ld (%td of %d read), err %d\n", (unsigned long )page * record_size,
             retlen, 8, ret);
      goto ldv_20409;
    } else {

    }
  } else {

  }
  if (count[0] == 4294967295U && count[1] == 4294967295U) {
    mark_page_unused(cxt, page);
  } else {

  }
  if (count[0] == 4294967295U) {
    goto ldv_20409;
  } else {

  }
  if (maxcount == 4294967295U) {
    maxcount = count[0];
    maxpos = page;
  } else
  if (count[0] <= 1073741823U && maxcount > 3221225472U) {
    maxcount = count[0];
    maxpos = page;
  } else
  if (count[0] > maxcount && count[0] <= 3221225471U) {
    maxcount = count[0];
    maxpos = page;
  } else
  if ((count[0] > maxcount && count[0] > 3221225472U) && maxcount > 2147483648U) {
    maxcount = count[0];
    maxpos = page;
  } else {

  }
  ldv_20409: 
  page = page + 1;
  ldv_20411: ;
  if (cxt->oops_pages > page) {
    goto ldv_20410;
  } else {

  }

  if (maxcount == 4294967295U) {
    cxt->nextpage = 0;
    cxt->nextcount = 1;
    schedule_work(& cxt->work_erase);
    return;
  } else {

  }
  cxt->nextpage = maxpos;
  cxt->nextcount = (int )maxcount;
  mtdoops_inc_counter(cxt);
  return;
}
}
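/* kmsg dumper callback: copy the tails of the two log fragments (s1/s2)
 * into oops_buf after the 8-byte header, clamped to record_size.  For
 * non-oops reasons the record is written immediately (only if the device
 * supports panic_write); for an oops with dump_oops enabled, the write
 * work item is scheduled instead. */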
static void mtdoops_do_dump(struct kmsg_dumper *dumper , enum kmsg_dump_reason reason ,
                            char const   *s1 , unsigned long l1 , char const   *s2 ,
                            unsigned long l2 ) 
{ 
  struct mtdoops_context *cxt ;
  struct kmsg_dumper  const  *__mptr ;
  unsigned long s1_start ;
  unsigned long s2_start ;
  unsigned long l1_cpy ;
  unsigned long l2_cpy ;
  char *dst ;
  unsigned long _min1 ;
  unsigned long _min2 ;
  unsigned long _min1___0 ;
  unsigned long _min2___0 ;
  size_t __len ;
  void *__ret ;
  size_t __len___0 ;
  void *__ret___0 ;

  {
  __mptr = (struct kmsg_dumper  const  *)dumper;
  cxt = (struct mtdoops_context *)__mptr;
  if (((unsigned int )reason != 0U && (unsigned int )reason != 1U) && (unsigned int )reason != 2U) {
    return;
  } else {

  }
  if ((unsigned int )reason == 0U && dump_oops == 0) {
    return;
  } else {

  }
  dst = (char *)cxt->oops_buf + 8U;
  _min1 = l2;
  _min2 = record_size - 8UL;
  l2_cpy = _min1 < _min2 ? _min1 : _min2;
  _min1___0 = l1;
  _min2___0 = (record_size - l2_cpy) - 8UL;
  l1_cpy = _min1___0 < _min2___0 ? _min1___0 : _min2___0;
  s2_start = l2 - l2_cpy;
  s1_start = l1 - l1_cpy;
  __len = l1_cpy;
  __ret = memcpy((void *)dst, (void const   *)(s1 + s1_start), __len);
  __len___0 = l2_cpy;
  __ret___0 = memcpy((void *)(dst + l1_cpy), (void const   *)(s2 + s2_start),
                     __len___0);
  if ((unsigned int )reason != 0U) {
    if ((unsigned long )(cxt->mtd)->panic_write == (unsigned long )((int (*)(struct mtd_info * ,
                                                                             loff_t  ,
                                                                             size_t  ,
                                                                             size_t * ,
                                                                             u_char const   * ))0)) {
      printk("<3>mtdoops: Cannot write from panic without panic_write\n");
    } else {
      mtdoops_write(cxt, 1);
    }
    return;
  } else {

  }
  schedule_work(& cxt->work_write);
  return;
}
}
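/* MTD "add" notifier: match the device against the mtddev parameter,
 * sanity-check the partition (at least two eraseblocks, eraseblock size
 * >= record_size, total size <= 8 MiB), allocate the used-page bitmap,
 * register the kmsg dumper and locate the next free record page. */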
static void mtdoops_notify_add(struct mtd_info *mtd ) 
{ 
  struct mtdoops_context *cxt ;
  u64 mtdoops_pages ;
  u64 tmp ;
  int err ;
  int tmp___0 ;
  void *tmp___1 ;

  {
  cxt = & oops_cxt;
  tmp = div_u64(mtd->size, (u32 )record_size);
  mtdoops_pages = tmp;
  tmp___0 = strcmp(mtd->name, (char const   *)(& mtddev));
  if (tmp___0 == 0) {
    cxt->mtd_index = mtd->index;
  } else {

  }
  if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) {
    return;
  } else {

  }
  if (mtd->size < (uint64_t )(mtd->erasesize * 2U)) {
    printk("<3>mtdoops: MTD partition %d not big enough for mtdoops\n", mtd->index);
    return;
  } else {

  }
  if ((unsigned long )mtd->erasesize < record_size) {
    printk("<3>mtdoops: eraseblock size of MTD partition %d too small\n", mtd->index);
    return;
  } else {

  }
  if (mtd->size > 8388608ULL) {
    printk("<3>mtdoops: mtd%d is too large (limit is %d MiB)\n", mtd->index, 8);
    return;
  } else {

  }
  tmp___1 = vmalloc((unsigned long )(((mtdoops_pages + 63ULL) / 64ULL) * 8ULL));
  cxt->oops_page_used = (unsigned long *)tmp___1;
  if ((unsigned long )cxt->oops_page_used == (unsigned long )((unsigned long *)0)) {
    printk("<3>mtdoops: could not allocate page array\n");
    return;
  } else {

  }
  cxt->dump.dump = & mtdoops_do_dump;
  err = kmsg_dump_register(& cxt->dump);
  if (err != 0) {
    printk("<3>mtdoops: registering kmsg dumper failed, error %d\n", err);
    vfree((void const   *)cxt->oops_page_used);
    cxt->oops_page_used = 0;
    return;
  } else {

  }
  cxt->mtd = mtd;
  cxt->oops_pages = (int )((unsigned long )((int )mtd->size) / record_size);
  find_next_position(cxt);
  printk("<6>mtdoops: Attached to MTD device %d\n", mtd->index);
  return;
}
}
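/* MTD "remove" notifier: unregister the kmsg dumper, drop the mtd
 * reference and flush both work items. */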
static void mtdoops_notify_remove(struct mtd_info *mtd ) 
{ 
  struct mtdoops_context *cxt ;
  int tmp ;

  {
  cxt = & oops_cxt;
  if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0) {
    return;
  } else {

  }
  tmp = kmsg_dump_unregister(& cxt->dump);
  if (tmp < 0) {
    printk("<4>mtdoops: could not unregister kmsg_dumper\n");
  } else {

  }
  cxt->mtd = 0;
  flush_work_sync(& cxt->work_erase);
  flush_work_sync(& cxt->work_write);
  return;
}
}
static struct mtd_notifier mtdoops_notifier  =    {& mtdoops_notify_add, & mtdoops_notify_remove, {0, 0}};
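/* Module init: require a non-empty mtddev, require record_size to be a
 * multiple of 4096 no smaller than 4096, allocate and 0xff-fill oops_buf,
 * set up the erase/write work items (the lockdep/atomic_long boilerplate
 * is the CIL expansion of INIT_WORK()) and register the MTD notifier. */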
static int mtdoops_init(void) 
{ 
  struct mtdoops_context *cxt ;
  int mtd_index ;
  char *endp ;
  size_t tmp ;
  unsigned long tmp___0 ;
  struct lock_class_key __key ;
  atomic_long_t __constr_expr_0 ;
  struct lock_class_key __key___0 ;
  atomic_long_t __constr_expr_1 ;

  {
  cxt = & oops_cxt;
  tmp = strlen((char const   *)(& mtddev));
  if (tmp == 0UL) {
    printk("<3>mtdoops: mtd device (mtddev=name/number) must be supplied\n");
    return (-22);
  } else {

  }
  if ((record_size & 4095UL) != 0UL) {
    printk("<3>mtdoops: record_size must be a multiple of 4096\n");
    return (-22);
  } else {

  }
  if (record_size <= 4095UL) {
    printk("<3>mtdoops: record_size must be over 4096 bytes\n");
    return (-22);
  } else {

  }
  cxt->mtd_index = -1;
  tmp___0 = simple_strtoul((char const   *)(& mtddev), & endp, 0U);
  mtd_index = (int )tmp___0;
  if ((int )((signed char )*endp) == 0) {
    cxt->mtd_index = mtd_index;
  } else {

  }
  cxt->oops_buf = vmalloc(record_size);
  if ((unsigned long )cxt->oops_buf == (unsigned long )((void *)0)) {
    printk("<3>mtdoops: failed to allocate buffer workspace\n");
    return (-12);
  } else {

  }
  memset(cxt->oops_buf, 255, record_size);
  __init_work(& cxt->work_erase, 0);
  __constr_expr_0.counter = 2097664L;
  cxt->work_erase.data = __constr_expr_0;
  lockdep_init_map(& cxt->work_erase.lockdep_map, "(&cxt->work_erase)", & __key, 0);
  INIT_LIST_HEAD(& cxt->work_erase.entry);
  cxt->work_erase.func = & mtdoops_workfunc_erase;
  __init_work(& cxt->work_write, 0);
  __constr_expr_1.counter = 2097664L;
  cxt->work_write.data = __constr_expr_1;
  lockdep_init_map(& cxt->work_write.lockdep_map, "(&cxt->work_write)", & __key___0,
                   0);
  INIT_LIST_HEAD(& cxt->work_write.entry);
  cxt->work_write.func = & mtdoops_workfunc_write;
  register_mtd_user(& mtdoops_notifier);
  return (0);
}
}
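/* Module exit: unregister the notifier and free oops_buf and the
 * used-page bitmap. */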
static void mtdoops_exit(void) 
{ 
  struct mtdoops_context *cxt ;

  {
  cxt = & oops_cxt;
  unregister_mtd_user(& mtdoops_notifier);
  vfree((void const   *)cxt->oops_buf);
  vfree((void const   *)cxt->oops_page_used);
  return;
}
}
void ldv_check_final_state(void) ;
void ldv_initialize(void) ;
extern void ldv_handler_precall(void) ;
extern int nondet_int(void) ;
int LDV_IN_INTERRUPT  ;
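/* LDV environment model: main() runs mtdoops_init(), then
 * nondeterministically invokes the mtd_notifier callbacks (remove/add) in
 * a loop driven by nondet_int(), and finally runs mtdoops_exit() and
 * checks that all modelled spinlocks have been released. */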
int main(void) 
{ 
  struct mtd_info *var_group1 ;
  int ldv_s_mtdoops_notifier_mtd_notifier ;
  int tmp ;
  int tmp___0 ;
  int tmp___1 ;

  {
  ldv_s_mtdoops_notifier_mtd_notifier = 0;
  LDV_IN_INTERRUPT = 1;
  ldv_initialize();
  ldv_handler_precall();
  tmp = mtdoops_init();
  if (tmp != 0) {
    goto ldv_final;
  } else {

  }
  goto ldv_20503;
  ldv_20502: 
  tmp___0 = nondet_int();
  switch (tmp___0) {
  case 0: ;
  if (ldv_s_mtdoops_notifier_mtd_notifier == 0) {
    ldv_handler_precall();
    mtdoops_notify_remove(var_group1);
    ldv_s_mtdoops_notifier_mtd_notifier = 0;
  } else {

  }
  goto ldv_20499;
  case 1: 
  ldv_handler_precall();
  mtdoops_notify_add(var_group1);
  goto ldv_20499;
  default: ;
  goto ldv_20499;
  }
  ldv_20499: ;
  ldv_20503: 
  tmp___1 = nondet_int();
  if (tmp___1 != 0 || ldv_s_mtdoops_notifier_mtd_notifier != 0) {
    goto ldv_20502;
  } else {

  }

  ldv_handler_precall();
  mtdoops_exit();
  ldv_final: 
  ldv_check_final_state();
  return (0);
}
}
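/* LDV spinlock wrappers: each instrumented spin_lock call site first
 * updates the corresponding lock model, then performs the original lock
 * operation. */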
void ldv___ldv_spin_lock_4(spinlock_t *ldv_func_arg1 ) 
{ 


  {
  ldv_spin_lock_node_size_lock_of_pglist_data();
  __ldv_spin_lock(ldv_func_arg1);
  return;
}
}
void ldv___ldv_spin_lock_6(spinlock_t *ldv_func_arg1 ) 
{ 


  {
  ldv_spin_lock_siglock_of_sighand_struct();
  __ldv_spin_lock(ldv_func_arg1);
  return;
}
}
long ldv__builtin_expect(long exp , long c ) ;
__inline static void ldv_error(void) 
{ 


  {
  LDV_ERROR: {reach_error();abort();}
}
}
extern int ldv_undef_int(void) ;
long ldv__builtin_expect(long exp , long c ) 
{ 


  {
  return (exp);
}
}
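/* Per-lock LDV models: each spinlock is tracked by an int, 1 = unlocked,
 * 2 = locked.  Locking an already-locked lock (or unlocking an unlocked
 * one) calls ldv_error(), which reaches the verification error label. */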
static int ldv_spin_alloc_lock_of_task_struct  ;
void ldv_spin_lock_alloc_lock_of_task_struct(void) 
{ 


  {
  if (ldv_spin_alloc_lock_of_task_struct == 1) {

  } else {
    ldv_error();
  }
  ldv_spin_alloc_lock_of_task_struct = 2;
  return;
}
}
void ldv_spin_unlock_alloc_lock_of_task_struct(void) 
{ 


  {
  if (ldv_spin_alloc_lock_of_task_struct == 2) {

  } else {
    ldv_error();
  }
  ldv_spin_alloc_lock_of_task_struct = 1;
  return;
}
}
int ldv_spin_trylock_alloc_lock_of_task_struct(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  if (ldv_spin_alloc_lock_of_task_struct == 1) {

  } else {
    ldv_error();
  }
  is_spin_held_by_another_thread = ldv_undef_int();
  if (is_spin_held_by_another_thread) {
    return (0);
  } else {
    ldv_spin_alloc_lock_of_task_struct = 2;
    return (1);
  }
}
}
void ldv_spin_unlock_wait_alloc_lock_of_task_struct(void) 
{ 


  {
  if (ldv_spin_alloc_lock_of_task_struct == 1) {

  } else {
    ldv_error();
  }
  return;
}
}
int ldv_spin_is_locked_alloc_lock_of_task_struct(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  is_spin_held_by_another_thread = ldv_undef_int();
  if (ldv_spin_alloc_lock_of_task_struct == 1 && ! is_spin_held_by_another_thread) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_spin_can_lock_alloc_lock_of_task_struct(void) 
{ 
  int tmp ;
  int tmp___0 ;

  {
  tmp = ldv_spin_is_locked_alloc_lock_of_task_struct();
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  return (tmp___0);
}
}
int ldv_spin_is_contended_alloc_lock_of_task_struct(void) 
{ 
  int is_spin_contended ;

  {
  is_spin_contended = ldv_undef_int();
  if (is_spin_contended) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_atomic_dec_and_lock_alloc_lock_of_task_struct(void) 
{ 
  int atomic_value_after_dec ;

  {
  if (ldv_spin_alloc_lock_of_task_struct == 1) {

  } else {
    ldv_error();
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_spin_alloc_lock_of_task_struct = 2;
    return (1);
  } else {

  }
  return (0);
}
}
static int ldv_spin_lock_of_NOT_ARG_SIGN  ;
void ldv_spin_lock_lock_of_NOT_ARG_SIGN(void) 
{ 


  {
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 1) {

  } else {
    ldv_error();
  }
  ldv_spin_lock_of_NOT_ARG_SIGN = 2;
  return;
}
}
void ldv_spin_unlock_lock_of_NOT_ARG_SIGN(void) 
{ 


  {
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 2) {

  } else {
    ldv_error();
  }
  ldv_spin_lock_of_NOT_ARG_SIGN = 1;
  return;
}
}
int ldv_spin_trylock_lock_of_NOT_ARG_SIGN(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 1) {

  } else {
    ldv_error();
  }
  is_spin_held_by_another_thread = ldv_undef_int();
  if (is_spin_held_by_another_thread) {
    return (0);
  } else {
    ldv_spin_lock_of_NOT_ARG_SIGN = 2;
    return (1);
  }
}
}
void ldv_spin_unlock_wait_lock_of_NOT_ARG_SIGN(void) 
{ 


  {
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 1) {

  } else {
    ldv_error();
  }
  return;
}
}
int ldv_spin_is_locked_lock_of_NOT_ARG_SIGN(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  is_spin_held_by_another_thread = ldv_undef_int();
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 1 && ! is_spin_held_by_another_thread) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_spin_can_lock_lock_of_NOT_ARG_SIGN(void) 
{ 
  int tmp ;
  int tmp___0 ;

  {
  tmp = ldv_spin_is_locked_lock_of_NOT_ARG_SIGN();
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  return (tmp___0);
}
}
int ldv_spin_is_contended_lock_of_NOT_ARG_SIGN(void) 
{ 
  int is_spin_contended ;

  {
  is_spin_contended = ldv_undef_int();
  if (is_spin_contended) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_atomic_dec_and_lock_lock_of_NOT_ARG_SIGN(void) 
{ 
  int atomic_value_after_dec ;

  {
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 1) {

  } else {
    ldv_error();
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_spin_lock_of_NOT_ARG_SIGN = 2;
    return (1);
  } else {

  }
  return (0);
}
}
static int ldv_spin_node_size_lock_of_pglist_data  ;
void ldv_spin_lock_node_size_lock_of_pglist_data(void) 
{ 


  {
  if (ldv_spin_node_size_lock_of_pglist_data == 1) {

  } else {
    ldv_error();
  }
  ldv_spin_node_size_lock_of_pglist_data = 2;
  return;
}
}
void ldv_spin_unlock_node_size_lock_of_pglist_data(void) 
{ 


  {
  if (ldv_spin_node_size_lock_of_pglist_data == 2) {

  } else {
    ldv_error();
  }
  ldv_spin_node_size_lock_of_pglist_data = 1;
  return;
}
}
int ldv_spin_trylock_node_size_lock_of_pglist_data(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  if (ldv_spin_node_size_lock_of_pglist_data == 1) {

  } else {
    ldv_error();
  }
  is_spin_held_by_another_thread = ldv_undef_int();
  if (is_spin_held_by_another_thread) {
    return (0);
  } else {
    ldv_spin_node_size_lock_of_pglist_data = 2;
    return (1);
  }
}
}
void ldv_spin_unlock_wait_node_size_lock_of_pglist_data(void) 
{ 


  {
  if (ldv_spin_node_size_lock_of_pglist_data == 1) {

  } else {
    ldv_error();
  }
  return;
}
}
int ldv_spin_is_locked_node_size_lock_of_pglist_data(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  is_spin_held_by_another_thread = ldv_undef_int();
  if (ldv_spin_node_size_lock_of_pglist_data == 1 && ! is_spin_held_by_another_thread) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_spin_can_lock_node_size_lock_of_pglist_data(void) 
{ 
  int tmp ;
  int tmp___0 ;

  {
  tmp = ldv_spin_is_locked_node_size_lock_of_pglist_data();
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  return (tmp___0);
}
}
int ldv_spin_is_contended_node_size_lock_of_pglist_data(void) 
{ 
  int is_spin_contended ;

  {
  is_spin_contended = ldv_undef_int();
  if (is_spin_contended) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_atomic_dec_and_lock_node_size_lock_of_pglist_data(void) 
{ 
  int atomic_value_after_dec ;

  {
  if (ldv_spin_node_size_lock_of_pglist_data == 1) {

  } else {
    ldv_error();
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_spin_node_size_lock_of_pglist_data = 2;
    return (1);
  } else {

  }
  return (0);
}
}
static int ldv_spin_siglock_of_sighand_struct  ;
void ldv_spin_lock_siglock_of_sighand_struct(void) 
{ 


  {
  if (ldv_spin_siglock_of_sighand_struct == 1) {

  } else {
    ldv_error();
  }
  ldv_spin_siglock_of_sighand_struct = 2;
  return;
}
}
void ldv_spin_unlock_siglock_of_sighand_struct(void) 
{ 


  {
  if (ldv_spin_siglock_of_sighand_struct == 2) {

  } else {
    ldv_error();
  }
  ldv_spin_siglock_of_sighand_struct = 1;
  return;
}
}
int ldv_spin_trylock_siglock_of_sighand_struct(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  if (ldv_spin_siglock_of_sighand_struct == 1) {

  } else {
    ldv_error();
  }
  is_spin_held_by_another_thread = ldv_undef_int();
  if (is_spin_held_by_another_thread) {
    return (0);
  } else {
    ldv_spin_siglock_of_sighand_struct = 2;
    return (1);
  }
}
}
void ldv_spin_unlock_wait_siglock_of_sighand_struct(void) 
{ 


  {
  if (ldv_spin_siglock_of_sighand_struct == 1) {

  } else {
    ldv_error();
  }
  return;
}
}
int ldv_spin_is_locked_siglock_of_sighand_struct(void) 
{ 
  int is_spin_held_by_another_thread ;

  {
  is_spin_held_by_another_thread = ldv_undef_int();
  if (ldv_spin_siglock_of_sighand_struct == 1 && ! is_spin_held_by_another_thread) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_spin_can_lock_siglock_of_sighand_struct(void) 
{ 
  int tmp ;
  int tmp___0 ;

  {
  tmp = ldv_spin_is_locked_siglock_of_sighand_struct();
  if (tmp) {
    tmp___0 = 0;
  } else {
    tmp___0 = 1;
  }
  return (tmp___0);
}
}
int ldv_spin_is_contended_siglock_of_sighand_struct(void) 
{ 
  int is_spin_contended ;

  {
  is_spin_contended = ldv_undef_int();
  if (is_spin_contended) {
    return (0);
  } else {
    return (1);
  }
}
}
int ldv_atomic_dec_and_lock_siglock_of_sighand_struct(void) 
{ 
  int atomic_value_after_dec ;

  {
  if (ldv_spin_siglock_of_sighand_struct == 1) {

  } else {
    ldv_error();
  }
  atomic_value_after_dec = ldv_undef_int();
  if (atomic_value_after_dec == 0) {
    ldv_spin_siglock_of_sighand_struct = 2;
    return (1);
  } else {

  }
  return (0);
}
}
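/* ldv_initialize() puts every modelled lock into the unlocked state;
 * ldv_check_final_state() asserts that they are all unlocked again when
 * the environment model finishes. */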
void ldv_initialize(void) 
{ 


  {
  ldv_spin_alloc_lock_of_task_struct = 1;
  ldv_spin_lock_of_NOT_ARG_SIGN = 1;
  ldv_spin_node_size_lock_of_pglist_data = 1;
  ldv_spin_siglock_of_sighand_struct = 1;
  return;
}
}
void ldv_check_final_state(void) 
{ 


  {
  if (ldv_spin_alloc_lock_of_task_struct == 1) {

  } else {
    ldv_error();
  }
  if (ldv_spin_lock_of_NOT_ARG_SIGN == 1) {

  } else {
    ldv_error();
  }
  if (ldv_spin_node_size_lock_of_pglist_data == 1) {

  } else {
    ldv_error();
  }
  if (ldv_spin_siglock_of_sighand_struct == 1) {

  } else {
    ldv_error();
  }
  return;
}
}