extern void abort(void);
#include <assert.h>
void reach_error() { assert(0); }
/* Generated by CIL v. 1.5.1 */
/* print_CIL_Input is false */
typedef signed char __s8; typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef long long __s64; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef short s16; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef __kernel_long_t __kernel_off_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __be16; typedef __u32 __be32; typedef __u64 __be64; typedef __u32 __wsum; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __u32 nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; typedef unsigned int oom_flags_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct module; typedef void (*ctor_fn_t)(void); struct file_operations; struct device; struct net_device; struct completion; struct pt_regs; struct pid; typedef u16 __ticket_t; typedef u32 __ticketpair_t; struct __raw_tickets { __ticket_t head ; __ticket_t tail ; }; union __anonunion_ldv_2024_8 { __ticketpair_t head_tail ; struct __raw_tickets tickets ; }; struct arch_spinlock { union __anonunion_ldv_2024_8 ldv_2024 ; }; typedef struct arch_spinlock arch_spinlock_t; struct __anonstruct_ldv_2031_10 { u32 read ; s32 write ; }; union __anonunion_arch_rwlock_t_9 { s64 lock ; struct __anonstruct_ldv_2031_10 ldv_2031 ; }; typedef union __anonunion_arch_rwlock_t_9 arch_rwlock_t; struct task_struct; struct lockdep_map; struct mm_struct; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct_ldv_2096_12 { unsigned int a ; unsigned
int b ; }; struct __anonstruct_ldv_2111_13 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion_ldv_2112_11 { struct __anonstruct_ldv_2096_12 ldv_2096 ; struct __anonstruct_ldv_2111_13 ldv_2111 ; }; struct desc_struct { union __anonunion_ldv_2112_11 ldv_2112 ; }; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_15 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_15 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct cpumask; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion_ldv_2767_18 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion_ldv_2767_18 ldv_2767 ; }; struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct cpumask { unsigned long bits[64U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct static_key; struct exec_domain; struct map_segment; struct exec_domain { char const *name ; void (*handler)(int , struct pt_regs * ) ; unsigned char pers_low ; unsigned char pers_high ; unsigned long *signal_map ; unsigned long *signal_invmap ; struct map_segment *err_map ; struct map_segment *socktype_map ; struct map_segment *sockopt_map ; struct map_segment *af_map ; struct module *module ; struct exec_domain *next ; }; struct seq_operations; struct i387_fsave_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct_ldv_5125_23 { u64 rip ; u64 rdp ; }; struct __anonstruct_ldv_5131_24 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion_ldv_5132_22 { struct __anonstruct_ldv_5125_23 ldv_5125 ; struct __anonstruct_ldv_5131_24 ldv_5131 ; }; union __anonunion_ldv_5141_25 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct i387_fxsave_struct { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion_ldv_5132_22 ldv_5132 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion_ldv_5141_25 ldv_5141 ; }; struct i387_soft_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct ymmh_struct { u32 ymmh_space[64U] ; }; struct xsave_hdr_struct { u64 xstate_bv ; u64 reserved1[2U] ; u64 reserved2[5U] ; }; struct xsave_struct { struct i387_fxsave_struct i387 ; struct xsave_hdr_struct xsave_hdr ; struct ymmh_struct ymmh ; }; union thread_xstate { struct i387_fsave_struct fsave ; struct i387_fxsave_struct fxsave ; struct i387_soft_struct soft ; struct xsave_struct xsave ; }; struct fpu { unsigned int last_cpu ; unsigned int has_fpu ; union thread_xstate *state ; }; struct kmem_cache; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned long usersp ; unsigned short es ; 
unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; struct fpu fpu ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; struct __anonstruct_mm_segment_t_27 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_27 mm_segment_t; typedef atomic64_t atomic_long_t; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; } __attribute__((__packed__)) ; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 2 ; unsigned char hardirqs_off : 1 ; unsigned short references : 11 ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct_ldv_5960_29 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion_ldv_5961_28 { struct raw_spinlock rlock ; struct __anonstruct_ldv_5960_29 ldv_5960 ; }; struct spinlock { union __anonunion_ldv_5961_28 ldv_5961 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_30 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_30 rwlock_t; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; char const *name ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_32 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_33 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_34 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion_ldv_6198_31 { struct __anonstruct_futex_32 futex ; struct __anonstruct_nanosleep_33 nanosleep ; struct __anonstruct_poll_34 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion_ldv_6198_31 ldv_6198 ; }; struct thread_info { struct task_struct *task ; struct exec_domain *exec_domain ; __u32 flags ; __u32 status ; __u32 cpu ; int preempt_count ; mm_segment_t addr_limit ; 
struct restart_block restart_block ; void *sysenter_return ; unsigned char sig_on_uaccess_error : 1 ; unsigned char uaccess_err : 1 ; }; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct __anonstruct_seqlock_t_35 { unsigned int sequence ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_35 seqlock_t; struct seqcount { unsigned int sequence ; }; typedef struct seqcount seqcount_t; struct __anonstruct_nodemask_t_36 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_36 nodemask_t; struct rw_semaphore; struct rw_semaphore { long count ; raw_spinlock_t wait_lock ; struct list_head wait_list ; struct lockdep_map dep_map ; }; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct notifier_block; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct tvec_base; struct timer_list { struct list_head entry ; unsigned long expires ; struct tvec_base *base ; void (*function)(unsigned long ) ; unsigned long data ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct workqueue_struct; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; int cpu ; }; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct blocking_notifier_head { struct rw_semaphore rwsem ; struct notifier_block *head ; }; struct ctl_table; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; }; struct dev_pm_qos; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool ignore_children ; bool early_init ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue 
; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct pm_subsys_data *subsys_data ; struct dev_pm_qos *qos ; }; struct dev_pm_domain { struct dev_pm_ops ops ; }; struct __anonstruct_mm_context_t_101 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; }; typedef struct __anonstruct_mm_context_t_101 mm_context_t; struct vm_area_struct; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct inode; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct uprobe; struct uprobe_task { enum uprobe_task_state state ; struct arch_uprobe_task autask ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; unsigned long vaddr ; }; struct xol_area { wait_queue_head_t wq ; atomic_t slot_count ; unsigned long *bitmap ; struct page *page ; unsigned long vaddr ; }; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; union __anonunion_ldv_12308_129 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct_ldv_12318_133 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion_ldv_12320_132 { atomic_t _mapcount ; struct __anonstruct_ldv_12318_133 ldv_12318 ; int units ; }; struct __anonstruct_ldv_12322_131 { union __anonunion_ldv_12320_132 ldv_12320 ; atomic_t _count ; }; union __anonunion_ldv_12323_130 { unsigned long counters ; struct __anonstruct_ldv_12322_131 ldv_12322 ; }; struct __anonstruct_ldv_12324_128 { union __anonunion_ldv_12308_129 ldv_12308 ; union __anonunion_ldv_12323_130 ldv_12323 ; }; struct __anonstruct_ldv_12331_135 { struct page *next ; int pages ; int pobjects ; }; struct slab; union __anonunion_ldv_12335_134 { struct list_head lru ; struct __anonstruct_ldv_12331_135 ldv_12331 ; struct list_head list ; struct slab *slab_page ; }; union __anonunion_ldv_12340_136 { unsigned long private ; struct kmem_cache *slab_cache ; struct page *first_page ; }; struct page { unsigned long flags ; struct address_space *mapping ; struct __anonstruct_ldv_12324_128 ldv_12324 ; union __anonunion_ldv_12335_134 ldv_12335 ; union __anonunion_ldv_12340_136 ldv_12340 ; unsigned long debug_flags ; int _last_nid ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_linear_138 { struct rb_node rb ; unsigned long rb_subtree_last ; }; union __anonunion_shared_137 { struct __anonstruct_linear_138 linear ; struct list_head nonlinear ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; struct rb_node vm_rb ; unsigned long rb_subtree_gap ; struct 
mm_struct *vm_mm ; pgprot_t vm_page_prot ; unsigned long vm_flags ; union __anonunion_shared_137 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; struct vm_area_struct *mmap_cache ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; void (*unmap_area)(struct mm_struct * , unsigned long ) ; unsigned long mmap_base ; unsigned long task_size ; unsigned long cached_hole_size ; unsigned long free_area_cache ; unsigned long highest_vm_end ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long nr_ptes ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[44U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct hlist_head ioctx_list ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm *mmu_notifier_mm ; pgtable_t pmd_huge_pte ; struct cpumask cpumask_allocation ; unsigned long numa_next_scan ; unsigned long numa_next_reset ; unsigned long numa_scan_offset ; int numa_scan_seq ; int first_nid ; struct uprobes_state uprobes_state ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; }; struct shrinker { int (*shrink)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; struct list_head list ; atomic_long_t nr_in_batch ; }; struct file_ra_state; struct user_struct; struct writeback_control; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *page ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; int (*migrate)(struct vm_area_struct * , nodemask_t const * , nodemask_t const * , unsigned long ) ; int (*remap_pages)(struct vm_area_struct * , unsigned long , unsigned long , unsigned long ) ; }; struct kvec; struct ratelimit_state { raw_spinlock_t lock ; int interval ; int burst ; int printed ; int missed ; unsigned long begin ; }; struct mem_cgroup; struct 
__anonstruct_ldv_15144_140 { struct mem_cgroup *memcg ; struct list_head list ; struct kmem_cache *root_cache ; bool dead ; atomic_t nr_pages ; struct work_struct destroy ; }; union __anonunion_ldv_15145_139 { struct kmem_cache *memcg_caches[0U] ; struct __anonstruct_ldv_15144_140 ldv_15144 ; }; struct memcg_cache_params { bool is_root_cache ; union __anonunion_ldv_15145_139 ldv_15145 ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; void const *(*namespace)(struct kobject * , struct attribute const * ) ; }; struct sysfs_dirent; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct sysfs_dirent *sd ; struct kref kref ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct kmem_cache_cpu { void **freelist ; unsigned long tid ; struct page *page ; struct page *partial ; unsigned int stat[26U] ; }; struct kmem_cache_node { spinlock_t list_lock ; unsigned long nr_partial ; struct list_head partial ; atomic_long_t nr_slabs ; atomic_long_t total_objects ; struct list_head full ; }; struct kmem_cache_order_objects { unsigned long x ; }; struct kmem_cache { struct kmem_cache_cpu *cpu_slab ; unsigned long flags ; unsigned long min_partial ; int size ; int object_size ; int offset ; int cpu_partial ; struct kmem_cache_order_objects oo ; struct kmem_cache_order_objects max ; struct kmem_cache_order_objects min ; gfp_t allocflags ; int refcount ; void (*ctor)(void * ) ; int inuse ; int align ; int reserved ; char const 
*name ; struct list_head list ; struct kobject kobj ; struct memcg_cache_params *memcg_params ; int max_attr_size ; int remote_node_defrag_ratio ; struct kmem_cache_node *node[1024U] ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct kvec { void *iov_base ; size_t iov_len ; }; typedef unsigned short __kernel_sa_family_t; struct __kernel_sockaddr_storage { __kernel_sa_family_t ss_family ; char __data[126U] ; }; struct cred; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iovec *msg_iov ; __kernel_size_t msg_iovlen ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; }; union __anonunion_in6_u_141 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_141 in6_u ; }; typedef u32 rpc_authflavor_t; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; struct xdr_buf { struct kvec head[1U] ; struct kvec tail[1U] ; struct page **pages ; unsigned int page_base ; unsigned int page_len ; unsigned int flags ; unsigned int buflen ; unsigned int len ; }; struct sk_buff; struct xdr_stream { __be32 *p ; struct xdr_buf *buf ; __be32 *end ; struct kvec *iov ; struct kvec scratch ; struct page **page_ptr ; unsigned int nwords ; }; struct rpc_procinfo; struct rpc_cred; struct rpc_message { struct rpc_procinfo *rpc_proc ; void *rpc_argp ; void *rpc_resp ; struct rpc_cred *rpc_cred ; }; struct rpc_call_ops; struct rpc_wait_queue; struct rpc_wait { struct list_head list ; struct list_head links ; struct list_head timer_list ; unsigned long expires ; }; struct rpc_clnt; struct rpc_rqst; union __anonunion_u_142 { struct work_struct tk_work ; struct rpc_wait tk_wait ; }; struct rpc_task { atomic_t tk_count ; struct list_head tk_task ; struct rpc_clnt *tk_client ; struct rpc_rqst *tk_rqstp ; struct rpc_message tk_msg ; void (*tk_callback)(struct rpc_task * ) ; void (*tk_action)(struct rpc_task * ) ; struct rpc_call_ops const *tk_ops ; void *tk_calldata ; unsigned long tk_timeout ; unsigned long tk_runstate ; struct workqueue_struct *tk_workqueue ; struct rpc_wait_queue *tk_waitqueue ; union __anonunion_u_142 u ; ktime_t tk_start ; pid_t tk_owner ; int tk_status ; unsigned short tk_flags ; unsigned short tk_timeouts ; unsigned short tk_pid ; unsigned char tk_priority : 2 ; unsigned char tk_garb_retry : 2 ; unsigned char tk_cred_retry : 2 ; unsigned char tk_rebind_retry : 2 ; }; struct rpc_call_ops { void (*rpc_call_prepare)(struct rpc_task * , void * ) ; void (*rpc_call_done)(struct rpc_task * , void * ) ; void (*rpc_count_stats)(struct rpc_task * , void * ) ; void (*rpc_release)(void * ) ; }; struct rpc_task_setup { struct rpc_task *task ; struct rpc_clnt *rpc_client ; struct rpc_message const *rpc_message ; struct rpc_call_ops const *callback_ops ; void *callback_data ; struct workqueue_struct *workqueue ; unsigned short flags ; signed char priority ; }; struct rpc_timer { struct timer_list timer ; struct list_head list ; unsigned long expires ; }; struct rpc_wait_queue { spinlock_t lock ; struct list_head tasks[4U] ; pid_t owner ; unsigned char maxpriority ; unsigned char priority ; unsigned char nr ; unsigned short qlen ; struct rpc_timer timer_list ; char const *name ; }; struct net; 
struct rpc_timeout { unsigned long to_initval ; unsigned long to_maxval ; unsigned long to_increment ; unsigned int to_retries ; unsigned char to_exponential ; }; enum rpc_display_format_t { RPC_DISPLAY_ADDR = 0, RPC_DISPLAY_PORT = 1, RPC_DISPLAY_PROTO = 2, RPC_DISPLAY_HEX_ADDR = 3, RPC_DISPLAY_HEX_PORT = 4, RPC_DISPLAY_NETID = 5, RPC_DISPLAY_MAX = 6 } ; struct rpc_xprt; struct rpc_rqst { struct rpc_xprt *rq_xprt ; struct xdr_buf rq_snd_buf ; struct xdr_buf rq_rcv_buf ; struct rpc_task *rq_task ; struct rpc_cred *rq_cred ; __be32 rq_xid ; int rq_cong ; u32 rq_seqno ; int rq_enc_pages_num ; struct page **rq_enc_pages ; void (*rq_release_snd_buf)(struct rpc_rqst * ) ; struct list_head rq_list ; __u32 *rq_buffer ; size_t rq_callsize ; size_t rq_rcvsize ; size_t rq_xmit_bytes_sent ; size_t rq_reply_bytes_recvd ; struct xdr_buf rq_private_buf ; unsigned long rq_majortimeo ; unsigned long rq_timeout ; ktime_t rq_rtt ; unsigned int rq_retries ; unsigned int rq_connect_cookie ; u32 rq_bytes_sent ; ktime_t rq_xtime ; int rq_ntrans ; struct list_head rq_bc_list ; unsigned long rq_bc_pa_state ; struct list_head rq_bc_pa_list ; }; struct rpc_xprt_ops { void (*set_buffer_size)(struct rpc_xprt * , size_t , size_t ) ; int (*reserve_xprt)(struct rpc_xprt * , struct rpc_task * ) ; void (*release_xprt)(struct rpc_xprt * , struct rpc_task * ) ; void (*alloc_slot)(struct rpc_xprt * , struct rpc_task * ) ; void (*rpcbind)(struct rpc_task * ) ; void (*set_port)(struct rpc_xprt * , unsigned short ) ; void (*connect)(struct rpc_task * ) ; void *(*buf_alloc)(struct rpc_task * , size_t ) ; void (*buf_free)(void * ) ; int (*send_request)(struct rpc_task * ) ; void (*set_retrans_timeout)(struct rpc_task * ) ; void (*timer)(struct rpc_task * ) ; void (*release_request)(struct rpc_task * ) ; void (*close)(struct rpc_xprt * ) ; void (*destroy)(struct rpc_xprt * ) ; void (*print_stats)(struct rpc_xprt * , struct seq_file * ) ; }; struct svc_xprt; struct svc_serv; struct __anonstruct_stat_143 { unsigned long bind_count ; unsigned long connect_count ; unsigned long connect_start ; unsigned long connect_time ; unsigned long sends ; unsigned long recvs ; unsigned long bad_xids ; unsigned long max_slots ; unsigned long long req_u ; unsigned long long bklog_u ; unsigned long long sending_u ; unsigned long long pending_u ; }; struct rpc_xprt { atomic_t count ; struct rpc_xprt_ops *ops ; struct rpc_timeout const *timeout ; struct __kernel_sockaddr_storage addr ; size_t addrlen ; int prot ; unsigned long cong ; unsigned long cwnd ; size_t max_payload ; unsigned int tsh_size ; struct rpc_wait_queue binding ; struct rpc_wait_queue sending ; struct rpc_wait_queue pending ; struct rpc_wait_queue backlog ; struct list_head free ; unsigned int max_reqs ; unsigned int min_reqs ; atomic_t num_reqs ; unsigned long state ; unsigned char resvport : 1 ; unsigned int swapper ; unsigned int bind_index ; unsigned long bind_timeout ; unsigned long reestablish_timeout ; unsigned int connect_cookie ; struct work_struct task_cleanup ; struct timer_list timer ; unsigned long last_used ; unsigned long idle_timeout ; spinlock_t transport_lock ; spinlock_t reserve_lock ; u32 xid ; struct rpc_task *snd_task ; struct svc_xprt *bc_xprt ; struct svc_serv *bc_serv ; unsigned int bc_alloc_count ; spinlock_t bc_pa_lock ; struct list_head bc_pa_list ; struct list_head recv ; struct __anonstruct_stat_143 stat ; struct net *xprt_net ; char const *servername ; char const *address_strings[6U] ; }; struct group_info; struct auth_cred { uid_t uid ; gid_t gid ; 
struct group_info *group_info ; char const *principal ; unsigned char machine_cred : 1 ; }; struct rpc_auth; struct rpc_credops; struct rpc_cred { struct hlist_node cr_hash ; struct list_head cr_lru ; struct callback_head cr_rcu ; struct rpc_auth *cr_auth ; struct rpc_credops const *cr_ops ; unsigned long cr_magic ; unsigned long cr_expire ; unsigned long cr_flags ; atomic_t cr_count ; uid_t cr_uid ; }; struct rpc_cred_cache; struct rpc_authops; struct rpc_auth { unsigned int au_cslack ; unsigned int au_rslack ; unsigned int au_verfsize ; unsigned int au_flags ; struct rpc_authops const *au_ops ; rpc_authflavor_t au_flavor ; atomic_t au_count ; struct rpc_cred_cache *au_credcache ; }; struct rpc_authops { struct module *owner ; rpc_authflavor_t au_flavor ; char *au_name ; struct rpc_auth *(*create)(struct rpc_clnt * , rpc_authflavor_t ) ; void (*destroy)(struct rpc_auth * ) ; struct rpc_cred *(*lookup_cred)(struct rpc_auth * , struct auth_cred * , int ) ; struct rpc_cred *(*crcreate)(struct rpc_auth * , struct auth_cred * , int ) ; int (*pipes_create)(struct rpc_auth * ) ; void (*pipes_destroy)(struct rpc_auth * ) ; int (*list_pseudoflavors)(rpc_authflavor_t * , int ) ; }; struct rpc_credops { char const *cr_name ; int (*cr_init)(struct rpc_auth * , struct rpc_cred * ) ; void (*crdestroy)(struct rpc_cred * ) ; int (*crmatch)(struct auth_cred * , struct rpc_cred * , int ) ; struct rpc_cred *(*crbind)(struct rpc_task * , struct rpc_cred * , int ) ; __be32 *(*crmarshal)(struct rpc_task * , __be32 * ) ; int (*crrefresh)(struct rpc_task * ) ; __be32 *(*crvalidate)(struct rpc_task * , __be32 * ) ; int (*crwrap_req)(struct rpc_task * , void (*)(void * , struct xdr_stream * , void * ) , void * , __be32 * , void * ) ; int (*crunwrap_resp)(struct rpc_task * , int (*)(void * , struct xdr_stream * , void * ) , void * , __be32 * , void * ) ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct nameidata; struct path; struct vfsmount; struct __anonstruct_ldv_17263_145 { u32 hash ; u32 len ; }; union __anonunion_ldv_17265_144 { struct __anonstruct_ldv_17263_145 ldv_17263 ; u64 hash_len ; }; struct qstr { union __anonunion_ldv_17265_144 ldv_17265 ; unsigned char const *name ; }; struct dentry_operations; struct super_block; union __anonunion_d_u_146 { struct list_head d_child ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; unsigned int d_count ; spinlock_t d_lock ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; union __anonunion_d_u_146 d_u ; struct list_head d_subdirs ; struct hlist_node d_alias ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct inode const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct inode const * , struct dentry const * , struct inode const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; }; 
struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct user_namespace; typedef uid_t kuid_t; typedef gid_t kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct radix_tree_node; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; }; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct block_device; struct io_context; struct cgroup_subsys_state; struct export_operations; struct kiocb; struct pipe_inode_info; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct fs_disk_quota { __s8 d_version ; __s8 d_flags ; __u16 d_fieldmask ; __u32 d_id ; __u64 d_blk_hardlimit ; __u64 d_blk_softlimit ; __u64 d_ino_hardlimit ; __u64 d_ino_softlimit ; __u64 d_bcount ; __u64 d_icount ; __s32 d_itimer ; __s32 d_btimer ; __u16 d_iwarns ; __u16 d_bwarns ; __s32 d_padding2 ; __u64 d_rtb_hardlimit ; __u64 d_rtb_softlimit ; __u64 d_rtbcount ; __s32 d_rtbtimer ; __u16 d_rtbwarns ; __s16 d_padding3 ; char d_padding4[8U] ; }; struct fs_qfilestat { __u64 qfs_ino ; __u64 qfs_nblks ; __u32 qfs_nextents ; }; typedef struct fs_qfilestat fs_qfilestat_t; struct fs_quota_stat { __s8 qs_version ; __u16 qs_flags ; __s8 qs_pad ; fs_qfilestat_t qs_uquota ; fs_qfilestat_t qs_gquota ; __u32 qs_incoredqs ; __s32 qs_btimelimit ; __s32 qs_itimelimit ; __s32 qs_rtbtimelimit ; __u16 qs_bwarnlimit ; __u16 qs_iwarnlimit ; }; struct dquot; typedef __kernel_uid32_t projid_t; typedef projid_t kprojid_t; struct if_dqinfo { __u64 dqi_bgrace ; __u64 dqi_igrace ; __u32 dqi_flags ; __u32 dqi_valid ; }; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion_ldv_18411_148 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion_ldv_18411_148 ldv_18411 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_maxblimit ; qsize_t dqi_maxilimit ; void *dqi_priv ; }; struct dquot { struct hlist_node 
dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_on_meta)(struct super_block * , int , int ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*get_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*set_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct fs_disk_quota * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct fs_disk_quota * ) ; int (*get_xstate)(struct super_block * , struct fs_quota_stat * ) ; int (*set_xstate)(struct super_block * , unsigned int , int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct rw_semaphore dqptr_sem ; struct inode *files[2U] ; struct mem_dqinfo info[2U] ; struct quota_format_ops const *ops[2U] ; }; union __anonunion_arg_150 { char *buf ; void *data ; }; struct __anonstruct_read_descriptor_t_149 { size_t written ; size_t count ; union __anonunion_arg_150 arg ; int error ; }; typedef struct __anonstruct_read_descriptor_t_149 read_descriptor_t; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned long ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(int , struct kiocb * , struct iovec const * , loff_t , unsigned long ) ; int (*get_xip_mem)(struct address_space * , unsigned long , int , void ** , unsigned long * ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int (*is_partially_uptodate)(struct page * , read_descriptor_t * , unsigned long ) ; int 
(*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct backing_dev_info; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; unsigned int i_mmap_writable ; struct rb_root i_mmap ; struct list_head i_mmap_nonlinear ; struct mutex i_mmap_mutex ; unsigned long nrpages ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; struct backing_dev_info *backing_dev_info ; spinlock_t private_lock ; struct list_head private_list ; void *private_data ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; }; struct posix_acl; struct inode_operations; union __anonunion_ldv_18845_151 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion_ldv_18865_152 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock; struct cdev; union __anonunion_ldv_18881_153 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion_ldv_18845_151 ldv_18845 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion_ldv_18865_152 ldv_18865 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; struct file_operations const *i_fop ; struct file_lock *i_flock ; struct address_space i_data ; struct dquot *i_dquot[2U] ; struct list_head i_devices ; union __anonunion_ldv_18881_153 ldv_18881 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; atomic_t i_readcount ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_154 { struct list_head fu_list ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_154 f_u ; struct path f_path ; struct file_operations const *f_op ; spinlock_t f_lock ; int f_sb_list_cpu ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const 
*f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; unsigned long f_mnt_write_state ; }; struct files_struct; typedef struct files_struct *fl_owner_t; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , struct file_lock * , int ) ; void (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock ** , int ) ; }; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_156 { struct list_head link ; int state ; }; union __anonunion_fl_u_155 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_156 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_155 fl_u ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct file_system_type; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head *s_files ; struct list_head s_mounts ; struct list_head s_dentry_lru ; int s_nr_dentry_unused ; spinlock_t s_inode_lru_lock ; struct list_head s_inode_lru ; int s_nr_inodes_unused ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount ; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; 
struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*aio_read)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; ssize_t (*aio_write)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; int (*readdir)(struct file * , void * , int (*)(void * , char const * , int , loff_t , u64 , unsigned int ) ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; int (*show_fdinfo)(struct seq_file * , struct file * ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; void *(*follow_link)(struct dentry * , struct nameidata * ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct dentry * , struct nameidata * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; }; struct super_operations { struct inode *(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct 
writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_fs)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; int (*nr_cached_objects)(struct super_block * ) ; void (*free_cached_objects)(struct super_block * , int ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; typedef int read_proc_t(char * , char ** , off_t , int , int * , void * ); typedef int write_proc_t(struct file * , char const * , unsigned long , void * ); struct proc_dir_entry { unsigned int low_ino ; umode_t mode ; nlink_t nlink ; kuid_t uid ; kgid_t gid ; loff_t size ; struct inode_operations const *proc_iops ; struct file_operations const *proc_fops ; struct proc_dir_entry *next ; struct proc_dir_entry *parent ; struct proc_dir_entry *subdir ; void *data ; read_proc_t *read_proc ; write_proc_t *write_proc ; atomic_t count ; int pde_users ; struct completion *pde_unload_completion ; struct list_head pde_openers ; spinlock_t pde_unload_lock ; u8 namelen ; char name[] ; }; struct nsproxy; struct ctl_table_header; struct __anonstruct_sigset_t_157 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_157 sigset_t; struct siginfo; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_159 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_160 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_161 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_162 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__sigfault_163 { void *_addr ; short _addr_lsb ; }; struct __anonstruct__sigpoll_164 { long _band ; int _fd ; }; struct __anonstruct__sigsys_165 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_158 { int _pad[28U] ; struct 
__anonstruct__kill_159 _kill ; struct __anonstruct__timer_160 _timer ; struct __anonstruct__rt_161 _rt ; struct __anonstruct__sigchld_162 _sigchld ; struct __anonstruct__sigfault_163 _sigfault ; struct __anonstruct__sigpoll_164 _sigpoll ; struct __anonstruct__sigsys_165 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_158 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { struct list_head list ; sigset_t signal ; }; struct rpc_program; struct rpc_stat { struct rpc_program const *program ; unsigned int netcnt ; unsigned int netudpcnt ; unsigned int nettcpcnt ; unsigned int nettcpconn ; unsigned int netreconn ; unsigned int rpccnt ; unsigned int rpcretrans ; unsigned int rpcauthrefresh ; unsigned int rpcgarbage ; }; struct svc_program; struct svc_stat { struct svc_program *program ; unsigned int netcnt ; unsigned int netudpcnt ; unsigned int nettcpcnt ; unsigned int nettcpconn ; unsigned int rpccnt ; unsigned int rpcbadfmt ; unsigned int rpcbadauth ; unsigned int rpcbadclnt ; }; struct rpc_rtt { unsigned long timeo ; unsigned long srtt[5U] ; unsigned long sdrtt[5U] ; int ntimeouts[5U] ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 optimistic_dad ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; __s32 ndisc_notify ; void *sysctl ; }; enum ldv_18810 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_18810 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , struct poll_table_struct * ) ; int (*ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct kiocb * , struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct kiocb * , struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , 
struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; void (*set_peek_off)(struct sock * , int ) ; }; struct exception_table_entry { int insn ; int fixup ; }; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct dma_map_ops; struct dev_archdata { struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct iommu_ops; struct iommu_group; struct bus_attribute { struct attribute attr ; ssize_t (*show)(struct bus_type * , char * ) ; ssize_t (*store)(struct bus_type * , char const * , size_t ) ; }; struct device_attribute; struct driver_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct bus_attribute *bus_attrs ; struct device_attribute *dev_attrs ; struct driver_attribute *drv_attrs ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops *iommu_ops ; struct subsys_private *p ; }; struct device_type; struct of_device_id; struct acpi_device_id; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; struct of_device_id const *of_match_table ; struct acpi_device_id const *acpi_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct driver_attribute { struct attribute attr ; ssize_t (*show)(struct device_driver * , char * ) ; ssize_t (*store)(struct device_driver * , char const * , size_t ) ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct device_attribute *dev_attrs ; struct bin_attribute *dev_bin_attrs ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , struct class_attribute * , char const * , size_t ) ; void const *(*namespace)(struct class * , struct class_attribute const * ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , 
char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct acpi_dev_node { void *handle ; }; struct dma_coherent_mem; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct dev_archdata archdata ; struct device_node *of_node ; struct acpi_dev_node acpi_node ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; typedef s32 dma_cookie_t; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t resolution ; ktime_t (*get_time)(void) ; ktime_t softirq_time ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; unsigned int active_bases ; unsigned int clock_was_set ; ktime_t expires_next ; int hres_active ; int hang_detected ; unsigned long nr_events ; unsigned long nr_retries ; unsigned long nr_hangs ; ktime_t max_hang_time ; struct hrtimer_clock_base clock_base[3U] ; }; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct 
scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; typedef u64 netdev_features_t; struct nf_conntrack { atomic_t use ; }; struct nf_bridge_info { atomic_t use ; unsigned int mask ; struct net_device *physindev ; struct net_device *physoutdev ; unsigned long data[4U] ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; typedef unsigned int sk_buff_data_t; struct sec_path; struct __anonstruct_ldv_24616_171 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion_ldv_24617_170 { __wsum csum ; struct __anonstruct_ldv_24616_171 ldv_24616 ; }; union __anonunion_ldv_24656_172 { __u32 mark ; __u32 dropcount ; __u32 avail_size ; }; struct sk_buff { struct sk_buff *next ; struct sk_buff *prev ; ktime_t tstamp ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; struct sec_path *sp ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; union __anonunion_ldv_24617_170 ldv_24617 ; __u32 priority ; unsigned char local_df : 1 ; unsigned char cloned : 1 ; unsigned char ip_summed : 2 ; unsigned char nohdr : 1 ; unsigned char nfctinfo : 3 ; unsigned char pkt_type : 3 ; unsigned char fclone : 2 ; unsigned char ipvs_property : 1 ; unsigned char peeked : 1 ; unsigned char nf_trace : 1 ; __be16 protocol ; void (*destructor)(struct sk_buff * ) ; struct nf_conntrack *nfct ; struct sk_buff *nfct_reasm ; struct nf_bridge_info *nf_bridge ; int skb_iif ; __u32 rxhash ; __u16 vlan_tci ; __u16 tc_index ; __u16 tc_verd ; __u16 queue_mapping ; unsigned char ndisc_nodetype : 2 ; unsigned char pfmemalloc : 1 ; unsigned char ooo_okay : 1 ; unsigned char l4_rxhash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char head_frag : 1 ; unsigned char encapsulation : 1 ; dma_cookie_t dma_cookie ; __u32 secmark ; union __anonunion_ldv_24656_172 ldv_24656 ; sk_buff_data_t inner_transport_header ; sk_buff_data_t inner_network_header ; sk_buff_data_t transport_header ; sk_buff_data_t network_header ; sk_buff_data_t mac_header ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; struct plist_head { struct list_head node_list ; }; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; struct pm_qos_request { struct plist_node node ; int pm_qos_class ; struct delayed_work work ; }; struct pm_qos_flags_request { struct list_head node ; s32 flags ; }; enum dev_pm_qos_req_type { DEV_PM_QOS_LATENCY = 1, DEV_PM_QOS_FLAGS = 2 } ; union __anonunion_data_174 { struct plist_node pnode ; struct pm_qos_flags_request flr ; }; struct dev_pm_qos_request { enum dev_pm_qos_req_type type ; union __anonunion_data_174 data ; struct device *dev ; }; enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ; struct pm_qos_constraints { struct 
plist_head list ; s32 target_value ; s32 default_value ; enum pm_qos_type type ; struct blocking_notifier_head *notifiers ; }; struct pm_qos_flags { struct list_head list ; s32 effective_flags ; }; struct dev_pm_qos { struct pm_qos_constraints latency ; struct pm_qos_flags flags ; struct dev_pm_qos_request *latency_req ; struct dev_pm_qos_request *flags_req ; }; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; struct __anonstruct_sync_serial_settings_175 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_175 sync_serial_settings; struct __anonstruct_te1_settings_176 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_176 te1_settings; struct __anonstruct_raw_hdlc_proto_177 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_177 raw_hdlc_proto; struct __anonstruct_fr_proto_178 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_178 fr_proto; struct __anonstruct_fr_proto_pvc_179 { unsigned int dlci ; }; typedef struct __anonstruct_fr_proto_pvc_179 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_180 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_180 fr_proto_pvc_info; struct __anonstruct_cisco_proto_181 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_181 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_182 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_182 ifs_ifsu ; }; union __anonunion_ifr_ifrn_183 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_184 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_183 ifr_ifrn ; union __anonunion_ifr_ifru_184 ifr_ifru ; }; struct io_event { __u64 data ; __u64 obj ; __s64 res ; __s64 res2 ; }; typedef unsigned long cputime_t; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex_waiter; struct rlimit { unsigned long rlim_cur ; unsigned long rlim_max ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; 
unsigned long max ; }; struct ctl_table_root; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct_ldv_27101_187 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion_ldv_27103_186 { struct __anonstruct_ldv_27101_187 ldv_27101 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion_ldv_27103_186 ldv_27103 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_header * , struct ctl_table * ) ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_list; union __anonunion_ldv_27181_188 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion_ldv_27190_189 { time_t expiry ; time_t revoked_at ; }; union __anonunion_type_data_190 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_191 { unsigned long value ; void *rcudata ; void *data ; struct keyring_list *subscriptions ; }; struct key { atomic_t usage ; key_serial_t serial ; union __anonunion_ldv_27181_188 ldv_27181 ; struct key_type *type ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion_ldv_27190_189 ldv_27190 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; char *description ; union __anonunion_type_data_190 type_data ; union __anonunion_payload_191 payload ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct thread_group_cred; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *session_keyring ; struct key *process_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; struct thread_group_cred *tgcred ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct llist_node; struct llist_node { struct llist_node *next ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct cfs_rq; struct task_group; struct kioctx; union __anonunion_ki_obj_192 { void *user ; struct task_struct *tsk ; }; struct eventfd_ctx; 
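/* The declarations below cover AIO (kiocb/kioctx), signal handling, scheduler entities, and task_struct. */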
struct kiocb { struct list_head ki_run_list ; unsigned long ki_flags ; int ki_users ; unsigned int ki_key ; struct file *ki_filp ; struct kioctx *ki_ctx ; int (*ki_cancel)(struct kiocb * , struct io_event * ) ; ssize_t (*ki_retry)(struct kiocb * ) ; void (*ki_dtor)(struct kiocb * ) ; union __anonunion_ki_obj_192 ki_obj ; __u64 ki_user_data ; loff_t ki_pos ; void *private ; unsigned short ki_opcode ; size_t ki_nbytes ; char *ki_buf ; size_t ki_left ; struct iovec ki_inline_vec ; struct iovec *ki_iovec ; unsigned long ki_nr_segs ; unsigned long ki_cur_seg ; struct list_head ki_list ; struct list_head ki_batch ; struct eventfd_ctx *ki_eventfd ; }; struct aio_ring_info { unsigned long mmap_base ; unsigned long mmap_size ; struct page **ring_pages ; spinlock_t ring_lock ; long nr_pages ; unsigned int nr ; unsigned int tail ; struct page *internal_pages[8U] ; }; struct kioctx { atomic_t users ; int dead ; struct mm_struct *mm ; unsigned long user_id ; struct hlist_node list ; wait_queue_head_t wait ; spinlock_t ctx_lock ; int reqs_active ; struct list_head active_reqs ; struct list_head run_list ; unsigned int max_reqs ; struct aio_ring_info ring_info ; struct delayed_work wq ; struct callback_head callback_head ; }; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct cputime { cputime_t utime ; cputime_t stime ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime cputime ; int running ; raw_spinlock_t lock ; }; struct autogroup; struct tty_struct; struct taskstats; struct tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; struct tty_audit_buf *tty_audit_buf ; struct rw_semaphore group_rwsem ; oom_flags_t oom_flags ; short oom_score_adj ; short oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; 
atomic_t processes ; atomic_t files ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; struct timespec blkio_start ; struct timespec blkio_end ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; struct timespec freepages_start ; struct timespec freepages_end ; u64 freepages_delay ; u32 freepages_count ; }; struct uts_namespace; struct rq; struct sched_class { struct sched_class const *next ; void (*enqueue_task)(struct rq * , struct task_struct * , int ) ; void (*dequeue_task)(struct rq * , struct task_struct * , int ) ; void (*yield_task)(struct rq * ) ; bool (*yield_to_task)(struct rq * , struct task_struct * , bool ) ; void (*check_preempt_curr)(struct rq * , struct task_struct * , int ) ; struct task_struct *(*pick_next_task)(struct rq * ) ; void (*put_prev_task)(struct rq * , struct task_struct * ) ; int (*select_task_rq)(struct task_struct * , int , int ) ; void (*migrate_task_rq)(struct task_struct * , int ) ; void (*pre_schedule)(struct rq * , struct task_struct * ) ; void (*post_schedule)(struct rq * ) ; void (*task_waking)(struct task_struct * ) ; void (*task_woken)(struct rq * , struct task_struct * ) ; void (*set_cpus_allowed)(struct task_struct * , struct cpumask const * ) ; void (*rq_online)(struct rq * ) ; void (*rq_offline)(struct rq * ) ; void (*set_curr_task)(struct rq * ) ; void (*task_tick)(struct rq * , struct task_struct * , int ) ; void (*task_fork)(struct task_struct * ) ; void (*switched_from)(struct rq * , struct task_struct * ) ; void (*switched_to)(struct rq * , struct task_struct * ) ; void (*prio_changed)(struct rq * , struct task_struct * , int ) ; unsigned int (*get_rr_interval)(struct rq * , struct task_struct * ) ; void (*task_move_group)(struct task_struct * , int ) ; }; struct load_weight { unsigned long weight ; unsigned long inv_weight ; }; struct sched_avg { u32 runnable_avg_sum ; u32 runnable_avg_period ; u64 last_runnable_update ; s64 decay_count ; unsigned long load_avg_contrib ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; struct sched_avg avg ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout 
; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct memcg_batch_info { int do_batch ; struct mem_cgroup *memcg ; unsigned long nr_pages ; unsigned long memsw_nr_pages ; }; struct css_set; struct compat_robust_list_head; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct hlist_head preempt_notifiers ; unsigned char fpu_counter ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; unsigned char brk_randomized : 1 ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned int jobctl ; unsigned int personality ; unsigned char did_exec : 1 ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char no_new_privs : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; pid_t pid ; pid_t tgid ; unsigned long stack_canary ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; struct cputime prev_cputime ; unsigned long nvcsw ; unsigned long nivcsw ; struct timespec start_time ; struct timespec real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const *cred ; char comm[16U] ; int link_count ; int total_link_count ; struct sysv_sem sysvsem ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct plist_head pi_waiters ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; 
struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; int numa_scan_seq ; int numa_migrate_seq ; unsigned int numa_scan_period ; u64 node_stamp ; struct callback_head numa_work ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_batch_info memcg_batch ; unsigned int memcg_kmem_skip_account ; atomic_t ptrace_bp_refcnt ; struct uprobe_task *utask ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char reserved1[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; __u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 
rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[52U] ; }; struct ethtool_flow_ext { __u8 padding[2U] ; unsigned char h_dest[6U] ; __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int (*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void 
(*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh_indir)(struct net_device * , u32 * ) ; int (*set_rxfh_indir)(struct net_device * , u32 const * ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[31U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[27U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[5U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[5U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[15U] ; }; struct udp_mib { unsigned long mibs[7U] ; }; struct linux_mib { unsigned long mibs[92U] ; }; struct linux_xfrm_mib { unsigned long mibs[27U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics[1U] ; struct ipstats_mib *ip_statistics[1U] ; struct linux_mib *net_statistics[1U] ; struct udp_mib *udp_statistics[1U] ; struct udp_mib *udplite_statistics[1U] ; struct icmp_mib *icmp_statistics[1U] ; struct icmpmsg_mib *icmpmsg_statistics ; struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6[1U] ; struct udp_mib *udplite_stats_in6[1U] ; struct ipstats_mib *ipv6_statistics[1U] ; struct icmpv6_mib *icmpv6_statistics[1U] ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics[1U] ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { int nqueues ; atomic_t mem ; struct list_head lru_list ; int timeout ; int high_thresh ; int low_thresh ; }; 
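/* Per-network-namespace state follows: struct netns_* sub-structures and struct net itself. */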
struct tcpm_hash_bucket; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; struct sock *fibnl ; struct sock **icmp_sk ; struct inet_peer_base *peers ; struct tcpm_hash_bucket *tcp_metrics_hash ; unsigned int tcp_metrics_hash_log ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; kgid_t sysctl_ping_group_range[2U] ; long sysctl_tcp_mem[3U] ; atomic_t dev_addr_genid ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; }; struct neighbour; struct dst_ops { unsigned short family ; __be16 protocol ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int icmpv6_time ; }; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table *ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; 
struct sock *tcp_sk ; struct sock *igmp_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics[1U] ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; char *sctp_hmac_alg ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; struct static_key { atomic_t enabled ; }; struct nlattr; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; unsigned int htable_size ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; int sysctl_events ; unsigned int sysctl_events_retry_timeout ; int sysctl_acct ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int sysctl_log_invalid ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; struct nf_ip_net nf_ct_proto ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; }; struct xfrm_policy_hash { struct hlist_head *table ; 
unsigned int hmask ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; wait_queue_head_t km_waitq ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[6U] ; struct xfrm_policy_hash policy_bydst[6U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct user_namespace *user_ns ; unsigned int proc_inum ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct sock *diag_nlsk ; atomic_t rt_genid ; }; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct dsa_chip_data { struct device *mii_bus ; int sw_addr ; char *port_names[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; __be16 tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct mii_bus; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct mii_bus *master_mii_bus ; u32 dsa_port_mask ; u32 phys_port_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; __be16 tag_protocol ; int priv_size ; char *(*probe)(struct mii_bus * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void 
(*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 (*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; u8 (*setapp)(struct net_device * , u8 , u16 , u8 ) ; u8 (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; 
__u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct idr_layer { unsigned long bitmap ; struct idr_layer *ary[64U] ; int count ; int layer ; struct callback_head callback_head ; }; struct idr { struct idr_layer *top ; struct idr_layer *id_free ; int layers ; int id_free_cnt ; spinlock_t lock ; }; struct ida_bitmap { long nr_busy ; unsigned long bitmap[15U] ; }; struct ida { struct idr idr ; struct ida_bitmap *free_bitmap ; }; struct xattr_handler { char const *prefix ; int flags ; size_t (*list)(struct dentry * , char * , size_t , char const * , size_t , int ) ; int (*get)(struct dentry * , char const * , void * , size_t , int ) ; int (*set)(struct dentry * , char const * , void const * , size_t , int , int ) ; }; struct simple_xattrs { struct list_head head ; spinlock_t lock ; }; struct cgroupfs_root; struct cgroup_subsys; struct cgroup; struct css_id; struct cgroup_subsys_state { struct cgroup *cgroup ; atomic_t refcnt ; unsigned long flags ; struct css_id *id ; struct work_struct dput_work ; }; struct cgroup { unsigned long flags ; atomic_t count ; int id ; struct list_head sibling ; struct list_head children ; struct list_head files ; struct cgroup *parent ; struct dentry *dentry ; struct cgroup_subsys_state *subsys[12U] ; struct cgroupfs_root *root ; struct cgroup *top_cgroup ; struct list_head css_sets ; struct list_head allcg_node ; struct list_head cft_q_node ; struct list_head release_list ; struct list_head pidlists ; struct mutex pidlist_mutex ; struct callback_head callback_head ; struct list_head event_list ; spinlock_t event_list_lock ; struct simple_xattrs xattrs ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head cg_links ; struct cgroup_subsys_state *subsys[12U] ; struct callback_head callback_head ; }; struct cgroup_map_cb { int (*fill)(struct cgroup_map_cb * , char const * , u64 ) ; void *state ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct simple_xattrs xattrs ; int (*open)(struct inode * , struct file * ) ; ssize_t (*read)(struct cgroup * , struct cftype * , struct file * , char * , size_t , loff_t * ) ; u64 (*read_u64)(struct cgroup * , struct cftype * ) ; s64 (*read_s64)(struct cgroup * , struct cftype * ) ; int (*read_map)(struct cgroup * , struct cftype * , struct cgroup_map_cb * ) ; int (*read_seq_string)(struct cgroup * , struct cftype * , struct seq_file * ) ; ssize_t (*write)(struct cgroup * , struct cftype * , struct file * , char const * , size_t , loff_t * ) ; int (*write_u64)(struct cgroup * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup * , struct cftype * , s64 ) ; int (*write_string)(struct cgroup * , struct cftype * , char const * ) ; int (*trigger)(struct cgroup * , unsigned 
int ) ; int (*release)(struct inode * , struct file * ) ; int (*register_event)(struct cgroup * , struct cftype * , struct eventfd_ctx * , char const * ) ; void (*unregister_event)(struct cgroup * , struct cftype * , struct eventfd_ctx * ) ; }; struct cftype_set { struct list_head node ; struct cftype *cfts ; }; struct cgroup_taskset; struct cgroup_subsys { struct cgroup_subsys_state *(*css_alloc)(struct cgroup * ) ; int (*css_online)(struct cgroup * ) ; void (*css_offline)(struct cgroup * ) ; void (*css_free)(struct cgroup * ) ; int (*can_attach)(struct cgroup * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup * , struct cgroup * , struct task_struct * ) ; void (*bind)(struct cgroup * ) ; int subsys_id ; int active ; int disabled ; int early_init ; bool use_id ; bool broken_hierarchy ; bool warned_broken_hierarchy ; char const *name ; struct cgroupfs_root *root ; struct list_head sibling ; struct idr idr ; spinlock_t id_lock ; struct list_head cftsets ; struct cftype *base_cftypes ; struct cftype_set base_cftset ; struct module *module ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct kernel_symbol { unsigned long value ; char const *name ; }; struct xfrm_policy; struct xfrm_state; struct request_sock; struct security_mnt_opts { char **mnt_opts ; int *mnt_opts_flags ; int num_mnt_opts ; }; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 tx_rate ; __u32 spoofchk ; }; struct netpoll_info; struct phy_device; struct wireless_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; 
unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*rebuild)(struct sk_buff * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct work_struct free_work ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t (*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , unsigned short ) ; int (*ndo_vlan_rx_kill_vid)(struct 
net_device * , unsigned short ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * , gfp_t ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_tx_rate)(struct net_device * , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct net_device * , unsigned char const * ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , int ) ; int (*ndo_bridge_setlink)(struct net_device * , struct nlmsghdr * ) ; int (*ndo_bridge_getlink)(struct sk_buff * , u32 , u32 , struct net_device * ) ; }; struct iw_handler_def; struct iw_public_data; struct vlan_info; struct in_device; struct dn_dev; struct inet6_dev; struct cpu_rmap; struct pcpu_lstats; struct pcpu_tstats; struct pcpu_dstats; union __anonunion_ldv_37041_219 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_tstats *tstats ; struct pcpu_dstats *dstats ; }; struct garp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; unsigned int irq ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; netdev_features_t hw_enc_features ; int ifindex ; int iflink ; struct net_device_stats stats ; atomic_long_t rx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom 
; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned char neigh_priv_len ; unsigned short dev_id ; spinlock_t addr_list_lock ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; bool uc_promisc ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; unsigned long last_rx ; struct net_device *master ; unsigned char *dev_addr ; struct netdev_hw_addr_list dev_addrs ; unsigned char broadcast[32U] ; struct kset *queues_kset ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; struct cpu_rmap *rx_cpu_rmap ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct netdev_queue *ingress_queue ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; int watchdog_timeo ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct hlist_node index_hlist ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; struct net *nd_net ; union __anonunion_ldv_37041_219 ldv_37041 ; struct garp_port *garp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; int group ; struct pm_qos_request pm_qos_req ; }; struct res_counter { unsigned long long usage ; unsigned long long max_usage ; unsigned long long limit ; unsigned long long soft_limit ; unsigned long long failcnt ; spinlock_t lock ; struct res_counter *parent ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct sk_filter { atomic_t refcnt ; unsigned int len ; unsigned int (*bpf_func)(struct sk_buff const * , struct sock_filter const * ) ; struct callback_head rcu ; struct sock_filter insns[0U] ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; }; 
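/* Illustrative sketch, not part of the CIL-generated input: the declarations
 * above flatten the kernel's net_device_stats, net_device_ops, net_device and
 * rtnl_link_ops definitions, and the declarations below continue with the
 * neighbour, dst_entry, sock and NFS structures.  The helper below is a
 * minimal, hypothetical example (the name example_total_packets is not taken
 * from the kernel or from this file) showing how the plain unsigned-long
 * counters of struct net_device_stats, declared above, could be aggregated. */
static unsigned long example_total_packets(struct net_device_stats const *stats )
{
  /* total traffic seen by the device: packets received plus packets sent */
  return (stats->rx_packets + stats->tx_packets);
}
/* Being static and self-contained, this sketch only relies on types already
 * declared earlier in this translation unit and does not alter any of the
 * generated declarations that follow. */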
struct neigh_table; struct neigh_parms { struct net *net ; struct net_device *dev ; struct neigh_parms *next ; int (*neigh_setup)(struct neighbour * ) ; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int base_reachable_time ; int retrans_time ; int gc_staletime ; int reachable_time ; int delay_probe_time ; int queue_len_bytes ; int ucast_probes ; int app_probes ; int mcast_probes ; int anycast_delay ; int proxy_delay ; int proxy_qlen ; int locktime ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; struct net *net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { struct neigh_table *next ; int family ; int entry_size ; int key_len ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; union __anonunion_ldv_39990_224 { unsigned long expires ; struct dst_entry *from ; }; struct dn_route; union __anonunion_ldv_40015_225 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; union __anonunion_ldv_39990_224 ldv_39990 ; struct dst_entry *path ; void *__pad0 ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] 
; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion_ldv_40015_225 ldv_40015 ; }; struct __anonstruct_socket_lock_t_226 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_226 socket_lock_t; struct proto; typedef __u32 __portpair; typedef __u64 __addrpair; struct __anonstruct_ldv_40232_228 { __be32 skc_daddr ; __be32 skc_rcv_saddr ; }; union __anonunion_ldv_40233_227 { __addrpair skc_addrpair ; struct __anonstruct_ldv_40232_228 ldv_40232 ; }; union __anonunion_ldv_40237_229 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; struct __anonstruct_ldv_40243_231 { __be16 skc_dport ; __u16 skc_num ; }; union __anonunion_ldv_40244_230 { __portpair skc_portpair ; struct __anonstruct_ldv_40243_231 ldv_40243 ; }; union __anonunion_ldv_40252_232 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion_ldv_40259_233 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { union __anonunion_ldv_40233_227 ldv_40233 ; union __anonunion_ldv_40237_229 ldv_40237 ; union __anonunion_ldv_40244_230 ldv_40244 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse ; int skc_bound_dev_if ; union __anonunion_ldv_40252_232 ldv_40252 ; struct proto *skc_prot ; struct net *skc_net ; int skc_dontcopy_begin[0U] ; union __anonunion_ldv_40259_233 ldv_40259 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_234 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_234 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct sk_buff_head sk_async_wait_queue ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check : 2 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; unsigned short sk_ack_backlog ; unsigned short sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; void *sk_protinfo ; struct timer_list sk_timer ; ktime_t sk_stamp ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void (*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * , int ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; 
}; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_235 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct kiocb * , struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct kiocb * , struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*mtu_reduced)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_235 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { void (*enter_memory_pressure)(struct sock * ) ; struct res_counter *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; unsigned long flags ; struct mem_cgroup *memcg ; }; struct request_values { }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * , struct request_values * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct sock * , struct request_sock * ) ; }; struct request_sock { struct request_sock *dl_next ; u16 mss ; u8 num_retrans ; unsigned char cookie_ts : 1 ; unsigned char num_timeout : 7 ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; unsigned long expires ; struct request_sock_ops const *rsk_ops ; struct sock *sk ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct ip6_sf_list { struct 
ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; spinlock_t aca_lock ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6[1U] ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned long mc_v1_seen ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct neigh_parms *nd_parms ; struct inet6_dev *next ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; unsigned long tstamp ; struct callback_head rcu ; }; struct rpc_iostats; struct rpc_clnt { atomic_t cl_count ; struct list_head cl_clients ; struct list_head cl_tasks ; spinlock_t cl_lock ; struct rpc_xprt *cl_xprt ; struct rpc_procinfo *cl_procinfo ; u32 cl_prog ; u32 cl_vers ; u32 cl_maxproc ; char const *cl_protname ; struct rpc_auth *cl_auth ; struct rpc_stat *cl_stats ; struct rpc_iostats *cl_metrics ; unsigned char cl_softrtry : 1 ; unsigned char cl_discrtry : 1 ; unsigned char cl_autobind : 1 ; unsigned char cl_chatty : 1 ; struct rpc_rtt *cl_rtt ; struct rpc_timeout const *cl_timeout ; int cl_nodelen ; char cl_nodename[32U] ; struct dentry *cl_dentry ; struct rpc_clnt *cl_parent ; struct rpc_rtt cl_rtt_default ; struct rpc_timeout cl_timeout_default ; struct rpc_program const *cl_program ; char *cl_principal ; }; struct rpc_version; struct rpc_program { char const *name ; u32 number ; unsigned int nrvers ; struct rpc_version const **version ; struct rpc_stat *stats ; char const *pipe_dir_name ; }; struct rpc_version { u32 number ; unsigned int nrprocs ; struct rpc_procinfo *procs ; }; struct rpc_procinfo { u32 p_proc ; void (*p_encode)(void * , struct xdr_stream * , void * ) ; int (*p_decode)(void * , struct xdr_stream * , void * ) ; unsigned int p_arglen ; unsigned int p_replen ; unsigned int p_count ; unsigned int p_timer ; u32 p_statidx ; char const *p_name ; }; struct nfs_fh { unsigned short size ; unsigned char data[128U] ; }; enum nfs3_stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2 } ; struct __anonstruct_nfs4_verifier_246 { char data[8U] ; }; typedef struct __anonstruct_nfs4_verifier_246 nfs4_verifier; struct nfs_stateid4 { __be32 seqid ; char other[12U] ; }; typedef struct nfs_stateid4 nfs4_stateid; struct nfs4_sessionid { unsigned char data[16U] ; }; struct nfs4_deviceid { char data[16U] ; }; union __anonunion_ldv_44622_247 { kuid_t e_uid ; kgid_t 
e_gid ; unsigned int e_id ; }; struct posix_acl_entry { short e_tag ; unsigned short e_perm ; union __anonunion_ldv_44622_247 ldv_44622 ; }; union __anonunion_ldv_44626_248 { atomic_t a_refcount ; struct callback_head a_rcu ; }; struct posix_acl { union __anonunion_ldv_44626_248 ldv_44626 ; unsigned int a_count ; struct posix_acl_entry a_entries[0U] ; }; struct nfs4_secinfo_flavors; struct nfs4_string { unsigned int len ; char *data ; }; struct nfs_fsid { uint64_t major ; uint64_t minor ; }; struct nfs4_threshold { __u32 bm ; __u32 l_type ; __u64 rd_sz ; __u64 wr_sz ; __u64 rd_io_sz ; __u64 wr_io_sz ; }; struct __anonstruct_nfs2_250 { __u32 blocksize ; __u32 blocks ; }; struct __anonstruct_nfs3_251 { __u64 used ; }; union __anonunion_du_249 { struct __anonstruct_nfs2_250 nfs2 ; struct __anonstruct_nfs3_251 nfs3 ; }; struct nfs_fattr { unsigned int valid ; umode_t mode ; __u32 nlink ; __u32 uid ; __u32 gid ; dev_t rdev ; __u64 size ; union __anonunion_du_249 du ; struct nfs_fsid fsid ; __u64 fileid ; __u64 mounted_on_fileid ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; __u64 change_attr ; __u64 pre_change_attr ; __u64 pre_size ; struct timespec pre_mtime ; struct timespec pre_ctime ; unsigned long time_start ; unsigned long gencount ; struct nfs4_string *owner_name ; struct nfs4_string *group_name ; struct nfs4_threshold *mdsthreshold ; }; struct nfs_fsinfo { struct nfs_fattr *fattr ; __u32 rtmax ; __u32 rtpref ; __u32 rtmult ; __u32 wtmax ; __u32 wtpref ; __u32 wtmult ; __u32 dtpref ; __u64 maxfilesize ; struct timespec time_delta ; __u32 lease_time ; __u32 layouttype ; __u32 blksize ; }; struct nfs_fsstat { struct nfs_fattr *fattr ; __u64 tbytes ; __u64 fbytes ; __u64 abytes ; __u64 tfiles ; __u64 ffiles ; __u64 afiles ; }; struct nfs_pathconf { struct nfs_fattr *fattr ; __u32 max_link ; __u32 max_namelen ; }; struct nfs4_change_info { u32 atomic ; u64 before ; u64 after ; }; struct nfs_seqid; struct nfs4_channel_attrs { u32 max_rqst_sz ; u32 max_resp_sz ; u32 max_resp_sz_cached ; u32 max_ops ; u32 max_reqs ; }; struct nfs4_slot; struct nfs4_sequence_args { struct nfs4_slot *sa_slot ; unsigned char sa_cache_this : 1 ; unsigned char sa_privileged : 1 ; }; struct nfs4_sequence_res { struct nfs4_slot *sr_slot ; unsigned long sr_timestamp ; int sr_status ; u32 sr_status_flags ; u32 sr_highest_slotid ; u32 sr_target_highest_slotid ; }; struct nfs4_get_lease_time_args { struct nfs4_sequence_args la_seq_args ; }; struct nfs4_get_lease_time_res { struct nfs4_sequence_res lr_seq_res ; struct nfs_fsinfo *lr_fsinfo ; }; struct nfs4_layoutdriver_data { struct page **pages ; __u32 pglen ; __u32 len ; }; struct pnfs_layout_range { u32 iomode ; u64 offset ; u64 length ; }; struct nfs_open_context; struct nfs4_layoutget_args { struct nfs4_sequence_args seq_args ; __u32 type ; struct pnfs_layout_range range ; __u64 minlength ; __u32 maxcount ; struct inode *inode ; struct nfs_open_context *ctx ; nfs4_stateid stateid ; struct nfs4_layoutdriver_data layout ; }; struct nfs4_layoutget_res { struct nfs4_sequence_res seq_res ; __u32 return_on_close ; struct pnfs_layout_range range ; __u32 type ; nfs4_stateid stateid ; struct nfs4_layoutdriver_data *layoutp ; }; struct nfs4_layoutget { struct nfs4_layoutget_args args ; struct nfs4_layoutget_res res ; gfp_t gfp_flags ; }; struct nfs4_getdevicelist_args { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u32 layoutclass ; }; struct pnfs_devicelist; struct nfs4_getdevicelist_res { struct nfs4_sequence_res seq_res ; struct 
pnfs_devicelist *devlist ; }; struct pnfs_device; struct nfs4_getdeviceinfo_args { struct nfs4_sequence_args seq_args ; struct pnfs_device *pdev ; }; struct nfs4_getdeviceinfo_res { struct nfs4_sequence_res seq_res ; struct pnfs_device *pdev ; }; struct nfs4_layoutcommit_args { struct nfs4_sequence_args seq_args ; nfs4_stateid stateid ; __u64 lastbytewritten ; struct inode *inode ; u32 const *bitmask ; }; struct nfs_server; struct nfs4_layoutcommit_res { struct nfs4_sequence_res seq_res ; struct nfs_fattr *fattr ; struct nfs_server const *server ; int status ; }; struct nfs4_layoutcommit_data { struct rpc_task task ; struct nfs_fattr fattr ; struct list_head lseg_list ; struct rpc_cred *cred ; struct nfs4_layoutcommit_args args ; struct nfs4_layoutcommit_res res ; }; struct pnfs_layout_hdr; struct nfs4_layoutreturn_args { struct nfs4_sequence_args seq_args ; struct pnfs_layout_hdr *layout ; struct inode *inode ; nfs4_stateid stateid ; __u32 layout_type ; }; struct nfs4_layoutreturn_res { struct nfs4_sequence_res seq_res ; u32 lrs_present ; nfs4_stateid stateid ; }; struct nfs_client; struct nfs4_layoutreturn { struct nfs4_layoutreturn_args args ; struct nfs4_layoutreturn_res res ; struct rpc_cred *cred ; struct nfs_client *clp ; int rpc_status ; }; struct stateowner_id { __u64 create_time ; __u32 uniquifier ; }; struct __anonstruct_ldv_45036_253 { struct iattr *attrs ; nfs4_verifier verifier ; }; union __anonunion_u_252 { struct __anonstruct_ldv_45036_253 ldv_45036 ; nfs4_stateid delegation ; fmode_t delegation_type ; }; struct nfs_openargs { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; struct nfs_seqid *seqid ; int open_flags ; fmode_t fmode ; u32 access ; __u64 clientid ; struct stateowner_id id ; union __anonunion_u_252 u ; struct qstr const *name ; struct nfs_server const *server ; u32 const *bitmask ; u32 const *open_bitmap ; __u32 claim ; }; struct nfs_openres { struct nfs4_sequence_res seq_res ; nfs4_stateid stateid ; struct nfs_fh fh ; struct nfs4_change_info cinfo ; __u32 rflags ; struct nfs_fattr *f_attr ; struct nfs_seqid *seqid ; struct nfs_server const *server ; fmode_t delegation_type ; nfs4_stateid delegation ; __u32 do_recall ; __u64 maxsize ; __u32 attrset[2U] ; struct nfs4_string *owner ; struct nfs4_string *group_owner ; __u32 access_request ; __u32 access_supported ; __u32 access_result ; }; struct nfs_open_confirmargs { struct nfs_fh const *fh ; nfs4_stateid *stateid ; struct nfs_seqid *seqid ; }; struct nfs_open_confirmres { nfs4_stateid stateid ; struct nfs_seqid *seqid ; }; struct nfs_closeargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; nfs4_stateid *stateid ; struct nfs_seqid *seqid ; fmode_t fmode ; u32 const *bitmask ; }; struct nfs_closeres { struct nfs4_sequence_res seq_res ; nfs4_stateid stateid ; struct nfs_fattr *fattr ; struct nfs_seqid *seqid ; struct nfs_server const *server ; }; struct nfs_lowner { __u64 clientid ; __u64 id ; dev_t s_dev ; }; struct nfs_lock_args { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; struct file_lock *fl ; struct nfs_seqid *lock_seqid ; nfs4_stateid *lock_stateid ; struct nfs_seqid *open_seqid ; nfs4_stateid *open_stateid ; struct nfs_lowner lock_owner ; unsigned char block : 1 ; unsigned char reclaim : 1 ; unsigned char new_lock_owner : 1 ; }; struct nfs_lock_res { struct nfs4_sequence_res seq_res ; nfs4_stateid stateid ; struct nfs_seqid *lock_seqid ; struct nfs_seqid *open_seqid ; }; struct nfs_locku_args { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; struct file_lock 
*fl ; struct nfs_seqid *seqid ; nfs4_stateid *stateid ; }; struct nfs_locku_res { struct nfs4_sequence_res seq_res ; nfs4_stateid stateid ; struct nfs_seqid *seqid ; }; struct nfs_lockt_args { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; struct file_lock *fl ; struct nfs_lowner lock_owner ; }; struct nfs_lockt_res { struct nfs4_sequence_res seq_res ; struct file_lock *denied ; }; struct nfs_release_lockowner_args { struct nfs_lowner lock_owner ; }; struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fhandle ; nfs4_stateid const *stateid ; u32 const *bitmask ; }; struct nfs4_delegreturnres { struct nfs4_sequence_res seq_res ; struct nfs_fattr *fattr ; struct nfs_server const *server ; }; struct nfs_lock_context; struct nfs_readargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; struct nfs_open_context *context ; struct nfs_lock_context *lock_context ; __u64 offset ; __u32 count ; unsigned int pgbase ; struct page **pages ; }; struct nfs_readres { struct nfs4_sequence_res seq_res ; struct nfs_fattr *fattr ; __u32 count ; int eof ; }; struct nfs_writeargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; struct nfs_open_context *context ; struct nfs_lock_context *lock_context ; __u64 offset ; __u32 count ; enum nfs3_stable_how stable ; unsigned int pgbase ; struct page **pages ; u32 const *bitmask ; }; struct nfs_write_verifier { char data[8U] ; }; struct nfs_writeverf { struct nfs_write_verifier verifier ; enum nfs3_stable_how committed ; }; struct nfs_writeres { struct nfs4_sequence_res seq_res ; struct nfs_fattr *fattr ; struct nfs_writeverf *verf ; __u32 count ; struct nfs_server const *server ; }; struct nfs_commitargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; __u64 offset ; __u32 count ; u32 const *bitmask ; }; struct nfs_commitres { struct nfs4_sequence_res seq_res ; struct nfs_fattr *fattr ; struct nfs_writeverf *verf ; struct nfs_server const *server ; }; struct nfs_removeargs { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; struct qstr name ; }; struct nfs_removeres { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs_fattr *dir_attr ; struct nfs4_change_info cinfo ; }; struct nfs_renameargs { struct nfs4_sequence_args seq_args ; struct nfs_fh const *old_dir ; struct nfs_fh const *new_dir ; struct qstr const *old_name ; struct qstr const *new_name ; }; struct nfs_renameres { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs4_change_info old_cinfo ; struct nfs_fattr *old_fattr ; struct nfs4_change_info new_cinfo ; struct nfs_fattr *new_fattr ; }; struct nfs_entry { __u64 ino ; __u64 cookie ; __u64 prev_cookie ; char const *name ; unsigned int len ; int eof ; struct nfs_fh *fh ; struct nfs_fattr *fattr ; unsigned char d_type ; struct nfs_server *server ; }; struct nfs_setattrargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; nfs4_stateid stateid ; struct iattr *iap ; struct nfs_server const *server ; u32 const *bitmask ; }; struct nfs_setaclargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; size_t acl_len ; unsigned int acl_pgbase ; struct page **acl_pages ; }; struct nfs_setaclres { struct nfs4_sequence_res seq_res ; }; struct nfs_getaclargs { struct nfs4_sequence_args seq_args ; struct nfs_fh *fh ; size_t acl_len ; unsigned int acl_pgbase ; struct page **acl_pages ; }; struct nfs_getaclres { struct nfs4_sequence_res seq_res ; size_t acl_len ; size_t acl_data_offset ; int acl_flags ; struct page 
*acl_scratch ; }; struct nfs_setattrres { struct nfs4_sequence_res seq_res ; struct nfs_fattr *fattr ; struct nfs_server const *server ; }; struct nfs4_accessargs { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u32 const *bitmask ; u32 access ; }; struct nfs4_accessres { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs_fattr *fattr ; u32 supported ; u32 access ; }; struct __anonstruct_symlink_255 { struct page **pages ; unsigned int len ; }; struct __anonstruct_device_256 { u32 specdata1 ; u32 specdata2 ; }; union __anonunion_u_254 { struct __anonstruct_symlink_255 symlink ; struct __anonstruct_device_256 device ; }; struct nfs4_create_arg { struct nfs4_sequence_args seq_args ; u32 ftype ; union __anonunion_u_254 u ; struct qstr const *name ; struct nfs_server const *server ; struct iattr const *attrs ; struct nfs_fh const *dir_fh ; u32 const *bitmask ; }; struct nfs4_create_res { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs_fh *fh ; struct nfs_fattr *fattr ; struct nfs4_change_info dir_cinfo ; }; struct nfs4_fsinfo_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u32 const *bitmask ; }; struct nfs4_fsinfo_res { struct nfs4_sequence_res seq_res ; struct nfs_fsinfo *fsinfo ; }; struct nfs4_getattr_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u32 const *bitmask ; }; struct nfs4_getattr_res { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs_fattr *fattr ; }; struct nfs4_link_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; struct nfs_fh const *dir_fh ; struct qstr const *name ; u32 const *bitmask ; }; struct nfs4_link_res { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs_fattr *fattr ; struct nfs4_change_info cinfo ; struct nfs_fattr *dir_attr ; }; struct nfs4_lookup_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *dir_fh ; struct qstr const *name ; u32 const *bitmask ; }; struct nfs4_lookup_res { struct nfs4_sequence_res seq_res ; struct nfs_server const *server ; struct nfs_fattr *fattr ; struct nfs_fh *fh ; }; struct nfs4_lookup_root_arg { struct nfs4_sequence_args seq_args ; u32 const *bitmask ; }; struct nfs4_pathconf_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u32 const *bitmask ; }; struct nfs4_pathconf_res { struct nfs4_sequence_res seq_res ; struct nfs_pathconf *pathconf ; }; struct nfs4_readdir_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u64 cookie ; nfs4_verifier verifier ; u32 count ; struct page **pages ; unsigned int pgbase ; u32 const *bitmask ; int plus ; }; struct nfs4_readdir_res { struct nfs4_sequence_res seq_res ; nfs4_verifier verifier ; unsigned int pgbase ; }; struct nfs4_readlink { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; unsigned int pgbase ; unsigned int pglen ; struct page **pages ; }; struct nfs4_readlink_res { struct nfs4_sequence_res seq_res ; }; struct nfs4_setclientid { nfs4_verifier const *sc_verifier ; unsigned int sc_name_len ; char sc_name[128U] ; u32 sc_prog ; unsigned int sc_netid_len ; char sc_netid[5U] ; unsigned int sc_uaddr_len ; char sc_uaddr[58U] ; u32 sc_cb_ident ; }; struct nfs4_setclientid_res { u64 clientid ; nfs4_verifier confirm ; }; struct nfs4_statfs_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *fh ; u32 const *bitmask ; }; struct nfs4_statfs_res { struct nfs4_sequence_res seq_res ; struct nfs_fsstat *fsstat ; }; struct 
nfs4_server_caps_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh *fhandle ; }; struct nfs4_server_caps_res { struct nfs4_sequence_res seq_res ; u32 attr_bitmask[3U] ; u32 acl_bitmask ; u32 has_links ; u32 has_symlinks ; u32 fh_expire_type ; }; struct nfs4_pathname { unsigned int ncomponents ; struct nfs4_string components[512U] ; }; struct nfs4_fs_location { unsigned int nservers ; struct nfs4_string servers[10U] ; struct nfs4_pathname rootpath ; }; struct nfs4_fs_locations { struct nfs_fattr fattr ; struct nfs_server const *server ; struct nfs4_pathname fs_path ; int nlocations ; struct nfs4_fs_location locations[10U] ; }; struct nfs4_fs_locations_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *dir_fh ; struct qstr const *name ; struct page *page ; u32 const *bitmask ; }; struct nfs4_fs_locations_res { struct nfs4_sequence_res seq_res ; struct nfs4_fs_locations *fs_locations ; }; struct nfs4_secinfo_oid { unsigned int len ; char data[32U] ; }; struct nfs4_secinfo_gss { struct nfs4_secinfo_oid sec_oid4 ; unsigned int qop4 ; unsigned int service ; }; struct nfs4_secinfo_flavor { unsigned int flavor ; struct nfs4_secinfo_gss gss ; }; struct nfs4_secinfo_flavors { unsigned int num_flavors ; struct nfs4_secinfo_flavor flavors[0U] ; }; struct nfs4_secinfo_arg { struct nfs4_sequence_args seq_args ; struct nfs_fh const *dir_fh ; struct qstr const *name ; }; struct nfs4_secinfo_res { struct nfs4_sequence_res seq_res ; struct nfs4_secinfo_flavors *flavors ; }; struct nfstime4 { u64 seconds ; u32 nseconds ; }; struct pnfs_layout_segment; struct pnfs_commit_bucket { struct list_head written ; struct list_head committing ; struct pnfs_layout_segment *wlseg ; struct pnfs_layout_segment *clseg ; }; struct pnfs_ds_commit_info { int nwritten ; int ncommitting ; int nbuckets ; struct pnfs_commit_bucket *buckets ; }; struct nfs41_exchange_id_args { struct nfs_client *client ; nfs4_verifier *verifier ; unsigned int id_len ; char id[48U] ; u32 flags ; }; struct nfs41_server_owner { uint64_t minor_id ; uint32_t major_id_sz ; char major_id[1024U] ; }; struct nfs41_server_scope { uint32_t server_scope_sz ; char server_scope[1024U] ; }; struct nfs41_impl_id { char domain[1025U] ; char name[1025U] ; struct nfstime4 date ; }; struct nfs4_session; struct nfs41_bind_conn_to_session_res { struct nfs4_session *session ; u32 dir ; bool use_conn_in_rdma_mode ; }; struct nfs41_exchange_id_res { u64 clientid ; u32 seqid ; u32 flags ; struct nfs41_server_owner *server_owner ; struct nfs41_server_scope *server_scope ; struct nfs41_impl_id *impl_id ; }; struct nfs41_create_session_args { struct nfs_client *client ; uint32_t flags ; uint32_t cb_program ; struct nfs4_channel_attrs fc_attrs ; struct nfs4_channel_attrs bc_attrs ; }; struct nfs41_create_session_res { struct nfs_client *client ; }; struct nfs41_reclaim_complete_args { struct nfs4_sequence_args seq_args ; unsigned char one_fs : 1 ; }; struct nfs41_reclaim_complete_res { struct nfs4_sequence_res seq_res ; }; struct nfs41_secinfo_no_name_args { struct nfs4_sequence_args seq_args ; int style ; }; struct nfs41_test_stateid_args { struct nfs4_sequence_args seq_args ; nfs4_stateid *stateid ; }; struct nfs41_test_stateid_res { struct nfs4_sequence_res seq_res ; unsigned int status ; }; struct nfs41_free_stateid_args { struct nfs4_sequence_args seq_args ; nfs4_stateid *stateid ; }; struct nfs41_free_stateid_res { struct nfs4_sequence_res seq_res ; unsigned int status ; }; struct nfs_page; struct nfs_page_array { struct page **pagevec ; unsigned int 
npages ; struct page *page_array[8U] ; }; struct nfs_pgio_header; struct nfs_read_data { struct nfs_pgio_header *header ; struct list_head list ; struct rpc_task task ; struct nfs_fattr fattr ; struct nfs_readargs args ; struct nfs_readres res ; unsigned long timestamp ; int (*read_done_cb)(struct rpc_task * , struct nfs_read_data * ) ; __u64 mds_offset ; struct nfs_page_array pages ; struct nfs_client *ds_clp ; }; struct nfs_pgio_completion_ops; struct nfs_direct_req; struct nfs_pgio_header { struct inode *inode ; struct rpc_cred *cred ; struct list_head pages ; struct list_head rpc_list ; atomic_t refcnt ; struct nfs_page *req ; struct nfs_writeverf *verf ; struct pnfs_layout_segment *lseg ; loff_t io_start ; struct rpc_call_ops const *mds_ops ; void (*release)(struct nfs_pgio_header * ) ; struct nfs_pgio_completion_ops const *completion_ops ; struct nfs_direct_req *dreq ; void *layout_private ; spinlock_t lock ; int pnfs_error ; int error ; unsigned long good_bytes ; unsigned long flags ; }; struct nfs_write_data { struct nfs_pgio_header *header ; struct list_head list ; struct rpc_task task ; struct nfs_fattr fattr ; struct nfs_writeverf verf ; struct nfs_writeargs args ; struct nfs_writeres res ; unsigned long timestamp ; int (*write_done_cb)(struct rpc_task * , struct nfs_write_data * ) ; __u64 mds_offset ; struct nfs_page_array pages ; struct nfs_client *ds_clp ; }; struct nfs_mds_commit_info { atomic_t rpcs_out ; unsigned long ncommit ; struct list_head list ; }; struct nfs_commit_data; struct nfs_inode; struct nfs_commit_completion_ops { void (*error_cleanup)(struct nfs_inode * ) ; void (*completion)(struct nfs_commit_data * ) ; }; struct nfs_commit_info { spinlock_t *lock ; struct nfs_mds_commit_info *mds ; struct pnfs_ds_commit_info *ds ; struct nfs_direct_req *dreq ; struct nfs_commit_completion_ops const *completion_ops ; }; struct nfs_commit_data { struct rpc_task task ; struct inode *inode ; struct rpc_cred *cred ; struct nfs_fattr fattr ; struct nfs_writeverf verf ; struct list_head pages ; struct list_head list ; struct nfs_direct_req *dreq ; struct nfs_commitargs args ; struct nfs_commitres res ; struct nfs_open_context *context ; struct pnfs_layout_segment *lseg ; struct nfs_client *ds_clp ; int ds_commit_index ; struct rpc_call_ops const *mds_ops ; struct nfs_commit_completion_ops const *completion_ops ; int (*commit_done_cb)(struct rpc_task * , struct nfs_commit_data * ) ; }; struct nfs_pgio_completion_ops { void (*error_cleanup)(struct list_head * ) ; void (*init_hdr)(struct nfs_pgio_header * ) ; void (*completion)(struct nfs_pgio_header * ) ; }; struct nfs_unlinkdata { struct hlist_node list ; struct nfs_removeargs args ; struct nfs_removeres res ; struct inode *dir ; struct rpc_cred *cred ; struct nfs_fattr dir_attr ; }; struct nfs_renamedata { struct nfs_renameargs args ; struct nfs_renameres res ; struct rpc_cred *cred ; struct inode *old_dir ; struct dentry *old_dentry ; struct nfs_fattr old_fattr ; struct inode *new_dir ; struct dentry *new_dentry ; struct nfs_fattr new_fattr ; }; struct nfs_access_entry; struct nfs_subversion; struct nfs_mount_info; struct nfs_client_initdata; struct nfs_pageio_descriptor; struct nfs_rpc_ops { u32 version ; struct dentry_operations const *dentry_ops ; struct inode_operations const *dir_inode_ops ; struct inode_operations const *file_inode_ops ; struct file_operations const *file_ops ; int (*getroot)(struct nfs_server * , struct nfs_fh * , struct nfs_fsinfo * ) ; struct vfsmount *(*submount)(struct nfs_server * , struct dentry * 
, struct nfs_fh * , struct nfs_fattr * ) ; struct dentry *(*try_mount)(int , char const * , struct nfs_mount_info * , struct nfs_subversion * ) ; int (*getattr)(struct nfs_server * , struct nfs_fh * , struct nfs_fattr * ) ; int (*setattr)(struct dentry * , struct nfs_fattr * , struct iattr * ) ; int (*lookup)(struct inode * , struct qstr * , struct nfs_fh * , struct nfs_fattr * ) ; int (*access)(struct inode * , struct nfs_access_entry * ) ; int (*readlink)(struct inode * , struct page * , unsigned int , unsigned int ) ; int (*create)(struct inode * , struct dentry * , struct iattr * , int ) ; int (*remove)(struct inode * , struct qstr * ) ; void (*unlink_setup)(struct rpc_message * , struct inode * ) ; void (*unlink_rpc_prepare)(struct rpc_task * , struct nfs_unlinkdata * ) ; int (*unlink_done)(struct rpc_task * , struct inode * ) ; int (*rename)(struct inode * , struct qstr * , struct inode * , struct qstr * ) ; void (*rename_setup)(struct rpc_message * , struct inode * ) ; void (*rename_rpc_prepare)(struct rpc_task * , struct nfs_renamedata * ) ; int (*rename_done)(struct rpc_task * , struct inode * , struct inode * ) ; int (*link)(struct inode * , struct inode * , struct qstr * ) ; int (*symlink)(struct inode * , struct dentry * , struct page * , unsigned int , struct iattr * ) ; int (*mkdir)(struct inode * , struct dentry * , struct iattr * ) ; int (*rmdir)(struct inode * , struct qstr * ) ; int (*readdir)(struct dentry * , struct rpc_cred * , u64 , struct page ** , unsigned int , int ) ; int (*mknod)(struct inode * , struct dentry * , struct iattr * , dev_t ) ; int (*statfs)(struct nfs_server * , struct nfs_fh * , struct nfs_fsstat * ) ; int (*fsinfo)(struct nfs_server * , struct nfs_fh * , struct nfs_fsinfo * ) ; int (*pathconf)(struct nfs_server * , struct nfs_fh * , struct nfs_pathconf * ) ; int (*set_capabilities)(struct nfs_server * , struct nfs_fh * ) ; int (*decode_dirent)(struct xdr_stream * , struct nfs_entry * , int ) ; void (*read_setup)(struct nfs_read_data * , struct rpc_message * ) ; void (*read_pageio_init)(struct nfs_pageio_descriptor * , struct inode * , struct nfs_pgio_completion_ops const * ) ; void (*read_rpc_prepare)(struct rpc_task * , struct nfs_read_data * ) ; int (*read_done)(struct rpc_task * , struct nfs_read_data * ) ; void (*write_setup)(struct nfs_write_data * , struct rpc_message * ) ; void (*write_pageio_init)(struct nfs_pageio_descriptor * , struct inode * , int , struct nfs_pgio_completion_ops const * ) ; void (*write_rpc_prepare)(struct rpc_task * , struct nfs_write_data * ) ; int (*write_done)(struct rpc_task * , struct nfs_write_data * ) ; void (*commit_setup)(struct nfs_commit_data * , struct rpc_message * ) ; void (*commit_rpc_prepare)(struct rpc_task * , struct nfs_commit_data * ) ; int (*commit_done)(struct rpc_task * , struct nfs_commit_data * ) ; int (*lock)(struct file * , int , struct file_lock * ) ; int (*lock_check_bounds)(struct file_lock const * ) ; void (*clear_acl_cache)(struct inode * ) ; void (*close_context)(struct nfs_open_context * , int ) ; struct inode *(*open_context)(struct inode * , struct nfs_open_context * , int , struct iattr * ) ; int (*have_delegation)(struct inode * , fmode_t ) ; int (*return_delegation)(struct inode * ) ; struct nfs_client *(*alloc_client)(struct nfs_client_initdata const * ) ; struct nfs_client *(*init_client)(struct nfs_client * , struct rpc_timeout const * , char const * , rpc_authflavor_t ) ; void (*free_client)(struct nfs_client * ) ; struct nfs_server *(*create_server)(struct nfs_mount_info * , 
struct nfs_subversion * ) ; struct nfs_server *(*clone_server)(struct nfs_server * , struct nfs_fh * , struct nfs_fattr * , rpc_authflavor_t ) ; }; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; struct writeback_control { long nr_to_write ; long pages_skipped ; loff_t range_start ; loff_t range_end ; enum writeback_sync_modes sync_mode ; unsigned char for_kupdate : 1 ; unsigned char for_background : 1 ; unsigned char tagged_writepages : 1 ; unsigned char for_reclaim : 1 ; unsigned char range_cyclic : 1 ; }; struct bdi_writeback; typedef int congested_fn(void * , int ); struct bdi_writeback { struct backing_dev_info *bdi ; unsigned int nr ; unsigned long last_old_flush ; unsigned long last_active ; struct task_struct *task ; struct timer_list wakeup_timer ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; spinlock_t list_lock ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned long state ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; char *name ; struct percpu_counter bdi_stat[4U] ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; struct bdi_writeback wb ; spinlock_t wb_lock ; struct list_head work_list ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; struct nfs_iostats; struct nlm_host; struct nfs4_minor_version_ops; struct idmap; struct fscache_cookie; struct nfs_client { atomic_t cl_count ; atomic_t cl_mds_count ; int cl_cons_state ; unsigned long cl_res_state ; unsigned long cl_flags ; struct __kernel_sockaddr_storage cl_addr ; size_t cl_addrlen ; char *cl_hostname ; struct list_head cl_share_link ; struct list_head cl_superblocks ; struct rpc_clnt *cl_rpcclient ; struct nfs_rpc_ops const *rpc_ops ; int cl_proto ; struct nfs_subversion *cl_nfs_mod ; u32 cl_minorversion ; struct rpc_cred *cl_machine_cred ; u64 cl_clientid ; nfs4_verifier cl_confirm ; unsigned long cl_state ; spinlock_t cl_lock ; unsigned long cl_lease_time ; unsigned long cl_last_renewal ; struct delayed_work cl_renewd ; struct rpc_wait_queue cl_rpcwaitq ; struct idmap *cl_idmap ; char cl_ipaddr[48U] ; u32 cl_cb_ident ; struct nfs4_minor_version_ops const *cl_mvops ; u32 cl_seqid ; u32 cl_exchange_flags ; struct nfs4_session *cl_session ; bool cl_preserve_clid ; struct nfs41_server_owner *cl_serverowner ; struct nfs41_server_scope *cl_serverscope ; struct nfs41_impl_id *cl_implid ; struct fscache_cookie *fscache ; struct net *cl_net ; }; struct nfs_fscache_key; struct pnfs_layoutdriver_type; struct nfs_server { struct nfs_client *nfs_client ; struct list_head client_link ; struct list_head master_link ; struct rpc_clnt *client ; struct rpc_clnt *client_acl ; struct nlm_host *nlm_host ; struct nfs_iostats *io_stats ; struct backing_dev_info backing_dev_info ; atomic_long_t writeback ; int flags ; unsigned int caps ; unsigned int rsize ; unsigned int rpages ; unsigned int wsize ; unsigned int wpages ; unsigned int wtmult ; unsigned int dtsize ; unsigned short port ; unsigned int bsize ; unsigned int 
acregmin ; unsigned int acregmax ; unsigned int acdirmin ; unsigned int acdirmax ; unsigned int namelen ; unsigned int options ; struct nfs_fsid fsid ; __u64 maxfilesize ; struct timespec time_delta ; unsigned long mount_time ; dev_t s_dev ; struct nfs_fscache_key *fscache_key ; struct fscache_cookie *fscache ; u32 pnfs_blksize ; u32 attr_bitmask[3U] ; u32 cache_consistency_bitmask[2U] ; u32 acl_bitmask ; u32 fh_expire_type ; struct pnfs_layoutdriver_type *pnfs_curr_ld ; struct rpc_wait_queue roc_rpcwaitq ; void *pnfs_ld_data ; struct rb_root state_owners ; struct ida openowner_id ; struct ida lockowner_id ; struct list_head state_owners_lru ; struct list_head layouts ; struct list_head delegations ; void (*destroy)(struct nfs_server * ) ; atomic_t active ; struct __kernel_sockaddr_storage mountd_address ; size_t mountd_addrlen ; u32 mountd_version ; unsigned short mountd_port ; unsigned short mountd_protocol ; }; struct nfs_access_entry { struct rb_node rb_node ; struct list_head lru ; unsigned long jiffies ; struct rpc_cred *cred ; int mask ; }; struct nfs_lockowner { fl_owner_t l_owner ; pid_t l_pid ; }; struct nfs_lock_context { atomic_t count ; struct list_head list ; struct nfs_open_context *open_context ; struct nfs_lockowner lockowner ; }; struct nfs4_state; struct nfs_open_context { struct nfs_lock_context lock_context ; struct dentry *dentry ; struct rpc_cred *cred ; struct nfs4_state *state ; fmode_t mode ; unsigned long flags ; int error ; struct list_head list ; struct nfs4_threshold *mdsthreshold ; }; struct nfs_delegation; struct nfs4_cached_acl; struct nfs_inode { __u64 fileid ; struct nfs_fh fh ; unsigned long flags ; unsigned long cache_validity ; unsigned long read_cache_jiffies ; unsigned long attrtimeo ; unsigned long attrtimeo_timestamp ; unsigned long attr_gencount ; unsigned long cache_change_attribute ; struct rb_root access_cache ; struct list_head access_cache_entry_lru ; struct list_head access_cache_inode_lru ; struct posix_acl *acl_access ; struct posix_acl *acl_default ; __be32 cookieverf[2U] ; unsigned long npages ; struct nfs_mds_commit_info commit_info ; struct list_head open_files ; atomic_t silly_count ; struct hlist_head silly_list ; wait_queue_head_t waitqueue ; struct nfs4_cached_acl *nfs4_acl ; struct list_head open_states ; struct nfs_delegation *delegation ; fmode_t delegation_state ; struct rw_semaphore rwsem ; struct pnfs_layout_hdr *layout ; __u64 write_io ; __u64 read_io ; struct fscache_cookie *fscache ; struct inode vfs_inode ; }; struct nfs_page { struct list_head wb_list ; struct page *wb_page ; struct nfs_open_context *wb_context ; struct nfs_lock_context *wb_lock_context ; unsigned long wb_index ; unsigned int wb_offset ; unsigned int wb_pgbase ; unsigned int wb_bytes ; struct kref wb_kref ; unsigned long wb_flags ; struct nfs_write_verifier wb_verf ; }; struct nfs_pageio_ops { void (*pg_init)(struct nfs_pageio_descriptor * , struct nfs_page * ) ; bool (*pg_test)(struct nfs_pageio_descriptor * , struct nfs_page * , struct nfs_page * ) ; int (*pg_doio)(struct nfs_pageio_descriptor * ) ; }; struct nfs_pageio_descriptor { struct list_head pg_list ; unsigned long pg_bytes_written ; size_t pg_count ; size_t pg_bsize ; unsigned int pg_base ; unsigned char pg_moreio : 1 ; unsigned char pg_recoalesce : 1 ; struct inode *pg_inode ; struct nfs_pageio_ops const *pg_ops ; int pg_ioflags ; int pg_error ; struct rpc_call_ops const *pg_rpc_callops ; struct nfs_pgio_completion_ops const *pg_completion_ops ; struct pnfs_layout_segment *pg_lseg ; struct 
nfs_direct_req *pg_dreq ; void *pg_layout_private ; }; struct nameidata { struct path path ; struct qstr last ; struct path root ; struct inode *inode ; unsigned int flags ; unsigned int seq ; int last_type ; unsigned int depth ; char *saved_names[9U] ; }; struct vfsmount { struct dentry *mnt_root ; struct super_block *mnt_sb ; int mnt_flags ; }; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct kernel_param; struct kernel_param_ops { int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion_ldv_47586_261 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct kernel_param_ops const *ops ; u16 perm ; s16 level ; union __anonunion_ldv_47586_261 ldv_47586 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct tracepoint; struct tracepoint_func { void *func ; void *data ; }; struct tracepoint { char const *name ; struct static_key key ; void (*regfunc)(void) ; void (*unregfunc)(void) ; struct tracepoint_func *funcs ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2 } ; struct module_ref { unsigned long incs ; unsigned long decs ; }; struct module_sect_attrs; struct module_notes_attrs; struct ftrace_event_call; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int num_syms ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct 
bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct ftrace_event_call **trace_events ; unsigned int num_trace_events ; struct list_head source_list ; struct list_head target_list ; struct task_struct *waiter ; void (*exit)(void) ; struct module_ref *refptr ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct new_utsname { char sysname[65U] ; char nodename[65U] ; char release[65U] ; char version[65U] ; char machine[65U] ; char domainname[65U] ; }; struct uts_namespace { struct kref kref ; struct new_utsname name ; struct user_namespace *user_ns ; unsigned int proc_inum ; }; struct nfs4_state_recovery_ops; struct nfs4_state_maintenance_ops; struct nfs4_minor_version_ops { u32 minor_version ; int (*call_sync)(struct rpc_clnt * , struct nfs_server * , struct rpc_message * , struct nfs4_sequence_args * , struct nfs4_sequence_res * ) ; bool (*match_stateid)(nfs4_stateid const * , nfs4_stateid const * ) ; int (*find_root_sec)(struct nfs_server * , struct nfs_fh * , struct nfs_fsinfo * ) ; struct nfs4_state_recovery_ops const *reboot_recovery_ops ; struct nfs4_state_recovery_ops const *nograce_recovery_ops ; struct nfs4_state_maintenance_ops const *state_renewal_ops ; }; struct nfs_seqid_counter { ktime_t create_time ; int owner_id ; int flags ; u32 counter ; spinlock_t lock ; struct list_head list ; struct rpc_wait_queue wait ; }; struct nfs_seqid { struct nfs_seqid_counter *sequence ; struct list_head list ; struct rpc_task *task ; }; struct nfs4_state_owner { struct nfs_server *so_server ; struct list_head so_lru ; unsigned long so_expires ; struct rb_node so_server_node ; struct rpc_cred *so_cred ; spinlock_t so_lock ; atomic_t so_count ; unsigned long so_flags ; struct list_head so_states ; struct nfs_seqid_counter so_seqid ; }; union __anonunion_lo_u_262 { fl_owner_t posix_owner ; pid_t flock_owner ; }; struct nfs4_lock_owner { unsigned int lo_type ; union __anonunion_lo_u_262 lo_u ; }; struct nfs4_lock_state { struct list_head ls_locks ; struct nfs4_state *ls_state ; unsigned long ls_flags ; struct nfs_seqid_counter ls_seqid ; nfs4_stateid ls_stateid ; atomic_t ls_count ; struct nfs4_lock_owner ls_owner ; }; struct nfs4_state { struct list_head open_states ; struct list_head inode_states ; struct list_head lock_states ; struct nfs4_state_owner *owner ; struct inode *inode ; unsigned long flags ; spinlock_t state_lock ; seqlock_t seqlock ; nfs4_stateid stateid ; nfs4_stateid open_stateid ; unsigned int n_rdonly ; unsigned int n_wronly ; unsigned int n_rdwr ; fmode_t state ; atomic_t count ; }; struct nfs4_exception { long timeout ; int retry ; struct nfs4_state *state ; struct inode *inode ; }; struct nfs4_state_recovery_ops { int owner_flag_bit ; int state_flag_bit ; int (*recover_open)(struct nfs4_state_owner * , struct nfs4_state * ) ; int (*recover_lock)(struct nfs4_state * , struct file_lock * ) ; int (*establish_clid)(struct nfs_client * , struct rpc_cred * ) ; struct rpc_cred *(*get_clid_cred)(struct nfs_client * ) ; int (*reclaim_complete)(struct nfs_client * ) ; int (*detect_trunking)(struct nfs_client * , struct nfs_client ** , struct rpc_cred * ) ; }; struct 
nfs4_state_maintenance_ops { int (*sched_state_renewal)(struct nfs_client * , struct rpc_cred * , unsigned int ) ; struct rpc_cred *(*get_state_renewal_cred_locked)(struct nfs_client * ) ; int (*renew_lease)(struct nfs_client * , struct rpc_cred * ) ; }; struct svc_version; struct nfs_delegation { struct list_head super_list ; struct rpc_cred *cred ; struct inode *inode ; nfs4_stateid stateid ; fmode_t type ; loff_t maxsize ; __u64 change_attr ; unsigned long flags ; spinlock_t lock ; struct callback_head rcu ; }; struct nfs_clone_mount { struct super_block const *sb ; struct dentry const *dentry ; struct nfs_fh *fh ; struct nfs_fattr *fattr ; char *hostname ; char *mnt_path ; struct sockaddr *addr ; size_t addrlen ; rpc_authflavor_t authflavor ; }; struct nfs_client_initdata { unsigned long init_flags ; char const *hostname ; struct sockaddr const *addr ; size_t addrlen ; struct nfs_subversion *nfs_mod ; int proto ; u32 minorversion ; struct net *net ; }; struct __anonstruct_mount_server_263 { struct __kernel_sockaddr_storage address ; size_t addrlen ; char *hostname ; u32 version ; int port ; unsigned short protocol ; }; struct __anonstruct_nfs_server_264 { struct __kernel_sockaddr_storage address ; size_t addrlen ; char *hostname ; char *export_path ; int port ; unsigned short protocol ; }; struct nfs_parsed_mount_data { int flags ; unsigned int rsize ; unsigned int wsize ; unsigned int timeo ; unsigned int retrans ; unsigned int acregmin ; unsigned int acregmax ; unsigned int acdirmin ; unsigned int acdirmax ; unsigned int namlen ; unsigned int options ; unsigned int bsize ; unsigned int auth_flavor_len ; rpc_authflavor_t auth_flavors[1U] ; char *client_address ; unsigned int version ; unsigned int minorversion ; char *fscache_uniq ; bool need_mount ; struct __anonstruct_mount_server_263 mount_server ; struct __anonstruct_nfs_server_264 nfs_server ; struct security_mnt_opts lsm_opts ; struct net *net ; }; struct nfs_mount_info { void (*fill_super)(struct super_block * , struct nfs_mount_info * ) ; int (*set_security)(struct super_block * , struct dentry * , struct nfs_mount_info * ) ; struct nfs_parsed_mount_data *parsed ; struct nfs_clone_mount *cloned ; struct nfs_fh *mntfh ; }; enum nfs_stat_eventcounters { NFSIOS_INODEREVALIDATE = 0, NFSIOS_DENTRYREVALIDATE = 1, NFSIOS_DATAINVALIDATE = 2, NFSIOS_ATTRINVALIDATE = 3, NFSIOS_VFSOPEN = 4, NFSIOS_VFSLOOKUP = 5, NFSIOS_VFSACCESS = 6, NFSIOS_VFSUPDATEPAGE = 7, NFSIOS_VFSREADPAGE = 8, NFSIOS_VFSREADPAGES = 9, NFSIOS_VFSWRITEPAGE = 10, NFSIOS_VFSWRITEPAGES = 11, NFSIOS_VFSGETDENTS = 12, NFSIOS_VFSSETATTR = 13, NFSIOS_VFSFLUSH = 14, NFSIOS_VFSFSYNC = 15, NFSIOS_VFSLOCK = 16, NFSIOS_VFSRELEASE = 17, NFSIOS_CONGESTIONWAIT = 18, NFSIOS_SETATTRTRUNC = 19, NFSIOS_EXTENDWRITE = 20, NFSIOS_SILLYRENAME = 21, NFSIOS_SHORTREAD = 22, NFSIOS_SHORTWRITE = 23, NFSIOS_DELAY = 24, NFSIOS_PNFS_READ = 25, NFSIOS_PNFS_WRITE = 26, __NFSIOS_COUNTSMAX = 27 } ; struct nfs_iostats { unsigned long long bytes[8U] ; unsigned long long fscache[5U] ; unsigned long events[27U] ; }; typedef unsigned long pao_T_____23; typedef unsigned long pao_T_____24; typedef unsigned long pao_T_____25; typedef unsigned long pao_T_____26; struct cache_head { struct cache_head *next ; time_t expiry_time ; time_t last_refresh ; struct kref ref ; unsigned long flags ; }; struct cache_detail_procfs { struct proc_dir_entry *proc_ent ; struct proc_dir_entry *flush_ent ; struct proc_dir_entry *channel_ent ; struct proc_dir_entry *content_ent ; }; struct cache_detail_pipefs { struct dentry *dir 
; }; union __anonunion_u_265 { struct cache_detail_procfs procfs ; struct cache_detail_pipefs pipefs ; }; struct cache_detail { struct module *owner ; int hash_size ; struct cache_head **hash_table ; rwlock_t hash_lock ; atomic_t inuse ; char *name ; void (*cache_put)(struct kref * ) ; int (*cache_upcall)(struct cache_detail * , struct cache_head * ) ; int (*cache_parse)(struct cache_detail * , char * , int ) ; int (*cache_show)(struct seq_file * , struct cache_detail * , struct cache_head * ) ; void (*warn_no_listener)(struct cache_detail * , int ) ; struct cache_head *(*alloc)(void) ; int (*match)(struct cache_head * , struct cache_head * ) ; void (*init)(struct cache_head * , struct cache_head * ) ; void (*update)(struct cache_head * , struct cache_head * ) ; time_t flush_time ; struct list_head others ; time_t nextcheck ; int entries ; struct list_head queue ; atomic_t readers ; time_t last_close ; time_t last_warn ; union __anonunion_u_265 u ; struct net *net ; }; struct cache_deferred_req; struct cache_req { struct cache_deferred_req *(*defer)(struct cache_req * ) ; int thread_wait ; }; struct cache_deferred_req { struct hlist_node hash ; struct list_head recent ; struct cache_head *item ; void *owner ; void (*revisit)(struct cache_deferred_req * , int ) ; }; struct svc_cred { uid_t cr_uid ; gid_t cr_gid ; struct group_info *cr_group_info ; u32 cr_flavor ; char *cr_principal ; }; struct svc_rqst; struct auth_ops; struct auth_domain { struct kref ref ; struct hlist_node hash ; char *name ; struct auth_ops *flavour ; }; struct auth_ops { char *name ; struct module *owner ; int flavour ; int (*accept)(struct svc_rqst * , __be32 * ) ; int (*release)(struct svc_rqst * ) ; void (*domain_release)(struct auth_domain * ) ; int (*set_client)(struct svc_rqst * ) ; }; struct svc_pool_stats { unsigned long packets ; unsigned long sockets_queued ; unsigned long threads_woken ; unsigned long threads_timedout ; }; struct svc_pool { unsigned int sp_id ; spinlock_t sp_lock ; struct list_head sp_threads ; struct list_head sp_sockets ; unsigned int sp_nrthreads ; struct list_head sp_all_threads ; struct svc_pool_stats sp_stats ; }; struct svc_serv { struct svc_program *sv_program ; struct svc_stat *sv_stats ; spinlock_t sv_lock ; unsigned int sv_nrthreads ; unsigned int sv_maxconn ; unsigned int sv_max_payload ; unsigned int sv_max_mesg ; unsigned int sv_xdrsize ; struct list_head sv_permsocks ; struct list_head sv_tempsocks ; int sv_tmpcnt ; struct timer_list sv_temptimer ; char *sv_name ; unsigned int sv_nrpools ; struct svc_pool *sv_pools ; void (*sv_shutdown)(struct svc_serv * , struct net * ) ; struct module *sv_module ; int (*sv_function)(void * ) ; struct list_head sv_cb_list ; spinlock_t sv_cb_lock ; wait_queue_head_t sv_cb_waitq ; struct svc_xprt *sv_bc_xprt ; }; struct svc_procedure; struct svc_deferred_req; struct svc_cacherep; struct svc_rqst { struct list_head rq_list ; struct list_head rq_all ; struct svc_xprt *rq_xprt ; struct __kernel_sockaddr_storage rq_addr ; size_t rq_addrlen ; struct __kernel_sockaddr_storage rq_daddr ; size_t rq_daddrlen ; struct svc_serv *rq_server ; struct svc_pool *rq_pool ; struct svc_procedure *rq_procinfo ; struct auth_ops *rq_authop ; struct svc_cred rq_cred ; void *rq_xprt_ctxt ; struct svc_deferred_req *rq_deferred ; int rq_usedeferral ; size_t rq_xprt_hlen ; struct xdr_buf rq_arg ; struct xdr_buf rq_res ; struct page *rq_pages[259U] ; struct page **rq_respages ; int rq_resused ; struct page **rq_next_page ; struct kvec rq_vec[259U] ; __be32 rq_xid ; u32 
rq_prog ; u32 rq_vers ; u32 rq_proc ; u32 rq_prot ; unsigned char rq_secure : 1 ; void *rq_argp ; void *rq_resp ; void *rq_auth_data ; int rq_reserved ; struct cache_req rq_chandle ; bool rq_dropme ; struct auth_domain *rq_client ; struct auth_domain *rq_gssclient ; int rq_cachetype ; struct svc_cacherep *rq_cacherep ; int rq_splice_ok ; wait_queue_head_t rq_wait ; struct task_struct *rq_task ; }; struct svc_deferred_req { u32 prot ; struct svc_xprt *xprt ; struct __kernel_sockaddr_storage addr ; size_t addrlen ; struct __kernel_sockaddr_storage daddr ; size_t daddrlen ; struct cache_deferred_req handle ; size_t xprt_hlen ; int argslen ; __be32 args[0U] ; }; struct svc_program { struct svc_program *pg_next ; u32 pg_prog ; unsigned int pg_lovers ; unsigned int pg_hivers ; unsigned int pg_nvers ; struct svc_version **pg_vers ; char *pg_name ; char *pg_class ; struct svc_stat *pg_stats ; int (*pg_authenticate)(struct svc_rqst * ) ; }; struct svc_version { u32 vs_vers ; u32 vs_nproc ; struct svc_procedure *vs_proc ; u32 vs_xdrsize ; unsigned char vs_hidden : 1 ; int (*vs_dispatch)(struct svc_rqst * , __be32 * ) ; }; struct svc_procedure { __be32 (*pc_func)(struct svc_rqst * , void * , void * ) ; int (*pc_decode)(void * , __be32 * , void * ) ; int (*pc_encode)(void * , __be32 * , void * ) ; int (*pc_release)(void * , __be32 * , void * ) ; unsigned int pc_argsize ; unsigned int pc_ressize ; unsigned int pc_count ; unsigned int pc_cachetype ; unsigned int pc_xdrressize ; }; struct pnfs_layout_segment { struct list_head pls_list ; struct list_head pls_lc_list ; struct pnfs_layout_range pls_range ; atomic_t pls_refcount ; unsigned long pls_flags ; struct pnfs_layout_hdr *pls_layout ; }; enum pnfs_try_status { PNFS_ATTEMPTED = 0, PNFS_NOT_ATTEMPTED = 1 } ; struct nfs4_deviceid_node; struct pnfs_layoutdriver_type { struct list_head pnfs_tblid ; u32 const id ; char const *name ; struct module *owner ; unsigned int flags ; int (*set_layoutdriver)(struct nfs_server * , struct nfs_fh const * ) ; int (*clear_layoutdriver)(struct nfs_server * ) ; struct pnfs_layout_hdr *(*alloc_layout_hdr)(struct inode * , gfp_t ) ; void (*free_layout_hdr)(struct pnfs_layout_hdr * ) ; struct pnfs_layout_segment *(*alloc_lseg)(struct pnfs_layout_hdr * , struct nfs4_layoutget_res * , gfp_t ) ; void (*free_lseg)(struct pnfs_layout_segment * ) ; struct nfs_pageio_ops const *pg_read_ops ; struct nfs_pageio_ops const *pg_write_ops ; struct pnfs_ds_commit_info *(*get_ds_info)(struct inode * ) ; void (*mark_request_commit)(struct nfs_page * , struct pnfs_layout_segment * , struct nfs_commit_info * ) ; void (*clear_request_commit)(struct nfs_page * , struct nfs_commit_info * ) ; int (*scan_commit_lists)(struct nfs_commit_info * , int ) ; void (*recover_commit_reqs)(struct list_head * , struct nfs_commit_info * ) ; int (*commit_pagelist)(struct inode * , struct list_head * , int , struct nfs_commit_info * ) ; enum pnfs_try_status (*read_pagelist)(struct nfs_read_data * ) ; enum pnfs_try_status (*write_pagelist)(struct nfs_write_data * , int ) ; void (*free_deviceid_node)(struct nfs4_deviceid_node * ) ; void (*encode_layoutreturn)(struct pnfs_layout_hdr * , struct xdr_stream * , struct nfs4_layoutreturn_args const * ) ; void (*cleanup_layoutcommit)(struct nfs4_layoutcommit_data * ) ; void (*encode_layoutcommit)(struct pnfs_layout_hdr * , struct xdr_stream * , struct nfs4_layoutcommit_args const * ) ; }; struct pnfs_layout_hdr { atomic_t plh_refcount ; struct list_head plh_layouts ; struct list_head plh_bulk_recall ; struct list_head 
plh_segs ; nfs4_stateid plh_stateid ; atomic_t plh_outstanding ; unsigned long plh_block_lgets ; u32 plh_barrier ; unsigned long plh_retry_timestamp ; unsigned long plh_flags ; loff_t plh_lwb ; struct rpc_cred *plh_lc_cred ; struct inode *plh_inode ; }; struct pnfs_device { struct nfs4_deviceid dev_id ; unsigned int layout_type ; unsigned int mincount ; struct page **pages ; unsigned int pgbase ; unsigned int pglen ; }; struct pnfs_devicelist { unsigned int eof ; unsigned int num_devs ; struct nfs4_deviceid dev_id[16U] ; }; struct nfs4_deviceid_node { struct hlist_node node ; struct hlist_node tmpnode ; struct pnfs_layoutdriver_type const *ld ; struct nfs_client const *nfs_client ; unsigned long flags ; unsigned long timestamp_unavailable ; struct nfs4_deviceid deviceid ; atomic_t ref ; }; struct net_generic { unsigned int len ; struct callback_head rcu ; void *ptr[0U] ; }; struct bl_dev_msg { int32_t status ; uint32_t major ; uint32_t minor ; }; struct rpc_pipe; struct nfs_net { struct cache_detail *nfs_dns_resolve ; struct rpc_pipe *bl_device_pipe ; struct bl_dev_msg bl_mount_reply ; wait_queue_head_t bl_wq ; struct list_head nfs_client_list ; struct list_head nfs_volume_list ; struct idr cb_ident_idr ; unsigned short nfs_callback_tcpport ; unsigned short nfs_callback_tcpport6 ; int cb_users[2U] ; spinlock_t nfs_client_lock ; struct timespec boot_time ; }; struct nfs4_slot_table; struct nfs4_slot { struct nfs4_slot_table *table ; struct nfs4_slot *next ; unsigned long generation ; u32 slot_nr ; u32 seq_nr ; unsigned char interrupted : 1 ; }; struct nfs4_slot_table { struct nfs4_session *session ; struct nfs4_slot *slots ; unsigned long used_slots[16U] ; spinlock_t slot_tbl_lock ; struct rpc_wait_queue slot_tbl_waitq ; u32 max_slots ; u32 max_slotid ; u32 highest_used_slotid ; u32 target_highest_slotid ; u32 server_highest_slotid ; s32 d_target_highest_slotid ; s32 d2_target_highest_slotid ; unsigned long generation ; struct completion complete ; }; struct nfs4_session { struct nfs4_sessionid sess_id ; u32 flags ; unsigned long session_state ; u32 hash_alg ; u32 ssv_len ; struct nfs4_channel_attrs fc_attrs ; struct nfs4_slot_table fc_slot_table ; struct nfs4_channel_attrs bc_attrs ; struct nfs4_slot_table bc_slot_table ; struct nfs_client *clp ; unsigned int fc_target_max_rqst_sz ; unsigned int fc_target_max_resp_sz ; }; struct __anonstruct_super_269 { unsigned long s_flags ; }; struct __anonstruct_nfs_server_270 { struct nfs_fsid fsid ; int flags ; unsigned int rsize ; unsigned int wsize ; unsigned int acregmin ; unsigned int acregmax ; unsigned int acdirmin ; unsigned int acdirmax ; }; struct __anonstruct_rpc_auth_271 { rpc_authflavor_t au_flavor ; }; struct __anonstruct_key_268 { struct __anonstruct_super_269 super ; struct __anonstruct_nfs_server_270 nfs_server ; struct __anonstruct_rpc_auth_271 rpc_auth ; u8 uniq_len ; char uniquifier[0U] ; }; struct nfs_fscache_key { struct rb_node node ; struct nfs_client *nfs_client ; struct __anonstruct_key_268 key ; }; struct nfs4_opendata; struct nfs41_call_sync_data { struct nfs_server const *seq_server ; struct nfs4_sequence_args *seq_args ; struct nfs4_sequence_res *seq_res ; }; struct nfs4_opendata { struct kref kref ; struct nfs_openargs o_arg ; struct nfs_openres o_res ; struct nfs_open_confirmargs c_arg ; struct nfs_open_confirmres c_res ; struct nfs4_string owner_name ; struct nfs4_string group_name ; struct nfs_fattr f_attr ; struct dentry *dir ; struct dentry *dentry ; struct nfs4_state_owner *owner ; struct nfs4_state *state ; struct 
iattr attrs ; unsigned long timestamp ; unsigned char rpc_done : 1 ; int rpc_status ; int cancelled ; }; struct nfs4_closedata { struct inode *inode ; struct nfs4_state *state ; struct nfs_closeargs arg ; struct nfs_closeres res ; struct nfs_fattr fattr ; unsigned long timestamp ; bool roc ; u32 roc_barrier ; }; struct nfs4_createdata { struct rpc_message msg ; struct nfs4_create_arg arg ; struct nfs4_create_res res ; struct nfs_fh fh ; struct nfs_fattr fattr ; }; struct nfs4_renewdata { struct nfs_client *client ; unsigned long timestamp ; }; struct nfs4_cached_acl { int cached ; size_t len ; char data[0U] ; }; struct nfs4_delegreturndata { struct nfs4_delegreturnargs args ; struct nfs4_delegreturnres res ; struct nfs_fh fh ; nfs4_stateid stateid ; unsigned long timestamp ; struct nfs_fattr fattr ; int rpc_status ; }; struct nfs4_unlockdata { struct nfs_locku_args arg ; struct nfs_locku_res res ; struct nfs4_lock_state *lsp ; struct nfs_open_context *ctx ; struct file_lock fl ; struct nfs_server const *server ; unsigned long timestamp ; }; struct nfs4_lockdata { struct nfs_lock_args arg ; struct nfs_lock_res res ; struct nfs4_lock_state *lsp ; struct nfs_open_context *ctx ; struct file_lock fl ; unsigned long timestamp ; int rpc_status ; int cancelled ; struct nfs_server *server ; }; struct nfs_release_lockowner_data { struct nfs4_lock_state *lsp ; struct nfs_server *server ; struct nfs_release_lockowner_args args ; }; struct nfs4_get_lease_time_data { struct nfs4_get_lease_time_args *args ; struct nfs4_get_lease_time_res *res ; struct nfs_client *clp ; }; struct nfs4_sequence_data { struct nfs_client *clp ; struct nfs4_sequence_args args ; struct nfs4_sequence_res res ; }; struct nfs4_reclaim_complete_data { struct nfs_client *clp ; struct nfs41_reclaim_complete_args arg ; struct nfs41_reclaim_complete_res res ; }; typedef int ldv_func_ret_type___2; enum hrtimer_restart; enum nfs_opnum4 { OP_ACCESS = 3, OP_CLOSE = 4, OP_COMMIT = 5, OP_CREATE = 6, OP_DELEGPURGE = 7, OP_DELEGRETURN = 8, OP_GETATTR = 9, OP_GETFH = 10, OP_LINK = 11, OP_LOCK = 12, OP_LOCKT = 13, OP_LOCKU = 14, OP_LOOKUP = 15, OP_LOOKUPP = 16, OP_NVERIFY = 17, OP_OPEN = 18, OP_OPENATTR = 19, OP_OPEN_CONFIRM = 20, OP_OPEN_DOWNGRADE = 21, OP_PUTFH = 22, OP_PUTPUBFH = 23, OP_PUTROOTFH = 24, OP_READ = 25, OP_READDIR = 26, OP_READLINK = 27, OP_REMOVE = 28, OP_RENAME = 29, OP_RENEW = 30, OP_RESTOREFH = 31, OP_SAVEFH = 32, OP_SECINFO = 33, OP_SETATTR = 34, OP_SETCLIENTID = 35, OP_SETCLIENTID_CONFIRM = 36, OP_VERIFY = 37, OP_WRITE = 38, OP_RELEASE_LOCKOWNER = 39, OP_BACKCHANNEL_CTL = 40, OP_BIND_CONN_TO_SESSION = 41, OP_EXCHANGE_ID = 42, OP_CREATE_SESSION = 43, OP_DESTROY_SESSION = 44, OP_FREE_STATEID = 45, OP_GET_DIR_DELEGATION = 46, OP_GETDEVICEINFO = 47, OP_GETDEVICELIST = 48, OP_LAYOUTCOMMIT = 49, OP_LAYOUTGET = 50, OP_LAYOUTRETURN = 51, OP_SECINFO_NO_NAME = 52, OP_SEQUENCE = 53, OP_SET_SSV = 54, OP_TEST_STATEID = 55, OP_WANT_DELEGATION = 56, OP_DESTROY_CLIENTID = 57, OP_RECLAIM_COMPLETE = 58, OP_ILLEGAL = 10044 } ; typedef u64 clientid4; struct compound_hdr { int32_t status ; uint32_t nops ; __be32 *nops_p ; uint32_t taglen ; char *tag ; uint32_t replen ; u32 minorversion ; }; struct __anonstruct_nfs_errtbl_265 { int stat ; int errno ; }; typedef int ldv_func_ret_type___4; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; struct nfs_subversion { struct module *owner ; struct file_system_type *nfs_fs ; struct rpc_version const *rpc_vers ; struct nfs_rpc_ops const *rpc_ops ; struct super_operations const *sops ; 
struct xattr_handler const **xattr ; struct list_head list ; }; struct nfs_referral_count { struct list_head list ; struct task_struct const *task ; unsigned int referral_count ; }; enum hrtimer_restart; enum hrtimer_restart; struct match_token { int token ; char const *pattern ; }; struct __anonstruct_substring_t_31 { char *from ; char *to ; }; typedef struct __anonstruct_substring_t_31 substring_t; enum hrtimer_restart; struct idmap_msg { __u8 im_type ; __u8 im_conv ; char im_name[128U] ; __u32 im_id ; __u8 im_status ; }; struct rpc_pipe_msg { struct list_head list ; void *data ; size_t len ; size_t copied ; int errno ; }; struct rpc_pipe_ops { ssize_t (*upcall)(struct file * , struct rpc_pipe_msg * , char * , size_t ) ; ssize_t (*downcall)(struct file * , char const * , size_t ) ; void (*release_pipe)(struct inode * ) ; int (*open_pipe)(struct inode * ) ; void (*destroy_msg)(struct rpc_pipe_msg * ) ; }; struct rpc_pipe { struct list_head pipe ; struct list_head in_upcall ; struct list_head in_downcall ; int pipelen ; int nreaders ; int nwriters ; int flags ; struct delayed_work queue_timeout ; struct rpc_pipe_ops const *ops ; spinlock_t lock ; struct dentry *dentry ; }; struct rpc_inode { struct inode vfs_inode ; void *private ; struct rpc_pipe *pipe ; wait_queue_head_t waitq ; }; struct key_construction { struct key *key ; struct key *authkey ; }; struct key_preparsed_payload { char *description ; void *type_data[2U] ; void *payload ; void const *data ; size_t datalen ; size_t quotalen ; }; struct key_type { char const *name ; size_t def_datalen ; int (*vet_description)(char const * ) ; int (*preparse)(struct key_preparsed_payload * ) ; void (*free_preparse)(struct key_preparsed_payload * ) ; int (*instantiate)(struct key * , struct key_preparsed_payload * ) ; int (*update)(struct key * , struct key_preparsed_payload * ) ; int (*match)(struct key const * , void const * ) ; void (*revoke)(struct key * ) ; void (*destroy)(struct key * ) ; void (*describe)(struct key const * , struct seq_file * ) ; long (*read)(struct key const * , char * , size_t ) ; int (*request_key)(struct key_construction * , char const * , void * ) ; struct list_head link ; struct lock_class_key lock_class ; }; struct user_key_payload { struct callback_head rcu ; unsigned short datalen ; char data[0U] ; }; struct idmap_legacy_upcalldata { struct rpc_pipe_msg pipe_msg ; struct idmap_msg idmap_msg ; struct key_construction *key_cons ; struct idmap *idmap ; }; struct idmap { struct rpc_pipe *idmap_pipe ; struct idmap_legacy_upcalldata *idmap_upcall_data ; struct mutex idmap_mutex ; }; struct __wait_queue; typedef struct __wait_queue wait_queue_t; struct __wait_queue { unsigned int flags ; void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; enum hrtimer_restart; struct svc_xprt_ops { struct svc_xprt *(*xpo_create)(struct svc_serv * , struct net * , struct sockaddr * , int , int ) ; struct svc_xprt *(*xpo_accept)(struct svc_xprt * ) ; int (*xpo_has_wspace)(struct svc_xprt * ) ; int (*xpo_recvfrom)(struct svc_rqst * ) ; void (*xpo_prep_reply_hdr)(struct svc_rqst * ) ; int (*xpo_sendto)(struct svc_rqst * ) ; void (*xpo_release_rqst)(struct svc_rqst * ) ; void (*xpo_detach)(struct svc_xprt * ) ; void (*xpo_free)(struct svc_xprt * ) ; }; struct svc_xprt_class { char const *xcl_name ; struct module *xcl_owner ; struct svc_xprt_ops *xcl_ops ; struct list_head xcl_list ; u32 xcl_max_payload ; }; struct svc_xprt { struct svc_xprt_class *xpt_class ; struct svc_xprt_ops 
*xpt_ops ; struct kref xpt_ref ; struct list_head xpt_list ; struct list_head xpt_ready ; unsigned long xpt_flags ; struct svc_serv *xpt_server ; atomic_t xpt_reserved ; struct mutex xpt_mutex ; spinlock_t xpt_lock ; void *xpt_auth_cache ; struct list_head xpt_deferred ; struct __kernel_sockaddr_storage xpt_local ; size_t xpt_locallen ; struct __kernel_sockaddr_storage xpt_remote ; size_t xpt_remotelen ; struct rpc_wait_queue xpt_bc_pending ; struct list_head xpt_users ; struct net *xpt_net ; struct rpc_xprt *xpt_bc_xprt ; }; struct nfs_callback_data { unsigned int users ; struct svc_serv *serv ; struct svc_rqst *rqst ; struct task_struct *task ; }; enum hrtimer_restart; struct cb_process_state { __be32 drc_status ; struct nfs_client *clp ; u32 slotid ; struct net *net ; }; struct cb_compound_hdr_arg { unsigned int taglen ; char const *tag ; unsigned int minorversion ; unsigned int cb_ident ; unsigned int nops ; }; struct cb_compound_hdr_res { __be32 *status ; unsigned int taglen ; char const *tag ; __be32 *nops ; }; struct cb_getattrargs { struct sockaddr *addr ; struct nfs_fh fh ; uint32_t bitmap[2U] ; }; struct cb_getattrres { __be32 status ; uint32_t bitmap[2U] ; uint64_t size ; uint64_t change_attr ; struct timespec ctime ; struct timespec mtime ; }; struct cb_recallargs { struct sockaddr *addr ; struct nfs_fh fh ; nfs4_stateid stateid ; uint32_t truncate ; }; struct referring_call { uint32_t rc_sequenceid ; uint32_t rc_slotid ; }; struct referring_call_list { struct nfs4_sessionid rcl_sessionid ; uint32_t rcl_nrefcalls ; struct referring_call *rcl_refcalls ; }; struct cb_sequenceargs { struct sockaddr *csa_addr ; struct nfs4_sessionid csa_sessionid ; uint32_t csa_sequenceid ; uint32_t csa_slotid ; uint32_t csa_highestslotid ; uint32_t csa_cachethis ; uint32_t csa_nrclists ; struct referring_call_list *csa_rclists ; }; struct cb_sequenceres { __be32 csr_status ; struct nfs4_sessionid csr_sessionid ; uint32_t csr_sequenceid ; uint32_t csr_slotid ; uint32_t csr_highestslotid ; uint32_t csr_target_highestslotid ; }; struct cb_recallanyargs { struct sockaddr *craa_addr ; uint32_t craa_objs_to_keep ; uint32_t craa_type_mask ; }; struct cb_recallslotargs { struct sockaddr *crsa_addr ; uint32_t crsa_target_highest_slotid ; }; struct __anonstruct_ldv_48218_260 { struct nfs_fh cbl_fh ; struct pnfs_layout_range cbl_range ; nfs4_stateid cbl_stateid ; }; union __anonunion_ldv_48220_259 { struct __anonstruct_ldv_48218_260 ldv_48218 ; struct nfs_fsid cbl_fsid ; }; struct cb_layoutrecallargs { struct sockaddr *cbl_addr ; uint32_t cbl_recall_type ; uint32_t cbl_layout_type ; uint32_t cbl_layoutchanged ; union __anonunion_ldv_48220_259 ldv_48220 ; }; struct cb_devicenotifyitem { uint32_t cbd_notify_type ; uint32_t cbd_layout_type ; struct nfs4_deviceid cbd_dev_id ; uint32_t cbd_immediate ; }; struct cb_devicenotifyargs { int ndevs ; struct cb_devicenotifyitem *devs ; }; struct callback_op { __be32 (*process_op)(void * , void * , struct cb_process_state * ) ; __be32 (*decode_args)(struct svc_rqst * , struct xdr_stream * , void * ) ; __be32 (*encode_res)(struct svc_rqst * , struct xdr_stream * , void * ) ; long res_maxsize ; }; enum hrtimer_restart; struct __anonstruct___kernel_fsid_t_5 { int val[2U] ; }; typedef struct __anonstruct___kernel_fsid_t_5 __kernel_fsid_t; struct in_addr { __be32 s_addr ; }; struct sockaddr_in { __kernel_sa_family_t sin_family ; __be16 sin_port ; struct in_addr sin_addr ; unsigned char __pad[8U] ; }; enum hrtimer_restart; struct xdr_netobj { unsigned int len ; u8 *data ; }; 
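/* Hedged illustration, not part of the CIL-generated driver code: a minimal sketch of how a
 * struct callback_op slot (declared just above) is typically populated for the NFSv4 callback
 * server dispatch table.  The names ldv_example_process_op, ldv_example_decode_args,
 * ldv_example_encode_res and ldv_example_noop_op are hypothetical and exist only for this
 * illustration; a real handler would inspect its argument/result structures and the
 * cb_process_state instead of unconditionally reporting success. */
static __be32 ldv_example_process_op(void *argp , void *resp , struct cb_process_state *cps )
{
  /* Stub: report NFS4_OK without touching argp/resp/cps. */
  return ((__be32 )0);
}
static __be32 ldv_example_decode_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , void *argp )
{
  /* Stub: pretend the XDR arguments decoded cleanly. */
  return ((__be32 )0);
}
static __be32 ldv_example_encode_res(struct svc_rqst *rqstp , struct xdr_stream *xdr , void *resp )
{
  /* Stub: pretend the XDR results encoded cleanly. */
  return ((__be32 )0);
}
static struct callback_op ldv_example_noop_op = { & ldv_example_process_op, & ldv_example_decode_args,
                                                  & ldv_example_encode_res, 0L };
/* End of hedged illustration. */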
struct sockaddr_in6 { unsigned short sin6_family ; __be16 sin6_port ; __be32 sin6_flowinfo ; struct in6_addr sin6_addr ; __u32 sin6_scope_id ; }; struct gss_api_mech; struct gss_ctx { struct gss_api_mech *mech_type ; void *internal_ctx_id ; }; struct pf_desc { u32 pseudoflavor ; u32 service ; char *name ; char *auth_domain_name ; }; struct gss_api_ops; struct gss_api_mech { struct list_head gm_list ; struct module *gm_owner ; struct xdr_netobj gm_oid ; char *gm_name ; struct gss_api_ops const *gm_ops ; int gm_pf_num ; struct pf_desc *gm_pfs ; char const *gm_upcall_enctypes ; }; struct gss_api_ops { int (*gss_import_sec_context)(void const * , size_t , struct gss_ctx * , gfp_t ) ; u32 (*gss_get_mic)(struct gss_ctx * , struct xdr_buf * , struct xdr_netobj * ) ; u32 (*gss_verify_mic)(struct gss_ctx * , struct xdr_buf * , struct xdr_netobj * ) ; u32 (*gss_wrap)(struct gss_ctx * , int , struct xdr_buf * , struct page ** ) ; u32 (*gss_unwrap)(struct gss_ctx * , int , struct xdr_buf * ) ; void (*gss_delete_sec_context)(void * ) ; }; struct kstatfs { long f_type ; long f_bsize ; u64 f_blocks ; u64 f_bfree ; u64 f_bavail ; u64 f_files ; u64 f_ffree ; __kernel_fsid_t f_fsid ; long f_namelen ; long f_frsize ; long f_flags ; long f_spare[4U] ; }; enum hrtimer_restart; enum hrtimer_restart; typedef struct ctl_table ctl_table; enum hrtimer_restart; enum hrtimer_restart; enum hrtimer_restart; enum pnfs_iomode { IOMODE_READ = 1, IOMODE_RW = 2, IOMODE_ANY = 3 } ; struct nfs_read_header { struct nfs_pgio_header header ; struct nfs_read_data rpc_data ; }; struct nfs_write_header { struct nfs_pgio_header header ; struct nfs_write_data rpc_data ; struct nfs_writeverf verf ; }; enum hrtimer_restart; long ldv__builtin_expect(long exp , long c ) ; __inline static void set_bit(unsigned int nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void clear_bit(int nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static void clear_bit_unlock(unsigned int nr , unsigned long volatile *addr ) { { __asm__ volatile ("": : : "memory"); clear_bit((int )nr, addr); return; } } __inline static int test_and_clear_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return (oldbit); } } __inline static int constant_test_bit(unsigned int nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr / 64U)) >> ((int )nr & 63)) & 1); } } __inline static int variable_test_bit(int nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } } __inline static __u64 __arch_swab64(__u64 val ) { { __asm__ ("bswapq %0": "=r" (val): "0" (val)); return (val); } } __inline static __u64 __fswab64(__u64 val ) { __u64 tmp ; { tmp = __arch_swab64(val); return (tmp); } } extern int printk(char const * , ...) ; extern void __might_sleep(char const * , int , int ) ; extern int scnprintf(char * , size_t , char const * , ...) 
; __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_del_entry(struct list_head * ) ; __inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } } __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } extern void __bad_percpu_size(void) ; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_2861; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2861; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2861; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2861; default: __bad_percpu_size(); } ldv_2861: ; return (pfo_ret__); } } extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern int memcmp(void const * , void const * , size_t ) ; extern int strcmp(char const * , char const * ) ; extern void warn_slowpath_null(char const * , int const ) ; __inline static void *ERR_PTR(long error ) { { return ((void *)error); } } __inline static long PTR_ERR(void const *ptr ) { { return ((long )ptr); } } __inline static long IS_ERR(void const *ptr ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )ptr > 0xfffffffffffff000UL, 0L); return (tmp); } } __inline static void *ERR_CAST(void const *ptr ) { { return ((void *)ptr); } } extern void __cmpxchg_wrong_size(void) ; __inline static int atomic_read(atomic_t const *v ) { { return ((int )*((int volatile *)(& v->counter))); } } __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static int atomic_sub_and_test(int i , atomic_t *v ) { unsigned char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2,%0; sete %1": "+m" (v->counter), "=qm" (c): "ir" (i): "memory"); return ((int )c); } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static int atomic_cmpxchg(atomic_t *v , int old , int new ) { int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { __old = old; __new = new; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5494; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5494; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5494; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile 
(".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5494; default: __cmpxchg_wrong_size(); } ldv_5494: ; return (__ret); } } __inline static int __atomic_add_unless(atomic_t *v , int a , int u ) { int c ; int old ; long tmp ; long tmp___0 ; { c = atomic_read((atomic_t const *)v); ldv_5523: tmp = ldv__builtin_expect(c == u, 0L); if (tmp != 0L) { goto ldv_5522; } else { } old = atomic_cmpxchg(v, c, c + a); tmp___0 = ldv__builtin_expect(old == c, 1L); if (tmp___0 != 0L) { goto ldv_5522; } else { } c = old; goto ldv_5523; ldv_5522: ; return (c); } } __inline static int atomic_add_unless(atomic_t *v , int a , int u ) { int tmp ; { tmp = __atomic_add_unless(v, a, u); return (tmp != u); } } extern void lock_acquire(struct lockdep_map * , unsigned int , int , int , int , struct lockdep_map * , unsigned long ) ; extern void lock_release(struct lockdep_map * , int , unsigned long ) ; extern int lock_is_held(struct lockdep_map * ) ; extern void lockdep_rcu_suspicious(char const * , int const , char const * ) ; extern int mutex_trylock(struct mutex * ) ; int ldv_mutex_trylock_4(struct mutex *ldv_func_arg1 ) ; extern void mutex_unlock(struct mutex * ) ; void ldv_mutex_unlock_2(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_5(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_7(struct mutex *ldv_func_arg1 ) ; extern void mutex_lock(struct mutex * ) ; void ldv_mutex_lock_1(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_3(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_6(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_cred_guard_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_cred_guard_mutex(struct mutex *lock ) ; void ldv_mutex_lock_lock(struct mutex *lock ) ; void ldv_mutex_unlock_lock(struct mutex *lock ) ; void ldv_mutex_lock_mutex(struct mutex *lock ) ; int ldv_mutex_trylock_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_mutex(struct mutex *lock ) ; extern unsigned long kernel_stack ; __inline static struct thread_info *current_thread_info(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6221; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6221; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6221; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6221; default: __bad_percpu_size(); } ldv_6221: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static int test_ti_thread_flag(struct thread_info *ti , int flag ) { int tmp ; { tmp = variable_test_bit(flag, (unsigned long const volatile *)(& ti->flags)); return (tmp); } } extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->ldv_5961.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->ldv_5961.rlock); return; } } extern void wake_up_bit(void * , int ) ; __inline static void write_seqlock(seqlock_t *sl ) { { spin_lock(& sl->lock); sl->sequence = sl->sequence + 1U; __asm__ volatile ("": : : "memory"); return; } } __inline static void write_sequnlock(seqlock_t *sl ) { { __asm__ volatile ("": : : "memory"); sl->sequence = sl->sequence + 1U; spin_unlock(& sl->lock); return; } 
} extern void down_read(struct rw_semaphore * ) ; extern void up_read(struct rw_semaphore * ) ; __inline static void __rcu_read_lock(void) { struct thread_info *tmp ; { tmp = current_thread_info(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } extern int rcu_is_cpu_idle(void) ; extern bool rcu_lockdep_current_cpu_online(void) ; __inline static void rcu_lock_acquire(struct lockdep_map *map ) { { lock_acquire(map, 0U, 0, 2, 1, 0, (unsigned long )((void *)0)); return; } } __inline static void rcu_lock_release(struct lockdep_map *map ) { { lock_release(map, 1, (unsigned long )((void *)0)); return; } } extern struct lockdep_map rcu_lock_map ; extern int debug_lockdep_rcu_enabled(void) ; __inline static int rcu_read_lock_held(void) { int tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { return (0); } else { } tmp___1 = rcu_lockdep_current_cpu_online(); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___3 = lock_is_held(& rcu_lock_map); return (tmp___3); } } __inline static void rcu_read_lock(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock(); return; } } extern unsigned long volatile jiffies ; extern void __bad_size_call_parameter(void) ; extern struct page *alloc_pages_current(gfp_t , unsigned int ) ; __inline static struct page *alloc_pages(gfp_t gfp_mask , unsigned int order ) { struct page *tmp ; { tmp = alloc_pages_current(gfp_mask, order); return (tmp); } } extern void __free_pages(struct page * , unsigned int ) ; extern void put_page(struct page * ) ; __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 80L) << 12) + 0xffff880000000000UL)); } } extern void msleep(unsigned int ) ; __inline static void ssleep(unsigned int seconds ) { { msleep(seconds * 1000U); return; } } extern int ___ratelimit(struct ratelimit_state * , char const * ) ; extern void kfree(void const * ) ; __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } __inline static void kref_get(struct kref *kref ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = atomic_read((atomic_t const *)(& kref->refcount)); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 42); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); atomic_inc(& kref->refcount); return; } } __inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 67); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } } extern int __VERIFIER_nondet_int(void); extern void abort(void); void assume_abort_if_not(int cond) { if(!cond) {abort();} } extern void *malloc(size_t size); long ldv_is_err(const void *ptr) { return ((unsigned long)ptr > ((unsigned long)-4095)); } void *ldv_malloc(size_t size) { if (__VERIFIER_nondet_int()) { void *res = malloc(size); assume_abort_if_not(!ldv_is_err(res)); return res; } else { return ((void *)0); } } void *__kmalloc(size_t size, gfp_t t) { return ldv_malloc(size); } void *ldv_malloc(size_t size ) ; __inline static void *kmalloc(size_t size , gfp_t flags ) { void *tmp___2 ; { tmp___2 = __kmalloc(size, flags); return (tmp___2); } } __inline static void *kmalloc_array(size_t n , size_t size , gfp_t flags ) { void *tmp ; { if (size != 0UL && 0xffffffffffffffffUL / size < n) { return (0); } else { } tmp = __kmalloc(n * size, flags); return (tmp); } } void *ldv_calloc(size_t nmemb , size_t size ) ; __inline static void *kcalloc(size_t n , size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc_array(n, size, flags | 32768U); return (tmp); } } void *ldv_zalloc(size_t size ) ; __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc(size, flags | 32768U); return (tmp); 
} } extern unsigned int nfs_debug ; __inline static void put_unaligned_be64(u64 val , void *p ) { __u64 tmp ; { tmp = __fswab64(val); *((__be64 *)p) = tmp; return; } } __inline static __be32 *xdr_encode_hyper(__be32 *p , __u64 val ) { { put_unaligned_be64(val, (void *)p); return (p + 2UL); } } extern void _copy_from_pages(char * , struct page ** , size_t , size_t ) ; extern struct rpc_task *rpc_run_task(struct rpc_task_setup const * ) ; extern void rpc_put_task(struct rpc_task * ) ; extern void rpc_put_task_async(struct rpc_task * ) ; extern void rpc_exit(struct rpc_task * , int ) ; extern void rpc_sleep_on(struct rpc_wait_queue * , struct rpc_task * , void (*)(struct rpc_task * ) ) ; extern void rpc_sleep_on_priority(struct rpc_wait_queue * , struct rpc_task * , void (*)(struct rpc_task * ) , int ) ; extern void rpc_wake_up_queued_task(struct rpc_wait_queue * , struct rpc_task * ) ; extern void rpc_delay(struct rpc_task * , unsigned long ) ; extern int __rpc_wait_for_completion_task(struct rpc_task * , int (*)(void * ) ) ; __inline static int rpc_wait_for_completion_task(struct rpc_task *task ) { int tmp ; { tmp = __rpc_wait_for_completion_task(task, 0); return (tmp); } } extern struct rpc_auth *rpcauth_create(rpc_authflavor_t , struct rpc_clnt * ) ; extern int rpcauth_list_flavors(rpc_authflavor_t * , int ) ; extern void put_rpccred(struct rpc_cred * ) ; extern void d_instantiate(struct dentry * , struct inode * ) ; extern void d_drop(struct dentry * ) ; extern void d_rehash(struct dentry * ) ; __inline static void d_add(struct dentry *entry , struct inode *inode ) { { d_instantiate(entry, inode); d_rehash(entry); return; } } __inline static struct dentry *dget_dlock(struct dentry *dentry ) { { if ((unsigned long )dentry != (unsigned long )((struct dentry *)0)) { dentry->d_count = dentry->d_count + 1U; } else { } return (dentry); } } __inline static struct dentry *dget(struct dentry *dentry ) { { if ((unsigned long )dentry != (unsigned long )((struct dentry *)0)) { spin_lock(& dentry->d_lock); dget_dlock(dentry); spin_unlock(& dentry->d_lock); } else { } return (dentry); } } extern struct dentry *dget_parent(struct dentry * ) ; extern void dput(struct dentry * ) ; extern int posix_lock_file_wait(struct file * , struct file_lock * ) ; extern int flock_lock_file_wait(struct file * , struct file_lock * ) ; extern int current_umask(void) ; extern void iput(struct inode * ) ; extern struct inode *igrab(struct inode * ) ; __inline static int sigismember(sigset_t *set , int _sig ) { unsigned long sig ; { sig = (unsigned long )(_sig + -1); return ((int )(set->sig[0] >> (int )sig) & 1); } } extern long schedule_timeout_killable(long ) ; __inline static int test_tsk_thread_flag(struct task_struct *tsk , int flag ) { int tmp ; { tmp = test_ti_thread_flag((struct thread_info *)tsk->stack, flag); return (tmp); } } __inline static int signal_pending(struct task_struct *p ) { int tmp ; long tmp___0 ; { tmp = test_tsk_thread_flag(p, 2); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); return ((int )tmp___0); } } __inline static int __fatal_signal_pending(struct task_struct *p ) { int tmp ; long tmp___0 ; { tmp = sigismember(& p->pending.signal, 9); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); return ((int )tmp___0); } } __inline static int fatal_signal_pending(struct task_struct *p ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = signal_pending(p); if (tmp != 0) { tmp___0 = __fatal_signal_pending(p); if (tmp___0 != 0) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } return (tmp___1); } } 
extern ssize_t generic_getxattr(struct dentry * , char const * , void * , size_t ) ; extern ssize_t generic_listxattr(struct dentry * , char * , size_t ) ; extern int generic_setxattr(struct dentry * , char const * , void const * , size_t , int ) ; extern int generic_removexattr(struct dentry * , char const * ) ; extern struct rpc_clnt *rpc_clone_client(struct rpc_clnt * ) ; extern void rpc_shutdown_client(struct rpc_clnt * ) ; extern void rpc_call_start(struct rpc_task * ) ; extern int rpc_call_async(struct rpc_clnt * , struct rpc_message const * , int , struct rpc_call_ops const * , void * ) ; extern int rpc_call_sync(struct rpc_clnt * , struct rpc_message const * , int ) ; extern int rpc_restart_call_prepare(struct rpc_task * ) ; extern int rpc_restart_call(struct rpc_task * ) ; extern char const *rpc_peeraddr2str(struct rpc_clnt * , enum rpc_display_format_t ) ; __inline static void nfs_copy_fh(struct nfs_fh *target , struct nfs_fh const *source ) { size_t __len ; void *__ret ; { target->size = source->size; __len = (size_t )source->size; __ret = memcpy((void *)(& target->data), (void const *)(& source->data), __len); return; } } __inline static bool seqid_mutating_err(u32 err ) { { switch (err) { case 10022U: ; case 10023U: ; case 10025U: ; case 10026U: ; case 10036U: ; case 10018U: ; case 10020U: ; return (0); } return (1); } } __inline static void *kmap_atomic(struct page *page ) { void *tmp ; { __rcu_read_lock(); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void __kunmap_atomic(void *addr ) { { __rcu_read_unlock(); return; } } __inline static int nfs_fsid_equal(struct nfs_fsid const *a , struct nfs_fsid const *b ) { { return ((unsigned long long )a->major == (unsigned long long )b->major && (unsigned long long )a->minor == (unsigned long long )b->minor); } } struct nfs_rpc_ops const nfs_v4_clientops ; __inline static struct nfs_inode *NFS_I(struct inode const *inode ) { struct inode const *__mptr ; { __mptr = inode; return ((struct nfs_inode *)__mptr + 0xfffffffffffffd80UL); } } __inline static struct nfs_server *NFS_SB(struct super_block const *s ) { { return ((struct nfs_server *)s->s_fs_info); } } __inline static struct nfs_fh *NFS_FH(struct inode const *inode ) { struct nfs_inode *tmp ; { tmp = NFS_I(inode); return (& tmp->fh); } } __inline static struct nfs_server *NFS_SERVER(struct inode const *inode ) { struct nfs_server *tmp ; { tmp = NFS_SB((struct super_block const *)inode->i_sb); return (tmp); } } __inline static struct rpc_clnt *NFS_CLIENT(struct inode const *inode ) { struct nfs_server *tmp ; { tmp = NFS_SERVER(inode); return (tmp->client); } } __inline static __u64 NFS_FILEID(struct inode const *inode ) { struct nfs_inode *tmp ; { tmp = NFS_I(inode); return (tmp->fileid); } } __inline static void nfs_set_verifier(struct dentry *dentry , unsigned long verf ) { { dentry->d_time = verf; return; } } __inline static unsigned long nfs_save_change_attribute(struct inode *dir ) { struct nfs_inode *tmp ; { tmp = NFS_I((struct inode const *)dir); return (tmp->cache_change_attribute); } } extern void nfs_invalidate_atime(struct inode * ) ; extern struct inode *nfs_fhget(struct super_block * , struct nfs_fh * , struct nfs_fattr * ) ; extern int nfs_refresh_inode(struct inode * , struct nfs_fattr * ) ; extern int nfs_post_op_update_inode(struct inode * , struct nfs_fattr * ) ; extern int nfs_post_op_update_inode_force_wcc(struct inode * , struct nfs_fattr * ) ; extern int nfs_getattr(struct vfsmount * , struct dentry * , struct kstat * ) 
; extern void nfs_access_add_cache(struct inode * , struct nfs_access_entry * ) ; extern void nfs_access_set_mask(struct nfs_access_entry * , u32 ) ; extern int nfs_permission(struct inode * , int ) ; extern int nfs_revalidate_inode(struct nfs_server * , struct inode * ) ; extern int nfs_setattr(struct dentry * , struct iattr * ) ; extern void nfs_setattr_update_inode(struct inode * , struct iattr * ) ; extern struct nfs_open_context *get_nfs_open_context(struct nfs_open_context * ) ; extern void put_nfs_open_context(struct nfs_open_context * ) ; extern struct nfs_open_context *alloc_nfs_open_context(struct dentry * , fmode_t ) ; extern void nfs_fattr_init(struct nfs_fattr * ) ; extern struct nfs_fattr *nfs_alloc_fattr(void) ; __inline static void nfs_free_fattr(struct nfs_fattr const *fattr ) { { kfree((void const *)fattr); return; } } struct file_operations const nfs4_file_operations ; __inline static struct nfs_open_context *nfs_file_open_context(struct file *filp ) { { return ((struct nfs_open_context *)filp->private_data); } } extern void nfs_force_lookup_revalidate(struct inode * ) ; extern int nfs_instantiate(struct dentry * , struct nfs_fh * , struct nfs_fattr * ) ; extern int nfs_may_open(struct inode * , struct rpc_cred * , int ) ; extern void nfs_access_zap_cache(struct inode * ) ; void nfs_fattr_init_names(struct nfs_fattr *fattr , struct nfs4_string *owner_name , struct nfs4_string *group_name ) ; void nfs_fattr_free_names(struct nfs_fattr *fattr ) ; void nfs_fattr_map_and_free_names(struct nfs_server *server , struct nfs_fattr *fattr ) ; extern atomic_t system_freezing_cnt ; extern bool freezing_slow_path(struct task_struct * ) ; __inline static bool freezing(struct task_struct *p ) { int tmp ; long tmp___0 ; bool tmp___1 ; { tmp = atomic_read((atomic_t const *)(& system_freezing_cnt)); tmp___0 = ldv__builtin_expect(tmp == 0, 1L); if (tmp___0 != 0L) { return (0); } else { } tmp___1 = freezing_slow_path(p); return (tmp___1); } } extern bool __refrigerator(bool ) ; __inline static bool try_to_freeze(void) { struct task_struct *tmp ; bool tmp___0 ; int tmp___1 ; long tmp___2 ; bool tmp___3 ; { __might_sleep("include/linux/freezer.h", 46, 0); tmp = get_current(); tmp___0 = freezing(tmp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } tmp___2 = ldv__builtin_expect((long )tmp___1, 1L); if (tmp___2 != 0L) { return (0); } else { } tmp___3 = __refrigerator(0); return (tmp___3); } } __inline static void freezer_do_not_count(void) { struct task_struct *tmp ; { tmp = get_current(); tmp->flags = tmp->flags | 1073741824U; return; } } __inline static void freezer_count(void) { struct task_struct *tmp ; { tmp = get_current(); tmp->flags = tmp->flags & 3221225471U; __asm__ volatile ("mfence": : : "memory"); try_to_freeze(); return; } } __inline static void nfs_confirm_seqid(struct nfs_seqid_counter *seqid , int status ) { bool tmp ; { tmp = seqid_mutating_err((u32 )(- status)); if ((int )tmp) { seqid->flags = seqid->flags | 1; } else { } return; } } extern struct dentry_operations const nfs4_dentry_operations ; extern int nfs_atomic_open(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors ) ; struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt , struct inode *inode , struct qstr *name ) ; struct vfsmount *nfs4_submount(struct nfs_server *server , struct dentry *dentry , struct nfs_fh *fh , struct nfs_fattr *fattr ) ; int nfs4_proc_setclientid(struct nfs_client *clp , u32 
program , unsigned short port , struct rpc_cred *cred , struct nfs4_setclientid_res *res ) ; int nfs4_proc_setclientid_confirm(struct nfs_client *clp , struct nfs4_setclientid_res *arg , struct rpc_cred *cred ) ; int nfs4_proc_get_rootfh(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info ) ; int nfs4_proc_bind_conn_to_session(struct nfs_client *clp , struct rpc_cred *cred ) ; int nfs4_proc_exchange_id(struct nfs_client *clp , struct rpc_cred *cred ) ; int nfs4_destroy_clientid(struct nfs_client *clp ) ; int nfs4_init_clientid(struct nfs_client *clp , struct rpc_cred *cred ) ; int nfs41_init_clientid(struct nfs_client *clp , struct rpc_cred *cred ) ; int nfs4_do_close(struct nfs4_state *state , gfp_t gfp_mask , int wait ) ; int nfs4_server_capabilities(struct nfs_server *server , struct nfs_fh *fhandle ) ; int nfs4_proc_fs_locations(struct rpc_clnt *client , struct inode *dir , struct qstr const *name , struct nfs4_fs_locations *fs_locations , struct page *page ) ; struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *dir , struct qstr *name , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) ; int nfs4_proc_secinfo(struct inode *dir , struct qstr const *name , struct nfs4_secinfo_flavors *flavors ) ; int nfs4_release_lockowner(struct nfs4_lock_state *lsp ) ; struct xattr_handler const *nfs4_xattr_handlers[2U] ; __inline static struct nfs4_session *nfs4_get_session(struct nfs_server const *server ) { { return ((server->nfs_client)->cl_session); } } int nfs4_setup_sequence(struct nfs_server const *server , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res , struct rpc_task *task ) ; int nfs41_setup_sequence(struct nfs4_session *session , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res , struct rpc_task *task ) ; int nfs4_proc_create_session(struct nfs_client *clp , struct rpc_cred *cred ) ; int nfs4_proc_destroy_session(struct nfs4_session *session , struct rpc_cred *cred ) ; int nfs4_proc_get_lease_time(struct nfs_client *clp , struct nfs_fsinfo *fsinfo ) ; int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data , bool sync ) ; struct nfs4_minor_version_ops const *nfs_v4_minor_ops[2U] ; u32 const nfs4_fattr_bitmap[3U] ; u32 const nfs4_statfs_bitmap[2U] ; u32 const nfs4_pathconf_bitmap[2U] ; u32 const nfs4_fsinfo_bitmap[3U] ; u32 const nfs4_fs_locations_bitmap[2U] ; void nfs4_free_client(struct nfs_client *clp ) ; struct nfs_client *nfs4_alloc_client(struct nfs_client_initdata const *cl_init ) ; void nfs4_schedule_state_renewal(struct nfs_client *clp ) ; struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp ) ; struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp ) ; struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp ) ; int nfs40_discover_server_trunking(struct nfs_client *clp , struct nfs_client **result , struct rpc_cred *cred ) ; struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp ) ; int nfs41_discover_server_trunking(struct nfs_client *clp , struct nfs_client **result , struct rpc_cred *cred ) ; void nfs4_schedule_session_recovery(struct nfs4_session *session , int err ) ; void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp ) ; struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server , struct rpc_cred *cred , gfp_t gfp_flags ) ; void nfs4_put_state_owner(struct nfs4_state_owner *sp ) ; struct nfs4_state *nfs4_get_open_state(struct inode *inode , struct nfs4_state_owner *owner ) ; void nfs4_put_open_state(struct nfs4_state *state 
) ; void nfs4_close_state(struct nfs4_state *state , fmode_t fmode ) ; void nfs4_close_sync(struct nfs4_state *state , fmode_t fmode ) ; void nfs4_state_set_mode_locked(struct nfs4_state *state , fmode_t fmode ) ; void nfs_inode_find_state_and_recover(struct inode *inode , nfs4_stateid const *stateid ) ; void nfs4_schedule_lease_recovery(struct nfs_client *clp ) ; int nfs4_wait_clnt_recover(struct nfs_client *clp ) ; int nfs4_client_recover_expired_lease(struct nfs_client *clp ) ; void nfs4_schedule_path_down_recovery(struct nfs_client *clp ) ; void nfs4_schedule_stateid_recovery(struct nfs_server const *server , struct nfs4_state *state ) ; void nfs41_handle_sequence_flag_errors(struct nfs_client *clp , u32 flags ) ; void nfs4_put_lock_state(struct nfs4_lock_state *lsp ) ; int nfs4_set_lock_state(struct nfs4_state *state , struct file_lock *fl ) ; void nfs4_select_rw_stateid(nfs4_stateid *dst , struct nfs4_state *state , fmode_t fmode , struct nfs_lockowner const *lockowner ) ; struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter , gfp_t gfp_mask ) ; int nfs_wait_on_sequence(struct nfs_seqid *seqid , struct rpc_task *task ) ; void nfs_release_seqid(struct nfs_seqid *seqid ) ; void nfs_free_seqid(struct nfs_seqid *seqid ) ; void nfs4_free_lock_state(struct nfs_server *server , struct nfs4_lock_state *lsp ) ; nfs4_stateid const zero_stateid ; struct dentry *nfs4_try_mount(int flags , char const *dev_name___0 , struct nfs_mount_info *mount_info , struct nfs_subversion *nfs_mod ) ; extern unsigned short max_session_slots ; extern char nfs4_client_id_uniquifier[64U] ; struct rpc_procinfo nfs4_procedures[53U] ; __inline static void nfs4_stateid_copy(nfs4_stateid *dst , nfs4_stateid const *src ) { size_t __len ; void *__ret ; { __len = 16UL; if (__len > 63UL) { __ret = memcpy((void *)dst, (void const *)src, __len); } else { __ret = memcpy((void *)dst, (void const *)src, __len); } return; } } __inline static bool nfs4_stateid_match(nfs4_stateid const *dst , nfs4_stateid const *src ) { int tmp ; { tmp = memcmp((void const *)dst, (void const *)src, 16UL); return (tmp == 0); } } int nfs_inode_set_delegation(struct inode *inode , struct rpc_cred *cred , struct nfs_openres *res ) ; void nfs_inode_reclaim_delegation(struct inode *inode , struct rpc_cred *cred , struct nfs_openres *res ) ; int nfs4_inode_return_delegation(struct inode *inode ) ; void nfs_remove_bad_delegation(struct inode *inode ) ; int nfs4_proc_delegreturn(struct inode *inode , struct rpc_cred *cred , nfs4_stateid const *stateid , int issync ) ; int nfs4_open_delegation_recall(struct nfs_open_context *ctx , struct nfs4_state *state , nfs4_stateid const *stateid ) ; int nfs4_lock_delegation_recall(struct nfs4_state *state , struct file_lock *fl ) ; bool nfs4_copy_delegation_stateid(nfs4_stateid *dst , struct inode *inode , fmode_t flags ) ; void nfs_mark_delegation_referenced(struct nfs_delegation *delegation ) ; int nfs4_have_delegation(struct inode *inode , fmode_t flags ) ; extern void nfs_put_client(struct nfs_client * ) ; struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info , struct nfs_subversion *nfs_mod ) ; extern struct nfs_server *nfs_clone_server(struct nfs_server * , struct nfs_fh * , struct nfs_fattr * , rpc_authflavor_t ) ; int nfs4_decode_dirent(struct xdr_stream *xdr , struct nfs_entry *entry , int plus ) ; extern struct dentry *nfs_lookup(struct inode * , struct dentry * , unsigned int ) ; extern int nfs_create(struct inode * , struct dentry * , umode_t , bool ) ; extern int 
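/* Descriptive note on the inline helpers that follow: nfs_page_array_len()
 * returns the number of 4096-byte pages spanned by a buffer of length len
 * starting at page offset base, i.e. ((base + len + 4095) >> 12), and
 * nfs_inc_server_stats() with its nested switch on operand size and
 * gs-prefixed inc/add instructions is the CIL expansion of a per-cpu
 * increment (this_cpu_inc() in the unexpanded kernel source) of
 * server->io_stats->events[stat]. */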
nfs_mkdir(struct inode * , struct dentry * , umode_t ) ; extern int nfs_rmdir(struct inode * , struct dentry * ) ; extern int nfs_unlink(struct inode * , struct dentry * ) ; extern int nfs_symlink(struct inode * , struct dentry * , char const * ) ; extern int nfs_link(struct dentry * , struct inode * , struct dentry * ) ; extern int nfs_mknod(struct inode * , struct dentry * , umode_t , dev_t ) ; extern int nfs_rename(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; extern struct workqueue_struct *nfsiod_workqueue ; extern void nfs_zap_acl_cache(struct inode * ) ; extern void nfs_sb_active(struct super_block * ) ; extern void nfs_sb_deactive(struct super_block * ) ; extern void nfs_sb_deactive_async(struct super_block * ) ; void __nfs4_read_done_cb(struct nfs_read_data *data ) ; struct nfs_client *nfs4_init_client(struct nfs_client *clp , struct rpc_timeout const *timeparms , char const *ip_addr , rpc_authflavor_t authflavour ) ; __inline static unsigned int nfs_page_array_len(unsigned int base , size_t len ) { { return ((unsigned int )((((unsigned long )base + len) + 4095UL) >> 12)); } } __inline static void nfs_inc_server_stats(struct nfs_server const *server , enum nfs_stat_eventcounters stat ) { void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; { __vpp_verify = 0; switch (8UL) { case 1UL: pao_ID__ = 1; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49292; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49292; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49292; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49292; default: __bad_percpu_size(); } ldv_49292: ; goto ldv_49297; case 2UL: pao_ID_____0 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49303; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ 
("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49303; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49303; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49303; default: __bad_percpu_size(); } ldv_49303: ; goto ldv_49297; case 4UL: pao_ID_____1 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49313; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49313; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49313; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49313; default: __bad_percpu_size(); } ldv_49313: ; goto ldv_49297; case 8UL: pao_ID_____2 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49323; case 2UL: ; if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49323; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" 
(1UL)); } goto ldv_49323; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49323; default: __bad_percpu_size(); } ldv_49323: ; goto ldv_49297; default: __bad_size_call_parameter(); goto ldv_49297; } ldv_49297: ; return; } } int nfs4_proc_getdevicelist(struct nfs_server *server , struct nfs_fh const *fh , struct pnfs_devicelist *devlist ) ; int nfs4_proc_getdeviceinfo(struct nfs_server *server , struct pnfs_device *pdev ) ; struct pnfs_layout_segment *nfs4_proc_layoutget(struct nfs4_layoutget *lgp , gfp_t gfp_flags ) ; int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp ) ; void pnfs_put_lseg(struct pnfs_layout_segment *lseg ) ; void pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio , struct inode *inode , struct nfs_pgio_completion_ops const *compl_ops ) ; void pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio , struct inode *inode , int ioflags , struct nfs_pgio_completion_ops const *compl_ops ) ; void set_pnfs_layoutdriver(struct nfs_server *server , struct nfs_fh const *mntfh , u32 id ) ; struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp ) ; void pnfs_free_lseg_list(struct list_head *free_me ) ; void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo ) ; void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo , nfs4_stateid const *new , bool update_barrier ) ; int pnfs_choose_layoutget_stateid(nfs4_stateid *dst , struct pnfs_layout_hdr *lo , struct nfs4_state *open_state ) ; int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo , struct list_head *tmp_list , struct pnfs_layout_range *recall_range ) ; bool pnfs_roc(struct inode *ino ) ; void pnfs_roc_release(struct inode *ino ) ; void pnfs_roc_set_barrier(struct inode *ino , u32 barrier ) ; bool pnfs_roc_drain(struct inode *ino , u32 *barrier , struct rpc_task *task ) ; void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data ) ; int _pnfs_return_layout(struct inode *ino ) ; struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) ; __inline static int pnfs_enabled_sb(struct nfs_server *nfss ) { { return ((unsigned long )nfss->pnfs_curr_ld != (unsigned long )((struct pnfs_layoutdriver_type *)0)); } } __inline static bool pnfs_ld_layoutret_on_setattr(struct inode *inode ) { struct nfs_server *tmp ; int tmp___0 ; struct nfs_server *tmp___1 ; { tmp = NFS_SERVER((struct inode const *)inode); tmp___0 = pnfs_enabled_sb(tmp); if (tmp___0 == 0) { return (0); } else { } tmp___1 = NFS_SERVER((struct inode const *)inode); return (((tmp___1->pnfs_curr_ld)->flags & 1U) != 0U); } } __inline static int pnfs_return_layout(struct inode *ino ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_server *nfss ; struct nfs_server *tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = NFS_I((struct inode const *)ino); nfsi = tmp; tmp___0 = NFS_SERVER((struct inode const *)ino); nfss = tmp___0; tmp___2 = pnfs_enabled_sb(nfss); if (tmp___2 != 0 && (unsigned long )nfsi->layout != (unsigned long )((struct pnfs_layout_hdr *)0)) { tmp___1 = _pnfs_return_layout(ino); return (tmp___1); } else { } return (0); } } __inline static bool pnfs_use_threshold(struct nfs4_threshold **dst , struct nfs4_threshold *src , struct nfs_server *nfss ) { { return ((bool )(((((unsigned long )dst != (unsigned long )((struct 
nfs4_threshold **)0) && (unsigned long )src != (unsigned long )((struct nfs4_threshold *)0)) && src->bm != 0U) && (unsigned long )nfss->pnfs_curr_ld != (unsigned long )((struct pnfs_layoutdriver_type *)0)) && (unsigned int )(nfss->pnfs_curr_ld)->id == src->l_type)); } } __inline static void *net_generic(struct net const *net , int id ) { struct net_generic *ng ; void *ptr ; struct net_generic *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { rcu_read_lock(); _________p1 = *((struct net_generic * const volatile *)(& net->gen)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("include/net/netns/generic.h", 40, "suspicious rcu_dereference_check() usage"); } else { } } else { } ng = _________p1; tmp___1 = ldv__builtin_expect(id == 0, 0L); if (tmp___1 != 0L) { goto _L; } else { tmp___2 = ldv__builtin_expect((unsigned int )id > ng->len, 0L); if (tmp___2 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (41), "i" (12UL)); ldv_50437: ; goto ldv_50437; } else { } } ptr = ng->ptr[id + -1]; rcu_read_unlock(); tmp___3 = ldv__builtin_expect((unsigned long )ptr == (unsigned long )((void *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (45), "i" (12UL)); ldv_50438: ; goto ldv_50438; } else { } return (ptr); } } extern int nfs_net_id ; struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl ) ; void nfs4_free_slot(struct nfs4_slot_table *tbl , struct nfs4_slot *slot ) ; void nfs41_update_target_slotid(struct nfs4_slot_table *tbl , struct nfs4_slot *slot , struct nfs4_sequence_res *res ) ; int nfs4_setup_session_slot_tables(struct nfs4_session *ses ) ; bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl , struct nfs4_slot *slot ) ; extern void __fscache_invalidate(struct fscache_cookie * ) ; __inline static void fscache_invalidate(struct fscache_cookie *cookie ) { { if ((unsigned long )cookie != (unsigned long )((struct fscache_cookie *)0)) { __fscache_invalidate(cookie); } else { } return; } } __inline static void nfs_fscache_invalidate(struct inode *inode ) { struct nfs_inode *tmp ; { tmp = NFS_I((struct inode const *)inode); fscache_invalidate(tmp->fscache); return; } } static int _nfs4_proc_open(struct nfs4_opendata *data ) ; static int _nfs4_recover_proc_open(struct nfs4_opendata *data ) ; static int nfs4_do_fsinfo(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *fsinfo ) ; static int nfs4_async_handle_error(struct rpc_task *task , struct nfs_server const *server , struct nfs4_state *state ) ; static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr ) ; static int nfs4_proc_getattr(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) ; static int _nfs4_proc_getattr(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) ; static int nfs4_do_setattr(struct inode *inode , struct rpc_cred *cred , struct nfs_fattr *fattr , struct iattr *sattr , struct nfs4_state *state ) ; static int nfs41_test_stateid(struct nfs_server *server , nfs4_stateid *stateid ) ; static int nfs41_free_stateid(struct nfs_server *server , nfs4_stateid 
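/* nfs4_map_errors() below translates NFSv4 protocol errors (numeric values
 * below -1000) into local errnos; matching the numeric cases against the
 * unexpanded kernel headers: -10018 (NFS4ERR_RESOURCE) -> -121 (EREMOTEIO),
 * -10016 (NFS4ERR_WRONGSEC) -> -1 (EPERM), -10039/-10041
 * (NFS4ERR_BADOWNER/NFS4ERR_BADNAME) -> -22 (EINVAL), -10015
 * (NFS4ERR_SHARE_DENIED) and -13 -> -13 (EACCES), -10021
 * (NFS4ERR_MINOR_VERS_MISMATCH) -> -93 (EPROTONOSUPPORT). Unrecognised
 * errors are logged when the 16U bit of nfs_debug (NFSDBG_PROC in the
 * unexpanded source) is set and are mapped to -5 (EIO). */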
*stateid ) ; static int nfs4_map_errors(int err ) { long tmp ; { if (err >= -1000) { return (err); } else { } switch (err) { case -10018: ; return (-121); case -10016: ; return (-1); case -10039: ; case -10041: ; return (-22); case -10015: ; return (-13); case -10021: ; return (-93); case -13: ; return (-13); default: tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s could not handle NFSv4 error %d\n", "nfs4_map_errors", - err); } else { } goto ldv_50962; } ldv_50962: ; return (-5); } } u32 const nfs4_fattr_bitmap[3U] = { 1048858U, 3187258U}; static u32 const nfs4_pnfs_open_bitmap[3U] = { 1048858U, 3187258U, 16U}; static u32 const nfs4_open_noattr_bitmap[3U] = { 1048586U}; u32 const nfs4_statfs_bitmap[2U] = { 14680064U, 7168U}; u32 const nfs4_pathconf_bitmap[2U] = { 805306368U, 0U}; u32 const nfs4_fsinfo_bitmap[3U] = { 3355444224U, 1074266112U, 2U}; u32 const nfs4_fs_locations_bitmap[2U] = { 17826074U, 11575866U}; static void nfs4_setup_readdir(u64 cookie , __be32 *verifier , struct dentry *dentry , struct nfs4_readdir_arg *readdir ) { __be32 *start ; __be32 *p ; size_t __len ; void *__ret ; void *tmp ; __be32 *tmp___0 ; __be32 *tmp___1 ; __be32 *tmp___2 ; __be32 *tmp___3 ; size_t __len___0 ; void *__ret___0 ; __be32 *tmp___4 ; __be32 *tmp___5 ; __be32 *tmp___6 ; __u64 tmp___7 ; __be32 *tmp___8 ; __be32 *tmp___9 ; __be32 *tmp___10 ; __be32 *tmp___11 ; size_t __len___1 ; void *__ret___1 ; __be32 *tmp___12 ; __be32 *tmp___13 ; __be32 *tmp___14 ; __u64 tmp___15 ; { if (cookie > 2ULL) { readdir->cookie = cookie; __len = 8UL; if (__len > 63UL) { __ret = memcpy((void *)(& readdir->verifier), (void const *)verifier, __len); } else { __ret = memcpy((void *)(& readdir->verifier), (void const *)verifier, __len); } return; } else { } readdir->cookie = 0ULL; memset((void *)(& readdir->verifier), 0, 8UL); if (cookie == 2ULL) { return; } else { } tmp = kmap_atomic(*(readdir->pages)); p = (__be32 *)tmp; start = p; if (cookie == 0ULL) { tmp___0 = p; p = p + 1; *tmp___0 = 16777216U; tmp___1 = p; p = p + 1; *tmp___1 = 0U; tmp___2 = p; p = p + 1; *tmp___2 = 16777216U; tmp___3 = p; p = p + 1; *tmp___3 = 16777216U; __len___0 = 4UL; if (__len___0 > 63UL) { __ret___0 = memcpy((void *)p, (void const *)".", __len___0); } else { __ret___0 = memcpy((void *)p, (void const *)".", __len___0); } p = p + 1; tmp___4 = p; p = p + 1; *tmp___4 = 16777216U; tmp___5 = p; p = p + 1; *tmp___5 = 4096U; tmp___6 = p; p = p + 1; *tmp___6 = 134217728U; tmp___7 = NFS_FILEID((struct inode const *)dentry->d_inode); p = xdr_encode_hyper(p, tmp___7); } else { } tmp___8 = p; p = p + 1; *tmp___8 = 16777216U; tmp___9 = p; p = p + 1; *tmp___9 = 0U; tmp___10 = p; p = p + 1; *tmp___10 = 33554432U; tmp___11 = p; p = p + 1; *tmp___11 = 33554432U; __len___1 = 4UL; if (__len___1 > 63UL) { __ret___1 = memcpy((void *)p, (void const *)"..", __len___1); } else { __ret___1 = memcpy((void *)p, (void const *)"..", __len___1); } p = p + 1; tmp___12 = p; p = p + 1; *tmp___12 = 16777216U; tmp___13 = p; p = p + 1; *tmp___13 = 4096U; tmp___14 = p; p = p + 1; *tmp___14 = 134217728U; tmp___15 = NFS_FILEID((struct inode const *)(dentry->d_parent)->d_inode); p = xdr_encode_hyper(p, tmp___15); readdir->pgbase = (unsigned int )((long )p) - (unsigned int )((long )start); readdir->count = readdir->count - readdir->pgbase; __kunmap_atomic((void *)start); return; } } static int nfs4_delay(struct rpc_clnt *clnt , long *timeout ) { int res ; long __retval ; struct task_struct *tmp ; int tmp___0 ; { res = 0; 
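/* nfs4_delay(): killable back-off used when the server asks the client to
 * retry.  *timeout is clamped to the range [25, 3750] jiffies (which with
 * HZ == 250 corresponds to NFS4_POLL_RETRY_MIN and NFS4_POLL_RETRY_MAX in
 * the unexpanded source), the sleep is wrapped in
 * freezer_do_not_count()/freezer_count() so it does not block the freezer,
 * -512 (-ERESTARTSYS) is returned if a fatal signal is pending, and
 * *timeout is doubled for the next attempt. */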
__might_sleep("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 325, 0); if (*timeout <= 0L) { *timeout = 25L; } else { } if (*timeout > 3750L) { *timeout = 3750L; } else { } freezer_do_not_count(); __retval = schedule_timeout_killable(*timeout); freezer_count(); tmp = get_current(); tmp___0 = fatal_signal_pending(tmp); if (tmp___0 != 0) { res = -512; } else { } *timeout = *timeout << 1; return (res); } } static int nfs4_handle_exception(struct nfs_server *server , int errorcode , struct nfs4_exception *exception ) { struct nfs_client *clp ; struct nfs4_state *state ; struct inode *inode ; int ret ; int tmp ; long tmp___0 ; int tmp___1 ; { clp = server->nfs_client; state = exception->state; inode = exception->inode; ret = errorcode; exception->retry = 0; switch (errorcode) { case 0: ; return (0); case -10038: ; if ((unsigned long )inode != (unsigned long )((struct inode *)0)) { tmp = nfs4_have_delegation(inode, 1U); if (tmp != 0) { nfs4_inode_return_delegation(inode); exception->retry = 1; return (0); } else { } } else { } if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto ldv_51005; } else { } nfs4_schedule_stateid_recovery((struct nfs_server const *)server, state); goto wait_on_recovery; case -10087: ; case -10047: ; case -10025: ; if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto ldv_51005; } else { } nfs_remove_bad_delegation(state->inode); nfs4_schedule_stateid_recovery((struct nfs_server const *)server, state); goto wait_on_recovery; case -10011: ; if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { nfs4_schedule_stateid_recovery((struct nfs_server const *)server, state); } else { } case -10023: ; case -10022: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; case -10052: ; case -10053: ; case -10077: ; case -10055: ; case -10078: ; case -10076: ; case -10063: tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s ERROR: %d Reset session\n", "nfs4_handle_exception", errorcode); } else { } nfs4_schedule_session_recovery(clp->cl_session, errorcode); goto wait_on_recovery; case -10046: ; if (exception->timeout > 250L) { ret = -16; goto ldv_51005; } else { } case -10013: ; case -10008: ret = nfs4_delay(server->client, & exception->timeout); if (ret != 0) { goto ldv_51005; } else { } case -10068: ; case -10024: exception->retry = 1; goto ldv_51005; case -10039: ; case -10041: ; if ((server->caps & 32768U) != 0U) { server->caps = server->caps & 4294934527U; exception->retry = 1; printk("\fNFS: v4 server %s does not accept raw uid/gids. 
Reenabling the idmapper.\n", (server->nfs_client)->cl_hostname); } else { } } ldv_51005: tmp___1 = nfs4_map_errors(ret); return (tmp___1); wait_on_recovery: ret = nfs4_wait_clnt_recover(clp); if (ret == 0) { exception->retry = 1; } else { } return (ret); } } static void do_renew_lease(struct nfs_client *clp , unsigned long timestamp ) { { spin_lock(& clp->cl_lock); if ((long )clp->cl_last_renewal - (long )timestamp < 0L) { clp->cl_last_renewal = timestamp; } else { } spin_unlock(& clp->cl_lock); return; } } static void renew_lease(struct nfs_server const *server , unsigned long timestamp ) { { do_renew_lease(server->nfs_client, timestamp); return; } } static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res ) { struct nfs4_session *session ; struct nfs4_slot_table *tbl ; bool send_new_highest_used_slotid ; long tmp ; bool tmp___0 ; { send_new_highest_used_slotid = 0; if ((unsigned long )res->sr_slot == (unsigned long )((struct nfs4_slot *)0)) { tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: No slot\n", "nfs41_sequence_free_slot"); } else { } return; } else { } tbl = (res->sr_slot)->table; session = tbl->session; spin_lock(& tbl->slot_tbl_lock); if (tbl->highest_used_slotid > tbl->target_highest_slotid) { send_new_highest_used_slotid = 1; } else { } tmp___0 = nfs41_wake_and_assign_slot(tbl, res->sr_slot); if ((int )tmp___0) { send_new_highest_used_slotid = 0; goto out_unlock; } else { } nfs4_free_slot(tbl, res->sr_slot); if (tbl->highest_used_slotid != 4294967295U) { send_new_highest_used_slotid = 0; } else { } out_unlock: spin_unlock(& tbl->slot_tbl_lock); res->sr_slot = 0; if ((int )send_new_highest_used_slotid) { nfs41_server_notify_highest_slotid_update(session->clp); } else { } return; } } static int nfs41_sequence_done(struct rpc_task *task , struct nfs4_sequence_res *res ) { struct nfs4_session *session ; struct nfs4_slot *slot ; struct nfs_client *clp ; bool interrupted ; int ret ; long tmp ; long tmp___0 ; int tmp___1 ; int tmp___2 ; { interrupted = 0; ret = 1; if (((int )task->tk_flags & 2048) == 0) { goto out; } else { } slot = res->sr_slot; session = (slot->table)->session; if ((unsigned int )*((unsigned char *)slot + 32UL) != 0U) { slot->interrupted = 0U; interrupted = 1; } else { } switch (res->sr_status) { case 0: slot->seq_nr = slot->seq_nr + 1U; clp = session->clp; do_renew_lease(clp, res->sr_timestamp); if (res->sr_status_flags != 0U) { nfs4_schedule_lease_recovery(clp); } else { } nfs41_update_target_slotid(slot->table, slot, res); goto ldv_51061; case 1: slot->interrupted = 1U; goto out; case -10008: tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: slot=%u seq=%u: Operation in progress\n", "nfs41_sequence_done", slot->slot_nr, slot->seq_nr); } else { } goto out_retry; case -10053: ; goto retry_nowait; case -10063: ; if ((int )interrupted) { slot->seq_nr = slot->seq_nr + 1U; goto retry_nowait; } else { } if (slot->seq_nr != 1U) { slot->seq_nr = 1U; goto retry_nowait; } else { } goto ldv_51061; case -10076: slot->seq_nr = slot->seq_nr + 1U; goto retry_nowait; default: slot->seq_nr = slot->seq_nr + 1U; } ldv_51061: ; out: tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: Error %d free the slot \n", "nfs41_sequence_done", res->sr_status); } else { } nfs41_sequence_free_slot(res); return (ret); retry_nowait: tmp___1 = rpc_restart_call_prepare(task); if (tmp___1 != 0) { task->tk_status = 0; ret = 0; } else { } goto out; out_retry: 
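/* out_retry: restart the RPC call and delay it by 3750 jiffies
 * (NFS4_POLL_RETRY_MAX in the unexpanded source) before it runs again.
 * The sequence helpers that follow handle the v4.0/v4.1 split:
 * nfs4_sequence_done() is a no-op (returns 1) when no session slot was
 * assigned, nfs41_init_sequence() clears the slot pointers and records
 * whether the reply should be cached, and nfs41_setup_sequence() takes a
 * slot from the fore-channel table, sleeping on the slot waitqueue when
 * the session is draining or no slot is free (task->tk_timeout is set to
 * 62 jiffies, HZ >> 2, when the slot allocation failed with -ENOMEM). */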
tmp___2 = rpc_restart_call(task); if (tmp___2 == 0) { goto out; } else { } rpc_delay(task, 3750UL); return (0); } } static int nfs4_sequence_done(struct rpc_task *task , struct nfs4_sequence_res *res ) { int tmp ; { if ((unsigned long )res->sr_slot == (unsigned long )((struct nfs4_slot *)0)) { return (1); } else { } tmp = nfs41_sequence_done(task, res); return (tmp); } } static void nfs41_init_sequence(struct nfs4_sequence_args *args , struct nfs4_sequence_res *res , int cache_reply ) { { args->sa_slot = 0; args->sa_cache_this = 0U; args->sa_privileged = 0U; if (cache_reply != 0) { args->sa_cache_this = 1U; } else { } res->sr_slot = 0; return; } } static void nfs4_set_sequence_privileged(struct nfs4_sequence_args *args ) { { args->sa_privileged = 1U; return; } } int nfs41_setup_sequence(struct nfs4_session *session , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res , struct rpc_task *task ) { struct nfs4_slot *slot ; struct nfs4_slot_table *tbl ; long tmp ; long tmp___0 ; int tmp___1 ; void *tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs41_setup_sequence"); } else { } if ((unsigned long )res->sr_slot != (unsigned long )((struct nfs4_slot *)0)) { goto out_success; } else { } tbl = & session->fc_slot_table; task->tk_timeout = 0UL; spin_lock(& tbl->slot_tbl_lock); tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& session->session_state)); if (tmp___1 != 0 && (unsigned int )*((unsigned char *)args + 8UL) == 0U) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s session is draining\n", "nfs41_setup_sequence"); } else { } goto out_sleep; } else { } slot = nfs4_alloc_slot(tbl); tmp___4 = IS_ERR((void const *)slot); if (tmp___4 != 0L) { tmp___2 = ERR_PTR(-12L); if ((unsigned long )tmp___2 == (unsigned long )((void *)slot)) { task->tk_timeout = 62UL; } else { } tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d<-- %s: no free slots\n", "nfs41_setup_sequence"); } else { } goto out_sleep; } else { } spin_unlock(& tbl->slot_tbl_lock); args->sa_slot = slot; tmp___5 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d<-- %s slotid=%d seqid=%d\n", "nfs41_setup_sequence", slot->slot_nr, slot->seq_nr); } else { } res->sr_slot = slot; res->sr_timestamp = jiffies; res->sr_status_flags = 0U; res->sr_status = 1; out_success: rpc_call_start(task); return (0); out_sleep: ; if ((unsigned int )*((unsigned char *)args + 8UL) != 0U) { rpc_sleep_on_priority(& tbl->slot_tbl_waitq, task, 0, 2); } else { rpc_sleep_on(& tbl->slot_tbl_waitq, task, 0); } spin_unlock(& tbl->slot_tbl_lock); return (-11); } } int nfs4_setup_sequence(struct nfs_server const *server , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res , struct rpc_task *task ) { struct nfs4_session *session ; struct nfs4_session *tmp ; int ret ; long tmp___0 ; long tmp___1 ; { tmp = nfs4_get_session(server); session = tmp; ret = 0; if ((unsigned long )session == (unsigned long )((struct nfs4_session *)0)) { rpc_call_start(task); goto out; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d--> %s clp %p session %p sr_slot %d\n", "nfs4_setup_sequence", session->clp, session, (unsigned long )res->sr_slot != (unsigned long )((struct nfs4_slot *)0) ? 
(res->sr_slot)->slot_nr : 4294967295U); } else { } ret = nfs41_setup_sequence(session, args, res, task); out: tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- %s status=%d\n", "nfs4_setup_sequence", ret); } else { } return (ret); } } static void nfs41_call_sync_prepare(struct rpc_task *task , void *calldata ) { struct nfs41_call_sync_data *data ; struct nfs4_session *session ; struct nfs4_session *tmp ; long tmp___0 ; { data = (struct nfs41_call_sync_data *)calldata; tmp = nfs4_get_session(data->seq_server); session = tmp; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d--> %s data->seq_server %p\n", "nfs41_call_sync_prepare", data->seq_server); } else { } nfs41_setup_sequence(session, data->seq_args, data->seq_res, task); return; } } static void nfs41_call_sync_done(struct rpc_task *task , void *calldata ) { struct nfs41_call_sync_data *data ; { data = (struct nfs41_call_sync_data *)calldata; nfs41_sequence_done(task, data->seq_res); return; } } static struct rpc_call_ops const nfs41_call_sync_ops = {& nfs41_call_sync_prepare, & nfs41_call_sync_done, 0, 0}; static int nfs4_call_sync_sequence(struct rpc_clnt *clnt , struct nfs_server *server , struct rpc_message *msg , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res ) { int ret ; struct rpc_task *task ; struct nfs41_call_sync_data data ; struct rpc_task_setup task_setup ; long tmp ; long tmp___0 ; { data.seq_server = (struct nfs_server const *)server; data.seq_args = args; data.seq_res = res; task_setup.task = 0; task_setup.rpc_client = clnt; task_setup.rpc_message = (struct rpc_message const *)msg; task_setup.callback_ops = & nfs41_call_sync_ops; task_setup.callback_data = (void *)(& data); task_setup.workqueue = 0; task_setup.flags = (unsigned short)0; task_setup.priority = (signed char)0; task = rpc_run_task((struct rpc_task_setup const *)(& task_setup)); tmp___0 = IS_ERR((void const *)task); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)task); ret = (int )tmp; } else { ret = task->tk_status; rpc_put_task(task); } return (ret); } } static int _nfs4_call_sync(struct rpc_clnt *clnt , struct nfs_server *server , struct rpc_message *msg , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res ) { int tmp ; { tmp = rpc_call_sync(clnt, (struct rpc_message const *)msg, 0); return (tmp); } } static int nfs4_call_sync(struct rpc_clnt *clnt , struct nfs_server *server , struct rpc_message *msg , struct nfs4_sequence_args *args , struct nfs4_sequence_res *res , int cache_reply ) { int tmp ; { nfs41_init_sequence(args, res, cache_reply); tmp = (*(((server->nfs_client)->cl_mvops)->call_sync))(clnt, server, msg, args, res); return (tmp); } } static void update_changeattr(struct inode *dir , struct nfs4_change_info *cinfo ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; { tmp = NFS_I((struct inode const *)dir); nfsi = tmp; spin_lock(& dir->i_lock); nfsi->cache_validity = nfsi->cache_validity | 3UL; if (cinfo->atomic == 0U || cinfo->before != dir->i_version) { nfs_force_lookup_revalidate(dir); } else { } dir->i_version = cinfo->after; nfs_fscache_invalidate(dir); spin_unlock(& dir->i_lock); return; } } static void nfs4_init_opendata_res(struct nfs4_opendata *p ) { { p->o_res.f_attr = & p->f_attr; p->o_res.seqid = p->o_arg.seqid; p->c_res.seqid = p->c_arg.seqid; p->o_res.server = p->o_arg.server; p->o_res.access_request = p->o_arg.access; nfs_fattr_init(& p->f_attr); nfs_fattr_init_names(& p->f_attr, & p->owner_name, & 
p->group_name); return; } } static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry , struct nfs4_state_owner *sp , fmode_t fmode , int flags , struct iattr const *attrs , gfp_t gfp_mask ) { struct dentry *parent ; struct dentry *tmp ; struct inode *dir ; struct nfs_server *server ; struct nfs_server *tmp___0 ; struct nfs4_opendata *p ; void *tmp___1 ; struct nfs_fh *tmp___2 ; __be32 verf[2U] ; size_t __len ; void *__ret ; struct task_struct *tmp___3 ; size_t __len___0 ; void *__ret___0 ; { tmp = dget_parent(dentry); parent = tmp; dir = parent->d_inode; tmp___0 = NFS_SERVER((struct inode const *)dir); server = tmp___0; tmp___1 = kzalloc(880UL, gfp_mask); p = (struct nfs4_opendata *)tmp___1; if ((unsigned long )p == (unsigned long )((struct nfs4_opendata *)0)) { goto err; } else { } p->o_arg.seqid = nfs_alloc_seqid(& sp->so_seqid, gfp_mask); if ((unsigned long )p->o_arg.seqid == (unsigned long )((struct nfs_seqid *)0)) { goto err_free; } else { } nfs_sb_active(dentry->d_sb); p->dentry = dget(dentry); p->dir = parent; p->owner = sp; atomic_inc(& sp->so_count); tmp___2 = NFS_FH((struct inode const *)dir); p->o_arg.fh = (struct nfs_fh const *)tmp___2; p->o_arg.open_flags = flags; p->o_arg.fmode = fmode & 3U; if ((flags & 128) == 0) { p->o_arg.access = 45U; } else { } p->o_arg.clientid = (server->nfs_client)->cl_clientid; p->o_arg.id.create_time = (__u64 )sp->so_seqid.create_time.tv64; p->o_arg.id.uniquifier = (__u32 )sp->so_seqid.owner_id; p->o_arg.name = (struct qstr const *)(& dentry->d_name); p->o_arg.server = (struct nfs_server const *)server; p->o_arg.bitmask = (u32 const *)(& server->attr_bitmask); p->o_arg.open_bitmap = (u32 const *)(& nfs4_fattr_bitmap); p->o_arg.claim = 0U; if ((unsigned long )attrs != (unsigned long )((struct iattr const *)0) && (unsigned int )attrs->ia_valid != 0U) { p->o_arg.u.ldv_45036.attrs = & p->attrs; __len = 80UL; if (__len > 63UL) { __ret = memcpy((void *)(& p->attrs), (void const *)attrs, __len); } else { __ret = memcpy((void *)(& p->attrs), (void const *)attrs, __len); } verf[0] = (__be32 )jiffies; tmp___3 = get_current(); verf[1] = (__be32 )tmp___3->pid; __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = memcpy((void *)(& p->o_arg.u.ldv_45036.verifier.data), (void const *)(& verf), __len___0); } else { __ret___0 = memcpy((void *)(& p->o_arg.u.ldv_45036.verifier.data), (void const *)(& verf), __len___0); } } else { } p->c_arg.fh = (struct nfs_fh const *)(& p->o_res.fh); p->c_arg.stateid = & p->o_res.stateid; p->c_arg.seqid = p->o_arg.seqid; nfs4_init_opendata_res(p); kref_init(& p->kref); return (p); err_free: kfree((void const *)p); err: dput(parent); return (0); } } static void nfs4_opendata_free(struct kref *kref ) { struct nfs4_opendata *p ; struct kref const *__mptr ; struct super_block *sb ; { __mptr = (struct kref const *)kref; p = (struct nfs4_opendata *)__mptr; sb = (p->dentry)->d_sb; nfs_free_seqid(p->o_arg.seqid); if ((unsigned long )p->state != (unsigned long )((struct nfs4_state *)0)) { nfs4_put_open_state(p->state); } else { } nfs4_put_state_owner(p->owner); dput(p->dir); dput(p->dentry); nfs_sb_deactive(sb); nfs_fattr_free_names(& p->f_attr); kfree((void const *)p); return; } } static void nfs4_opendata_put(struct nfs4_opendata *p ) { { if ((unsigned long )p != (unsigned long )((struct nfs4_opendata *)0)) { kref_put(& p->kref, & nfs4_opendata_free); } else { } return; } } static int nfs4_wait_for_completion_rpc_task(struct rpc_task *task ) { int ret ; { ret = rpc_wait_for_completion_task(task); return (ret); } } static int 
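/* can_open_cached() decides whether an open can be satisfied from cached
 * open state: it refuses when O_EXCL or O_TRUNC is requested
 * (open_mode & 640, i.e. 0200 | 01000) and otherwise requires that the
 * matching share mode is already held; bits 2, 3 and 4 of state->flags
 * correspond to NFS_O_RDONLY_STATE, NFS_O_WRONLY_STATE and
 * NFS_O_RDWR_STATE in the unexpanded source, checked together with a
 * non-zero open count of that mode. */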
can_open_cached(struct nfs4_state *state , fmode_t mode , int open_mode ) { int ret ; int tmp ; int tmp___0 ; int tmp___1 ; { ret = 0; if ((open_mode & 640) != 0) { goto out; } else { } switch (mode & 3U) { case 1U: tmp = constant_test_bit(2U, (unsigned long const volatile *)(& state->flags)); ret = (tmp != 0 && state->n_rdonly != 0U) | ret; goto ldv_51224; case 2U: tmp___0 = constant_test_bit(3U, (unsigned long const volatile *)(& state->flags)); ret = (tmp___0 != 0 && state->n_wronly != 0U) | ret; goto ldv_51224; case 3U: tmp___1 = constant_test_bit(4U, (unsigned long const volatile *)(& state->flags)); ret = (tmp___1 != 0 && state->n_rdwr != 0U) | ret; } ldv_51224: ; out: ; return (ret); } } static int can_open_delegated(struct nfs_delegation *delegation , fmode_t fmode ) { int tmp ; { if ((unsigned long )delegation == (unsigned long )((struct nfs_delegation *)0)) { return (0); } else { } if ((delegation->type & fmode) != fmode) { return (0); } else { } tmp = constant_test_bit(0U, (unsigned long const volatile *)(& delegation->flags)); if (tmp != 0) { return (0); } else { } nfs_mark_delegation_referenced(delegation); return (1); } } static void update_open_stateflags(struct nfs4_state *state , fmode_t fmode ) { { switch (fmode) { case 2U: state->n_wronly = state->n_wronly + 1U; goto ldv_51236; case 1U: state->n_rdonly = state->n_rdonly + 1U; goto ldv_51236; case 3U: state->n_rdwr = state->n_rdwr + 1U; } ldv_51236: nfs4_state_set_mode_locked(state, state->state | fmode); return; } } static void nfs_set_open_stateid_locked(struct nfs4_state *state , nfs4_stateid *stateid , fmode_t fmode ) { int tmp ; { tmp = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp == 0) { nfs4_stateid_copy(& state->stateid, (nfs4_stateid const *)stateid); } else { } nfs4_stateid_copy(& state->open_stateid, (nfs4_stateid const *)stateid); switch (fmode) { case 1U: set_bit(2U, (unsigned long volatile *)(& state->flags)); goto ldv_51245; case 2U: set_bit(3U, (unsigned long volatile *)(& state->flags)); goto ldv_51245; case 3U: set_bit(4U, (unsigned long volatile *)(& state->flags)); } ldv_51245: ; return; } } static void nfs_set_open_stateid(struct nfs4_state *state , nfs4_stateid *stateid , fmode_t fmode ) { { write_seqlock(& state->seqlock); nfs_set_open_stateid_locked(state, stateid, fmode); write_sequnlock(& state->seqlock); return; } } static void __update_open_stateid(struct nfs4_state *state , nfs4_stateid *open_stateid , nfs4_stateid const *deleg_stateid , fmode_t fmode ) { { write_seqlock(& state->seqlock); if ((unsigned long )deleg_stateid != (unsigned long )((nfs4_stateid const *)0)) { nfs4_stateid_copy(& state->stateid, deleg_stateid); set_bit(1U, (unsigned long volatile *)(& state->flags)); } else { } if ((unsigned long )open_stateid != (unsigned long )((nfs4_stateid *)0)) { nfs_set_open_stateid_locked(state, open_stateid, fmode); } else { } write_sequnlock(& state->seqlock); spin_lock(& (state->owner)->so_lock); update_open_stateflags(state, fmode); spin_unlock(& (state->owner)->so_lock); return; } } static int update_open_stateid(struct nfs4_state *state , nfs4_stateid *open_stateid , nfs4_stateid *delegation , fmode_t fmode ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_delegation *deleg_cur ; int ret ; struct nfs_delegation *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; { tmp = NFS_I((struct inode const *)state->inode); nfsi = tmp; ret = 0; fmode = fmode & 3U; rcu_read_lock(); _________p1 = *((struct 
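/* The _________p1 / __warned / lockdep_rcu_suspicious() sequence seen here,
 * and in the similar blocks above and below, is the CIL expansion of an
 * rcu_dereference() of the delegation pointer: the pointer is read once
 * through a volatile access and, when lockdep RCU debugging is enabled, a
 * one-shot warning is printed if the caller does not hold rcu_read_lock(). */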
nfs_delegation * volatile *)(& nfsi->delegation)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 1034, "suspicious rcu_dereference_check() usage"); } else { } } else { } deleg_cur = _________p1; if ((unsigned long )deleg_cur == (unsigned long )((struct nfs_delegation *)0)) { goto no_delegation; } else { } spin_lock(& deleg_cur->lock); if ((unsigned long )nfsi->delegation != (unsigned long )deleg_cur || (deleg_cur->type & fmode) != fmode) { goto no_delegation_unlock; } else { } if ((unsigned long )delegation == (unsigned long )((nfs4_stateid *)0)) { delegation = & deleg_cur->stateid; } else { tmp___2 = nfs4_stateid_match((nfs4_stateid const *)(& deleg_cur->stateid), (nfs4_stateid const *)delegation); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { goto no_delegation_unlock; } else { } } nfs_mark_delegation_referenced(deleg_cur); __update_open_stateid(state, open_stateid, (nfs4_stateid const *)(& deleg_cur->stateid), fmode); ret = 1; no_delegation_unlock: spin_unlock(& deleg_cur->lock); no_delegation: rcu_read_unlock(); if (ret == 0 && (unsigned long )open_stateid != (unsigned long )((nfs4_stateid *)0)) { __update_open_stateid(state, open_stateid, 0, fmode); ret = 1; } else { } return (ret); } } static void nfs4_return_incompatible_delegation(struct inode *inode , fmode_t fmode ) { struct nfs_delegation *delegation ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp ; bool __warned ; int tmp___0 ; int tmp___1 ; { rcu_read_lock(); tmp = NFS_I((struct inode const *)inode); _________p1 = *((struct nfs_delegation * volatile *)(& tmp->delegation)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 1070, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if ((unsigned long )delegation == (unsigned long )((struct nfs_delegation *)0) || (delegation->type & fmode) == fmode) { rcu_read_unlock(); return; } else { } rcu_read_unlock(); nfs4_inode_return_delegation(inode); return; } } static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata ) { struct nfs4_state *state ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_delegation *delegation ; int open_mode ; fmode_t fmode ; nfs4_stateid stateid ; int ret ; int tmp___0 ; int tmp___1 ; struct nfs_delegation *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; void *tmp___6 ; { state = opendata->state; tmp = NFS_I((struct inode const *)state->inode); nfsi = tmp; open_mode = opendata->o_arg.open_flags & 640; fmode = opendata->o_arg.fmode; ret = -11; ldv_51297: tmp___1 = can_open_cached(state, fmode, open_mode); if (tmp___1 != 0) { spin_lock(& (state->owner)->so_lock); tmp___0 = can_open_cached(state, fmode, open_mode); if (tmp___0 != 0) { update_open_stateflags(state, fmode); spin_unlock(& (state->owner)->so_lock); goto out_return_state; } else { } spin_unlock(& (state->owner)->so_lock); } else { } rcu_read_lock(); _________p1 = *((struct nfs_delegation * volatile *)(& nfsi->delegation)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 1100, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; tmp___4 = can_open_delegated(delegation, fmode); if (tmp___4 == 0) { rcu_read_unlock(); goto ldv_51295; } else { } nfs4_stateid_copy(& stateid, (nfs4_stateid const *)(& delegation->stateid)); rcu_read_unlock(); ret = nfs_may_open(state->inode, (state->owner)->so_cred, open_mode); if (ret != 0) { goto out; } else { } ret = -11; tmp___5 = update_open_stateid(state, 0, & stateid, fmode); if (tmp___5 != 0) { goto out_return_state; } else { } goto ldv_51297; ldv_51295: ; out: tmp___6 = ERR_PTR((long )ret); return ((struct nfs4_state *)tmp___6); out_return_state: atomic_inc(& state->count); return (state); } } static void nfs4_opendata_check_deleg(struct nfs4_opendata *data , struct nfs4_state *state ) { struct nfs_client *clp ; struct nfs_server *tmp ; struct nfs_delegation *delegation ; int delegation_flags ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp___0 ; bool __warned ; int tmp___1 ; int tmp___2 ; struct ratelimit_state _rs ; int tmp___3 ; { tmp = NFS_SERVER((struct inode const *)state->inode); clp = tmp->nfs_client; delegation_flags = 0; rcu_read_lock(); tmp___0 = NFS_I((struct inode const *)state->inode); _________p1 = *((struct nfs_delegation * volatile *)(& tmp___0->delegation)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! 
__warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 1132, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { delegation_flags = (int )delegation->flags; } else { } rcu_read_unlock(); if (data->o_arg.claim == 2U) { _rs.lock.raw_lock.ldv_2024.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = 0xffffffffffffffffUL; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___3 = ___ratelimit(& _rs, "nfs4_opendata_check_deleg"); if (tmp___3 != 0) { printk("\vNFS: Broken NFSv4 server %s is returning a delegation for OPEN(CLAIM_DELEGATE_CUR)\n", clp->cl_hostname); } else { } } else if (((unsigned long )delegation_flags & 1UL) == 0UL) { nfs_inode_set_delegation(state->inode, (data->owner)->so_cred, & data->o_res); } else { nfs_inode_reclaim_delegation(state->inode, (data->owner)->so_cred, & data->o_res); } return; } } static struct nfs4_state *_nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data ) { struct inode *inode ; struct nfs4_state *state ; int ret ; void *tmp ; { inode = (data->state)->inode; state = data->state; if ((unsigned int )*((unsigned char *)data + 864UL) == 0U) { ret = data->rpc_status; goto err; } else { } ret = -116; if (((data->f_attr.valid & 1U) == 0U || (data->f_attr.valid & 2048U) == 0U) || (data->f_attr.valid & 131072U) == 0U) { goto err; } else { } ret = -12; state = nfs4_get_open_state(inode, data->owner); if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto err; } else { } ret = nfs_refresh_inode(inode, & data->f_attr); if (ret != 0) { goto err; } else { } if (data->o_res.delegation_type != 0U) { nfs4_opendata_check_deleg(data, state); } else { } update_open_stateid(state, & data->o_res.stateid, 0, data->o_arg.fmode); return (state); err: tmp = ERR_PTR((long )ret); return ((struct nfs4_state *)tmp); } } static struct nfs4_state *_nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data ) { struct inode *inode ; struct nfs4_state *state ; int ret ; long tmp ; long tmp___0 ; void *tmp___1 ; { state = 0; if ((unsigned int )*((unsigned char *)data + 864UL) == 0U) { state = nfs4_try_open_cached(data); goto out; } else { } ret = -11; if ((data->f_attr.valid & 162943U) == 0U) { goto err; } else { } inode = nfs_fhget((data->dir)->d_sb, & data->o_res.fh, & data->f_attr); tmp = PTR_ERR((void const *)inode); ret = (int )tmp; tmp___0 = IS_ERR((void const *)inode); if (tmp___0 != 0L) { goto err; } else { } ret = -12; state = nfs4_get_open_state(inode, data->owner); if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto err_put_inode; } else { } if (data->o_res.delegation_type != 0U) { nfs4_opendata_check_deleg(data, state); } else { } update_open_stateid(state, & data->o_res.stateid, 0, data->o_arg.fmode); iput(inode); out: ; return (state); err_put_inode: iput(inode); err: tmp___1 = ERR_PTR((long )ret); return ((struct 
nfs4_state *)tmp___1); } } static struct nfs4_state *nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data ) { struct nfs4_state *tmp ; struct nfs4_state *tmp___0 ; { if (data->o_arg.claim == 1U) { tmp = _nfs4_opendata_reclaim_to_nfs4_state(data); return (tmp); } else { } tmp___0 = _nfs4_opendata_to_nfs4_state(data); return (tmp___0); } } static struct nfs_open_context *nfs4_state_find_open_context(struct nfs4_state *state ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_open_context *ctx ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; void *tmp___0 ; { tmp = NFS_I((struct inode const *)state->inode); nfsi = tmp; spin_lock(& (state->inode)->i_lock); __mptr = (struct list_head const *)nfsi->open_files.next; ctx = (struct nfs_open_context *)__mptr + 0xffffffffffffffa0UL; goto ldv_51341; ldv_51340: ; if ((unsigned long )ctx->state != (unsigned long )state) { goto ldv_51339; } else { } get_nfs_open_context(ctx); spin_unlock(& (state->inode)->i_lock); return (ctx); ldv_51339: __mptr___0 = (struct list_head const *)ctx->list.next; ctx = (struct nfs_open_context *)__mptr___0 + 0xffffffffffffffa0UL; ldv_51341: ; if ((unsigned long )(& ctx->list) != (unsigned long )(& nfsi->open_files)) { goto ldv_51340; } else { } spin_unlock(& (state->inode)->i_lock); tmp___0 = ERR_PTR(-2L); return ((struct nfs_open_context *)tmp___0); } } static struct nfs4_opendata *nfs4_open_recoverdata_alloc(struct nfs_open_context *ctx , struct nfs4_state *state ) { struct nfs4_opendata *opendata ; void *tmp ; { opendata = nfs4_opendata_alloc(ctx->dentry, state->owner, 0U, 0, 0, 80U); if ((unsigned long )opendata == (unsigned long )((struct nfs4_opendata *)0)) { tmp = ERR_PTR(-12L); return ((struct nfs4_opendata *)tmp); } else { } opendata->state = state; atomic_inc(& state->count); return (opendata); } } static int nfs4_open_recover_helper(struct nfs4_opendata *opendata , fmode_t fmode , struct nfs4_state **res ) { struct nfs4_state *newstate ; int ret ; long tmp ; long tmp___0 ; { opendata->o_arg.open_flags = 0; opendata->o_arg.fmode = fmode; memset((void *)(& opendata->o_res), 0, 312UL); memset((void *)(& opendata->c_res), 0, 24UL); nfs4_init_opendata_res(opendata); ret = _nfs4_recover_proc_open(opendata); if (ret != 0) { return (ret); } else { } newstate = nfs4_opendata_to_nfs4_state(opendata); tmp___0 = IS_ERR((void const *)newstate); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)newstate); return ((int )tmp); } else { } nfs4_close_state(newstate, fmode); *res = newstate; return (0); } } static int nfs4_open_recover(struct nfs4_opendata *opendata , struct nfs4_state *state ) { struct nfs4_state *newstate ; int ret ; int tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { clear_bit(1, (unsigned long volatile *)(& state->flags)); __asm__ volatile ("": : : "memory"); if (state->n_rdwr != 0U) { clear_bit(4, (unsigned long volatile *)(& state->flags)); ret = nfs4_open_recover_helper(opendata, 3U, & newstate); if (ret != 0) { return (ret); } else { } if ((unsigned long )newstate != (unsigned long )state) { return (-116); } else { } } else { } if (state->n_wronly != 0U) { clear_bit(3, (unsigned long volatile *)(& state->flags)); ret = nfs4_open_recover_helper(opendata, 2U, & newstate); if (ret != 0) { return (ret); } else { } if ((unsigned long )newstate != (unsigned long )state) { return (-116); } else { } } else { } if (state->n_rdonly != 0U) { clear_bit(2, (unsigned long volatile *)(& state->flags)); ret = nfs4_open_recover_helper(opendata, 1U, & newstate); if (ret != 0) { return 
(ret); } else { } if ((unsigned long )newstate != (unsigned long )state) { return (-116); } else { } } else { } tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 == 0) { tmp___1 = nfs4_stateid_match((nfs4_stateid const *)(& state->stateid), (nfs4_stateid const *)(& state->open_stateid)); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { write_seqlock(& state->seqlock); tmp = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp == 0) { nfs4_stateid_copy(& state->stateid, (nfs4_stateid const *)(& state->open_stateid)); } else { } write_sequnlock(& state->seqlock); } else { } } else { } return (0); } } static int _nfs4_do_open_reclaim(struct nfs_open_context *ctx , struct nfs4_state *state ) { struct nfs_delegation *delegation ; struct nfs4_opendata *opendata ; fmode_t delegation_type ; int status ; long tmp ; long tmp___0 ; struct nfs_fh *tmp___1 ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp___2 ; bool __warned ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { delegation_type = 0U; opendata = nfs4_open_recoverdata_alloc(ctx, state); tmp___0 = IS_ERR((void const *)opendata); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)opendata); return ((int )tmp); } else { } opendata->o_arg.claim = 1U; tmp___1 = NFS_FH((struct inode const *)state->inode); opendata->o_arg.fh = (struct nfs_fh const *)tmp___1; rcu_read_lock(); tmp___2 = NFS_I((struct inode const *)state->inode); _________p1 = *((struct nfs_delegation * volatile *)(& tmp___2->delegation)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned) { tmp___4 = rcu_read_lock_held(); if (tmp___4 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 1350, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { tmp___5 = constant_test_bit(0U, (unsigned long const volatile *)(& delegation->flags)); if (tmp___5 != 0) { delegation_type = delegation->type; } else { } } else { } rcu_read_unlock(); opendata->o_arg.u.delegation_type = delegation_type; status = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return (status); } } static int nfs4_do_open_reclaim(struct nfs_open_context *ctx , struct nfs4_state *state ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51380: err = _nfs4_do_open_reclaim(ctx, state); if (err != -10008) { goto ldv_51379; } else { } nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_51380; } else { } ldv_51379: ; return (err); } } static int nfs4_open_reclaim(struct nfs4_state_owner *sp , struct nfs4_state *state ) { struct nfs_open_context *ctx ; int ret ; long tmp ; long tmp___0 ; { ctx = nfs4_state_find_open_context(state); tmp___0 = IS_ERR((void const *)ctx); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } ret = nfs4_do_open_reclaim(ctx, state); put_nfs_open_context(ctx); return (ret); } } static int _nfs4_open_delegation_recall(struct 
nfs_open_context *ctx , struct nfs4_state *state , nfs4_stateid const *stateid ) { struct nfs4_opendata *opendata ; int ret ; long tmp ; long tmp___0 ; { opendata = nfs4_open_recoverdata_alloc(ctx, state); tmp___0 = IS_ERR((void const *)opendata); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)opendata); return ((int )tmp); } else { } opendata->o_arg.claim = 2U; nfs4_stateid_copy(& opendata->o_arg.u.delegation, stateid); ret = nfs4_open_recover(opendata, state); nfs4_opendata_put(opendata); return (ret); } } int nfs4_open_delegation_recall(struct nfs_open_context *ctx , struct nfs4_state *state , nfs4_stateid const *stateid ) { struct nfs4_exception exception ; struct nfs_server *server ; struct nfs_server *tmp ; int err ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; ldv_51419: err = _nfs4_open_delegation_recall(ctx, state, stateid); switch (err) { case 0: ; case -2: ; case -116: ; goto out; case -10052: ; case -10053: ; case -10077: ; case -10055: ; case -10078: nfs4_schedule_session_recovery((server->nfs_client)->cl_session, err); goto out; case -10022: ; case -10023: ; case -10011: nfs4_schedule_lease_recovery(server->nfs_client); goto out; case -512: ; case -10087: ; case -10047: ; case -10025: nfs_inode_find_state_and_recover(state->inode, stateid); nfs4_schedule_stateid_recovery((struct nfs_server const *)server, state); case -12: err = 0; goto out; } err = nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_51419; } else { } out: ; return (err); } } static void nfs4_open_confirm_done(struct rpc_task *task , void *calldata ) { struct nfs4_opendata *data ; { data = (struct nfs4_opendata *)calldata; data->rpc_status = task->tk_status; if (data->rpc_status == 0) { nfs4_stateid_copy(& data->o_res.stateid, (nfs4_stateid const *)(& data->c_res.stateid)); nfs_confirm_seqid(& (data->owner)->so_seqid, 0); renew_lease(data->o_res.server, data->timestamp); data->rpc_done = 1U; } else { } return; } } static void nfs4_open_confirm_release(void *calldata ) { struct nfs4_opendata *data ; struct nfs4_state *state ; long tmp ; { data = (struct nfs4_opendata *)calldata; state = 0; if (data->cancelled == 0) { goto out_free; } else { } if ((unsigned int )*((unsigned char *)data + 864UL) == 0U) { goto out_free; } else { } state = nfs4_opendata_to_nfs4_state(data); tmp = IS_ERR((void const *)state); if (tmp == 0L) { nfs4_close_state(state, data->o_arg.fmode); } else { } out_free: nfs4_opendata_put(data); return; } } static struct rpc_call_ops const nfs4_open_confirm_ops = {0, & nfs4_open_confirm_done, 0, & nfs4_open_confirm_release}; static int _nfs4_proc_open_confirm(struct nfs4_opendata *data ) { struct nfs_server *server ; struct nfs_server *tmp ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; int status ; long tmp___0 ; long tmp___1 ; { tmp = NFS_SERVER((struct inode const *)(data->dir)->d_inode); server = tmp; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 5UL; msg.rpc_argp = (void *)(& data->c_arg); msg.rpc_resp = (void *)(& data->c_res); msg.rpc_cred = (data->owner)->so_cred; task_setup_data.task = 0; task_setup_data.rpc_client = server->client; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_open_confirm_ops; task_setup_data.callback_data = (void *)data; task_setup_data.workqueue = nfsiod_workqueue; task_setup_data.flags = 1U; 
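/* Take an extra reference on the opendata, reset the rpc_done/rpc_status bookkeeping, then run the asynchronous OPEN_CONFIRM task and wait for it; an interrupted wait marks the request cancelled so nfs4_open_confirm_release() finishes the cleanup. */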
task_setup_data.priority = (signed char)0; kref_get(& data->kref); data->rpc_done = 0U; data->rpc_status = 0; data->timestamp = jiffies; task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___1 = IS_ERR((void const *)task); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)task); return ((int )tmp___0); } else { } status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { data->cancelled = 1; __asm__ volatile ("": : : "memory"); } else { status = data->rpc_status; } rpc_put_task(task); return (status); } } static void nfs4_open_prepare(struct rpc_task *task , void *calldata ) { struct nfs4_opendata *data ; struct nfs4_state_owner *sp ; int tmp ; struct nfs_delegation *delegation ; int tmp___0 ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp___1 ; bool __warned ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { data = (struct nfs4_opendata *)calldata; sp = data->owner; tmp = nfs_wait_on_sequence(data->o_arg.seqid, task); if (tmp != 0) { return; } else { } if ((unsigned long )data->state != (unsigned long )((struct nfs4_state *)0)) { tmp___0 = can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags); if (tmp___0 != 0) { goto out_no_action; } else { } rcu_read_lock(); tmp___1 = NFS_I((struct inode const *)(data->state)->inode); _________p1 = *((struct nfs_delegation * volatile *)(& tmp___1->delegation)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared", 1541, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if (data->o_arg.claim != 2U) { tmp___4 = can_open_delegated(delegation, data->o_arg.fmode); if (tmp___4 != 0) { goto unlock_no_action; } else { } } else { } rcu_read_unlock(); } else { } data->o_arg.clientid = ((sp->so_server)->nfs_client)->cl_clientid; if (data->o_arg.claim == 1U) { task->tk_msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 6UL; data->o_arg.open_bitmap = (u32 const *)(& nfs4_open_noattr_bitmap); nfs_copy_fh(& data->o_res.fh, data->o_arg.fh); } else { } data->timestamp = jiffies; tmp___5 = nfs4_setup_sequence(data->o_arg.server, & data->o_arg.seq_args, & data->o_res.seq_res, task); if (tmp___5 != 0) { nfs_release_seqid(data->o_arg.seqid); } else { } return; unlock_no_action: rcu_read_unlock(); out_no_action: task->tk_action = 0; nfs4_sequence_done(task, & data->o_res.seq_res); return; } } static void nfs4_open_done(struct rpc_task *task , void *calldata ) { struct nfs4_opendata *data ; int tmp ; { data = (struct nfs4_opendata *)calldata; data->rpc_status = task->tk_status; tmp = nfs4_sequence_done(task, & data->o_res.seq_res); if (tmp == 0) { return; } else { } if (task->tk_status == 0) { if ((int )(data->o_res.f_attr)->valid & 1) { switch ((int )(data->o_res.f_attr)->mode & 61440) { case 32768: ; goto ldv_51459; case 40960: data->rpc_status = -40; goto ldv_51459; case 16384: data->rpc_status = -21; goto ldv_51459; default: data->rpc_status = -20; } ldv_51459: ; } else { } renew_lease(data->o_res.server, data->timestamp); if ((data->o_res.rflags & 2U) == 0U) { nfs_confirm_seqid(& (data->owner)->so_seqid, 0); } else { } } else { } data->rpc_done = 1U; return; } } static 
void nfs4_open_release(void *calldata ) { struct nfs4_opendata *data ; struct nfs4_state *state ; long tmp ; { data = (struct nfs4_opendata *)calldata; state = 0; if (data->cancelled == 0) { goto out_free; } else { } if (data->rpc_status != 0 || (unsigned int )*((unsigned char *)data + 864UL) == 0U) { goto out_free; } else { } if ((data->o_res.rflags & 2U) != 0U) { goto out_free; } else { } state = nfs4_opendata_to_nfs4_state(data); tmp = IS_ERR((void const *)state); if (tmp == 0L) { nfs4_close_state(state, data->o_arg.fmode); } else { } out_free: nfs4_opendata_put(data); return; } } static struct rpc_call_ops const nfs4_open_ops = {& nfs4_open_prepare, & nfs4_open_done, 0, & nfs4_open_release}; static int nfs4_run_open_task(struct nfs4_opendata *data , int isrecover ) { struct inode *dir ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_openargs *o_arg ; struct nfs_openres *o_res ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; int status ; long tmp___0 ; long tmp___1 ; { dir = (data->dir)->d_inode; tmp = NFS_SERVER((struct inode const *)dir); server = tmp; o_arg = & data->o_arg; o_res = & data->o_res; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 4UL; msg.rpc_argp = (void *)o_arg; msg.rpc_resp = (void *)o_res; msg.rpc_cred = (data->owner)->so_cred; task_setup_data.task = 0; task_setup_data.rpc_client = server->client; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_open_ops; task_setup_data.callback_data = (void *)data; task_setup_data.workqueue = nfsiod_workqueue; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; nfs41_init_sequence(& o_arg->seq_args, & o_res->seq_res, 1); kref_get(& data->kref); data->rpc_done = 0U; data->rpc_status = 0; data->cancelled = 0; if (isrecover != 0) { nfs4_set_sequence_privileged(& o_arg->seq_args); } else { } task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___1 = IS_ERR((void const *)task); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)task); return ((int )tmp___0); } else { } status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { data->cancelled = 1; __asm__ volatile ("": : : "memory"); } else { status = data->rpc_status; } rpc_put_task(task); return (status); } } static int _nfs4_recover_proc_open(struct nfs4_opendata *data ) { struct inode *dir ; struct nfs_openres *o_res ; int status ; struct nfs_server *tmp ; { dir = (data->dir)->d_inode; o_res = & data->o_res; status = nfs4_run_open_task(data, 1); if (status != 0 || (unsigned int )*((unsigned char *)data + 864UL) == 0U) { return (status); } else { } tmp = NFS_SERVER((struct inode const *)dir); nfs_fattr_map_and_free_names(tmp, & data->f_attr); if ((o_res->rflags & 2U) != 0U) { status = _nfs4_proc_open_confirm(data); if (status != 0) { return (status); } else { } } else { } return (status); } } static int nfs4_opendata_access(struct rpc_cred *cred , struct nfs4_opendata *opendata , struct nfs4_state *state , fmode_t fmode ) { struct nfs_access_entry cache ; u32 mask ; { if (opendata->o_res.access_supported == 0U) { return (0); } else { } mask = 0U; if ((int )fmode & 1) { mask = mask | 4U; } else { } if ((fmode & 32U) != 0U) { mask = mask | 1U; } else { } cache.cred = cred; cache.jiffies = jiffies; nfs_access_set_mask(& cache, opendata->o_res.access_result); nfs_access_add_cache(state->inode, & cache); if ((((u32 )(~ cache.mask) & mask) & 5U) == 0U) { return (0); } else { } nfs4_close_state(state, fmode); 
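/* The access bits returned with the OPEN do not cover the requested mode: release the just-created open state and return -EACCES. */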
return (-13); } } static int _nfs4_proc_open(struct nfs4_opendata *data ) { struct inode *dir ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_openargs *o_arg ; struct nfs_openres *o_res ; int status ; { dir = (data->dir)->d_inode; tmp = NFS_SERVER((struct inode const *)dir); server = tmp; o_arg = & data->o_arg; o_res = & data->o_res; status = nfs4_run_open_task(data, 0); if ((unsigned int )*((unsigned char *)data + 864UL) == 0U) { return (status); } else { } if (status != 0) { if (status == -10041 && (o_arg->open_flags & 64) == 0) { return (-2); } else { } return (status); } else { } nfs_fattr_map_and_free_names(server, & data->f_attr); if ((o_arg->open_flags & 64) != 0) { update_changeattr(dir, & o_res->cinfo); } else { } if ((o_res->rflags & 4U) == 0U) { server->caps = server->caps & 4294950911U; } else { } if ((o_res->rflags & 2U) != 0U) { status = _nfs4_proc_open_confirm(data); if (status != 0) { return (status); } else { } } else { } if (((o_res->f_attr)->valid & 162943U) == 0U) { _nfs4_proc_getattr(server, & o_res->fh, o_res->f_attr); } else { } return (0); } } static int nfs4_recover_expired_lease(struct nfs_server *server ) { int tmp ; { tmp = nfs4_client_recover_expired_lease(server->nfs_client); return (tmp); } } static int _nfs4_open_expired(struct nfs_open_context *ctx , struct nfs4_state *state ) { struct nfs4_opendata *opendata ; int ret ; long tmp ; long tmp___0 ; { opendata = nfs4_open_recoverdata_alloc(ctx, state); tmp___0 = IS_ERR((void const *)opendata); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)opendata); return ((int )tmp); } else { } ret = nfs4_open_recover(opendata, state); if (ret == -116) { d_drop(ctx->dentry); } else { } nfs4_opendata_put(opendata); return (ret); } } static int nfs4_do_open_expired(struct nfs_open_context *ctx , struct nfs4_state *state ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51524: err = _nfs4_open_expired(ctx, state); switch (err) { default: ; goto out; case -10013: ; case -10008: nfs4_handle_exception(server, err, & exception); err = 0; } if (exception.retry != 0) { goto ldv_51524; } else { } out: ; return (err); } } static int nfs4_open_expired(struct nfs4_state_owner *sp , struct nfs4_state *state ) { struct nfs_open_context *ctx ; int ret ; long tmp ; long tmp___0 ; { ctx = nfs4_state_find_open_context(state); tmp___0 = IS_ERR((void const *)ctx); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } ret = nfs4_do_open_expired(ctx, state); put_nfs_open_context(ctx); return (ret); } } static void nfs41_clear_delegation_stateid(struct nfs4_state *state ) { struct nfs_server *server ; struct nfs_server *tmp ; nfs4_stateid *stateid ; int status ; int tmp___0 ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; stateid = & state->stateid; tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 == 0) { return; } else { } status = nfs41_test_stateid(server, stateid); if (status != 0) { if (status != -10025) { nfs41_free_stateid(server, stateid); } else { } nfs_remove_bad_delegation(state->inode); write_seqlock(& state->seqlock); nfs4_stateid_copy(& state->stateid, (nfs4_stateid const *)(& state->open_stateid)); write_sequnlock(& state->seqlock); clear_bit(1, (unsigned long volatile *)(& state->flags)); } else { } 
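/* TEST_STATEID reported the delegation stateid invalid, so the bad delegation was removed above and state->stateid was reset from the open stateid. */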
return; } } static int nfs41_check_open_stateid(struct nfs4_state *state ) { struct nfs_server *server ; struct nfs_server *tmp ; nfs4_stateid *stateid ; int status ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; stateid = & state->open_stateid; tmp___0 = constant_test_bit(2U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 == 0) { tmp___1 = constant_test_bit(3U, (unsigned long const volatile *)(& state->flags)); if (tmp___1 == 0) { tmp___2 = constant_test_bit(4U, (unsigned long const volatile *)(& state->flags)); if (tmp___2 == 0) { return (-10025); } else { } } else { } } else { } status = nfs41_test_stateid(server, stateid); if (status != 0) { if (status != -10025) { nfs41_free_stateid(server, stateid); } else { } clear_bit(2, (unsigned long volatile *)(& state->flags)); clear_bit(3, (unsigned long volatile *)(& state->flags)); clear_bit(4, (unsigned long volatile *)(& state->flags)); } else { } return (status); } } static int nfs41_open_expired(struct nfs4_state_owner *sp , struct nfs4_state *state ) { int status ; { nfs41_clear_delegation_stateid(state); status = nfs41_check_open_stateid(state); if (status != 0) { status = nfs4_open_expired(sp, state); } else { } return (status); } } __inline static void nfs4_exclusive_attrset(struct nfs4_opendata *opendata , struct iattr *sattr ) { { if (((unsigned long )opendata->o_res.attrset[1] & 32768UL) != 0UL && (sattr->ia_valid & 128U) == 0U) { sattr->ia_valid = sattr->ia_valid | 16U; } else { } if (((unsigned long )opendata->o_res.attrset[1] & 2097152UL) != 0UL && (sattr->ia_valid & 256U) == 0U) { sattr->ia_valid = sattr->ia_valid | 32U; } else { } return; } } static int _nfs4_do_open(struct inode *dir , struct dentry *dentry , fmode_t fmode , int flags , struct iattr *sattr , struct rpc_cred *cred , struct nfs4_state **res , struct nfs4_threshold **ctx_th ) { struct nfs4_state_owner *sp ; struct nfs4_state *state ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_opendata *opendata ; int status ; long tmp___0 ; long tmp___1 ; long tmp___2 ; bool tmp___3 ; { state = 0; tmp = NFS_SERVER((struct inode const *)dir); server = tmp; status = -12; sp = nfs4_get_state_owner(server, cred, 208U); if ((unsigned long )sp == (unsigned long )((struct nfs4_state_owner *)0)) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs4_do_open: nfs4_get_state_owner failed!\n"); } else { } goto out_err; } else { } status = nfs4_recover_expired_lease(server); if (status != 0) { goto err_put_state_owner; } else { } if ((unsigned long )dentry->d_inode != (unsigned long )((struct inode *)0)) { nfs4_return_incompatible_delegation(dentry->d_inode, fmode); } else { } status = -12; opendata = nfs4_opendata_alloc(dentry, sp, fmode, flags, (struct iattr const *)sattr, 208U); if ((unsigned long )opendata == (unsigned long )((struct nfs4_opendata *)0)) { goto err_put_state_owner; } else { } if ((unsigned long )ctx_th != (unsigned long )((struct nfs4_threshold **)0) && ((unsigned long )server->attr_bitmask[2] & 16UL) != 0UL) { opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); if ((unsigned long )opendata->f_attr.mdsthreshold == (unsigned long )((struct nfs4_threshold *)0)) { goto err_opendata_put; } else { } opendata->o_arg.open_bitmap = (u32 const *)(& nfs4_pnfs_open_bitmap); } else { } if ((unsigned long )dentry->d_inode != (unsigned long )((struct inode *)0)) { opendata->state = nfs4_get_open_state(dentry->d_inode, sp); } else 
{ } status = _nfs4_proc_open(opendata); if (status != 0) { goto err_opendata_put; } else { } state = nfs4_opendata_to_nfs4_state(opendata); tmp___1 = PTR_ERR((void const *)state); status = (int )tmp___1; tmp___2 = IS_ERR((void const *)state); if (tmp___2 != 0L) { goto err_opendata_put; } else { } if ((server->caps & 16384U) != 0U) { set_bit(7U, (unsigned long volatile *)(& state->flags)); } else { } status = nfs4_opendata_access(cred, opendata, state, fmode); if (status != 0) { goto err_opendata_put; } else { } if ((opendata->o_arg.open_flags & 128) != 0) { nfs4_exclusive_attrset(opendata, sattr); nfs_fattr_init(opendata->o_res.f_attr); status = nfs4_do_setattr(state->inode, cred, opendata->o_res.f_attr, sattr, state); if (status == 0) { nfs_setattr_update_inode(state->inode, sattr); } else { } nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); } else { } tmp___3 = pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server); if ((int )tmp___3) { *ctx_th = opendata->f_attr.mdsthreshold; } else { kfree((void const *)opendata->f_attr.mdsthreshold); } opendata->f_attr.mdsthreshold = 0; nfs4_opendata_put(opendata); nfs4_put_state_owner(sp); *res = state; return (0); err_opendata_put: kfree((void const *)opendata->f_attr.mdsthreshold); nfs4_opendata_put(opendata); err_put_state_owner: nfs4_put_state_owner(sp); out_err: *res = 0; return (status); } } static struct nfs4_state *nfs4_do_open(struct inode *dir , struct dentry *dentry , fmode_t fmode , int flags , struct iattr *sattr , struct rpc_cred *cred , struct nfs4_threshold **ctx_th ) { struct nfs4_exception exception ; struct nfs4_state *res ; int status ; struct ratelimit_state _rs ; struct nfs_server *tmp ; int tmp___0 ; struct nfs_server *tmp___1 ; int tmp___2 ; void *tmp___3 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; fmode = fmode & 35U; ldv_51588: status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, & res, ctx_th); if (status == 0) { goto ldv_51583; } else { } if (status == -10026) { _rs.lock.raw_lock.ldv_2024.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = 0xffffffffffffffffUL; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___0 = ___ratelimit(& _rs, "nfs4_do_open"); if (tmp___0 != 0) { tmp = NFS_SERVER((struct inode const *)dir); printk("\fNFS: v4 server %s returned a bad sequence-id error!\n", (tmp->nfs_client)->cl_hostname); } else { } exception.retry = 1; goto ldv_51587; } else { } if (status == -10025) { exception.retry = 1; goto ldv_51587; } else { } if (status == -11) { exception.retry = 1; goto ldv_51587; } else { } tmp___1 = NFS_SERVER((struct inode const *)dir); tmp___2 = nfs4_handle_exception(tmp___1, status, & exception); tmp___3 = ERR_PTR((long )tmp___2); res = (struct nfs4_state *)tmp___3; ldv_51587: ; if (exception.retry != 0) { goto ldv_51588; } else { } ldv_51583: ; return (res); } } static int _nfs4_do_setattr(struct inode *inode , struct rpc_cred *cred , struct nfs_fattr *fattr , struct iattr *sattr , struct nfs4_state *state ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_setattrargs arg ; struct nfs_fh *tmp___0 ; struct nfs_setattrres res ; struct rpc_message msg ; unsigned long timestamp ; int status ; struct nfs_lockowner lockowner ; struct 
task_struct *tmp___1 ; struct task_struct *tmp___2 ; bool tmp___3 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = NFS_FH((struct inode const *)inode); arg.seq_args.sa_slot = 0; arg.seq_args.sa_cache_this = (unsigned char)0; arg.seq_args.sa_privileged = (unsigned char)0; arg.fh = tmp___0; arg.stateid.seqid = 0U; arg.stateid.other[0] = (char)0; arg.stateid.other[1] = (char)0; arg.stateid.other[2] = (char)0; arg.stateid.other[3] = (char)0; arg.stateid.other[4] = (char)0; arg.stateid.other[5] = (char)0; arg.stateid.other[6] = (char)0; arg.stateid.other[7] = (char)0; arg.stateid.other[8] = (char)0; arg.stateid.other[9] = (char)0; arg.stateid.other[10] = (char)0; arg.stateid.other[11] = (char)0; arg.iap = sattr; arg.server = (struct nfs_server const *)server; arg.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.fattr = fattr; res.server = (struct nfs_server const *)server; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 9UL; msg.rpc_argp = (void *)(& arg); msg.rpc_resp = (void *)(& res); msg.rpc_cred = cred; timestamp = jiffies; nfs_fattr_init(fattr); if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { tmp___1 = get_current(); tmp___2 = get_current(); lockowner.l_owner = tmp___1->files; lockowner.l_pid = tmp___2->tgid; nfs4_select_rw_stateid(& arg.stateid, state, 2U, (struct nfs_lockowner const *)(& lockowner)); } else { tmp___3 = nfs4_copy_delegation_stateid(& arg.stateid, inode, 2U); if ((int )tmp___3) { } else { nfs4_stateid_copy(& arg.stateid, & zero_stateid); } } status = nfs4_call_sync(server->client, server, & msg, & arg.seq_args, & res.seq_res, 1); if (status == 0 && (unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { renew_lease((struct nfs_server const *)server, timestamp); } else { } return (status); } } static int nfs4_do_setattr(struct inode *inode , struct rpc_cred *cred , struct nfs_fattr *fattr , struct iattr *sattr , struct nfs4_state *state ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = state; exception.inode = inode; ldv_51615: err = _nfs4_do_setattr(inode, cred, fattr, sattr, state); switch (err) { case -10038: ; if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0) && (state->state & 2U) == 0U) { err = -9; if ((sattr->ia_valid & 32768U) != 0U) { err = -13; } else { } goto out; } else { } } err = nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_51615; } else { } out: ; return (err); } } static void nfs4_free_closedata(void *data ) { struct nfs4_closedata *calldata ; struct nfs4_state_owner *sp ; struct super_block *sb ; { calldata = (struct nfs4_closedata *)data; sp = (calldata->state)->owner; sb = ((calldata->state)->inode)->i_sb; if ((int )calldata->roc) { pnfs_roc_release((calldata->state)->inode); } else { } nfs4_put_open_state(calldata->state); nfs_free_seqid(calldata->arg.seqid); nfs4_put_state_owner(sp); nfs_sb_deactive_async(sb); kfree((void const *)calldata); return; } } static void nfs4_close_clear_stateid_flags(struct nfs4_state *state , fmode_t fmode ) { { spin_lock(& (state->owner)->so_lock); if ((fmode & 1U) == 0U) { clear_bit(2, (unsigned long volatile *)(& 
state->flags)); } else { } if ((fmode & 2U) == 0U) { clear_bit(3, (unsigned long volatile *)(& state->flags)); } else { } clear_bit(4, (unsigned long volatile *)(& state->flags)); spin_unlock(& (state->owner)->so_lock); return; } } static void nfs4_close_done(struct rpc_task *task , void *data ) { struct nfs4_closedata *calldata ; struct nfs4_state *state ; struct nfs_server *server ; struct nfs_server *tmp ; long tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; { calldata = (struct nfs4_closedata *)data; state = calldata->state; tmp = NFS_SERVER((struct inode const *)calldata->inode); server = tmp; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: begin!\n", "nfs4_close_done"); } else { } tmp___1 = nfs4_sequence_done(task, & calldata->res.seq_res); if (tmp___1 == 0) { return; } else { } switch (task->tk_status) { case 0: ; if ((int )calldata->roc) { pnfs_roc_set_barrier(state->inode, calldata->roc_barrier); } else { } nfs_set_open_stateid(state, & calldata->res.stateid, 0U); renew_lease((struct nfs_server const *)server, calldata->timestamp); nfs4_close_clear_stateid_flags(state, calldata->arg.fmode); goto ldv_51645; case -10023: ; case -10024: ; case -10025: ; case -10011: ; if (calldata->arg.fmode == 0U) { goto ldv_51645; } else { } default: tmp___2 = nfs4_async_handle_error(task, (struct nfs_server const *)server, state); if (tmp___2 == -11) { rpc_restart_call_prepare(task); } else { } } ldv_51645: nfs_release_seqid(calldata->arg.seqid); nfs_refresh_inode(calldata->inode, calldata->res.fattr); tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: done, ret = %d!\n", "nfs4_close_done", task->tk_status); } else { } return; } } static void nfs4_close_prepare(struct rpc_task *task , void *data ) { struct nfs4_closedata *calldata ; struct nfs4_state *state ; struct inode *inode ; int call_close ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; bool tmp___5 ; struct nfs_server *tmp___6 ; int tmp___7 ; long tmp___8 ; { calldata = (struct nfs4_closedata *)data; state = calldata->state; inode = calldata->inode; call_close = 0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: begin!\n", "nfs4_close_prepare"); } else { } tmp___0 = nfs_wait_on_sequence(calldata->arg.seqid, task); if (tmp___0 != 0) { return; } else { } task->tk_msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 7UL; calldata->arg.fmode = 3U; spin_lock(& (state->owner)->so_lock); if (state->n_rdwr == 0U) { if (state->n_rdonly == 0U) { tmp___1 = constant_test_bit(2U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___1 | call_close; tmp___2 = constant_test_bit(4U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___2 | call_close; calldata->arg.fmode = calldata->arg.fmode & 4294967294U; } else { } if (state->n_wronly == 0U) { tmp___3 = constant_test_bit(3U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___3 | call_close; tmp___4 = constant_test_bit(4U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___4 | call_close; calldata->arg.fmode = calldata->arg.fmode & 4294967293U; } else { } } else { } spin_unlock(& (state->owner)->so_lock); if (call_close == 0) { task->tk_action = 0; nfs4_sequence_done(task, & calldata->res.seq_res); goto out; } else { } if (calldata->arg.fmode == 0U) { task->tk_msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 8UL; if ((int )calldata->roc) { 
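/* Return-on-close is pending for this inode: let pNFS drain outstanding layouts before CLOSE is sent; if draining still blocks the task, the prepare path bails out here. */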
tmp___5 = pnfs_roc_drain(inode, & calldata->roc_barrier, task); if ((int )tmp___5) { goto out; } else { } } else { } } else { } nfs_fattr_init(calldata->res.fattr); calldata->timestamp = jiffies; tmp___6 = NFS_SERVER((struct inode const *)inode); tmp___7 = nfs4_setup_sequence((struct nfs_server const *)tmp___6, & calldata->arg.seq_args, & calldata->res.seq_res, task); if (tmp___7 != 0) { nfs_release_seqid(calldata->arg.seqid); } else { } out: tmp___8 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___8 != 0L) { printk("\001d%s: done!\n", "nfs4_close_prepare"); } else { } return; } } static struct rpc_call_ops const nfs4_close_ops = {& nfs4_close_prepare, & nfs4_close_done, 0, & nfs4_free_closedata}; int nfs4_do_close(struct nfs4_state *state , gfp_t gfp_mask , int wait ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_closedata *calldata ; struct nfs4_state_owner *sp ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; int status ; void *tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; sp = state->owner; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 8UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = (state->owner)->so_cred; task_setup_data.task = 0; task_setup_data.rpc_client = server->client; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_close_ops; task_setup_data.callback_data = 0; task_setup_data.workqueue = nfsiod_workqueue; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; status = -12; tmp___0 = kzalloc(376UL, gfp_mask); calldata = (struct nfs4_closedata *)tmp___0; if ((unsigned long )calldata == (unsigned long )((struct nfs4_closedata *)0)) { goto out; } else { } nfs41_init_sequence(& calldata->arg.seq_args, & calldata->res.seq_res, 1); calldata->inode = state->inode; calldata->state = state; calldata->arg.fh = NFS_FH((struct inode const *)state->inode); calldata->arg.stateid = & state->open_stateid; calldata->arg.seqid = nfs_alloc_seqid(& (state->owner)->so_seqid, gfp_mask); if ((unsigned long )calldata->arg.seqid == (unsigned long )((struct nfs_seqid *)0)) { goto out_free_calldata; } else { } calldata->arg.fmode = 0U; calldata->arg.bitmask = (u32 const *)(& server->cache_consistency_bitmask); calldata->res.fattr = & calldata->fattr; calldata->res.seqid = calldata->arg.seqid; calldata->res.server = (struct nfs_server const *)server; calldata->roc = pnfs_roc(state->inode); nfs_sb_active((calldata->inode)->i_sb); msg.rpc_argp = (void *)(& calldata->arg); msg.rpc_resp = (void *)(& calldata->res); task_setup_data.callback_data = (void *)calldata; task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___2 = IS_ERR((void const *)task); if (tmp___2 != 0L) { tmp___1 = PTR_ERR((void const *)task); return ((int )tmp___1); } else { } status = 0; if (wait != 0) { status = rpc_wait_for_completion_task(task); } else { } rpc_put_task(task); return (status); out_free_calldata: kfree((void const *)calldata); out: nfs4_put_open_state(state); nfs4_put_state_owner(sp); return (status); } } static struct inode *nfs4_atomic_open(struct inode *dir , struct nfs_open_context *ctx , int open_flags , struct iattr *attr ) { struct nfs4_state *state ; void *tmp ; long tmp___0 ; struct inode *tmp___1 ; { state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred, & ctx->mdsthreshold); tmp___0 = IS_ERR((void const *)state); if (tmp___0 != 0L) { tmp 
= ERR_CAST((void const *)state); return ((struct inode *)tmp); } else { } ctx->state = state; tmp___1 = igrab(state->inode); return (tmp___1); } } static void nfs4_close_context(struct nfs_open_context *ctx , int is_sync ) { { if ((unsigned long )ctx->state == (unsigned long )((struct nfs4_state *)0)) { return; } else { } if (is_sync != 0) { nfs4_close_sync(ctx->state, ctx->mode); } else { nfs4_close_state(ctx->state, ctx->mode); } return; } } static int _nfs4_server_capabilities(struct nfs_server *server , struct nfs_fh *fhandle ) { struct nfs4_server_caps_arg args ; struct nfs4_server_caps_res res ; struct rpc_message msg ; int status ; size_t __len ; void *__ret ; size_t __len___0 ; void *__ret___0 ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fhandle = fhandle; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.attr_bitmask[0] = 0U; res.attr_bitmask[1] = 0U; res.attr_bitmask[2] = 0U; res.acl_bitmask = 0U; res.has_links = 0U; res.has_symlinks = 0U; res.fh_expire_type = 0U; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 30UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; status = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); if (status == 0) { __len = 12UL; if (__len > 63UL) { __ret = memcpy((void *)(& server->attr_bitmask), (void const *)(& res.attr_bitmask), __len); } else { __ret = memcpy((void *)(& server->attr_bitmask), (void const *)(& res.attr_bitmask), __len); } server->caps = server->caps & 4294950961U; if (((unsigned long )res.attr_bitmask[0] & 4096UL) != 0UL) { server->caps = server->caps | 8U; } else { } if (res.has_links != 0U) { server->caps = server->caps | 2U; } else { } if (res.has_symlinks != 0U) { server->caps = server->caps | 4U; } else { } if (((unsigned long )res.attr_bitmask[0] & 1048576UL) != 0UL) { server->caps = server->caps | 64U; } else { } if (((unsigned long )res.attr_bitmask[1] & 2UL) != 0UL) { server->caps = server->caps | 128U; } else { } if (((unsigned long )res.attr_bitmask[1] & 8UL) != 0UL) { server->caps = server->caps | 256U; } else { } if (((unsigned long )res.attr_bitmask[1] & 16UL) != 0UL) { server->caps = server->caps | 512U; } else { } if (((unsigned long )res.attr_bitmask[1] & 32UL) != 0UL) { server->caps = server->caps | 1024U; } else { } if (((unsigned long )res.attr_bitmask[1] & 32768UL) != 0UL) { server->caps = server->caps | 2048U; } else { } if (((unsigned long )res.attr_bitmask[1] & 1048576UL) != 0UL) { server->caps = server->caps | 4096U; } else { } if (((unsigned long )res.attr_bitmask[1] & 2097152UL) != 0UL) { server->caps = server->caps | 8192U; } else { } __len___0 = 8UL; if (__len___0 > 63UL) { __ret___0 = memcpy((void *)(& server->cache_consistency_bitmask), (void const *)(& res.attr_bitmask), __len___0); } else { __ret___0 = memcpy((void *)(& server->cache_consistency_bitmask), (void const *)(& res.attr_bitmask), __len___0); } server->cache_consistency_bitmask[0] = server->cache_consistency_bitmask[0] & 24U; server->cache_consistency_bitmask[1] = server->cache_consistency_bitmask[1] & 3145728U; server->acl_bitmask = res.acl_bitmask; server->fh_expire_type = res.fh_expire_type; } else { } return (status); } } int nfs4_server_capabilities(struct nfs_server *server , struct nfs_fh *fhandle ) { struct nfs4_exception 
exception ; int err ; int tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51707: tmp = _nfs4_server_capabilities(server, fhandle); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_51707; } else { } return (err); } } static int _nfs4_lookup_root(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info ) { struct nfs4_lookup_root_arg args ; struct nfs4_lookup_res res ; struct rpc_message msg ; int tmp ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.bitmask = (u32 const *)(& nfs4_fattr_bitmap); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.fattr = info->fattr; res.fh = fhandle; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 20UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; nfs_fattr_init(info->fattr); tmp = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); return (tmp); } } static int nfs4_lookup_root(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info ) { struct nfs4_exception exception ; int err ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51728: err = _nfs4_lookup_root(server, fhandle, info); switch (err) { case 0: ; case -10016: ; goto out; default: err = nfs4_handle_exception(server, err, & exception); } if (exception.retry != 0) { goto ldv_51728; } else { } out: ; return (err); } } static int nfs4_lookup_root_sec(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info , rpc_authflavor_t flavor ) { struct rpc_auth *auth ; int ret ; long tmp ; { auth = rpcauth_create(flavor, server->client); tmp = IS_ERR((void const *)auth); if (tmp != 0L) { ret = -5; goto out; } else { } ret = nfs4_lookup_root(server, fhandle, info); out: ; return (ret); } } static int nfs4_find_root_sec(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info ) { int i ; int len ; int status ; rpc_authflavor_t flav_array[12U] ; { status = 0; len = rpcauth_list_flavors((rpc_authflavor_t *)(& flav_array), 12); if (len < 0) { return (len); } else { } i = 0; goto ldv_51753; ldv_51752: ; if (flav_array[i] == 1U) { goto ldv_51750; } else { } status = nfs4_lookup_root_sec(server, fhandle, info, flav_array[i]); if (status == -10016 || status == -13) { goto ldv_51750; } else { } goto ldv_51751; ldv_51750: i = i + 1; ldv_51753: ; if (i < len) { goto ldv_51752; } else { } ldv_51751: ; if (status == -13) { status = -1; } else { } return (status); } } int nfs4_proc_get_rootfh(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info ) { int minor_version ; int status ; int tmp ; int tmp___0 ; { minor_version = (int )(server->nfs_client)->cl_minorversion; tmp = nfs4_lookup_root(server, fhandle, info); status = tmp; if (status == -10016 && (server->flags & 8192) == 0) { status = (*((nfs_v4_minor_ops[minor_version])->find_root_sec))(server, fhandle, info); } else { } if (status == 0) { status = nfs4_server_capabilities(server, fhandle); } else { } if (status == 0) { status = nfs4_do_fsinfo(server, fhandle, info); } else { } tmp___0 = nfs4_map_errors(status); return (tmp___0); } } static int nfs4_proc_get_root(struct nfs_server 
*server , struct nfs_fh *mntfh , struct nfs_fsinfo *info ) { int error ; struct nfs_fattr *fattr ; long tmp ; long tmp___0 ; size_t __len ; void *__ret ; int tmp___1 ; { fattr = info->fattr; error = nfs4_server_capabilities(server, mntfh); if (error < 0) { tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001dnfs4_get_root: getcaps error = %d\n", - error); } else { } return (error); } else { } error = nfs4_proc_getattr(server, mntfh, fattr); if (error < 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs4_get_root: getattr error = %d\n", - error); } else { } return (error); } else { } if ((fattr->valid & 1024U) != 0U) { tmp___1 = nfs_fsid_equal((struct nfs_fsid const *)(& server->fsid), (struct nfs_fsid const *)(& fattr->fsid)); if (tmp___1 == 0) { __len = 16UL; if (__len > 63UL) { __ret = memcpy((void *)(& server->fsid), (void const *)(& fattr->fsid), __len); } else { __ret = memcpy((void *)(& server->fsid), (void const *)(& fattr->fsid), __len); } } else { } } else { } return (error); } } static int nfs4_get_referral(struct rpc_clnt *client , struct inode *dir , struct qstr const *name , struct nfs_fattr *fattr , struct nfs_fh *fhandle ) { int status ; struct page *page ; struct nfs4_fs_locations *locations ; void *tmp ; long tmp___0 ; struct nfs_server *tmp___1 ; int tmp___2 ; size_t __len ; void *__ret ; { status = -12; page = 0; locations = 0; page = alloc_pages(208U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { goto out; } else { } tmp = kmalloc(92112UL, 208U); locations = (struct nfs4_fs_locations *)tmp; if ((unsigned long )locations == (unsigned long )((struct nfs4_fs_locations *)0)) { goto out; } else { } status = nfs4_proc_fs_locations(client, dir, name, locations, page); if (status != 0) { goto out; } else { } tmp___1 = NFS_SERVER((struct inode const *)dir); tmp___2 = nfs_fsid_equal((struct nfs_fsid const *)(& tmp___1->fsid), (struct nfs_fsid const *)(& locations->fattr.fsid)); if (tmp___2 != 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: server did not return a different fsid for a referral at %s\n", "nfs4_get_referral", name->name); } else { } status = -5; goto out; } else { } nfs_fixup_referral_attributes(& locations->fattr); __len = 216UL; if (__len > 63UL) { __ret = memcpy((void *)fattr, (void const *)(& locations->fattr), __len); } else { __ret = memcpy((void *)fattr, (void const *)(& locations->fattr), __len); } memset((void *)fhandle, 0, 130UL); out: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { __free_pages(page, 0U); } else { } kfree((void const *)locations); return (status); } } static int _nfs4_proc_getattr(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) { struct nfs4_getattr_arg args ; struct nfs4_getattr_res res ; struct rpc_message msg ; int tmp ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)fhandle; args.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.fattr = fattr; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 18UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& 
res); msg.rpc_cred = 0; nfs_fattr_init(fattr); tmp = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); return (tmp); } } static int nfs4_proc_getattr(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) { struct nfs4_exception exception ; int err ; int tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51801: tmp = _nfs4_proc_getattr(server, fhandle, fattr); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_51801; } else { } return (err); } } static int nfs4_proc_setattr(struct dentry *dentry , struct nfs_fattr *fattr , struct iattr *sattr ) { struct inode *inode ; struct rpc_cred *cred ; struct nfs4_state *state ; int status ; bool tmp ; struct nfs_open_context *ctx ; { inode = dentry->d_inode; cred = 0; state = 0; tmp = pnfs_ld_layoutret_on_setattr(inode); if ((int )tmp) { pnfs_return_layout(inode); } else { } nfs_fattr_init(fattr); if ((sattr->ia_valid & 32768U) != 0U) { sattr->ia_valid = sattr->ia_valid & 4294934431U; } else { } if ((sattr->ia_valid & 4294959103U) == 0U) { return (0); } else { } if ((sattr->ia_valid & 8192U) != 0U) { ctx = nfs_file_open_context(sattr->ia_file); if ((unsigned long )ctx != (unsigned long )((struct nfs_open_context *)0)) { cred = ctx->cred; state = ctx->state; } else { } } else { } status = nfs4_do_setattr(inode, cred, fattr, sattr, state); if (status == 0) { nfs_setattr_update_inode(inode, sattr); } else { } return (status); } } static int _nfs4_proc_lookup(struct rpc_clnt *clnt , struct inode *dir , struct qstr const *name , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) { struct nfs_server *server ; struct nfs_server *tmp ; int status ; struct nfs4_lookup_arg args ; struct nfs_fh *tmp___0 ; struct nfs4_lookup_res res ; struct rpc_message msg ; long tmp___1 ; long tmp___2 ; { tmp = NFS_SERVER((struct inode const *)dir); server = tmp; tmp___0 = NFS_FH((struct inode const *)dir); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.dir_fh = (struct nfs_fh const *)tmp___0; args.name = name; args.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.fattr = fattr; res.fh = fhandle; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 19UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; nfs_fattr_init(fattr); tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS call lookup %s\n", name->name); } else { } status = nfs4_call_sync(clnt, server, & msg, & args.seq_args, & res.seq_res, 0); tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001dNFS reply lookup: %d\n", status); } else { } return (status); } } static void nfs_fixup_secinfo_attributes(struct nfs_fattr *fattr ) { { fattr->valid = fattr->valid | 2097159U; fattr->mode = 16749U; fattr->nlink = 2U; return; } } static int nfs4_proc_lookup_common(struct rpc_clnt **clnt , struct inode *dir , struct qstr *name , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) { struct nfs4_exception exception ; struct rpc_clnt *client ; int err ; long tmp ; long tmp___0 ; struct nfs_server *tmp___1 ; { exception.timeout = 0L; 
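/* LOOKUP retry loop: NFS4ERR_BADNAME (10041) is mapped to -ENOENT, NFS4ERR_MOVED (10019) is resolved through the referral path, and NFS4ERR_WRONGSEC (10016) retries the lookup through a newly created security-flavour client. */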
exception.retry = 0; exception.state = 0; exception.inode = 0; client = *clnt; ldv_51844: err = _nfs4_proc_lookup(client, dir, (struct qstr const *)name, fhandle, fattr); switch (err) { case -10041: err = -2; goto out; case -10019: err = nfs4_get_referral(client, dir, (struct qstr const *)name, fattr, fhandle); goto out; case -10016: err = -1; if ((unsigned long )*clnt != (unsigned long )client) { goto out; } else { } client = nfs4_create_sec_client(client, dir, name); tmp___0 = IS_ERR((void const *)client); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)client); return ((int )tmp); } else { } exception.retry = 1; goto ldv_51842; default: tmp___1 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___1, err, & exception); } ldv_51842: ; if (exception.retry != 0) { goto ldv_51844; } else { } out: ; if (err == 0) { *clnt = client; } else if ((unsigned long )*clnt != (unsigned long )client) { rpc_shutdown_client(client); } else { } return (err); } } static int nfs4_proc_lookup(struct inode *dir , struct qstr *name , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) { int status ; struct rpc_clnt *client ; struct rpc_clnt *tmp ; struct rpc_clnt *tmp___0 ; { tmp = NFS_CLIENT((struct inode const *)dir); client = tmp; status = nfs4_proc_lookup_common(& client, dir, name, fhandle, fattr); tmp___0 = NFS_CLIENT((struct inode const *)dir); if ((unsigned long )tmp___0 != (unsigned long )client) { rpc_shutdown_client(client); nfs_fixup_secinfo_attributes(fattr); } else { } return (status); } } struct rpc_clnt *nfs4_proc_lookup_mountpoint(struct inode *dir , struct qstr *name , struct nfs_fh *fhandle , struct nfs_fattr *fattr ) { int status ; struct rpc_clnt *client ; struct rpc_clnt *tmp ; struct rpc_clnt *tmp___0 ; void *tmp___1 ; { tmp = NFS_CLIENT((struct inode const *)dir); tmp___0 = rpc_clone_client(tmp); client = tmp___0; status = nfs4_proc_lookup_common(& client, dir, name, fhandle, fattr); if (status < 0) { rpc_shutdown_client(client); tmp___1 = ERR_PTR((long )status); return ((struct rpc_clnt *)tmp___1); } else { } return (client); } } static int _nfs4_proc_access(struct inode *inode , struct nfs_access_entry *entry ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_accessargs args ; struct nfs_fh *tmp___0 ; struct nfs4_accessres res ; struct rpc_message msg ; int mode ; int status ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = NFS_FH((struct inode const *)inode); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)tmp___0; args.bitmask = (u32 const *)(& server->cache_consistency_bitmask); args.access = 0U; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.fattr = 0; res.supported = 0U; res.access = 0U; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 17UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = entry->cred; mode = entry->mask; if ((mode & 4) != 0) { args.access = args.access | 1U; } else { } if (((int )inode->i_mode & 61440) == 16384) { if ((mode & 2) != 0) { args.access = args.access | 28U; } else { } if (mode & 1) { args.access = args.access | 2U; } else { } } else { if ((mode & 2) != 0) { args.access = args.access | 12U; } else { } if (mode & 1) { args.access = args.access 
| 32U; } else { } } res.fattr = nfs_alloc_fattr(); if ((unsigned long )res.fattr == (unsigned long )((struct nfs_fattr *)0)) { return (-12); } else { } status = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); if (status == 0) { nfs_access_set_mask(entry, res.access); nfs_refresh_inode(inode, res.fattr); } else { } nfs_free_fattr((struct nfs_fattr const *)res.fattr); return (status); } } static int nfs4_proc_access(struct inode *inode , struct nfs_access_entry *entry ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51878: tmp = _nfs4_proc_access(inode, entry); tmp___0 = NFS_SERVER((struct inode const *)inode); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_51878; } else { } return (err); } } static int _nfs4_proc_readlink(struct inode *inode , struct page *page , unsigned int pgbase , unsigned int pglen ) { struct nfs4_readlink args ; struct nfs_fh *tmp ; struct nfs4_readlink_res res ; struct rpc_message msg ; struct nfs_server *tmp___0 ; struct nfs_server *tmp___1 ; int tmp___2 ; { tmp = NFS_FH((struct inode const *)inode); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)tmp; args.pgbase = pgbase; args.pglen = pglen; args.pages = & page; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 28UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp___0 = NFS_SERVER((struct inode const *)inode); tmp___1 = NFS_SERVER((struct inode const *)inode); tmp___2 = nfs4_call_sync(tmp___1->client, tmp___0, & msg, & args.seq_args, & res.seq_res, 0); return (tmp___2); } } static int nfs4_proc_readlink(struct inode *inode , struct page *page , unsigned int pgbase , unsigned int pglen ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51897: tmp = _nfs4_proc_readlink(inode, page, pgbase, pglen); tmp___0 = NFS_SERVER((struct inode const *)inode); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_51897; } else { } return (err); } } static int nfs4_proc_create(struct inode *dir , struct dentry *dentry , struct iattr *sattr , int flags ) { struct nfs_open_context *ctx ; struct nfs4_state *state ; int status ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; struct inode *tmp___4 ; unsigned long tmp___5 ; { status = 0; ctx = alloc_nfs_open_context(dentry, 1U); tmp___0 = IS_ERR((void const *)ctx); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)ctx); return ((int )tmp); } else { } tmp___1 = current_umask(); sattr->ia_mode = (umode_t )((int )((short )sattr->ia_mode) & ~ ((int )((short )tmp___1))); state = nfs4_do_open(dir, dentry, ctx->mode, flags, sattr, ctx->cred, & ctx->mdsthreshold); d_drop(dentry); tmp___3 = IS_ERR((void const *)state); if (tmp___3 != 0L) { tmp___2 = PTR_ERR((void const *)state); status = (int )tmp___2; goto out; } else { } tmp___4 = igrab(state->inode); d_add(dentry, tmp___4); tmp___5 = nfs_save_change_attribute(dir); nfs_set_verifier(dentry, tmp___5); ctx->state = state; out: put_nfs_open_context(ctx); return (status); } } static int _nfs4_proc_remove(struct inode *dir , struct qstr *name ) { struct nfs_server *server ; struct nfs_server *tmp ; 
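/* Synchronous REMOVE of @name from @dir; on success the directory's cached change attribute is updated from the returned change info. */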
struct nfs_removeargs args ; struct nfs_fh *tmp___0 ; struct nfs_removeres res ; struct rpc_message msg ; int status ; { tmp = NFS_SERVER((struct inode const *)dir); server = tmp; tmp___0 = NFS_FH((struct inode const *)dir); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)tmp___0; args.name = *name; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.dir_attr = 0; res.cinfo.atomic = 0U; res.cinfo.before = 0ULL; res.cinfo.after = 0ULL; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 21UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; status = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 1); if (status == 0) { update_changeattr(dir, & res.cinfo); } else { } return (status); } } static int nfs4_proc_remove(struct inode *dir , struct qstr *name ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51924: tmp = _nfs4_proc_remove(dir, name); tmp___0 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_51924; } else { } return (err); } } static void nfs4_proc_unlink_setup(struct rpc_message *msg , struct inode *dir ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_removeargs *args ; struct nfs_removeres *res ; { tmp = NFS_SERVER((struct inode const *)dir); server = tmp; args = (struct nfs_removeargs *)msg->rpc_argp; res = (struct nfs_removeres *)msg->rpc_resp; res->server = (struct nfs_server const *)server; msg->rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 21UL; nfs41_init_sequence(& args->seq_args, & res->seq_res, 1); return; } } static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task , struct nfs_unlinkdata *data ) { struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)data->dir); nfs4_setup_sequence((struct nfs_server const *)tmp, & data->args.seq_args, & data->res.seq_res, task); return; } } static int nfs4_proc_unlink_done(struct rpc_task *task , struct inode *dir ) { struct nfs_removeres *res ; int tmp ; int tmp___0 ; { res = (struct nfs_removeres *)task->tk_msg.rpc_resp; tmp = nfs4_sequence_done(task, & res->seq_res); if (tmp == 0) { return (0); } else { } tmp___0 = nfs4_async_handle_error(task, res->server, 0); if (tmp___0 == -11) { return (0); } else { } update_changeattr(dir, & res->cinfo); return (1); } } static void nfs4_proc_rename_setup(struct rpc_message *msg , struct inode *dir ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_renameargs *arg ; struct nfs_renameres *res ; { tmp = NFS_SERVER((struct inode const *)dir); server = tmp; arg = (struct nfs_renameargs *)msg->rpc_argp; res = (struct nfs_renameres *)msg->rpc_resp; msg->rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 22UL; res->server = (struct nfs_server const *)server; nfs41_init_sequence(& arg->seq_args, & res->seq_res, 1); return; } } static void nfs4_proc_rename_rpc_prepare(struct rpc_task *task , struct nfs_renamedata *data ) { struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)data->old_dir); nfs4_setup_sequence((struct nfs_server const *)tmp, & 
data->args.seq_args, & data->res.seq_res, task); return; } } static int nfs4_proc_rename_done(struct rpc_task *task , struct inode *old_dir , struct inode *new_dir ) { struct nfs_renameres *res ; int tmp ; int tmp___0 ; { res = (struct nfs_renameres *)task->tk_msg.rpc_resp; tmp = nfs4_sequence_done(task, & res->seq_res); if (tmp == 0) { return (0); } else { } tmp___0 = nfs4_async_handle_error(task, res->server, 0); if (tmp___0 == -11) { return (0); } else { } update_changeattr(old_dir, & res->old_cinfo); update_changeattr(new_dir, & res->new_cinfo); return (1); } } static int _nfs4_proc_rename(struct inode *old_dir , struct qstr *old_name , struct inode *new_dir , struct qstr *new_name ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_renameargs arg ; struct nfs_fh *tmp___0 ; struct nfs_fh *tmp___1 ; struct nfs_renameres res ; struct rpc_message msg ; int status ; { tmp = NFS_SERVER((struct inode const *)old_dir); server = tmp; tmp___0 = NFS_FH((struct inode const *)old_dir); tmp___1 = NFS_FH((struct inode const *)new_dir); arg.seq_args.sa_slot = 0; arg.seq_args.sa_cache_this = (unsigned char)0; arg.seq_args.sa_privileged = (unsigned char)0; arg.old_dir = (struct nfs_fh const *)tmp___0; arg.new_dir = (struct nfs_fh const *)tmp___1; arg.old_name = (struct qstr const *)old_name; arg.new_name = (struct qstr const *)new_name; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.old_cinfo.atomic = 0U; res.old_cinfo.before = 0ULL; res.old_cinfo.after = 0ULL; res.old_fattr = 0; res.new_cinfo.atomic = 0U; res.new_cinfo.before = 0ULL; res.new_cinfo.after = 0ULL; res.new_fattr = 0; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 22UL; msg.rpc_argp = (void *)(& arg); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; status = -12; status = nfs4_call_sync(server->client, server, & msg, & arg.seq_args, & res.seq_res, 1); if (status == 0) { update_changeattr(old_dir, & res.old_cinfo); update_changeattr(new_dir, & res.new_cinfo); } else { } return (status); } } static int nfs4_proc_rename(struct inode *old_dir , struct qstr *old_name , struct inode *new_dir , struct qstr *new_name ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51978: tmp = _nfs4_proc_rename(old_dir, old_name, new_dir, new_name); tmp___0 = NFS_SERVER((struct inode const *)old_dir); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_51978; } else { } return (err); } } static int _nfs4_proc_link(struct inode *inode , struct inode *dir , struct qstr *name ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_link_arg arg ; struct nfs_fh *tmp___0 ; struct nfs_fh *tmp___1 ; struct nfs4_link_res res ; struct rpc_message msg ; int status ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = NFS_FH((struct inode const *)inode); tmp___1 = NFS_FH((struct inode const *)dir); arg.seq_args.sa_slot = 0; arg.seq_args.sa_cache_this = (unsigned char)0; arg.seq_args.sa_privileged = (unsigned char)0; arg.fh = (struct nfs_fh const *)tmp___0; arg.dir_fh = (struct nfs_fh const *)tmp___1; arg.name = (struct qstr const *)name; arg.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; 
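/* LINK: the remaining reply fields are zeroed here and a scratch fattr is allocated below so the link target's attributes can be refreshed once the call succeeds. */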
res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.server = (struct nfs_server const *)server; res.fattr = 0; res.cinfo.atomic = 0U; res.cinfo.before = 0ULL; res.cinfo.after = 0ULL; res.dir_attr = 0; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 23UL; msg.rpc_argp = (void *)(& arg); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; status = -12; res.fattr = nfs_alloc_fattr(); if ((unsigned long )res.fattr == (unsigned long )((struct nfs_fattr *)0)) { goto out; } else { } status = nfs4_call_sync(server->client, server, & msg, & arg.seq_args, & res.seq_res, 1); if (status == 0) { update_changeattr(dir, & res.cinfo); nfs_post_op_update_inode(inode, res.fattr); } else { } out: nfs_free_fattr((struct nfs_fattr const *)res.fattr); return (status); } } static int nfs4_proc_link(struct inode *inode , struct inode *dir , struct qstr *name ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_51998: tmp = _nfs4_proc_link(inode, dir, name); tmp___0 = NFS_SERVER((struct inode const *)inode); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_51998; } else { } return (err); } } static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir , struct qstr *name , struct iattr *sattr , u32 ftype ) { struct nfs4_createdata *data ; void *tmp ; struct nfs_server *server ; struct nfs_server *tmp___0 ; struct nfs_fh *tmp___1 ; { tmp = kzalloc(544UL, 208U); data = (struct nfs4_createdata *)tmp; if ((unsigned long )data != (unsigned long )((struct nfs4_createdata *)0)) { tmp___0 = NFS_SERVER((struct inode const *)dir); server = tmp___0; data->msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 25UL; data->msg.rpc_argp = (void *)(& data->arg); data->msg.rpc_resp = (void *)(& data->res); tmp___1 = NFS_FH((struct inode const *)dir); data->arg.dir_fh = (struct nfs_fh const *)tmp___1; data->arg.server = (struct nfs_server const *)server; data->arg.name = (struct qstr const *)name; data->arg.attrs = (struct iattr const *)sattr; data->arg.ftype = ftype; data->arg.bitmask = (u32 const *)(& server->attr_bitmask); data->res.server = (struct nfs_server const *)server; data->res.fh = & data->fh; data->res.fattr = & data->fattr; nfs_fattr_init(data->res.fattr); } else { } return (data); } } static int nfs4_do_create(struct inode *dir , struct dentry *dentry , struct nfs4_createdata *data ) { int status ; struct nfs_server *tmp ; struct nfs_server *tmp___0 ; int tmp___1 ; { tmp = NFS_SERVER((struct inode const *)dir); tmp___0 = NFS_SERVER((struct inode const *)dir); tmp___1 = nfs4_call_sync(tmp___0->client, tmp, & data->msg, & data->arg.seq_args, & data->res.seq_res, 1); status = tmp___1; if (status == 0) { update_changeattr(dir, & data->res.dir_cinfo); status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); } else { } return (status); } } static void nfs4_free_createdata(struct nfs4_createdata *data ) { { kfree((void const *)data); return; } } static int _nfs4_proc_symlink(struct inode *dir , struct dentry *dentry , struct page *page , unsigned int len , struct iattr *sattr ) { struct nfs4_createdata *data ; int status ; { status = -36; if (len > 4096U) { goto out; } else { } status = -12; data = nfs4_alloc_createdata(dir, & dentry->d_name, sattr, 5U); if ((unsigned long )data == (unsigned long )((struct nfs4_createdata *)0)) { 
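/* nfs4_alloc_createdata() failed: bail out via the out label and return
 * -ENOMEM (status was preset to -12 just above). */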
goto out; } else { } data->msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 24UL; data->arg.u.symlink.pages = & page; data->arg.u.symlink.len = len; status = nfs4_do_create(dir, dentry, data); nfs4_free_createdata(data); out: ; return (status); } } static int nfs4_proc_symlink(struct inode *dir , struct dentry *dentry , struct page *page , unsigned int len , struct iattr *sattr ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52042: tmp = _nfs4_proc_symlink(dir, dentry, page, len, sattr); tmp___0 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_52042; } else { } return (err); } } static int _nfs4_proc_mkdir(struct inode *dir , struct dentry *dentry , struct iattr *sattr ) { struct nfs4_createdata *data ; int status ; { status = -12; data = nfs4_alloc_createdata(dir, & dentry->d_name, sattr, 2U); if ((unsigned long )data == (unsigned long )((struct nfs4_createdata *)0)) { goto out; } else { } status = nfs4_do_create(dir, dentry, data); nfs4_free_createdata(data); out: ; return (status); } } static int nfs4_proc_mkdir(struct inode *dir , struct dentry *dentry , struct iattr *sattr ) { struct nfs4_exception exception ; int err ; int tmp ; int tmp___0 ; struct nfs_server *tmp___1 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; tmp = current_umask(); sattr->ia_mode = (umode_t )((int )((short )sattr->ia_mode) & ~ ((int )((short )tmp))); ldv_52059: tmp___0 = _nfs4_proc_mkdir(dir, dentry, sattr); tmp___1 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___1, tmp___0, & exception); if (exception.retry != 0) { goto ldv_52059; } else { } return (err); } } static int _nfs4_proc_readdir(struct dentry *dentry , struct rpc_cred *cred , u64 cookie , struct page **pages , unsigned int count , int plus ) { struct inode *dir ; struct nfs4_readdir_arg args ; struct nfs_fh *tmp ; struct nfs_server *tmp___0 ; struct nfs4_readdir_res res ; struct rpc_message msg ; int status ; long tmp___1 ; struct nfs_inode *tmp___2 ; struct nfs_server *tmp___3 ; struct nfs_server *tmp___4 ; size_t __len ; void *__ret ; struct nfs_inode *tmp___5 ; struct nfs_inode *tmp___6 ; long tmp___7 ; { dir = dentry->d_inode; tmp = NFS_FH((struct inode const *)dir); tmp___0 = NFS_SERVER((struct inode const *)dentry->d_inode); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)tmp; args.cookie = 0ULL; args.verifier.data[0] = (char)0; args.verifier.data[1] = (char)0; args.verifier.data[2] = (char)0; args.verifier.data[3] = (char)0; args.verifier.data[4] = (char)0; args.verifier.data[5] = (char)0; args.verifier.data[6] = (char)0; args.verifier.data[7] = (char)0; args.count = count; args.pages = pages; args.pgbase = 0U; args.bitmask = (u32 const *)(& tmp___0->attr_bitmask); args.plus = plus; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 29UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = cred; tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: dentry = %s/%s, cookie = %Lu\n", "_nfs4_proc_readdir", (dentry->d_parent)->d_name.name, dentry->d_name.name, cookie); } else { } tmp___2 = NFS_I((struct inode const *)dir); nfs4_setup_readdir(cookie, (__be32 *)(& 
tmp___2->cookieverf), dentry, & args); res.pgbase = args.pgbase; tmp___3 = NFS_SERVER((struct inode const *)dir); tmp___4 = NFS_SERVER((struct inode const *)dir); status = nfs4_call_sync(tmp___4->client, tmp___3, & msg, & args.seq_args, & res.seq_res, 0); if (status >= 0) { __len = 8UL; if (__len > 63UL) { tmp___5 = NFS_I((struct inode const *)dir); __ret = memcpy((void *)(& tmp___5->cookieverf), (void const *)(& res.verifier.data), __len); } else { tmp___6 = NFS_I((struct inode const *)dir); __ret = memcpy((void *)(& tmp___6->cookieverf), (void const *)(& res.verifier.data), __len); } status = (int )(args.pgbase + (unsigned int )status); } else { } nfs_invalidate_atime(dir); tmp___7 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: returns %d\n", "_nfs4_proc_readdir", status); } else { } return (status); } } static int nfs4_proc_readdir(struct dentry *dentry , struct rpc_cred *cred , u64 cookie , struct page **pages , unsigned int count , int plus ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52088: tmp = _nfs4_proc_readdir(dentry, cred, cookie, pages, count, plus); tmp___0 = NFS_SERVER((struct inode const *)dentry->d_inode); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_52088; } else { } return (err); } } static int _nfs4_proc_mknod(struct inode *dir , struct dentry *dentry , struct iattr *sattr , dev_t rdev ) { struct nfs4_createdata *data ; int mode ; int status ; { mode = (int )sattr->ia_mode; status = -12; data = nfs4_alloc_createdata(dir, & dentry->d_name, sattr, 6U); if ((unsigned long )data == (unsigned long )((struct nfs4_createdata *)0)) { goto out; } else { } if ((mode & 61440) == 4096) { data->arg.ftype = 7U; } else if ((mode & 61440) == 24576) { data->arg.ftype = 3U; data->arg.u.device.specdata1 = rdev >> 20; data->arg.u.device.specdata2 = rdev & 1048575U; } else if ((mode & 61440) == 8192) { data->arg.ftype = 4U; data->arg.u.device.specdata1 = rdev >> 20; data->arg.u.device.specdata2 = rdev & 1048575U; } else if ((mode & 61440) != 49152) { status = -22; goto out_free; } else { } status = nfs4_do_create(dir, dentry, data); out_free: nfs4_free_createdata(data); out: ; return (status); } } static int nfs4_proc_mknod(struct inode *dir , struct dentry *dentry , struct iattr *sattr , dev_t rdev ) { struct nfs4_exception exception ; int err ; int tmp ; int tmp___0 ; struct nfs_server *tmp___1 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; tmp = current_umask(); sattr->ia_mode = (umode_t )((int )((short )sattr->ia_mode) & ~ ((int )((short )tmp))); ldv_52109: tmp___0 = _nfs4_proc_mknod(dir, dentry, sattr, rdev); tmp___1 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___1, tmp___0, & exception); if (exception.retry != 0) { goto ldv_52109; } else { } return (err); } } static int _nfs4_proc_statfs(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsstat *fsstat ) { struct nfs4_statfs_arg args ; struct nfs4_statfs_res res ; struct rpc_message msg ; int tmp ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)fhandle; args.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 
0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.fsstat = fsstat; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 27UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; nfs_fattr_init(fsstat->fattr); tmp = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); return (tmp); } } static int nfs4_proc_statfs(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsstat *fsstat ) { struct nfs4_exception exception ; int err ; int tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52126: tmp = _nfs4_proc_statfs(server, fhandle, fsstat); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_52126; } else { } return (err); } } static int _nfs4_do_fsinfo(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *fsinfo ) { struct nfs4_fsinfo_arg args ; struct nfs4_fsinfo_res res ; struct rpc_message msg ; int tmp ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)fhandle; args.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.fsinfo = fsinfo; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 10UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); return (tmp); } } static int nfs4_do_fsinfo(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *fsinfo ) { struct nfs4_exception exception ; int err ; int tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52143: tmp = _nfs4_do_fsinfo(server, fhandle, fsinfo); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_52143; } else { } return (err); } } static int nfs4_proc_fsinfo(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *fsinfo ) { int error ; { nfs_fattr_init(fsinfo->fattr); error = nfs4_do_fsinfo(server, fhandle, fsinfo); if (error == 0) { server->pnfs_blksize = fsinfo->blksize; set_pnfs_layoutdriver(server, (struct nfs_fh const *)fhandle, fsinfo->layouttype); } else { } return (error); } } static int _nfs4_proc_pathconf(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_pathconf *pathconf ) { struct nfs4_pathconf_arg args ; struct nfs4_pathconf_res res ; struct rpc_message msg ; int tmp ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = (struct nfs_fh const *)fhandle; args.bitmask = (u32 const *)(& server->attr_bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.pathconf = pathconf; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 26UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; if (((unsigned int )*(args.bitmask) & (unsigned int )nfs4_pathconf_bitmap[0]) == 0U) { memset((void *)pathconf, 0, 16UL); return (0); } else { } nfs_fattr_init(pathconf->fattr); tmp = 
nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); return (tmp); } } static int nfs4_proc_pathconf(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_pathconf *pathconf ) { struct nfs4_exception exception ; int err ; int tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52166: tmp = _nfs4_proc_pathconf(server, fhandle, pathconf); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_52166; } else { } return (err); } } void __nfs4_read_done_cb(struct nfs_read_data *data ) { { nfs_invalidate_atime((data->header)->inode); return; } } static int nfs4_read_done_cb(struct rpc_task *task , struct nfs_read_data *data ) { struct nfs_server *server ; struct nfs_server *tmp ; int tmp___0 ; { tmp = NFS_SERVER((struct inode const *)(data->header)->inode); server = tmp; tmp___0 = nfs4_async_handle_error(task, (struct nfs_server const *)server, (data->args.context)->state); if (tmp___0 == -11) { rpc_restart_call_prepare(task); return (-11); } else { } __nfs4_read_done_cb(data); if (task->tk_status > 0) { renew_lease((struct nfs_server const *)server, data->timestamp); } else { } return (0); } } static int nfs4_read_done(struct rpc_task *task , struct nfs_read_data *data ) { long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_read_done"); } else { } tmp___0 = nfs4_sequence_done(task, & data->res.seq_res); if (tmp___0 == 0) { return (-11); } else { } if ((unsigned long )data->read_done_cb != (unsigned long )((int (*)(struct rpc_task * , struct nfs_read_data * ))0)) { tmp___1 = (*(data->read_done_cb))(task, data); tmp___3 = tmp___1; } else { tmp___2 = nfs4_read_done_cb(task, data); tmp___3 = tmp___2; } return (tmp___3); } } static void nfs4_proc_read_setup(struct nfs_read_data *data , struct rpc_message *msg ) { { data->timestamp = jiffies; data->read_done_cb = & nfs4_read_done_cb; msg->rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 1UL; nfs41_init_sequence(& data->args.seq_args, & data->res.seq_res, 0); return; } } static void nfs4_proc_read_rpc_prepare(struct rpc_task *task , struct nfs_read_data *data ) { struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)(data->header)->inode); nfs4_setup_sequence((struct nfs_server const *)tmp, & data->args.seq_args, & data->res.seq_res, task); return; } } static int nfs4_write_done_cb(struct rpc_task *task , struct nfs_write_data *data ) { struct inode *inode ; struct nfs_server *tmp ; int tmp___0 ; struct nfs_server *tmp___1 ; { inode = (data->header)->inode; tmp = NFS_SERVER((struct inode const *)inode); tmp___0 = nfs4_async_handle_error(task, (struct nfs_server const *)tmp, (data->args.context)->state); if (tmp___0 == -11) { rpc_restart_call_prepare(task); return (-11); } else { } if (task->tk_status >= 0) { tmp___1 = NFS_SERVER((struct inode const *)inode); renew_lease((struct nfs_server const *)tmp___1, data->timestamp); nfs_post_op_update_inode_force_wcc(inode, & data->fattr); } else { } return (0); } } static int nfs4_write_done(struct rpc_task *task , struct nfs_write_data *data ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = nfs4_sequence_done(task, & data->res.seq_res); if (tmp == 0) { return (-11); } else { } if ((unsigned long )data->write_done_cb != (unsigned long )((int (*)(struct rpc_task * , struct nfs_write_data * ))0)) { tmp___0 = (*(data->write_done_cb))(task, 
data); tmp___2 = tmp___0; } else { tmp___1 = nfs4_write_done_cb(task, data); tmp___2 = tmp___1; } return (tmp___2); } } static bool nfs4_write_need_cache_consistency_data(struct nfs_write_data const *data ) { struct nfs_pgio_header const *hdr ; int tmp ; { hdr = (struct nfs_pgio_header const *)data->header; if ((unsigned long )data->ds_clp != (unsigned long )((struct nfs_client */* const */)0) || (unsigned long )hdr->dreq != (unsigned long )((struct nfs_direct_req */* const */)0)) { return (0); } else { } tmp = nfs4_have_delegation(hdr->inode, 1U); return (tmp == 0); } } static void nfs4_proc_write_setup(struct nfs_write_data *data , struct rpc_message *msg ) { struct nfs_server *server ; struct nfs_server *tmp ; bool tmp___0 ; int tmp___1 ; { tmp = NFS_SERVER((struct inode const *)(data->header)->inode); server = tmp; tmp___0 = nfs4_write_need_cache_consistency_data((struct nfs_write_data const *)data); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { data->args.bitmask = 0; data->res.fattr = 0; } else { data->args.bitmask = (u32 const *)(& server->cache_consistency_bitmask); } if ((unsigned long )data->write_done_cb == (unsigned long )((int (*)(struct rpc_task * , struct nfs_write_data * ))0)) { data->write_done_cb = & nfs4_write_done_cb; } else { } data->res.server = (struct nfs_server const *)server; data->timestamp = jiffies; msg->rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 2UL; nfs41_init_sequence(& data->args.seq_args, & data->res.seq_res, 1); return; } } static void nfs4_proc_write_rpc_prepare(struct rpc_task *task , struct nfs_write_data *data ) { struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)(data->header)->inode); nfs4_setup_sequence((struct nfs_server const *)tmp, & data->args.seq_args, & data->res.seq_res, task); return; } } static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task , struct nfs_commit_data *data ) { struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)data->inode); nfs4_setup_sequence((struct nfs_server const *)tmp, & data->args.seq_args, & data->res.seq_res, task); return; } } static int nfs4_commit_done_cb(struct rpc_task *task , struct nfs_commit_data *data ) { struct inode *inode ; struct nfs_server *tmp ; int tmp___0 ; { inode = data->inode; tmp = NFS_SERVER((struct inode const *)inode); tmp___0 = nfs4_async_handle_error(task, (struct nfs_server const *)tmp, 0); if (tmp___0 == -11) { rpc_restart_call_prepare(task); return (-11); } else { } return (0); } } static int nfs4_commit_done(struct rpc_task *task , struct nfs_commit_data *data ) { int tmp ; int tmp___0 ; { tmp = nfs4_sequence_done(task, & data->res.seq_res); if (tmp == 0) { return (-11); } else { } tmp___0 = (*(data->commit_done_cb))(task, data); return (tmp___0); } } static void nfs4_proc_commit_setup(struct nfs_commit_data *data , struct rpc_message *msg ) { struct nfs_server *server ; struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)data->inode); server = tmp; if ((unsigned long )data->commit_done_cb == (unsigned long )((int (*)(struct rpc_task * , struct nfs_commit_data * ))0)) { data->commit_done_cb = & nfs4_commit_done_cb; } else { } data->res.server = (struct nfs_server const *)server; msg->rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 3UL; nfs41_init_sequence(& data->args.seq_args, & data->res.seq_res, 1); return; } } static void nfs4_renew_release(void *calldata ) { struct nfs4_renewdata *data ; struct nfs_client *clp ; int tmp ; { data = (struct nfs4_renewdata *)calldata; clp = data->client; 
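/*
 * Renew release path (hedged reading): only reschedule state renewal while
 * someone else still holds a reference to the client (cl_count > 1), then
 * drop the reference taken in nfs4_proc_async_renew() below and free the
 * per-call renew data.
 */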
tmp = atomic_read((atomic_t const *)(& clp->cl_count)); if (tmp > 1) { nfs4_schedule_state_renewal(clp); } else { } nfs_put_client(clp); kfree((void const *)data); return; } } static void nfs4_renew_done(struct rpc_task *task , void *calldata ) { struct nfs4_renewdata *data ; struct nfs_client *clp ; unsigned long timestamp ; int tmp ; { data = (struct nfs4_renewdata *)calldata; clp = data->client; timestamp = data->timestamp; if (task->tk_status < 0) { tmp = constant_test_bit(3U, (unsigned long const volatile *)(& clp->cl_res_state)); if (tmp == 0) { return; } else { } if (task->tk_status != 10048) { nfs4_schedule_lease_recovery(clp); return; } else { } nfs4_schedule_path_down_recovery(clp); } else { } do_renew_lease(clp, timestamp); return; } } static struct rpc_call_ops const nfs4_renew_ops = {0, & nfs4_renew_done, 0, & nfs4_renew_release}; static int nfs4_proc_async_renew(struct nfs_client *clp , struct rpc_cred *cred , unsigned int renew_flags ) { struct rpc_message msg ; struct nfs4_renewdata *data ; int tmp ; void *tmp___0 ; int tmp___1 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 11UL; msg.rpc_argp = (void *)clp; msg.rpc_resp = 0; msg.rpc_cred = cred; if (renew_flags == 0U) { return (0); } else { } tmp = atomic_add_unless(& clp->cl_count, 1, 0); if (tmp == 0) { return (-5); } else { } tmp___0 = kmalloc(16UL, 80U); data = (struct nfs4_renewdata *)tmp___0; if ((unsigned long )data == (unsigned long )((struct nfs4_renewdata *)0)) { return (-12); } else { } data->client = clp; data->timestamp = jiffies; tmp___1 = rpc_call_async(clp->cl_rpcclient, (struct rpc_message const *)(& msg), 512, & nfs4_renew_ops, (void *)data); return (tmp___1); } } static int nfs4_proc_renew(struct nfs_client *clp , struct rpc_cred *cred ) { struct rpc_message msg ; unsigned long now ; int status ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 11UL; msg.rpc_argp = (void *)clp; msg.rpc_resp = 0; msg.rpc_cred = cred; now = jiffies; status = rpc_call_sync(clp->cl_rpcclient, (struct rpc_message const *)(& msg), 0); if (status < 0) { return (status); } else { } do_renew_lease(clp, now); return (0); } } __inline static int nfs4_server_supports_acls(struct nfs_server *server ) { { return (((server->caps & 8U) != 0U && (int )server->acl_bitmask & 1) && (server->acl_bitmask & 2U) != 0U); } } static int buf_to_pages_noslab(void const *buf , size_t buflen , struct page **pages , unsigned int *pgbase ) { struct page *newpage ; struct page **spages ; int rc ; size_t len ; size_t __min1 ; size_t __min2 ; size_t __len ; void *__ret ; void *tmp___0 ; struct page **tmp___1 ; { rc = 0; spages = pages; ldv_52279: __min1 = 4096UL; __min2 = buflen; len = __min1 < __min2 ? 
__min1 : __min2; newpage = alloc_pages(208U, 0U); if ((unsigned long )newpage == (unsigned long )((struct page *)0)) { goto unwind; } else { } __len = len; tmp___0 = lowmem_page_address((struct page const *)newpage); __ret = memcpy(tmp___0, buf, __len); buf = buf + len; buflen = buflen - len; tmp___1 = pages; pages = pages + 1; *tmp___1 = newpage; rc = rc + 1; if (buflen != 0UL) { goto ldv_52279; } else { } return (rc); unwind: ; goto ldv_52282; ldv_52281: __free_pages(*(spages + ((unsigned long )rc + 0xffffffffffffffffUL)), 0U); rc = rc - 1; ldv_52282: ; if (rc > 0) { goto ldv_52281; } else { } return (-12); } } static void nfs4_set_cached_acl(struct inode *inode , struct nfs4_cached_acl *acl ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; spin_lock(& inode->i_lock); kfree((void const *)nfsi->nfs4_acl); nfsi->nfs4_acl = acl; spin_unlock(& inode->i_lock); return; } } static void nfs4_zap_acl_attr(struct inode *inode ) { { nfs4_set_cached_acl(inode, 0); return; } } __inline static ssize_t nfs4_read_cached_acl(struct inode *inode , char *buf , size_t buflen ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs4_cached_acl *acl ; int ret ; size_t __len ; void *__ret ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; ret = -2; spin_lock(& inode->i_lock); acl = nfsi->nfs4_acl; if ((unsigned long )acl == (unsigned long )((struct nfs4_cached_acl *)0)) { goto out; } else { } if ((unsigned long )buf == (unsigned long )((char *)0)) { goto out_len; } else { } if (acl->cached == 0) { goto out; } else { } ret = -34; if (acl->len > buflen) { goto out; } else { } __len = acl->len; __ret = memcpy((void *)buf, (void const *)(& acl->data), __len); out_len: ret = (int )acl->len; out: spin_unlock(& inode->i_lock); return ((ssize_t )ret); } } static void nfs4_write_cached_acl(struct inode *inode , struct page **pages , size_t pgbase , size_t acl_len ) { struct nfs4_cached_acl *acl ; size_t buflen ; void *tmp ; void *tmp___0 ; { buflen = acl_len + 16UL; if (buflen <= 4096UL) { tmp = kmalloc(buflen, 208U); acl = (struct nfs4_cached_acl *)tmp; if ((unsigned long )acl == (unsigned long )((struct nfs4_cached_acl *)0)) { goto out; } else { } acl->cached = 1; _copy_from_pages((char *)(& acl->data), pages, pgbase, acl_len); } else { tmp___0 = kmalloc(16UL, 208U); acl = (struct nfs4_cached_acl *)tmp___0; if ((unsigned long )acl == (unsigned long )((struct nfs4_cached_acl *)0)) { goto out; } else { } acl->cached = 0; } acl->len = acl_len; out: nfs4_set_cached_acl(inode, acl); return; } } static ssize_t __nfs4_get_acl_uncached(struct inode *inode , void *buf , size_t buflen ) { struct page *pages[16U] ; unsigned int tmp ; struct nfs_getaclargs args ; struct nfs_fh *tmp___0 ; struct nfs_getaclres res ; struct rpc_message msg ; unsigned int npages ; int ret ; int i ; long tmp___1 ; struct nfs_server *tmp___2 ; struct nfs_server *tmp___3 ; { pages[0] = 0; tmp = 1U; while (1) { if (tmp >= 16U) { break; } else { } pages[tmp] = 0; tmp = tmp + 1U; } tmp___0 = NFS_FH((struct inode const *)inode); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = tmp___0; args.acl_len = buflen; args.acl_pgbase = 0U; args.acl_pages = (struct page **)(& pages); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.acl_len = buflen; 
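/*
 * The ACL reply is gathered into at most 16 pages; npages below is computed
 * as (buflen + 4095) / 4096, i.e. buflen rounded up to whole 4 KiB pages
 * (4096 presumably being PAGE_SIZE in this configuration).
 */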
res.acl_data_offset = 0UL; res.acl_flags = 0; res.acl_scratch = 0; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 32UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; npages = (unsigned int )((buflen + 4095UL) / 4096UL); ret = -12; if (npages == 0U) { npages = 1U; } else { } if (npages > 16U) { return (-34L); } else { } i = 0; goto ldv_52333; ldv_52332: pages[i] = alloc_pages(208U, 0U); if ((unsigned long )pages[i] == (unsigned long )((struct page *)0)) { goto out_free; } else { } i = i + 1; ldv_52333: ; if ((unsigned int )i < npages) { goto ldv_52332; } else { } res.acl_scratch = alloc_pages(208U, 0U); if ((unsigned long )res.acl_scratch == (unsigned long )((struct page *)0)) { goto out_free; } else { } args.acl_len = (unsigned long )npages * 4096UL; args.acl_pgbase = 0U; tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s buf %p buflen %zu npages %d args.acl_len %zu\n", "__nfs4_get_acl_uncached", buf, buflen, npages, args.acl_len); } else { } tmp___2 = NFS_SERVER((struct inode const *)inode); tmp___3 = NFS_SERVER((struct inode const *)inode); ret = nfs4_call_sync(tmp___3->client, tmp___2, & msg, & args.seq_args, & res.seq_res, 0); if (ret != 0) { goto out_free; } else { } if (res.acl_flags & 1) { if ((unsigned long )buf == (unsigned long )((void *)0)) { goto out_ok; } else { } ret = -34; goto out_free; } else { } nfs4_write_cached_acl(inode, (struct page **)(& pages), res.acl_data_offset, res.acl_len); if ((unsigned long )buf != (unsigned long )((void *)0)) { if (res.acl_len > buflen) { ret = -34; goto out_free; } else { } _copy_from_pages((char *)buf, (struct page **)(& pages), res.acl_data_offset, res.acl_len); } else { } out_ok: ret = (int )res.acl_len; out_free: i = 0; goto ldv_52338; ldv_52337: ; if ((unsigned long )pages[i] != (unsigned long )((struct page *)0)) { __free_pages(pages[i], 0U); } else { } i = i + 1; ldv_52338: ; if ((unsigned int )i < npages) { goto ldv_52337; } else { } if ((unsigned long )res.acl_scratch != (unsigned long )((struct page *)0)) { __free_pages(res.acl_scratch, 0U); } else { } return ((ssize_t )ret); } } static ssize_t nfs4_get_acl_uncached(struct inode *inode , void *buf , size_t buflen ) { struct nfs4_exception exception ; ssize_t ret ; struct nfs_server *tmp ; int tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52348: ret = __nfs4_get_acl_uncached(inode, buf, buflen); if (ret >= 0L) { goto ldv_52347; } else { } tmp = NFS_SERVER((struct inode const *)inode); tmp___0 = nfs4_handle_exception(tmp, (int )ret, & exception); ret = (ssize_t )tmp___0; if (exception.retry != 0) { goto ldv_52348; } else { } ldv_52347: ; return (ret); } } static ssize_t nfs4_proc_get_acl(struct inode *inode , void *buf , size_t buflen ) { struct nfs_server *server ; struct nfs_server *tmp ; int ret ; int tmp___0 ; struct nfs_inode *tmp___1 ; ssize_t tmp___2 ; ssize_t tmp___3 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = nfs4_server_supports_acls(server); if (tmp___0 == 0) { return (-95L); } else { } ret = nfs_revalidate_inode(server, inode); if (ret < 0) { return ((ssize_t )ret); } else { } tmp___1 = NFS_I((struct inode const *)inode); if ((tmp___1->cache_validity & 16UL) != 0UL) { nfs_zap_acl_cache(inode); } else { } tmp___2 = nfs4_read_cached_acl(inode, (char *)buf, buflen); ret = (int )tmp___2; if (ret != -2) { return ((ssize_t )ret); } else { } tmp___3 = nfs4_get_acl_uncached(inode, buf, buflen); 
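/*
 * nfs4_proc_get_acl() flow: refuse if the server lacks ACL support
 * (-95, i.e. -EOPNOTSUPP), revalidate the inode, zap a stale cached ACL,
 * try the cached copy (nfs4_read_cached_acl() returns -2, i.e. -ENOENT,
 * on a cache miss), and only then fall back to the uncached fetch above.
 */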
return (tmp___3); } } static int __nfs4_proc_set_acl(struct inode *inode , void const *buf , size_t buflen ) { struct nfs_server *server ; struct nfs_server *tmp ; struct page *pages[16U] ; struct nfs_setaclargs arg ; struct nfs_fh *tmp___0 ; struct nfs_setaclres res ; struct rpc_message msg ; unsigned int npages ; int ret ; int i ; int tmp___1 ; struct nfs_inode *tmp___2 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = NFS_FH((struct inode const *)inode); arg.seq_args.sa_slot = 0; arg.seq_args.sa_cache_this = (unsigned char)0; arg.seq_args.sa_privileged = (unsigned char)0; arg.fh = tmp___0; arg.acl_len = buflen; arg.acl_pgbase = 0U; arg.acl_pages = (struct page **)(& pages); msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 33UL; msg.rpc_argp = (void *)(& arg); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; npages = (unsigned int )((buflen + 4095UL) / 4096UL); tmp___1 = nfs4_server_supports_acls(server); if (tmp___1 == 0) { return (-95); } else { } if (npages > 16U) { return (-34); } else { } i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, & arg.acl_pgbase); if (i < 0) { return (i); } else { } nfs4_inode_return_delegation(inode); ret = nfs4_call_sync(server->client, server, & msg, & arg.seq_args, & res.seq_res, 1); goto ldv_52372; ldv_52371: put_page(pages[i + -1]); i = i - 1; ldv_52372: ; if (i > 0) { goto ldv_52371; } else { } spin_lock(& inode->i_lock); tmp___2 = NFS_I((struct inode const *)inode); tmp___2->cache_validity = tmp___2->cache_validity | 1UL; spin_unlock(& inode->i_lock); nfs_access_zap_cache(inode); nfs_zap_acl_cache(inode); return (ret); } } static int nfs4_proc_set_acl(struct inode *inode , void const *buf , size_t buflen ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52381: tmp = __nfs4_proc_set_acl(inode, buf, buflen); tmp___0 = NFS_SERVER((struct inode const *)inode); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_52381; } else { } return (err); } } static int nfs4_async_handle_error(struct rpc_task *task , struct nfs_server const *server , struct nfs4_state *state ) { struct nfs_client *clp ; long tmp ; int tmp___0 ; { clp = server->nfs_client; if (task->tk_status >= 0) { return (0); } else { } switch (task->tk_status) { case -10087: ; case -10047: ; case -10025: ; if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto ldv_52392; } else { } nfs_remove_bad_delegation(state->inode); case -10038: ; if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto ldv_52392; } else { } nfs4_schedule_stateid_recovery(server, state); goto wait_on_recovery; case -10011: ; if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { nfs4_schedule_stateid_recovery(server, state); } else { } case -10023: ; case -10022: nfs4_schedule_lease_recovery(clp); goto wait_on_recovery; case -10052: ; case -10053: ; case -10077: ; case -10078: ; case -10055: ; case -10076: ; case -10063: tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s ERROR %d, Reset session\n", "nfs4_async_handle_error", task->tk_status); } else { } nfs4_schedule_session_recovery(clp->cl_session, task->tk_status); task->tk_status = 0; return (-11); case -10008: nfs_inc_server_stats(server, 24); case -10013: rpc_delay(task, 3750UL); task->tk_status = 0; return (-11); case -10068: ; case -10024: task->tk_status = 0; 
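/*
 * Hedged note: the negative constants in this switch appear to be the NFSv4
 * protocol errors (e.g. -10024 ~ NFS4ERR_OLD_STATEID, -10068 ~
 * NFS4ERR_RETRY_UNCACHED_REP); clearing tk_status and returning -11
 * (-EAGAIN) asks the caller to restart the RPC call.
 */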
return (-11); } ldv_52392: task->tk_status = nfs4_map_errors(task->tk_status); return (0); wait_on_recovery: rpc_sleep_on(& clp->cl_rpcwaitq, task, 0); tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___0 == 0) { rpc_wake_up_queued_task(& clp->cl_rpcwaitq, task); } else { } task->tk_status = 0; return (-11); } } static void nfs4_init_boot_verifier(struct nfs_client const *clp , nfs4_verifier *bootverf ) { __be32 verf[2U] ; struct nfs_net *nn ; void *tmp ; int tmp___0 ; size_t __len ; void *__ret ; { tmp___0 = constant_test_bit(9U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___0 != 0) { verf[0] = 0U; verf[1] = 1000000001U; } else { tmp = net_generic((struct net const *)clp->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; verf[0] = (unsigned int )nn->boot_time.tv_sec; verf[1] = (unsigned int )nn->boot_time.tv_nsec; } __len = 8UL; if (__len > 63UL) { __ret = memcpy((void *)(& bootverf->data), (void const *)(& verf), __len); } else { __ret = memcpy((void *)(& bootverf->data), (void const *)(& verf), __len); } return; } } static unsigned int nfs4_init_nonuniform_client_string(struct nfs_client const *clp , char *buf , size_t len ) { unsigned int result ; char const *tmp ; char const *tmp___0 ; int tmp___1 ; { rcu_read_lock(); tmp = rpc_peeraddr2str(clp->cl_rpcclient, 2); tmp___0 = rpc_peeraddr2str(clp->cl_rpcclient, 0); tmp___1 = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", (char const *)(& clp->cl_ipaddr), tmp___0, tmp); result = (unsigned int )tmp___1; rcu_read_unlock(); return (result); } } static unsigned int nfs4_init_uniform_client_string(struct nfs_client const *clp , char *buf , size_t len ) { char *nodename ; int tmp ; { nodename = (char *)(& (clp->cl_rpcclient)->cl_nodename); if ((int )((signed char )nfs4_client_id_uniquifier[0]) != 0) { nodename = (char *)(& nfs4_client_id_uniquifier); } else { } tmp = scnprintf(buf, len, "Linux NFSv%u.%u %s", (clp->rpc_ops)->version, clp->cl_minorversion, nodename); return ((unsigned int )tmp); } } int nfs4_proc_setclientid(struct nfs_client *clp , u32 program , unsigned short port , struct rpc_cred *cred , struct nfs4_setclientid_res *res ) { nfs4_verifier sc_verifier ; struct nfs4_setclientid setclientid ; struct rpc_message msg ; int status ; int tmp ; char const *tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; { setclientid.sc_verifier = (nfs4_verifier const *)(& sc_verifier); setclientid.sc_name_len = 0U; setclientid.sc_name[0] = (char)0; setclientid.sc_name[1] = (char)0; setclientid.sc_name[2] = (char)0; setclientid.sc_name[3] = (char)0; setclientid.sc_name[4] = (char)0; setclientid.sc_name[5] = (char)0; setclientid.sc_name[6] = (char)0; setclientid.sc_name[7] = (char)0; setclientid.sc_name[8] = (char)0; setclientid.sc_name[9] = (char)0; setclientid.sc_name[10] = (char)0; setclientid.sc_name[11] = (char)0; setclientid.sc_name[12] = (char)0; setclientid.sc_name[13] = (char)0; setclientid.sc_name[14] = (char)0; setclientid.sc_name[15] = (char)0; setclientid.sc_name[16] = (char)0; setclientid.sc_name[17] = (char)0; setclientid.sc_name[18] = (char)0; setclientid.sc_name[19] = (char)0; setclientid.sc_name[20] = (char)0; setclientid.sc_name[21] = (char)0; setclientid.sc_name[22] = (char)0; setclientid.sc_name[23] = (char)0; setclientid.sc_name[24] = (char)0; setclientid.sc_name[25] = (char)0; setclientid.sc_name[26] = (char)0; setclientid.sc_name[27] = (char)0; setclientid.sc_name[28] = (char)0; setclientid.sc_name[29] = (char)0; setclientid.sc_name[30] = (char)0; 
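/*
 * The long run of sc_name[i] = 0 stores (continuing below), together with
 * the later sc_netid/sc_uaddr stores, appears to be the unrolled zero
 * initialization of the on-stack nfs4_setclientid structure.
 */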
setclientid.sc_name[31] = (char)0; setclientid.sc_name[32] = (char)0; setclientid.sc_name[33] = (char)0; setclientid.sc_name[34] = (char)0; setclientid.sc_name[35] = (char)0; setclientid.sc_name[36] = (char)0; setclientid.sc_name[37] = (char)0; setclientid.sc_name[38] = (char)0; setclientid.sc_name[39] = (char)0; setclientid.sc_name[40] = (char)0; setclientid.sc_name[41] = (char)0; setclientid.sc_name[42] = (char)0; setclientid.sc_name[43] = (char)0; setclientid.sc_name[44] = (char)0; setclientid.sc_name[45] = (char)0; setclientid.sc_name[46] = (char)0; setclientid.sc_name[47] = (char)0; setclientid.sc_name[48] = (char)0; setclientid.sc_name[49] = (char)0; setclientid.sc_name[50] = (char)0; setclientid.sc_name[51] = (char)0; setclientid.sc_name[52] = (char)0; setclientid.sc_name[53] = (char)0; setclientid.sc_name[54] = (char)0; setclientid.sc_name[55] = (char)0; setclientid.sc_name[56] = (char)0; setclientid.sc_name[57] = (char)0; setclientid.sc_name[58] = (char)0; setclientid.sc_name[59] = (char)0; setclientid.sc_name[60] = (char)0; setclientid.sc_name[61] = (char)0; setclientid.sc_name[62] = (char)0; setclientid.sc_name[63] = (char)0; setclientid.sc_name[64] = (char)0; setclientid.sc_name[65] = (char)0; setclientid.sc_name[66] = (char)0; setclientid.sc_name[67] = (char)0; setclientid.sc_name[68] = (char)0; setclientid.sc_name[69] = (char)0; setclientid.sc_name[70] = (char)0; setclientid.sc_name[71] = (char)0; setclientid.sc_name[72] = (char)0; setclientid.sc_name[73] = (char)0; setclientid.sc_name[74] = (char)0; setclientid.sc_name[75] = (char)0; setclientid.sc_name[76] = (char)0; setclientid.sc_name[77] = (char)0; setclientid.sc_name[78] = (char)0; setclientid.sc_name[79] = (char)0; setclientid.sc_name[80] = (char)0; setclientid.sc_name[81] = (char)0; setclientid.sc_name[82] = (char)0; setclientid.sc_name[83] = (char)0; setclientid.sc_name[84] = (char)0; setclientid.sc_name[85] = (char)0; setclientid.sc_name[86] = (char)0; setclientid.sc_name[87] = (char)0; setclientid.sc_name[88] = (char)0; setclientid.sc_name[89] = (char)0; setclientid.sc_name[90] = (char)0; setclientid.sc_name[91] = (char)0; setclientid.sc_name[92] = (char)0; setclientid.sc_name[93] = (char)0; setclientid.sc_name[94] = (char)0; setclientid.sc_name[95] = (char)0; setclientid.sc_name[96] = (char)0; setclientid.sc_name[97] = (char)0; setclientid.sc_name[98] = (char)0; setclientid.sc_name[99] = (char)0; setclientid.sc_name[100] = (char)0; setclientid.sc_name[101] = (char)0; setclientid.sc_name[102] = (char)0; setclientid.sc_name[103] = (char)0; setclientid.sc_name[104] = (char)0; setclientid.sc_name[105] = (char)0; setclientid.sc_name[106] = (char)0; setclientid.sc_name[107] = (char)0; setclientid.sc_name[108] = (char)0; setclientid.sc_name[109] = (char)0; setclientid.sc_name[110] = (char)0; setclientid.sc_name[111] = (char)0; setclientid.sc_name[112] = (char)0; setclientid.sc_name[113] = (char)0; setclientid.sc_name[114] = (char)0; setclientid.sc_name[115] = (char)0; setclientid.sc_name[116] = (char)0; setclientid.sc_name[117] = (char)0; setclientid.sc_name[118] = (char)0; setclientid.sc_name[119] = (char)0; setclientid.sc_name[120] = (char)0; setclientid.sc_name[121] = (char)0; setclientid.sc_name[122] = (char)0; setclientid.sc_name[123] = (char)0; setclientid.sc_name[124] = (char)0; setclientid.sc_name[125] = (char)0; setclientid.sc_name[126] = (char)0; setclientid.sc_name[127] = (char)0; setclientid.sc_prog = program; setclientid.sc_netid_len = 0U; setclientid.sc_netid[0] = (char)0; setclientid.sc_netid[1] = (char)0; 
setclientid.sc_netid[2] = (char)0; setclientid.sc_netid[3] = (char)0; setclientid.sc_netid[4] = (char)0; setclientid.sc_uaddr_len = 0U; setclientid.sc_uaddr[0] = (char)0; setclientid.sc_uaddr[1] = (char)0; setclientid.sc_uaddr[2] = (char)0; setclientid.sc_uaddr[3] = (char)0; setclientid.sc_uaddr[4] = (char)0; setclientid.sc_uaddr[5] = (char)0; setclientid.sc_uaddr[6] = (char)0; setclientid.sc_uaddr[7] = (char)0; setclientid.sc_uaddr[8] = (char)0; setclientid.sc_uaddr[9] = (char)0; setclientid.sc_uaddr[10] = (char)0; setclientid.sc_uaddr[11] = (char)0; setclientid.sc_uaddr[12] = (char)0; setclientid.sc_uaddr[13] = (char)0; setclientid.sc_uaddr[14] = (char)0; setclientid.sc_uaddr[15] = (char)0; setclientid.sc_uaddr[16] = (char)0; setclientid.sc_uaddr[17] = (char)0; setclientid.sc_uaddr[18] = (char)0; setclientid.sc_uaddr[19] = (char)0; setclientid.sc_uaddr[20] = (char)0; setclientid.sc_uaddr[21] = (char)0; setclientid.sc_uaddr[22] = (char)0; setclientid.sc_uaddr[23] = (char)0; setclientid.sc_uaddr[24] = (char)0; setclientid.sc_uaddr[25] = (char)0; setclientid.sc_uaddr[26] = (char)0; setclientid.sc_uaddr[27] = (char)0; setclientid.sc_uaddr[28] = (char)0; setclientid.sc_uaddr[29] = (char)0; setclientid.sc_uaddr[30] = (char)0; setclientid.sc_uaddr[31] = (char)0; setclientid.sc_uaddr[32] = (char)0; setclientid.sc_uaddr[33] = (char)0; setclientid.sc_uaddr[34] = (char)0; setclientid.sc_uaddr[35] = (char)0; setclientid.sc_uaddr[36] = (char)0; setclientid.sc_uaddr[37] = (char)0; setclientid.sc_uaddr[38] = (char)0; setclientid.sc_uaddr[39] = (char)0; setclientid.sc_uaddr[40] = (char)0; setclientid.sc_uaddr[41] = (char)0; setclientid.sc_uaddr[42] = (char)0; setclientid.sc_uaddr[43] = (char)0; setclientid.sc_uaddr[44] = (char)0; setclientid.sc_uaddr[45] = (char)0; setclientid.sc_uaddr[46] = (char)0; setclientid.sc_uaddr[47] = (char)0; setclientid.sc_uaddr[48] = (char)0; setclientid.sc_uaddr[49] = (char)0; setclientid.sc_uaddr[50] = (char)0; setclientid.sc_uaddr[51] = (char)0; setclientid.sc_uaddr[52] = (char)0; setclientid.sc_uaddr[53] = (char)0; setclientid.sc_uaddr[54] = (char)0; setclientid.sc_uaddr[55] = (char)0; setclientid.sc_uaddr[56] = (char)0; setclientid.sc_uaddr[57] = (char)0; setclientid.sc_cb_ident = clp->cl_cb_ident; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 12UL; msg.rpc_argp = (void *)(& setclientid); msg.rpc_resp = (void *)res; msg.rpc_cred = cred; nfs4_init_boot_verifier((struct nfs_client const *)clp, & sc_verifier); tmp = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_flags)); if (tmp != 0) { setclientid.sc_name_len = nfs4_init_uniform_client_string((struct nfs_client const *)clp, (char *)(& setclientid.sc_name), 128UL); } else { setclientid.sc_name_len = nfs4_init_nonuniform_client_string((struct nfs_client const *)clp, (char *)(& setclientid.sc_name), 128UL); } rcu_read_lock(); tmp___0 = rpc_peeraddr2str(clp->cl_rpcclient, 5); tmp___1 = scnprintf((char *)(& setclientid.sc_netid), 5UL, tmp___0); setclientid.sc_netid_len = (unsigned int )tmp___1; rcu_read_unlock(); tmp___2 = scnprintf((char *)(& setclientid.sc_uaddr), 58UL, "%s.%u.%u", (char *)(& clp->cl_ipaddr), (int )port >> 8, (int )port & 255); setclientid.sc_uaddr_len = (unsigned int )tmp___2; tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001dNFS call setclientid auth=%s, \'%.*s\'\n", (((clp->cl_rpcclient)->cl_auth)->au_ops)->au_name, setclientid.sc_name_len, (char *)(& setclientid.sc_name)); } else { } status = rpc_call_sync(clp->cl_rpcclient, 
(struct rpc_message const *)(& msg), 4096); tmp___4 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001dNFS reply setclientid: %d\n", status); } else { } return (status); } } int nfs4_proc_setclientid_confirm(struct nfs_client *clp , struct nfs4_setclientid_res *arg , struct rpc_cred *cred ) { struct nfs_fsinfo fsinfo ; struct rpc_message msg ; unsigned long now ; int status ; long tmp ; long tmp___0 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 13UL; msg.rpc_argp = (void *)arg; msg.rpc_resp = (void *)(& fsinfo); msg.rpc_cred = cred; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS call setclientid_confirm auth=%s, (client ID %llx)\n", (((clp->cl_rpcclient)->cl_auth)->au_ops)->au_name, clp->cl_clientid); } else { } now = jiffies; status = rpc_call_sync(clp->cl_rpcclient, (struct rpc_message const *)(& msg), 4096); if (status == 0) { spin_lock(& clp->cl_lock); clp->cl_lease_time = (unsigned long )(fsinfo.lease_time * 250U); clp->cl_last_renewal = now; spin_unlock(& clp->cl_lock); } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS reply setclientid_confirm: %d\n", status); } else { } return (status); } } static void nfs4_delegreturn_done(struct rpc_task *task , void *calldata ) { struct nfs4_delegreturndata *data ; int tmp ; int tmp___0 ; { data = (struct nfs4_delegreturndata *)calldata; tmp = nfs4_sequence_done(task, & data->res.seq_res); if (tmp == 0) { return; } else { } switch (task->tk_status) { case -10023: ; case -10011: ; case 0: renew_lease(data->res.server, data->timestamp); goto ldv_52467; default: tmp___0 = nfs4_async_handle_error(task, data->res.server, 0); if (tmp___0 == -11) { rpc_restart_call_prepare(task); return; } else { } } ldv_52467: data->rpc_status = task->tk_status; return; } } static void nfs4_delegreturn_release(void *calldata ) { { kfree((void const *)calldata); return; } } static void nfs4_delegreturn_prepare(struct rpc_task *task , void *data ) { struct nfs4_delegreturndata *d_data ; { d_data = (struct nfs4_delegreturndata *)data; nfs4_setup_sequence(d_data->res.server, & d_data->args.seq_args, & d_data->res.seq_res, task); return; } } static struct rpc_call_ops const nfs4_delegreturn_ops = {& nfs4_delegreturn_prepare, & nfs4_delegreturn_done, 0, & nfs4_delegreturn_release}; static int _nfs4_proc_delegreturn(struct inode *inode , struct rpc_cred *cred , nfs4_stateid const *stateid , int issync ) { struct nfs4_delegreturndata *data ; struct nfs_server *server ; struct nfs_server *tmp ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; int status ; void *tmp___0 ; struct nfs_fh *tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 31UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = cred; task_setup_data.task = 0; task_setup_data.rpc_client = server->client; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_delegreturn_ops; task_setup_data.callback_data = 0; task_setup_data.workqueue = 0; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; status = 0; tmp___0 = kzalloc(472UL, 80U); data = (struct nfs4_delegreturndata *)tmp___0; if ((unsigned long )data == (unsigned long )((struct nfs4_delegreturndata *)0)) { return (-12); } else { } nfs41_init_sequence(& data->args.seq_args, & 
data->res.seq_res, 1); data->args.fhandle = (struct nfs_fh const *)(& data->fh); data->args.stateid = (nfs4_stateid const *)(& data->stateid); data->args.bitmask = (u32 const *)(& server->cache_consistency_bitmask); tmp___1 = NFS_FH((struct inode const *)inode); nfs_copy_fh(& data->fh, (struct nfs_fh const *)tmp___1); nfs4_stateid_copy(& data->stateid, stateid); data->res.fattr = & data->fattr; data->res.server = (struct nfs_server const *)server; nfs_fattr_init(data->res.fattr); data->timestamp = jiffies; data->rpc_status = 0; task_setup_data.callback_data = (void *)data; msg.rpc_argp = (void *)(& data->args); msg.rpc_resp = (void *)(& data->res); task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___3 = IS_ERR((void const *)task); if (tmp___3 != 0L) { tmp___2 = PTR_ERR((void const *)task); return ((int )tmp___2); } else { } if (issync == 0) { goto out; } else { } status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { goto out; } else { } status = data->rpc_status; if (status == 0) { nfs_post_op_update_inode_force_wcc(inode, & data->fattr); } else { nfs_refresh_inode(inode, & data->fattr); } out: rpc_put_task(task); return (status); } } int nfs4_proc_delegreturn(struct inode *inode , struct rpc_cred *cred , nfs4_stateid const *stateid , int issync ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52503: err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); switch (err) { case -10023: ; case -10011: ; case 0: ; return (0); } err = nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_52503; } else { } return (err); } } static unsigned long nfs4_set_lock_task_retry(unsigned long timeout ) { long __retval ; { freezer_do_not_count(); __retval = schedule_timeout_killable((long )timeout); freezer_count(); timeout = timeout << 1; if (timeout > 7500UL) { return (7500UL); } else { } return (timeout); } } static int _nfs4_proc_getlk(struct nfs4_state *state , int cmd , struct file_lock *request ) { struct inode *inode ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_client *clp ; struct nfs_lockt_args arg ; struct nfs_fh *tmp___0 ; struct nfs_lockt_res res ; struct rpc_message msg ; struct nfs4_lock_state *lsp ; int status ; { inode = state->inode; tmp = NFS_SERVER((struct inode const *)inode); server = tmp; clp = server->nfs_client; tmp___0 = NFS_FH((struct inode const *)inode); arg.seq_args.sa_slot = 0; arg.seq_args.sa_cache_this = (unsigned char)0; arg.seq_args.sa_privileged = (unsigned char)0; arg.fh = tmp___0; arg.fl = request; arg.lock_owner.clientid = 0ULL; arg.lock_owner.id = 0ULL; arg.lock_owner.s_dev = 0U; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.denied = request; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 15UL; msg.rpc_argp = (void *)(& arg); msg.rpc_resp = (void *)(& res); msg.rpc_cred = (state->owner)->so_cred; arg.lock_owner.clientid = clp->cl_clientid; status = nfs4_set_lock_state(state, request); if (status != 0) { goto out; } else { } lsp = request->fl_u.nfs4_fl.owner; arg.lock_owner.id = (__u64 )lsp->ls_seqid.owner_id; arg.lock_owner.s_dev = server->s_dev; status = nfs4_call_sync(server->client, server, & msg, & 
arg.seq_args, & res.seq_res, 1); switch (status) { case 0: request->fl_type = 2U; goto ldv_52525; case -10010: status = 0; } ldv_52525: (*((request->fl_ops)->fl_release_private))(request); out: ; return (status); } } static int nfs4_proc_getlk(struct nfs4_state *state , int cmd , struct file_lock *request ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52534: tmp = _nfs4_proc_getlk(state, cmd, request); tmp___0 = NFS_SERVER((struct inode const *)state->inode); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_52534; } else { } return (err); } } static int do_vfs_lock(struct file *file , struct file_lock *fl ) { int res ; { res = 0; switch (fl->fl_flags & 3U) { case 1U: res = posix_lock_file_wait(file, fl); goto ldv_52542; case 2U: res = flock_lock_file_wait(file, fl); goto ldv_52542; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4proc.c.prepared"), "i" (4408), "i" (12UL)); ldv_52545: ; goto ldv_52545; } ldv_52542: ; return (res); } } static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl , struct nfs_open_context *ctx , struct nfs4_lock_state *lsp , struct nfs_seqid *seqid ) { struct nfs4_unlockdata *p ; struct inode *inode ; void *tmp ; size_t __len ; void *__ret ; struct nfs_server *tmp___0 ; { inode = (lsp->ls_state)->inode; tmp = kzalloc(392UL, 80U); p = (struct nfs4_unlockdata *)tmp; if ((unsigned long )p == (unsigned long )((struct nfs4_unlockdata *)0)) { return (0); } else { } p->arg.fh = NFS_FH((struct inode const *)inode); p->arg.fl = & p->fl; p->arg.seqid = seqid; p->res.seqid = seqid; p->arg.stateid = & lsp->ls_stateid; p->lsp = lsp; atomic_inc(& lsp->ls_count); p->ctx = get_nfs_open_context(ctx); __len = 256UL; if (__len > 63UL) { __ret = memcpy((void *)(& p->fl), (void const *)fl, __len); } else { __ret = memcpy((void *)(& p->fl), (void const *)fl, __len); } tmp___0 = NFS_SERVER((struct inode const *)inode); p->server = (struct nfs_server const *)tmp___0; return (p); } } static void nfs4_locku_release_calldata(void *data ) { struct nfs4_unlockdata *calldata ; { calldata = (struct nfs4_unlockdata *)data; nfs_free_seqid(calldata->arg.seqid); nfs4_put_lock_state(calldata->lsp); put_nfs_open_context(calldata->ctx); kfree((void const *)calldata); return; } } static void nfs4_locku_done(struct rpc_task *task , void *data ) { struct nfs4_unlockdata *calldata ; int tmp ; int tmp___0 ; { calldata = (struct nfs4_unlockdata *)data; tmp = nfs4_sequence_done(task, & calldata->res.seq_res); if (tmp == 0) { return; } else { } switch (task->tk_status) { case 0: nfs4_stateid_copy(& (calldata->lsp)->ls_stateid, (nfs4_stateid const *)(& calldata->res.stateid)); renew_lease(calldata->server, calldata->timestamp); goto ldv_52575; case -10025: ; case -10024: ; case -10023: ; case -10011: ; goto ldv_52575; default: tmp___0 = nfs4_async_handle_error(task, calldata->server, 0); if (tmp___0 == -11) { rpc_restart_call_prepare(task); } else { } } ldv_52575: nfs_release_seqid(calldata->arg.seqid); return; } } static void nfs4_locku_prepare(struct rpc_task *task , 
void *data ) { struct nfs4_unlockdata *calldata ; int tmp ; int tmp___0 ; int tmp___1 ; { calldata = (struct nfs4_unlockdata *)data; tmp = nfs_wait_on_sequence(calldata->arg.seqid, task); if (tmp != 0) { return; } else { } tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& (calldata->lsp)->ls_flags)); if (tmp___0 == 0) { task->tk_action = 0; nfs4_sequence_done(task, & calldata->res.seq_res); return; } else { } calldata->timestamp = jiffies; tmp___1 = nfs4_setup_sequence(calldata->server, & calldata->arg.seq_args, & calldata->res.seq_res, task); if (tmp___1 != 0) { nfs_release_seqid(calldata->arg.seqid); } else { } return; } } static struct rpc_call_ops const nfs4_locku_ops = {& nfs4_locku_prepare, & nfs4_locku_done, 0, & nfs4_locku_release_calldata}; static struct rpc_task *nfs4_do_unlck(struct file_lock *fl , struct nfs_open_context *ctx , struct nfs4_lock_state *lsp , struct nfs_seqid *seqid ) { struct nfs4_unlockdata *data ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; struct rpc_clnt *tmp ; void *tmp___0 ; struct rpc_task *tmp___1 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 16UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = ctx->cred; tmp = NFS_CLIENT((struct inode const *)(lsp->ls_state)->inode); task_setup_data.task = 0; task_setup_data.rpc_client = tmp; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_locku_ops; task_setup_data.callback_data = 0; task_setup_data.workqueue = nfsiod_workqueue; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; fl->fl_type = 2U; data = nfs4_alloc_unlockdata(fl, ctx, lsp, seqid); if ((unsigned long )data == (unsigned long )((struct nfs4_unlockdata *)0)) { nfs_free_seqid(seqid); tmp___0 = ERR_PTR(-12L); return ((struct rpc_task *)tmp___0); } else { } nfs41_init_sequence(& data->arg.seq_args, & data->res.seq_res, 1); msg.rpc_argp = (void *)(& data->arg); msg.rpc_resp = (void *)(& data->res); task_setup_data.callback_data = (void *)data; tmp___1 = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); return (tmp___1); } } static int nfs4_proc_unlck(struct nfs4_state *state , int cmd , struct file_lock *request ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_seqid *seqid ; struct nfs4_lock_state *lsp ; struct rpc_task *task ; int status ; unsigned char fl_flags ; int tmp___0 ; int tmp___1 ; struct nfs_open_context *tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = NFS_I((struct inode const *)state->inode); nfsi = tmp; status = 0; fl_flags = (unsigned char )request->fl_flags; status = nfs4_set_lock_state(state, request); request->fl_flags = request->fl_flags | 16U; down_read(& nfsi->rwsem); tmp___0 = do_vfs_lock(request->fl_file, request); if (tmp___0 == -2) { up_read(& nfsi->rwsem); goto out; } else { } up_read(& nfsi->rwsem); if (status != 0) { goto out; } else { } tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___1 != 0) { goto out; } else { } lsp = request->fl_u.nfs4_fl.owner; seqid = nfs_alloc_seqid(& lsp->ls_seqid, 208U); status = -12; if ((unsigned long )seqid == (unsigned long )((struct nfs_seqid *)0)) { goto out; } else { } tmp___2 = nfs_file_open_context(request->fl_file); task = nfs4_do_unlck(request, tmp___2, lsp, seqid); tmp___3 = PTR_ERR((void const *)task); status = (int )tmp___3; tmp___4 = IS_ERR((void const *)task); if (tmp___4 != 0L) { goto out; } else { } status = nfs4_wait_for_completion_rpc_task(task); rpc_put_task(task); out: 
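/* restore the caller's original fl_flags (bit 16U, presumably FL_EXISTS,
 * was OR'ed in above) before returning */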
request->fl_flags = (unsigned int )fl_flags; return (status); } } static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl , struct nfs_open_context *ctx , struct nfs4_lock_state *lsp , gfp_t gfp_mask ) { struct nfs4_lockdata *p ; struct inode *inode ; struct nfs_server *server ; struct nfs_server *tmp ; void *tmp___0 ; size_t __len ; void *__ret ; { inode = (lsp->ls_state)->inode; tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = kzalloc(456UL, gfp_mask); p = (struct nfs4_lockdata *)tmp___0; if ((unsigned long )p == (unsigned long )((struct nfs4_lockdata *)0)) { return (0); } else { } p->arg.fh = NFS_FH((struct inode const *)inode); p->arg.fl = & p->fl; p->arg.open_seqid = nfs_alloc_seqid(& ((lsp->ls_state)->owner)->so_seqid, gfp_mask); if ((unsigned long )p->arg.open_seqid == (unsigned long )((struct nfs_seqid *)0)) { goto out_free; } else { } p->arg.lock_seqid = nfs_alloc_seqid(& lsp->ls_seqid, gfp_mask); if ((unsigned long )p->arg.lock_seqid == (unsigned long )((struct nfs_seqid *)0)) { goto out_free_seqid; } else { } p->arg.lock_stateid = & lsp->ls_stateid; p->arg.lock_owner.clientid = (server->nfs_client)->cl_clientid; p->arg.lock_owner.id = (__u64 )lsp->ls_seqid.owner_id; p->arg.lock_owner.s_dev = server->s_dev; p->res.lock_seqid = p->arg.lock_seqid; p->lsp = lsp; p->server = server; atomic_inc(& lsp->ls_count); p->ctx = get_nfs_open_context(ctx); __len = 256UL; if (__len > 63UL) { __ret = memcpy((void *)(& p->fl), (void const *)fl, __len); } else { __ret = memcpy((void *)(& p->fl), (void const *)fl, __len); } return (p); out_free_seqid: nfs_free_seqid(p->arg.open_seqid); out_free: kfree((void const *)p); return (0); } } static void nfs4_lock_prepare(struct rpc_task *task , void *calldata ) { struct nfs4_lockdata *data ; struct nfs4_state *state ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; { data = (struct nfs4_lockdata *)calldata; state = (data->lsp)->ls_state; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: begin!\n", "nfs4_lock_prepare"); } else { } tmp___0 = nfs_wait_on_sequence(data->arg.lock_seqid, task); if (tmp___0 != 0) { return; } else { } if ((((data->arg.lock_seqid)->sequence)->flags & 1) == 0) { tmp___1 = nfs_wait_on_sequence(data->arg.open_seqid, task); if (tmp___1 != 0) { goto out_release_lock_seqid; } else { } data->arg.open_stateid = & state->stateid; data->arg.new_lock_owner = 1U; data->res.open_seqid = data->arg.open_seqid; } else { data->arg.new_lock_owner = 0U; } data->timestamp = jiffies; tmp___2 = nfs4_setup_sequence((struct nfs_server const *)data->server, & data->arg.seq_args, & data->res.seq_res, task); if (tmp___2 == 0) { return; } else { } nfs_release_seqid(data->arg.open_seqid); out_release_lock_seqid: nfs_release_seqid(data->arg.lock_seqid); tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: done!, ret = %d\n", "nfs4_lock_prepare", data->rpc_status); } else { } return; } } static void nfs4_lock_done(struct rpc_task *task , void *calldata ) { struct nfs4_lockdata *data ; long tmp ; int tmp___0 ; struct nfs_server *tmp___1 ; long tmp___2 ; { data = (struct nfs4_lockdata *)calldata; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: begin!\n", "nfs4_lock_done"); } else { } tmp___0 = nfs4_sequence_done(task, & data->res.seq_res); if (tmp___0 == 0) { return; } else { } data->rpc_status = task->tk_status; if ((unsigned int )*((unsigned char *)data + 88UL) != 0U) { if 
(data->rpc_status == 0) { nfs_confirm_seqid(& (data->lsp)->ls_seqid, 0); } else { goto out; } } else { } if (data->rpc_status == 0) { nfs4_stateid_copy(& (data->lsp)->ls_stateid, (nfs4_stateid const *)(& data->res.stateid)); set_bit(0U, (unsigned long volatile *)(& (data->lsp)->ls_flags)); tmp___1 = NFS_SERVER((struct inode const *)((data->ctx)->dentry)->d_inode); renew_lease((struct nfs_server const *)tmp___1, data->timestamp); } else { } out: tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: done, ret = %d!\n", "nfs4_lock_done", data->rpc_status); } else { } return; } } static void nfs4_lock_release(void *calldata ) { struct nfs4_lockdata *data ; long tmp ; struct rpc_task *task ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { data = (struct nfs4_lockdata *)calldata; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: begin!\n", "nfs4_lock_release"); } else { } nfs_free_seqid(data->arg.open_seqid); if (data->cancelled != 0) { task = nfs4_do_unlck(& data->fl, data->ctx, data->lsp, data->arg.lock_seqid); tmp___0 = IS_ERR((void const *)task); if (tmp___0 == 0L) { rpc_put_task_async(task); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: cancelling lock!\n", "nfs4_lock_release"); } else { } } else { nfs_free_seqid(data->arg.lock_seqid); } nfs4_put_lock_state(data->lsp); put_nfs_open_context(data->ctx); kfree((void const *)data); tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: done!\n", "nfs4_lock_release"); } else { } return; } } static struct rpc_call_ops const nfs4_lock_ops = {& nfs4_lock_prepare, & nfs4_lock_done, 0, & nfs4_lock_release}; static void nfs4_handle_setlk_error(struct nfs_server *server , struct nfs4_lock_state *lsp , int new_lock_owner , int error ) { int tmp ; { switch (error) { case -10047: ; case -10025: lsp->ls_seqid.flags = lsp->ls_seqid.flags & -2; if (new_lock_owner != 0) { nfs4_schedule_stateid_recovery((struct nfs_server const *)server, lsp->ls_state); } else { tmp = constant_test_bit(0U, (unsigned long const volatile *)(& lsp->ls_flags)); if (tmp != 0) { nfs4_schedule_stateid_recovery((struct nfs_server const *)server, lsp->ls_state); } else { } } goto ldv_52662; case -10023: lsp->ls_seqid.flags = lsp->ls_seqid.flags & -2; case -10011: nfs4_schedule_lease_recovery(server->nfs_client); } ldv_52662: ; return; } } static int _nfs4_do_setlk(struct nfs4_state *state , int cmd , struct file_lock *fl , int recovery_type ) { struct nfs4_lockdata *data ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; struct rpc_clnt *tmp ; int ret ; long tmp___0 ; struct nfs_open_context *tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 14UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = (state->owner)->so_cred; tmp = NFS_CLIENT((struct inode const *)state->inode); task_setup_data.task = 0; task_setup_data.rpc_client = tmp; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_lock_ops; task_setup_data.callback_data = 0; task_setup_data.workqueue = nfsiod_workqueue; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: begin!\n", "_nfs4_do_setlk"); } else { } tmp___1 = nfs_file_open_context(fl->fl_file); data = 
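/* _nfs4_do_setlk: the assignment continuing below allocates the LOCK call
   data (the GFP mask passed to nfs4_alloc_lockdata differs between the
   recovery and non-recovery paths); the function then marks the lock as
   blocking for cmd == 7, runs the RPC task, hands RPC-level errors to
   nfs4_handle_setlk_error, and flags the request as cancelled if the wait
   for completion is interrupted. */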
nfs4_alloc_lockdata(fl, tmp___1, fl->fl_u.nfs4_fl.owner, recovery_type == 0 ? 208U : 80U); if ((unsigned long )data == (unsigned long )((struct nfs4_lockdata *)0)) { return (-12); } else { } if (cmd == 7) { data->arg.block = 1U; } else { } nfs41_init_sequence(& data->arg.seq_args, & data->res.seq_res, 1); msg.rpc_argp = (void *)(& data->arg); msg.rpc_resp = (void *)(& data->res); task_setup_data.callback_data = (void *)data; if (recovery_type > 0) { if (recovery_type == 1) { data->arg.reclaim = 1U; } else { } nfs4_set_sequence_privileged(& data->arg.seq_args); } else { } task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___3 = IS_ERR((void const *)task); if (tmp___3 != 0L) { tmp___2 = PTR_ERR((void const *)task); return ((int )tmp___2); } else { } ret = nfs4_wait_for_completion_rpc_task(task); if (ret == 0) { ret = data->rpc_status; if (ret != 0) { nfs4_handle_setlk_error(data->server, data->lsp, (int )data->arg.new_lock_owner, ret); } else { } } else { data->cancelled = 1; } rpc_put_task(task); tmp___4 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s: done, ret = %d!\n", "_nfs4_do_setlk", ret); } else { } return (ret); } } static int nfs4_lock_reclaim(struct nfs4_state *state , struct file_lock *request ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; int tmp___0 ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = state->inode; ldv_52685: tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 != 0) { return (0); } else { } err = _nfs4_do_setlk(state, 6, request, 1); if (err != -10008) { goto ldv_52684; } else { } nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_52685; } else { } ldv_52684: ; return (err); } } static int nfs4_lock_expired(struct nfs4_state *state , struct file_lock *request ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; int tmp___0 ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = state->inode; err = nfs4_set_lock_state(state, request); if (err != 0) { return (err); } else { } ldv_52697: tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 != 0) { return (0); } else { } err = _nfs4_do_setlk(state, 6, request, 2); switch (err) { default: ; goto out; case -10013: ; case -10008: nfs4_handle_exception(server, err, & exception); err = 0; } if (exception.retry != 0) { goto ldv_52697; } else { } out: ; return (err); } } static int nfs41_check_expired_locks(struct nfs4_state *state ) { int status ; int ret ; struct nfs4_lock_state *lsp ; struct nfs_server *server ; struct nfs_server *tmp ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; { ret = -10025; tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; __mptr = (struct list_head const *)state->lock_states.next; lsp = (struct nfs4_lock_state *)__mptr; goto ldv_52711; ldv_52710: tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& lsp->ls_flags)); if (tmp___0 != 0) { status = nfs41_test_stateid(server, & lsp->ls_stateid); if (status != 0) { if (status != -10025) { nfs41_free_stateid(server, & lsp->ls_stateid); } else { } clear_bit(0, (unsigned long volatile *)(& 
lsp->ls_flags)); ret = status; } else { } } else { } __mptr___0 = (struct list_head const *)lsp->ls_locks.next; lsp = (struct nfs4_lock_state *)__mptr___0; ldv_52711: ; if ((unsigned long )(& lsp->ls_locks) != (unsigned long )(& state->lock_states)) { goto ldv_52710; } else { } return (ret); } } static int nfs41_lock_expired(struct nfs4_state *state , struct file_lock *request ) { int status ; int tmp ; { status = 0; tmp = constant_test_bit(0U, (unsigned long const volatile *)(& state->flags)); if (tmp != 0) { status = nfs41_check_expired_locks(state); } else { } if (status != 0) { status = nfs4_lock_expired(state, request); } else { } return (status); } } static int _nfs4_proc_setlk(struct nfs4_state *state , int cmd , struct file_lock *request ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; unsigned char fl_flags ; int status ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = NFS_I((struct inode const *)state->inode); nfsi = tmp; fl_flags = (unsigned char )request->fl_flags; status = -37; if ((int )fl_flags & 1) { tmp___0 = constant_test_bit(7U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 == 0) { goto out; } else { } } else { } status = nfs4_set_lock_state(state, request); if (status != 0) { goto out; } else { } request->fl_flags = request->fl_flags | 8U; status = do_vfs_lock(request->fl_file, request); if (status < 0) { goto out; } else { } down_read(& nfsi->rwsem); tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___1 != 0) { request->fl_flags = (unsigned int )fl_flags & 4294967167U; status = do_vfs_lock(request->fl_file, request); goto out_unlock; } else { } status = _nfs4_do_setlk(state, cmd, request, 0); if (status != 0) { goto out_unlock; } else { } request->fl_flags = (unsigned int )fl_flags | 128U; tmp___2 = do_vfs_lock(request->fl_file, request); if (tmp___2 < 0) { printk("\fNFS: %s: VFS is out of sync with lock manager!\n", "_nfs4_proc_setlk"); } else { } out_unlock: up_read(& nfsi->rwsem); out: request->fl_flags = (unsigned int )fl_flags; return (status); } } static int nfs4_proc_setlk(struct nfs4_state *state , int cmd , struct file_lock *request ) { struct nfs4_exception exception ; int err ; struct nfs_server *tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = state; exception.inode = state->inode; ldv_52736: err = _nfs4_proc_setlk(state, cmd, request); if (err == -10010) { err = -11; } else { } tmp = NFS_SERVER((struct inode const *)state->inode); err = nfs4_handle_exception(tmp, err, & exception); if (exception.retry != 0) { goto ldv_52736; } else { } return (err); } } static int nfs4_proc_lock(struct file *filp , int cmd , struct file_lock *request ) { struct nfs_open_context *ctx ; struct nfs4_state *state ; unsigned long timeout ; int status ; int tmp ; int tmp___0 ; struct task_struct *tmp___1 ; int tmp___2 ; { timeout = 250UL; ctx = nfs_file_open_context(filp); state = ctx->state; if (request->fl_start < 0LL || request->fl_end < 0LL) { return (-22); } else { } if (cmd == 5) { if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { tmp = nfs4_proc_getlk(state, 5, request); return (tmp); } else { } return (0); } else { } if (cmd != 6 && cmd != 7) { return (-22); } else { } if ((unsigned int )request->fl_type == 2U) { if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { tmp___0 = nfs4_proc_unlck(state, cmd, request); return (tmp___0); } else { } return (0); } else { } if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { return 
(-37); } else { } switch ((int )request->fl_type) { case 0: ; if ((filp->f_mode & 1U) == 0U) { return (-9); } else { } goto ldv_52748; case 1: ; if ((filp->f_mode & 2U) == 0U) { return (-9); } else { } } ldv_52748: ; ldv_52751: status = nfs4_proc_setlk(state, cmd, request); if (status != -11 || cmd == 6) { goto ldv_52750; } else { } timeout = nfs4_set_lock_task_retry(timeout); status = -512; tmp___1 = get_current(); tmp___2 = signal_pending(tmp___1); if (tmp___2 != 0) { goto ldv_52750; } else { } if (status < 0) { goto ldv_52751; } else { } ldv_52750: ; return (status); } } int nfs4_lock_delegation_recall(struct nfs4_state *state , struct file_lock *fl ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_exception exception ; int err ; { tmp = NFS_SERVER((struct inode const *)state->inode); server = tmp; exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; err = nfs4_set_lock_state(state, fl); if (err != 0) { goto out; } else { } ldv_52781: err = _nfs4_do_setlk(state, 6, fl, 0); switch (err) { default: printk("\vNFS: %s: unhandled error %d.\n", "nfs4_lock_delegation_recall", err); case 0: ; case -116: ; goto out; case -10011: nfs4_schedule_stateid_recovery((struct nfs_server const *)server, state); case -10022: ; case -10023: nfs4_schedule_lease_recovery(server->nfs_client); goto out; case -10052: ; case -10053: ; case -10077: ; case -10055: ; case -10078: nfs4_schedule_session_recovery((server->nfs_client)->cl_session, err); goto out; case -512: ; case -10087: ; case -10047: ; case -10025: ; case -10038: nfs4_schedule_stateid_recovery((struct nfs_server const *)server, state); err = 0; goto out; case -12: ; case -10010: err = 0; goto out; case -10008: ; goto ldv_52780; } ldv_52780: err = nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_52781; } else { } out: ; return (err); } } static void nfs4_release_lockowner_release(void *calldata ) { struct nfs_release_lockowner_data *data ; { data = (struct nfs_release_lockowner_data *)calldata; nfs4_free_lock_state(data->server, data->lsp); kfree((void const *)calldata); return; } } static struct rpc_call_ops const nfs4_release_lockowner_ops = {0, 0, 0, & nfs4_release_lockowner_release}; int nfs4_release_lockowner(struct nfs4_lock_state *lsp ) { struct nfs_server *server ; struct nfs_release_lockowner_data *data ; struct rpc_message msg ; void *tmp ; { server = ((lsp->ls_state)->owner)->so_server; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 35UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = 0; if ((unsigned int )((server->nfs_client)->cl_mvops)->minor_version != 0U) { return (-22); } else { } tmp = kmalloc(40UL, 80U); data = (struct nfs_release_lockowner_data *)tmp; if ((unsigned long )data == (unsigned long )((struct nfs_release_lockowner_data *)0)) { return (-12); } else { } data->lsp = lsp; data->server = server; data->args.lock_owner.clientid = (server->nfs_client)->cl_clientid; data->args.lock_owner.id = (__u64 )lsp->ls_seqid.owner_id; data->args.lock_owner.s_dev = server->s_dev; msg.rpc_argp = (void *)(& data->args); rpc_call_async(server->client, (struct rpc_message const *)(& msg), 0, & nfs4_release_lockowner_ops, (void *)data); return (0); } } static int nfs4_xattr_set_nfs4_acl(struct dentry *dentry , char const *key , void const *buf , size_t buflen , int flags , int type ) { int tmp ; int tmp___0 ; { tmp = strcmp(key, ""); if (tmp != 0) { return (-22); } else { } tmp___0 = nfs4_proc_set_acl(dentry->d_inode, buf, buflen); return 
(tmp___0); } } static int nfs4_xattr_get_nfs4_acl(struct dentry *dentry , char const *key , void *buf , size_t buflen , int type ) { int tmp ; ssize_t tmp___0 ; { tmp = strcmp(key, ""); if (tmp != 0) { return (-22); } else { } tmp___0 = nfs4_proc_get_acl(dentry->d_inode, buf, buflen); return ((int )tmp___0); } } static size_t nfs4_xattr_list_nfs4_acl(struct dentry *dentry , char *list , size_t list_len , char const *name , size_t name_len , int type ) { size_t len ; struct nfs_server *tmp ; int tmp___0 ; size_t __len ; void *__ret ; { len = 16UL; tmp = NFS_SERVER((struct inode const *)dentry->d_inode); tmp___0 = nfs4_server_supports_acls(tmp); if (tmp___0 == 0) { return (0UL); } else { } if ((unsigned long )list != (unsigned long )((char *)0) && len <= list_len) { __len = len; __ret = memcpy((void *)list, (void const *)"system.nfs4_acl", __len); } else { } return (len); } } static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr ) { { if ((((fattr->valid & 4194304U) == 0U && (fattr->valid & 2048U) == 0U) || (fattr->valid & 1024U) == 0U) || (fattr->valid & 524288U) == 0U) { return; } else { } fattr->valid = fattr->valid | 1048583U; fattr->mode = 16749U; fattr->nlink = 2U; return; } } static int _nfs4_proc_fs_locations(struct rpc_clnt *client , struct inode *dir , struct qstr const *name , struct nfs4_fs_locations *fs_locations , struct page *page ) { struct nfs_server *server ; struct nfs_server *tmp ; u32 bitmask[2U] ; unsigned int tmp___0 ; struct nfs4_fs_locations_arg args ; struct nfs_fh *tmp___1 ; struct nfs4_fs_locations_res res ; struct rpc_message msg ; int status ; long tmp___2 ; struct nfs_server *tmp___3 ; long tmp___4 ; { tmp = NFS_SERVER((struct inode const *)dir); server = tmp; bitmask[0] = 16777472U; tmp___0 = 1U; while (1) { if (tmp___0 >= 2U) { break; } else { } bitmask[tmp___0] = 0U; tmp___0 = tmp___0 + 1U; } tmp___1 = NFS_FH((struct inode const *)dir); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.dir_fh = (struct nfs_fh const *)tmp___1; args.name = name; args.page = page; args.bitmask = (u32 const *)(& bitmask); res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.fs_locations = fs_locations; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 34UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: start\n", "_nfs4_proc_fs_locations"); } else { } tmp___3 = NFS_SERVER((struct inode const *)dir); if (((unsigned long )tmp___3->attr_bitmask[1] & 8388608UL) != 0UL) { bitmask[1] = bitmask[1] | 8388608U; } else { bitmask[0] = bitmask[0] | 1048576U; } nfs_fattr_init(& fs_locations->fattr); fs_locations->server = (struct nfs_server const *)server; fs_locations->nlocations = 0; status = nfs4_call_sync(client, server, & msg, & args.seq_args, & res.seq_res, 0); tmp___4 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s: returned status = %d\n", "_nfs4_proc_fs_locations", status); } else { } return (status); } } int nfs4_proc_fs_locations(struct rpc_clnt *client , struct inode *dir , struct qstr const *name , struct nfs4_fs_locations *fs_locations , struct page *page ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 
0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52851: tmp = _nfs4_proc_fs_locations(client, dir, name, fs_locations, page); tmp___0 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_52851; } else { } return (err); } } static int _nfs4_proc_secinfo(struct inode *dir , struct qstr const *name , struct nfs4_secinfo_flavors *flavors ) { int status ; struct nfs4_secinfo_arg args ; struct nfs_fh *tmp ; struct nfs4_secinfo_res res ; struct rpc_message msg ; long tmp___0 ; struct nfs_server *tmp___1 ; struct nfs_server *tmp___2 ; long tmp___3 ; { tmp = NFS_FH((struct inode const *)dir); args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.dir_fh = (struct nfs_fh const *)tmp; args.name = name; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.flavors = flavors; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 36UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS call secinfo %s\n", name->name); } else { } tmp___1 = NFS_SERVER((struct inode const *)dir); tmp___2 = NFS_SERVER((struct inode const *)dir); status = nfs4_call_sync(tmp___2->client, tmp___1, & msg, & args.seq_args, & res.seq_res, 0); tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001dNFS reply secinfo: %d\n", status); } else { } return (status); } } int nfs4_proc_secinfo(struct inode *dir , struct qstr const *name , struct nfs4_secinfo_flavors *flavors ) { struct nfs4_exception exception ; int err ; int tmp ; struct nfs_server *tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_52869: tmp = _nfs4_proc_secinfo(dir, name, flavors); tmp___0 = NFS_SERVER((struct inode const *)dir); err = nfs4_handle_exception(tmp___0, tmp, & exception); if (exception.retry != 0) { goto ldv_52869; } else { } return (err); } } static int nfs4_check_cl_exchange_flags(u32 flags ) { { if ((flags & 2147024636U) != 0U) { goto out_inval; } else { } if ((flags & 131072U) != 0U && (flags & 65536U) != 0U) { goto out_inval; } else { } if ((flags & 458752U) == 0U) { goto out_inval; } else { } return (0); out_inval: ; return (-22); } } static bool nfs41_same_server_scope(struct nfs41_server_scope *a , struct nfs41_server_scope *b ) { int tmp ; { if (a->server_scope_sz == b->server_scope_sz) { tmp = memcmp((void const *)(& a->server_scope), (void const *)(& b->server_scope), (size_t )a->server_scope_sz); if (tmp == 0) { return (1); } else { } } else { } return (0); } } int nfs4_proc_bind_conn_to_session(struct nfs_client *clp , struct rpc_cred *cred ) { int status ; struct nfs41_bind_conn_to_session_res res ; struct rpc_message msg ; long tmp ; void *tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 51UL; msg.rpc_argp = (void *)clp; msg.rpc_resp = (void *)(& res); msg.rpc_cred = cred; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_proc_bind_conn_to_session"); } else { } tmp___0 = kzalloc(1432UL, 80U); res.session = (struct nfs4_session *)tmp___0; 
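/* nfs4_proc_bind_conn_to_session, continued: the reply buffer allocated
   just above is checked, the synchronous RPC is issued, and on success the
   returned session ID, connection direction, and RDMA mode are validated
   against the client's current session; any mismatch maps to -EIO (-5). */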
tmp___1 = ldv__builtin_expect((unsigned long )res.session == (unsigned long )((struct nfs4_session *)0), 0L); if (tmp___1 != 0L) { status = -12; goto out; } else { } status = rpc_call_sync(clp->cl_rpcclient, (struct rpc_message const *)(& msg), 4096); if (status == 0) { tmp___3 = memcmp((void const *)(& (res.session)->sess_id.data), (void const *)(& (clp->cl_session)->sess_id.data), 16UL); if (tmp___3 != 0) { tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001dNFS: %s: Session ID mismatch\n", "nfs4_proc_bind_conn_to_session"); } else { } status = -5; goto out_session; } else { } if (res.dir != 3U) { tmp___4 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001dNFS: %s: Unexpected direction from server\n", "nfs4_proc_bind_conn_to_session"); } else { } status = -5; goto out_session; } else { } if ((int )res.use_conn_in_rdma_mode) { tmp___5 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001dNFS: %s: Server returned RDMA mode = true\n", "nfs4_proc_bind_conn_to_session"); } else { } status = -5; goto out_session; } else { } } else { } out_session: kfree((void const *)res.session); out: tmp___6 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001d<-- %s status= %d\n", "nfs4_proc_bind_conn_to_session", status); } else { } return (status); } } int nfs4_proc_exchange_id(struct nfs_client *clp , struct rpc_cred *cred ) { nfs4_verifier verifier ; struct nfs41_exchange_id_args args ; struct nfs41_exchange_id_res res ; int status ; struct rpc_message msg ; long tmp ; void *tmp___0 ; long tmp___1 ; void *tmp___2 ; long tmp___3 ; void *tmp___4 ; long tmp___5 ; long tmp___6 ; bool tmp___7 ; int tmp___8 ; long tmp___9 ; long tmp___10 ; { args.client = clp; args.verifier = & verifier; args.id_len = 0U; args.id[0] = (char)0; args.id[1] = (char)0; args.id[2] = (char)0; args.id[3] = (char)0; args.id[4] = (char)0; args.id[5] = (char)0; args.id[6] = (char)0; args.id[7] = (char)0; args.id[8] = (char)0; args.id[9] = (char)0; args.id[10] = (char)0; args.id[11] = (char)0; args.id[12] = (char)0; args.id[13] = (char)0; args.id[14] = (char)0; args.id[15] = (char)0; args.id[16] = (char)0; args.id[17] = (char)0; args.id[18] = (char)0; args.id[19] = (char)0; args.id[20] = (char)0; args.id[21] = (char)0; args.id[22] = (char)0; args.id[23] = (char)0; args.id[24] = (char)0; args.id[25] = (char)0; args.id[26] = (char)0; args.id[27] = (char)0; args.id[28] = (char)0; args.id[29] = (char)0; args.id[30] = (char)0; args.id[31] = (char)0; args.id[32] = (char)0; args.id[33] = (char)0; args.id[34] = (char)0; args.id[35] = (char)0; args.id[36] = (char)0; args.id[37] = (char)0; args.id[38] = (char)0; args.id[39] = (char)0; args.id[40] = (char)0; args.id[41] = (char)0; args.id[42] = (char)0; args.id[43] = (char)0; args.id[44] = (char)0; args.id[45] = (char)0; args.id[46] = (char)0; args.id[47] = (char)0; args.flags = 1U; res.clientid = 0ULL; res.seqid = 0U; res.flags = 0U; res.server_owner = 0; res.server_scope = 0; res.impl_id = 0; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 37UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = cred; nfs4_init_boot_verifier((struct nfs_client const *)clp, & verifier); args.id_len = nfs4_init_uniform_client_string((struct nfs_client const *)clp, (char *)(& args.id), 48UL); tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS call exchange_id auth=%s, \'%.*s\'\n", 
(((clp->cl_rpcclient)->cl_auth)->au_ops)->au_name, args.id_len, (char *)(& args.id)); } else { } tmp___0 = kzalloc(1040UL, 80U); res.server_owner = (struct nfs41_server_owner *)tmp___0; tmp___1 = ldv__builtin_expect((unsigned long )res.server_owner == (unsigned long )((struct nfs41_server_owner *)0), 0L); if (tmp___1 != 0L) { status = -12; goto out; } else { } tmp___2 = kzalloc(1028UL, 80U); res.server_scope = (struct nfs41_server_scope *)tmp___2; tmp___3 = ldv__builtin_expect((unsigned long )res.server_scope == (unsigned long )((struct nfs41_server_scope *)0), 0L); if (tmp___3 != 0L) { status = -12; goto out_server_owner; } else { } tmp___4 = kzalloc(2072UL, 80U); res.impl_id = (struct nfs41_impl_id *)tmp___4; tmp___5 = ldv__builtin_expect((unsigned long )res.impl_id == (unsigned long )((struct nfs41_impl_id *)0), 0L); if (tmp___5 != 0L) { status = -12; goto out_server_scope; } else { } status = rpc_call_sync(clp->cl_rpcclient, (struct rpc_message const *)(& msg), 4096); if (status == 0) { status = nfs4_check_cl_exchange_flags(res.flags); } else { } if (status == 0) { clp->cl_clientid = res.clientid; clp->cl_exchange_flags = res.flags & 2147483647U; if ((int )res.flags >= 0) { clp->cl_seqid = res.seqid; } else { } kfree((void const *)clp->cl_serverowner); clp->cl_serverowner = res.server_owner; res.server_owner = 0; kfree((void const *)clp->cl_implid); clp->cl_implid = res.impl_id; if ((unsigned long )clp->cl_serverscope != (unsigned long )((struct nfs41_server_scope *)0)) { tmp___7 = nfs41_same_server_scope(clp->cl_serverscope, res.server_scope); if (tmp___7) { tmp___8 = 0; } else { tmp___8 = 1; } if (tmp___8) { tmp___6 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001d%s: server_scope mismatch detected\n", "nfs4_proc_exchange_id"); } else { } set_bit(8U, (unsigned long volatile *)(& clp->cl_state)); kfree((void const *)clp->cl_serverscope); clp->cl_serverscope = 0; } else { } } else { } if ((unsigned long )clp->cl_serverscope == (unsigned long )((struct nfs41_server_scope *)0)) { clp->cl_serverscope = res.server_scope; goto out; } else { } } else { kfree((void const *)res.impl_id); } out_server_owner: kfree((void const *)res.server_owner); out_server_scope: kfree((void const *)res.server_scope); out: ; if ((unsigned long )clp->cl_implid != (unsigned long )((struct nfs41_impl_id *)0)) { tmp___9 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___9 != 0L) { printk("\001dNFS reply exchange_id: Server Implementation ID: domain: %s, name: %s, date: %llu,%u\n", (char *)(& (clp->cl_implid)->domain), (char *)(& (clp->cl_implid)->name), (clp->cl_implid)->date.seconds, (clp->cl_implid)->date.nseconds); } else { } } else { } tmp___10 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___10 != 0L) { printk("\001dNFS reply exchange_id: %d\n", status); } else { } return (status); } } static int _nfs4_proc_destroy_clientid(struct nfs_client *clp , struct rpc_cred *cred ) { struct rpc_message msg ; int status ; long tmp ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 52UL; msg.rpc_argp = (void *)clp; msg.rpc_resp = 0; msg.rpc_cred = cred; status = rpc_call_sync(clp->cl_rpcclient, (struct rpc_message const *)(& msg), 4096); if (status != 0) { tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS: Got error %d from the server %s on DESTROY_CLIENTID.", status, clp->cl_hostname); } else { } } else { } return (status); } } static int nfs4_proc_destroy_clientid(struct nfs_client *clp , struct rpc_cred 
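/* nfs4_proc_destroy_clientid (signature continues below) retries
   _nfs4_proc_destroy_clientid up to ten times, sleeping one second between
   attempts when the call returns -10008 or -10074, passing any other
   result straight back to the caller. */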
*cred ) { unsigned int loop ; int ret ; { loop = 10U; goto ldv_52919; ldv_52918: ret = _nfs4_proc_destroy_clientid(clp, cred); switch (ret) { case -10008: ; case -10074: ssleep(1U); goto ldv_52916; default: ; return (ret); } ldv_52916: loop = loop - 1U; ldv_52919: ; if (loop != 0U) { goto ldv_52918; } else { } return (0); } } int nfs4_destroy_clientid(struct nfs_client *clp ) { struct rpc_cred *cred ; int ret ; { ret = 0; if ((unsigned int )(clp->cl_mvops)->minor_version == 0U) { goto out; } else { } if (clp->cl_exchange_flags == 0U) { goto out; } else { } if ((int )clp->cl_preserve_clid) { goto out; } else { } cred = nfs4_get_exchange_id_cred(clp); ret = nfs4_proc_destroy_clientid(clp, cred); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { put_rpccred(cred); } else { } switch (ret) { case 0: ; case -10022: clp->cl_exchange_flags = 0U; } out: ; return (ret); } } static void nfs4_get_lease_time_prepare(struct rpc_task *task , void *calldata ) { struct nfs4_get_lease_time_data *data ; long tmp ; long tmp___0 ; { data = (struct nfs4_get_lease_time_data *)calldata; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_get_lease_time_prepare"); } else { } nfs41_setup_sequence((data->clp)->cl_session, & (data->args)->la_seq_args, & (data->res)->lr_seq_res, task); tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d<-- %s\n", "nfs4_get_lease_time_prepare"); } else { } return; } } static void nfs4_get_lease_time_done(struct rpc_task *task , void *calldata ) { struct nfs4_get_lease_time_data *data ; long tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { data = (struct nfs4_get_lease_time_data *)calldata; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_get_lease_time_done"); } else { } tmp___0 = nfs41_sequence_done(task, & (data->res)->lr_seq_res); if (tmp___0 == 0) { return; } else { } switch (task->tk_status) { case -10008: ; case -10013: tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s Retry: tk_status %d\n", "nfs4_get_lease_time_done", task->tk_status); } else { } rpc_delay(task, 25UL); task->tk_status = 0; case -10068: rpc_restart_call_prepare(task); return; } tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s\n", "nfs4_get_lease_time_done"); } else { } return; } } static struct rpc_call_ops const nfs4_get_lease_time_ops = {& nfs4_get_lease_time_prepare, & nfs4_get_lease_time_done, 0, 0}; int nfs4_proc_get_lease_time(struct nfs_client *clp , struct nfs_fsinfo *fsinfo ) { struct rpc_task *task ; struct nfs4_get_lease_time_args args ; struct nfs4_get_lease_time_res res ; struct nfs4_get_lease_time_data data ; struct rpc_message msg ; struct rpc_task_setup task_setup ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { res.lr_seq_res.sr_slot = 0; res.lr_seq_res.sr_timestamp = 0UL; res.lr_seq_res.sr_status = 0; res.lr_seq_res.sr_status_flags = 0U; res.lr_seq_res.sr_highest_slotid = 0U; res.lr_seq_res.sr_target_highest_slotid = 0U; res.lr_fsinfo = fsinfo; data.args = & args; data.res = & res; data.clp = clp; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 41UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; task_setup.task = 0; task_setup.rpc_client = clp->cl_rpcclient; task_setup.rpc_message = (struct rpc_message const *)(& msg); task_setup.callback_ops = & 
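/* nfs4_proc_get_lease_time, continued: the task setup completed below
   marks the sequence arguments as privileged, runs the GET_LEASE_TIME rpc
   task, and returns either the task's status or the PTR_ERR of a failed
   task start. */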
nfs4_get_lease_time_ops; task_setup.callback_data = (void *)(& data); task_setup.workqueue = 0; task_setup.flags = 4096U; task_setup.priority = (signed char)0; nfs41_init_sequence(& args.la_seq_args, & res.lr_seq_res, 0); nfs4_set_sequence_privileged(& args.la_seq_args); tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_proc_get_lease_time"); } else { } task = rpc_run_task((struct rpc_task_setup const *)(& task_setup)); tmp___1 = IS_ERR((void const *)task); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)task); status = (int )tmp___0; } else { status = task->tk_status; rpc_put_task(task); } tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s return %d\n", "nfs4_proc_get_lease_time", status); } else { } return (status); } } static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args ) { struct nfs4_session *session ; unsigned int mxrqst_sz ; unsigned int mxresp_sz ; long tmp ; long tmp___0 ; { session = (args->client)->cl_session; mxrqst_sz = session->fc_target_max_rqst_sz; mxresp_sz = session->fc_target_max_resp_sz; if (mxrqst_sz == 0U) { mxrqst_sz = 1048576U; } else { } if (mxresp_sz == 0U) { mxresp_sz = 1048576U; } else { } args->fc_attrs.max_rqst_sz = mxrqst_sz; args->fc_attrs.max_resp_sz = mxresp_sz; args->fc_attrs.max_ops = 8U; args->fc_attrs.max_reqs = (u32 )max_session_slots; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: Fore Channel : max_rqst_sz=%u max_resp_sz=%u max_ops=%u max_reqs=%u\n", "nfs4_init_channel_attrs", args->fc_attrs.max_rqst_sz, args->fc_attrs.max_resp_sz, args->fc_attrs.max_ops, args->fc_attrs.max_reqs); } else { } args->bc_attrs.max_rqst_sz = 4096U; args->bc_attrs.max_resp_sz = 4096U; args->bc_attrs.max_resp_sz_cached = 0U; args->bc_attrs.max_ops = 2U; args->bc_attrs.max_reqs = 1U; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: Back Channel : max_rqst_sz=%u max_resp_sz=%u max_resp_sz_cached=%u max_ops=%u max_reqs=%u\n", "nfs4_init_channel_attrs", args->bc_attrs.max_rqst_sz, args->bc_attrs.max_resp_sz, args->bc_attrs.max_resp_sz_cached, args->bc_attrs.max_ops, args->bc_attrs.max_reqs); } else { } return; } } static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args , struct nfs4_session *session ) { struct nfs4_channel_attrs *sent ; struct nfs4_channel_attrs *rcvd ; { sent = & args->fc_attrs; rcvd = & session->fc_attrs; if (rcvd->max_resp_sz > sent->max_resp_sz) { return (-22); } else { } if (rcvd->max_ops < sent->max_ops) { return (-22); } else { } if (rcvd->max_reqs == 0U) { return (-22); } else { } if (rcvd->max_reqs > 1024U) { rcvd->max_reqs = 1024U; } else { } return (0); } } static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args , struct nfs4_session *session ) { struct nfs4_channel_attrs *sent ; struct nfs4_channel_attrs *rcvd ; { sent = & args->bc_attrs; rcvd = & session->bc_attrs; if (rcvd->max_rqst_sz > sent->max_rqst_sz) { return (-22); } else { } if (rcvd->max_resp_sz < sent->max_resp_sz) { return (-22); } else { } if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached) { return (-22); } else { } if (rcvd->max_ops != sent->max_ops) { return (-22); } else { } if (rcvd->max_reqs != sent->max_reqs) { return (-22); } else { } return (0); } } static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args , struct nfs4_session *session ) { int ret ; int tmp ; { ret = 
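/* nfs4_verify_channel_attrs: the call continuing below checks the fore
   channel attributes first and only falls through to the back channel
   check when the fore channel attributes are acceptable. */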
nfs4_verify_fore_channel_attrs(args, session); if (ret != 0) { return (ret); } else { } tmp = nfs4_verify_back_channel_attrs(args, session); return (tmp); } } static int _nfs4_proc_create_session(struct nfs_client *clp , struct rpc_cred *cred ) { struct nfs4_session *session ; struct nfs41_create_session_args args ; struct nfs41_create_session_res res ; struct rpc_message msg ; int status ; { session = clp->cl_session; args.client = clp; args.flags = 0U; args.cb_program = 1073741824U; args.fc_attrs.max_rqst_sz = 0U; args.fc_attrs.max_resp_sz = 0U; args.fc_attrs.max_resp_sz_cached = 0U; args.fc_attrs.max_ops = 0U; args.fc_attrs.max_reqs = 0U; args.bc_attrs.max_rqst_sz = 0U; args.bc_attrs.max_resp_sz = 0U; args.bc_attrs.max_resp_sz_cached = 0U; args.bc_attrs.max_ops = 0U; args.bc_attrs.max_reqs = 0U; res.client = clp; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 38UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = cred; nfs4_init_channel_attrs(& args); args.flags = 3U; status = rpc_call_sync((session->clp)->cl_rpcclient, (struct rpc_message const *)(& msg), 4096); if (status == 0) { status = nfs4_verify_channel_attrs(& args, session); clp->cl_seqid = clp->cl_seqid + 1U; } else { } return (status); } } int nfs4_proc_create_session(struct nfs_client *clp , struct rpc_cred *cred ) { int status ; unsigned int *ptr ; struct nfs4_session *session ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { session = clp->cl_session; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s clp=%p session=%p\n", "nfs4_proc_create_session", clp, session); } else { } status = _nfs4_proc_create_session(clp, cred); if (status != 0) { goto out; } else { } status = nfs4_setup_session_slot_tables(session); tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dslot table setup returned %d\n", status); } else { } if (status != 0) { goto out; } else { } ptr = (unsigned int *)(& session->sess_id.data); tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s client>seqid %d sessionid %u:%u:%u:%u\n", "nfs4_proc_create_session", clp->cl_seqid, *ptr, *(ptr + 1UL), *(ptr + 2UL), *(ptr + 3UL)); } else { } out: tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s\n", "nfs4_proc_create_session"); } else { } return (status); } } int nfs4_proc_destroy_session(struct nfs4_session *session , struct rpc_cred *cred ) { struct rpc_message msg ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 39UL; msg.rpc_argp = (void *)session; msg.rpc_resp = 0; msg.rpc_cred = cred; status = 0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_proc_destroy_session\n"); } else { } if ((session->clp)->cl_cons_state != 0) { return (status); } else { } status = rpc_call_sync((session->clp)->cl_rpcclient, (struct rpc_message const *)(& msg), 4096); if (status != 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS: Got error %d from the server on DESTROY_SESSION. 
Session has been destroyed regardless...\n", status); } else { } } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- nfs4_proc_destroy_session\n"); } else { } return (status); } } static void nfs41_sequence_release(void *data ) { struct nfs4_sequence_data *calldata ; struct nfs_client *clp ; int tmp ; { calldata = (struct nfs4_sequence_data *)data; clp = calldata->clp; tmp = atomic_read((atomic_t const *)(& clp->cl_count)); if (tmp > 1) { nfs4_schedule_state_renewal(clp); } else { } nfs_put_client(clp); kfree((void const *)calldata); return; } } static int nfs41_sequence_handle_errors(struct rpc_task *task , struct nfs_client *clp ) { { switch (task->tk_status) { case -10008: rpc_delay(task, 3750UL); return (-11); default: nfs4_schedule_lease_recovery(clp); } return (0); } } static void nfs41_sequence_call_done(struct rpc_task *task , void *data ) { struct nfs4_sequence_data *calldata ; struct nfs_client *clp ; int tmp ; long tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; long tmp___4 ; { calldata = (struct nfs4_sequence_data *)data; clp = calldata->clp; tmp = nfs41_sequence_done(task, (struct nfs4_sequence_res *)task->tk_msg.rpc_resp); if (tmp == 0) { return; } else { } if (task->tk_status < 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s ERROR %d\n", "nfs41_sequence_call_done", task->tk_status); } else { } tmp___1 = atomic_read((atomic_t const *)(& clp->cl_count)); if (tmp___1 == 1) { goto out; } else { } tmp___2 = nfs41_sequence_handle_errors(task, clp); if (tmp___2 == -11) { rpc_restart_call_prepare(task); return; } else { } } else { } tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s rpc_cred %p\n", "nfs41_sequence_call_done", task->tk_msg.rpc_cred); } else { } out: tmp___4 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d<-- %s\n", "nfs41_sequence_call_done"); } else { } return; } } static void nfs41_sequence_prepare(struct rpc_task *task , void *data ) { struct nfs4_sequence_data *calldata ; struct nfs_client *clp ; struct nfs4_sequence_args *args ; struct nfs4_sequence_res *res ; { calldata = (struct nfs4_sequence_data *)data; clp = calldata->clp; args = (struct nfs4_sequence_args *)task->tk_msg.rpc_argp; res = (struct nfs4_sequence_res *)task->tk_msg.rpc_resp; nfs41_setup_sequence(clp->cl_session, args, res, task); return; } } static struct rpc_call_ops const nfs41_sequence_ops = {& nfs41_sequence_prepare, & nfs41_sequence_call_done, 0, & nfs41_sequence_release}; static struct rpc_task *_nfs41_proc_sequence(struct nfs_client *clp , struct rpc_cred *cred , bool is_privileged ) { struct nfs4_sequence_data *calldata ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; void *tmp ; int tmp___0 ; void *tmp___1 ; void *tmp___2 ; struct rpc_task *tmp___3 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 40UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = cred; task_setup_data.task = 0; task_setup_data.rpc_client = clp->cl_rpcclient; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs41_sequence_ops; task_setup_data.callback_data = 0; task_setup_data.workqueue = 0; task_setup_data.flags = 513U; task_setup_data.priority = (signed char)0; tmp___0 = atomic_add_unless(& clp->cl_count, 1, 0); if (tmp___0 == 0) { tmp = ERR_PTR(-5L); return ((struct rpc_task *)tmp); } else { } tmp___1 = kzalloc(56UL, 80U); calldata 
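/* _nfs41_proc_sequence, continued: the allocation result is cast below;
   on failure the client reference taken just above is dropped and -ENOMEM
   (-12) is returned, otherwise the sequence args/res are initialized,
   optionally marked privileged, wired into the rpc_message, and the task
   is started with rpc_run_task and handed back to the caller. */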
= (struct nfs4_sequence_data *)tmp___1; if ((unsigned long )calldata == (unsigned long )((struct nfs4_sequence_data *)0)) { nfs_put_client(clp); tmp___2 = ERR_PTR(-12L); return ((struct rpc_task *)tmp___2); } else { } nfs41_init_sequence(& calldata->args, & calldata->res, 0); if ((int )is_privileged) { nfs4_set_sequence_privileged(& calldata->args); } else { } msg.rpc_argp = (void *)(& calldata->args); msg.rpc_resp = (void *)(& calldata->res); calldata->clp = clp; task_setup_data.callback_data = (void *)calldata; tmp___3 = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); return (tmp___3); } } static int nfs41_proc_async_sequence(struct nfs_client *clp , struct rpc_cred *cred , unsigned int renew_flags ) { struct rpc_task *task ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; { ret = 0; if ((renew_flags & 1U) == 0U) { return (0); } else { } task = _nfs41_proc_sequence(clp, cred, 0); tmp___0 = IS_ERR((void const *)task); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)task); ret = (int )tmp; } else { rpc_put_task_async(task); } tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- %s status=%d\n", "nfs41_proc_async_sequence", ret); } else { } return (ret); } } static int nfs4_proc_sequence(struct nfs_client *clp , struct rpc_cred *cred ) { struct rpc_task *task ; int ret ; long tmp ; long tmp___0 ; struct nfs4_sequence_res *res ; long tmp___1 ; { task = _nfs41_proc_sequence(clp, cred, 1); tmp___0 = IS_ERR((void const *)task); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)task); ret = (int )tmp; goto out; } else { } ret = rpc_wait_for_completion_task(task); if (ret == 0) { res = (struct nfs4_sequence_res *)task->tk_msg.rpc_resp; if (task->tk_status == 0) { nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); } else { } ret = task->tk_status; } else { } rpc_put_task(task); out: tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- %s status=%d\n", "nfs4_proc_sequence", ret); } else { } return (ret); } } static void nfs4_reclaim_complete_prepare(struct rpc_task *task , void *data ) { struct nfs4_reclaim_complete_data *calldata ; { calldata = (struct nfs4_reclaim_complete_data *)data; nfs41_setup_sequence((calldata->clp)->cl_session, & calldata->arg.seq_args, & calldata->res.seq_res, task); return; } } static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task , struct nfs_client *clp ) { { switch (task->tk_status) { case 0: ; case -10054: ; case -10082: ; goto ldv_53082; case -10008: rpc_delay(task, 3750UL); case -10068: ; return (-11); default: nfs4_schedule_lease_recovery(clp); } ldv_53082: ; return (0); } } static void nfs4_reclaim_complete_done(struct rpc_task *task , void *data ) { struct nfs4_reclaim_complete_data *calldata ; struct nfs_client *clp ; struct nfs4_sequence_res *res ; long tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; { calldata = (struct nfs4_reclaim_complete_data *)data; clp = calldata->clp; res = & calldata->res.seq_res; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_reclaim_complete_done"); } else { } tmp___0 = nfs41_sequence_done(task, res); if (tmp___0 == 0) { return; } else { } tmp___1 = nfs41_reclaim_complete_handle_errors(task, clp); if (tmp___1 == -11) { rpc_restart_call_prepare(task); return; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s\n", "nfs4_reclaim_complete_done"); } else { } return; } } static void 
nfs4_free_reclaim_complete_data(void *data ) { struct nfs4_reclaim_complete_data *calldata ; { calldata = (struct nfs4_reclaim_complete_data *)data; kfree((void const *)calldata); return; } } static struct rpc_call_ops const nfs4_reclaim_complete_call_ops = {& nfs4_reclaim_complete_prepare, & nfs4_reclaim_complete_done, 0, & nfs4_free_reclaim_complete_data}; static int nfs41_proc_reclaim_complete(struct nfs_client *clp ) { struct nfs4_reclaim_complete_data *calldata ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; int status ; long tmp ; void *tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 42UL; msg.rpc_argp = 0; msg.rpc_resp = 0; msg.rpc_cred = 0; task_setup_data.task = 0; task_setup_data.rpc_client = clp->cl_rpcclient; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_reclaim_complete_call_ops; task_setup_data.callback_data = 0; task_setup_data.workqueue = 0; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; status = -12; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs41_proc_reclaim_complete"); } else { } tmp___0 = kzalloc(64UL, 80U); calldata = (struct nfs4_reclaim_complete_data *)tmp___0; if ((unsigned long )calldata == (unsigned long )((struct nfs4_reclaim_complete_data *)0)) { goto out; } else { } calldata->clp = clp; calldata->arg.one_fs = 0U; nfs41_init_sequence(& calldata->arg.seq_args, & calldata->res.seq_res, 0); nfs4_set_sequence_privileged(& calldata->arg.seq_args); msg.rpc_argp = (void *)(& calldata->arg); msg.rpc_resp = (void *)(& calldata->res); task_setup_data.callback_data = (void *)calldata; task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___2 = IS_ERR((void const *)task); if (tmp___2 != 0L) { tmp___1 = PTR_ERR((void const *)task); status = (int )tmp___1; goto out; } else { } status = nfs4_wait_for_completion_rpc_task(task); if (status == 0) { status = task->tk_status; } else { } rpc_put_task(task); return (0); out: tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d<-- %s status=%d\n", "nfs41_proc_reclaim_complete", status); } else { } return (status); } } static void nfs4_layoutget_prepare(struct rpc_task *task , void *calldata ) { struct nfs4_layoutget *lgp ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_session *session ; struct nfs4_session *tmp___0 ; long tmp___1 ; int tmp___2 ; struct nfs_inode *tmp___3 ; int tmp___4 ; { lgp = (struct nfs4_layoutget *)calldata; tmp = NFS_SERVER((struct inode const *)lgp->args.inode); server = tmp; tmp___0 = nfs4_get_session((struct nfs_server const *)server); session = tmp___0; tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d--> %s\n", "nfs4_layoutget_prepare"); } else { } tmp___2 = nfs41_setup_sequence(session, & lgp->args.seq_args, & lgp->res.seq_res, task); if (tmp___2 != 0) { return; } else { } tmp___3 = NFS_I((struct inode const *)lgp->args.inode); tmp___4 = pnfs_choose_layoutget_stateid(& lgp->args.stateid, tmp___3->layout, (lgp->args.ctx)->state); if (tmp___4 != 0) { rpc_exit(task, 0); } else { } return; } } static void nfs4_layoutget_done(struct rpc_task *task , void *calldata ) { struct nfs4_layoutget *lgp ; struct inode *inode ; struct nfs_server *server ; struct nfs_server *tmp ; struct pnfs_layout_hdr *lo ; struct nfs4_state *state ; long tmp___0 
; int tmp___1 ; struct nfs_inode *tmp___2 ; struct list_head head ; int tmp___3 ; int tmp___4 ; long tmp___5 ; { lgp = (struct nfs4_layoutget *)calldata; inode = lgp->args.inode; tmp = NFS_SERVER((struct inode const *)inode); server = tmp; state = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d--> %s\n", "nfs4_layoutget_done"); } else { } tmp___1 = nfs41_sequence_done(task, & lgp->res.seq_res); if (tmp___1 == 0) { goto out; } else { } switch (task->tk_status) { case 0: ; goto out; case -10058: ; case -10061: task->tk_status = -10008; goto ldv_53131; case -10011: ; case -10025: spin_lock(& inode->i_lock); tmp___2 = NFS_I((struct inode const *)inode); lo = tmp___2->layout; if ((unsigned long )lo == (unsigned long )((struct pnfs_layout_hdr *)0)) { spin_unlock(& inode->i_lock); state = (lgp->args.ctx)->state; } else { tmp___3 = list_empty((struct list_head const *)(& lo->plh_segs)); if (tmp___3 != 0) { spin_unlock(& inode->i_lock); state = (lgp->args.ctx)->state; } else { head.next = & head; head.prev = & head; pnfs_mark_matching_lsegs_invalid(lo, & head, 0); spin_unlock(& inode->i_lock); pnfs_free_lseg_list(& head); } } } ldv_53131: tmp___4 = nfs4_async_handle_error(task, (struct nfs_server const *)server, state); if (tmp___4 == -11) { rpc_restart_call_prepare(task); } else { } out: tmp___5 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d<-- %s\n", "nfs4_layoutget_done"); } else { } return; } } static size_t max_response_pages(struct nfs_server *server ) { u32 max_resp_sz ; unsigned int tmp ; { max_resp_sz = ((server->nfs_client)->cl_session)->fc_attrs.max_resp_sz; tmp = nfs_page_array_len(0U, (size_t )max_resp_sz); return ((size_t )tmp); } } static void nfs4_free_pages(struct page **pages , size_t size ) { int i ; { if ((unsigned long )pages == (unsigned long )((struct page **)0)) { return; } else { } i = 0; goto ldv_53146; ldv_53145: ; if ((unsigned long )*(pages + (unsigned long )i) == (unsigned long )((struct page *)0)) { goto ldv_53144; } else { } __free_pages(*(pages + (unsigned long )i), 0U); i = i + 1; ldv_53146: ; if ((size_t )i < size) { goto ldv_53145; } else { } ldv_53144: kfree((void const *)pages); return; } } static struct page **nfs4_alloc_pages(size_t size , gfp_t gfp_flags ) { struct page **pages ; int i ; void *tmp ; long tmp___0 ; long tmp___1 ; { tmp = kcalloc(size, 8UL, gfp_flags); pages = (struct page **)tmp; if ((unsigned long )pages == (unsigned long )((struct page **)0)) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: can\'t alloc array of %zu pages\n", "nfs4_alloc_pages", size); } else { } return (0); } else { } i = 0; goto ldv_53155; ldv_53154: *(pages + (unsigned long )i) = alloc_pages(gfp_flags, 0U); if ((unsigned long )*(pages + (unsigned long )i) == (unsigned long )((struct page *)0)) { tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: failed to allocate page\n", "nfs4_alloc_pages"); } else { } nfs4_free_pages(pages, size); return (0); } else { } i = i + 1; ldv_53155: ; if ((size_t )i < size) { goto ldv_53154; } else { } return (pages); } } static void nfs4_layoutget_release(void *calldata ) { struct nfs4_layoutget *lgp ; struct nfs_server *server ; struct nfs_server *tmp ; size_t max_pages ; size_t tmp___0 ; long tmp___1 ; long tmp___2 ; { lgp = (struct nfs4_layoutget *)calldata; tmp = NFS_SERVER((struct inode const *)lgp->args.inode); server = tmp; tmp___0 = 
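/* nfs4_layoutget_release, continued: the call completing below recomputes
   the maximum number of response pages so the layout page array can be
   freed, after which the open context reference is dropped and the
   layoutget call data itself is kfree'd. */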
max_response_pages(server); max_pages = tmp___0; tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d--> %s\n", "nfs4_layoutget_release"); } else { } nfs4_free_pages(lgp->args.layout.pages, max_pages); put_nfs_open_context(lgp->args.ctx); kfree((void const *)calldata); tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s\n", "nfs4_layoutget_release"); } else { } return; } } static struct rpc_call_ops const nfs4_layoutget_call_ops = {& nfs4_layoutget_prepare, & nfs4_layoutget_done, 0, & nfs4_layoutget_release}; struct pnfs_layout_segment *nfs4_proc_layoutget(struct nfs4_layoutget *lgp , gfp_t gfp_flags ) { struct nfs_server *server ; struct nfs_server *tmp ; size_t max_pages ; size_t tmp___0 ; struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; struct pnfs_layout_segment *lseg ; int status ; long tmp___1 ; void *tmp___2 ; void *tmp___3 ; long tmp___4 ; long tmp___5 ; void *tmp___6 ; { tmp = NFS_SERVER((struct inode const *)lgp->args.inode); server = tmp; tmp___0 = max_response_pages(server); max_pages = tmp___0; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 43UL; msg.rpc_argp = (void *)(& lgp->args); msg.rpc_resp = (void *)(& lgp->res); msg.rpc_cred = 0; task_setup_data.task = 0; task_setup_data.rpc_client = server->client; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_layoutget_call_ops; task_setup_data.callback_data = (void *)lgp; task_setup_data.workqueue = 0; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; lseg = 0; status = 0; tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d--> %s\n", "nfs4_proc_layoutget"); } else { } lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); if ((unsigned long )lgp->args.layout.pages == (unsigned long )((struct page **)0)) { nfs4_layoutget_release((void *)lgp); tmp___2 = ERR_PTR(-12L); return ((struct pnfs_layout_segment *)tmp___2); } else { } lgp->args.layout.pglen = (__u32 )max_pages * 4096U; lgp->res.layoutp = & lgp->args.layout; lgp->res.seq_res.sr_slot = 0; nfs41_init_sequence(& lgp->args.seq_args, & lgp->res.seq_res, 0); task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___4 = IS_ERR((void const *)task); if (tmp___4 != 0L) { tmp___3 = ERR_CAST((void const *)task); return ((struct pnfs_layout_segment *)tmp___3); } else { } status = nfs4_wait_for_completion_rpc_task(task); if (status == 0) { status = task->tk_status; } else { } if (status == 0) { lseg = pnfs_layout_process(lgp); } else { } rpc_put_task(task); tmp___5 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d<-- %s status=%d\n", "nfs4_proc_layoutget", status); } else { } if (status != 0) { tmp___6 = ERR_PTR((long )status); return ((struct pnfs_layout_segment *)tmp___6); } else { } return (lseg); } } static void nfs4_layoutreturn_prepare(struct rpc_task *task , void *calldata ) { struct nfs4_layoutreturn *lrp ; long tmp ; { lrp = (struct nfs4_layoutreturn *)calldata; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_layoutreturn_prepare"); } else { } nfs41_setup_sequence((lrp->clp)->cl_session, & lrp->args.seq_args, & lrp->res.seq_res, task); return; } } static void nfs4_layoutreturn_done(struct rpc_task *task , void *calldata ) { struct nfs4_layoutreturn *lrp ; struct nfs_server *server ; long tmp ; int 
tmp___0 ; int tmp___1 ; long tmp___2 ; { lrp = (struct nfs4_layoutreturn *)calldata; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_layoutreturn_done"); } else { } tmp___0 = nfs41_sequence_done(task, & lrp->res.seq_res); if (tmp___0 == 0) { return; } else { } server = NFS_SERVER((struct inode const *)lrp->args.inode); tmp___1 = nfs4_async_handle_error(task, (struct nfs_server const *)server, 0); if (tmp___1 == -11) { rpc_restart_call_prepare(task); return; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s\n", "nfs4_layoutreturn_done"); } else { } return; } } static void nfs4_layoutreturn_release(void *calldata ) { struct nfs4_layoutreturn *lrp ; struct pnfs_layout_hdr *lo ; long tmp ; long tmp___0 ; { lrp = (struct nfs4_layoutreturn *)calldata; lo = lrp->args.layout; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_layoutreturn_release"); } else { } spin_lock(& (lo->plh_inode)->i_lock); if (lrp->res.lrs_present != 0U) { pnfs_set_layout_stateid(lo, (nfs4_stateid const *)(& lrp->res.stateid), 1); } else { } lo->plh_block_lgets = lo->plh_block_lgets - 1UL; spin_unlock(& (lo->plh_inode)->i_lock); pnfs_put_layout_hdr(lrp->args.layout); kfree((void const *)calldata); tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d<-- %s\n", "nfs4_layoutreturn_release"); } else { } return; } } static struct rpc_call_ops const nfs4_layoutreturn_call_ops = {& nfs4_layoutreturn_prepare, & nfs4_layoutreturn_done, 0, & nfs4_layoutreturn_release}; int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp ) { struct rpc_task *task ; struct rpc_message msg ; struct rpc_task_setup task_setup_data ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 46UL; msg.rpc_argp = (void *)(& lrp->args); msg.rpc_resp = (void *)(& lrp->res); msg.rpc_cred = 0; task_setup_data.task = 0; task_setup_data.rpc_client = (lrp->clp)->cl_rpcclient; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_layoutreturn_call_ops; task_setup_data.callback_data = (void *)lrp; task_setup_data.workqueue = 0; task_setup_data.flags = (unsigned short)0; task_setup_data.priority = (signed char)0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_proc_layoutreturn"); } else { } nfs41_init_sequence(& lrp->args.seq_args, & lrp->res.seq_res, 1); task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___1 = IS_ERR((void const *)task); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)task); return ((int )tmp___0); } else { } status = task->tk_status; tmp___2 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- %s status=%d\n", "nfs4_proc_layoutreturn", status); } else { } rpc_put_task(task); return (status); } } static int _nfs4_getdevicelist(struct nfs_server *server , struct nfs_fh const *fh , struct pnfs_devicelist *devlist ) { struct nfs4_getdevicelist_args args ; struct nfs4_getdevicelist_res res ; struct rpc_message msg ; int status ; long tmp ; long tmp___0 ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.fh = fh; args.layoutclass = (server->pnfs_curr_ld)->id; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; 
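/* _nfs4_getdevicelist()/nfs4_proc_getdevicelist(): the original designated
   initializers for the sequence args/results appear here as individual field
   assignments after the CIL transformation; the wrapper retries the synchronous
   GETDEVICELIST call through nfs4_handle_exception() while exception.retry
   stays set. */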
res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.devlist = devlist; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 50UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "_nfs4_getdevicelist"); } else { } status = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d<-- %s status=%d\n", "_nfs4_getdevicelist", status); } else { } return (status); } } int nfs4_proc_getdevicelist(struct nfs_server *server , struct nfs_fh const *fh , struct pnfs_devicelist *devlist ) { struct nfs4_exception exception ; int err ; int tmp ; long tmp___0 ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_53222: tmp = _nfs4_getdevicelist(server, fh, devlist); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_53222; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: err=%d, num_devs=%u\n", "nfs4_proc_getdevicelist", err, devlist->num_devs); } else { } return (err); } } static int _nfs4_proc_getdeviceinfo(struct nfs_server *server , struct pnfs_device *pdev ) { struct nfs4_getdeviceinfo_args args ; struct nfs4_getdeviceinfo_res res ; struct rpc_message msg ; int status ; long tmp ; long tmp___0 ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.pdev = pdev; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.pdev = pdev; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 44UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "_nfs4_proc_getdeviceinfo"); } else { } status = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d<-- %s status=%d\n", "_nfs4_proc_getdeviceinfo", status); } else { } return (status); } } int nfs4_proc_getdeviceinfo(struct nfs_server *server , struct pnfs_device *pdev ) { struct nfs4_exception exception ; int err ; int tmp ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_53248: tmp = _nfs4_proc_getdeviceinfo(server, pdev); err = nfs4_handle_exception(server, tmp, & exception); if (exception.retry != 0) { goto ldv_53248; } else { } return (err); } } static void nfs4_layoutcommit_prepare(struct rpc_task *task , void *calldata ) { struct nfs4_layoutcommit_data *data ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_session *session ; struct nfs4_session *tmp___0 ; { data = (struct nfs4_layoutcommit_data *)calldata; tmp = NFS_SERVER((struct inode const *)data->args.inode); server = tmp; tmp___0 = nfs4_get_session((struct nfs_server const *)server); session = tmp___0; nfs41_setup_sequence(session, & data->args.seq_args, & data->res.seq_res, task); return; } } static void nfs4_layoutcommit_done(struct rpc_task *task , void *calldata ) { struct 
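/* nfs4_layoutcommit_done(): a small set of benign NFSv4 errors
   (-10087, -10049, -10050, -10013) is treated as success (tk_status reset to 0),
   a zero status updates the inode attributes, and anything else goes through
   nfs4_async_handle_error(), restarting the call on -EAGAIN (-11).
   nfs4_layoutcommit_release() walks data->lseg_list, drops each segment's
   layoutcommit reference, clears the in-progress bit in nfs_inode->flags and
   wakes any waiters. */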
nfs4_layoutcommit_data *data ; struct nfs_server *server ; struct nfs_server *tmp ; int tmp___0 ; int tmp___1 ; { data = (struct nfs4_layoutcommit_data *)calldata; tmp = NFS_SERVER((struct inode const *)data->args.inode); server = tmp; tmp___0 = nfs41_sequence_done(task, & data->res.seq_res); if (tmp___0 == 0) { return; } else { } switch (task->tk_status) { case -10087: ; case -10049: ; case -10050: ; case -10013: task->tk_status = 0; goto ldv_53274; case 0: nfs_post_op_update_inode_force_wcc(data->args.inode, data->res.fattr); goto ldv_53274; default: tmp___1 = nfs4_async_handle_error(task, (struct nfs_server const *)server, 0); if (tmp___1 == -11) { rpc_restart_call_prepare(task); return; } else { } } ldv_53274: ; return; } } static void nfs4_layoutcommit_release(void *calldata ) { struct nfs4_layoutcommit_data *data ; struct pnfs_layout_segment *lseg ; struct pnfs_layout_segment *tmp ; unsigned long *bitlock ; struct nfs_inode *tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___1 ; struct list_head const *__mptr___1 ; { data = (struct nfs4_layoutcommit_data *)calldata; tmp___0 = NFS_I((struct inode const *)data->args.inode); bitlock = & tmp___0->flags; pnfs_cleanup_layoutcommit(data); __mptr = (struct list_head const *)data->lseg_list.next; lseg = (struct pnfs_layout_segment *)__mptr + 0xfffffffffffffff0UL; __mptr___0 = (struct list_head const *)lseg->pls_lc_list.next; tmp = (struct pnfs_layout_segment *)__mptr___0 + 0xfffffffffffffff0UL; goto ldv_53291; ldv_53290: list_del_init(& lseg->pls_lc_list); tmp___1 = test_and_clear_bit(2, (unsigned long volatile *)(& lseg->pls_flags)); if (tmp___1 != 0) { pnfs_put_lseg(lseg); } else { } lseg = tmp; __mptr___1 = (struct list_head const *)tmp->pls_lc_list.next; tmp = (struct pnfs_layout_segment *)__mptr___1 + 0xfffffffffffffff0UL; ldv_53291: ; if ((unsigned long )(& lseg->pls_lc_list) != (unsigned long )(& data->lseg_list)) { goto ldv_53290; } else { } clear_bit_unlock(10U, (unsigned long volatile *)bitlock); __asm__ volatile ("": : : "memory"); wake_up_bit((void *)bitlock, 10); put_rpccred(data->cred); kfree((void const *)data); return; } } static struct rpc_call_ops const nfs4_layoutcommit_ops = {& nfs4_layoutcommit_prepare, & nfs4_layoutcommit_done, 0, & nfs4_layoutcommit_release}; int nfs4_proc_layoutcommit(struct nfs4_layoutcommit_data *data , bool sync ) { struct rpc_message msg ; struct rpc_task_setup task_setup_data ; struct rpc_clnt *tmp ; struct rpc_task *task ; int status ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 45UL; msg.rpc_argp = (void *)(& data->args); msg.rpc_resp = (void *)(& data->res); msg.rpc_cred = data->cred; tmp = NFS_CLIENT((struct inode const *)data->args.inode); task_setup_data.task = & data->task; task_setup_data.rpc_client = tmp; task_setup_data.rpc_message = (struct rpc_message const *)(& msg); task_setup_data.callback_ops = & nfs4_layoutcommit_ops; task_setup_data.callback_data = (void *)data; task_setup_data.workqueue = 0; task_setup_data.flags = 1U; task_setup_data.priority = (signed char)0; status = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS: %4d initiating layoutcommit call. 
sync %d lbw: %llu inode %lu\n", (int )data->task.tk_pid, (int )sync, data->args.lastbytewritten, (data->args.inode)->i_ino); } else { } nfs41_init_sequence(& data->args.seq_args, & data->res.seq_res, 1); task = rpc_run_task((struct rpc_task_setup const *)(& task_setup_data)); tmp___2 = IS_ERR((void const *)task); if (tmp___2 != 0L) { tmp___1 = PTR_ERR((void const *)task); return ((int )tmp___1); } else { } if (! sync) { goto out; } else { } status = nfs4_wait_for_completion_rpc_task(task); if (status != 0) { goto out; } else { } status = task->tk_status; out: tmp___3 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: status %d\n", "nfs4_proc_layoutcommit", status); } else { } rpc_put_task(task); return (status); } } static int _nfs41_proc_secinfo_no_name(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info , struct nfs4_secinfo_flavors *flavors ) { struct nfs41_secinfo_no_name_args args ; struct nfs4_secinfo_res res ; struct rpc_message msg ; int tmp ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.style = 0; res.seq_res.sr_slot = 0; res.seq_res.sr_timestamp = 0UL; res.seq_res.sr_status = 0; res.seq_res.sr_status_flags = 0U; res.seq_res.sr_highest_slotid = 0U; res.seq_res.sr_target_highest_slotid = 0U; res.flavors = flavors; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 47UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp = nfs4_call_sync(server->client, server, & msg, & args.seq_args, & res.seq_res, 0); return (tmp); } } static int nfs41_proc_secinfo_no_name(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info , struct nfs4_secinfo_flavors *flavors ) { struct nfs4_exception exception ; int err ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_53326: err = _nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); switch (err) { case 0: ; case -10016: ; case -10004: ; goto out; default: err = nfs4_handle_exception(server, err, & exception); } if (exception.retry != 0) { goto ldv_53326; } else { } out: ; return (err); } } static int nfs41_find_root_sec(struct nfs_server *server , struct nfs_fh *fhandle , struct nfs_fsinfo *info ) { int err ; struct page *page ; rpc_authflavor_t flavor ; struct nfs4_secinfo_flavors *flavors ; void *tmp ; { page = alloc_pages(208U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { err = -12; goto out; } else { } tmp = lowmem_page_address((struct page const *)page); flavors = (struct nfs4_secinfo_flavors *)tmp; err = nfs41_proc_secinfo_no_name(server, fhandle, info, flavors); if (err == -10016 || err == -10004) { err = nfs4_find_root_sec(server, fhandle, info); goto out_freepage; } else { } if (err != 0) { goto out_freepage; } else { } flavor = nfs_find_best_sec(flavors); if (err == 0) { err = nfs4_lookup_root_sec(server, fhandle, info, flavor); } else { } out_freepage: put_page(page); if (err == -13) { return (-1); } else { } out: ; return (err); } } static int _nfs41_test_stateid(struct nfs_server *server , nfs4_stateid *stateid ) { int status ; struct nfs41_test_stateid_args args ; struct nfs41_test_stateid_res res ; struct rpc_message msg ; long tmp ; long tmp___0 ; long tmp___1 ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.stateid = stateid; msg.rpc_proc = (struct rpc_procinfo *)(& 
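/* _nfs41_test_stateid() (being built here) and _nfs4_free_stateid() below issue
   privileged, sequence-protected synchronous calls (nfs4_procedures entries 48
   and 49); their nfs41_* wrappers retry through nfs4_handle_exception() only
   while the result is -10008 (NFS4ERR_DELAY). */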
nfs4_procedures) + 48UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS call test_stateid %p\n", stateid); } else { } nfs41_init_sequence(& args.seq_args, & res.seq_res, 0); nfs4_set_sequence_privileged(& args.seq_args); status = nfs4_call_sync_sequence(server->client, server, & msg, & args.seq_args, & res.seq_res); if (status != 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS reply test_stateid: failed, %d\n", status); } else { } return (status); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS reply test_stateid: succeeded, %d\n", - res.status); } else { } return ((int )(- res.status)); } } static int nfs41_test_stateid(struct nfs_server *server , nfs4_stateid *stateid ) { struct nfs4_exception exception ; int err ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_53354: err = _nfs41_test_stateid(server, stateid); if (err != -10008) { goto ldv_53353; } else { } nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_53354; } else { } ldv_53353: ; return (err); } } static int _nfs4_free_stateid(struct nfs_server *server , nfs4_stateid *stateid ) { struct nfs41_free_stateid_args args ; struct nfs41_free_stateid_res res ; struct rpc_message msg ; int status ; long tmp ; long tmp___0 ; { args.seq_args.sa_slot = 0; args.seq_args.sa_cache_this = (unsigned char)0; args.seq_args.sa_privileged = (unsigned char)0; args.stateid = stateid; msg.rpc_proc = (struct rpc_procinfo *)(& nfs4_procedures) + 49UL; msg.rpc_argp = (void *)(& args); msg.rpc_resp = (void *)(& res); msg.rpc_cred = 0; tmp = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS call free_stateid %p\n", stateid); } else { } nfs41_init_sequence(& args.seq_args, & res.seq_res, 0); nfs4_set_sequence_privileged(& args.seq_args); status = nfs4_call_sync_sequence(server->client, server, & msg, & args.seq_args, & res.seq_res); tmp___0 = ldv__builtin_expect((nfs_debug & 16U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS reply free_stateid: %d\n", status); } else { } return (status); } } static int nfs41_free_stateid(struct nfs_server *server , nfs4_stateid *stateid ) { struct nfs4_exception exception ; int err ; { exception.timeout = 0L; exception.retry = 0; exception.state = 0; exception.inode = 0; ldv_53370: err = _nfs4_free_stateid(server, stateid); if (err != -10008) { goto ldv_53369; } else { } nfs4_handle_exception(server, err, & exception); if (exception.retry != 0) { goto ldv_53370; } else { } ldv_53369: ; return (err); } } static bool nfs41_match_stateid(nfs4_stateid const *s1 , nfs4_stateid const *s2 ) { int tmp ; { tmp = memcmp((void const *)(& s1->other), (void const *)(& s2->other), 12UL); if (tmp != 0) { return (0); } else { } if ((unsigned int )s1->seqid == (unsigned int )s2->seqid) { return (1); } else { } if ((unsigned int )s1->seqid == 0U || (unsigned int )s2->seqid == 0U) { return (1); } else { } return (0); } } static bool nfs4_match_stateid(nfs4_stateid const *s1 , nfs4_stateid const *s2 ) { bool tmp ; { tmp = nfs4_stateid_match(s1, s2); return (tmp); } } static struct nfs4_state_recovery_ops const nfs40_reboot_recovery_ops = {0, 5, & nfs4_open_reclaim, & nfs4_lock_reclaim, & nfs4_init_clientid, & nfs4_get_setclientid_cred, 0, & nfs40_discover_server_trunking}; static struct 
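/* Constant dispatch tables: NFSv4.0/4.1 reboot and no-grace recovery ops,
   state-renewal ops, the two nfs4_minor_version_ops entries selected through
   nfs_v4_minor_ops[], the directory/file inode_operations, the
   nfs_v4_clientops RPC vector and the system.nfs4_acl xattr handler. */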
nfs4_state_recovery_ops const nfs41_reboot_recovery_ops = {0, 5, & nfs4_open_reclaim, & nfs4_lock_reclaim, & nfs41_init_clientid, & nfs4_get_exchange_id_cred, & nfs41_proc_reclaim_complete, & nfs41_discover_server_trunking}; static struct nfs4_state_recovery_ops const nfs40_nograce_recovery_ops = {1, 6, & nfs4_open_expired, & nfs4_lock_expired, & nfs4_init_clientid, & nfs4_get_setclientid_cred, 0, 0}; static struct nfs4_state_recovery_ops const nfs41_nograce_recovery_ops = {1, 6, & nfs41_open_expired, & nfs41_lock_expired, & nfs41_init_clientid, & nfs4_get_exchange_id_cred, 0, 0}; static struct nfs4_state_maintenance_ops const nfs40_state_renewal_ops = {& nfs4_proc_async_renew, & nfs4_get_renew_cred_locked, & nfs4_proc_renew}; static struct nfs4_state_maintenance_ops const nfs41_state_renewal_ops = {& nfs41_proc_async_sequence, & nfs4_get_machine_cred_locked, & nfs4_proc_sequence}; static struct nfs4_minor_version_ops const nfs_v4_0_minor_ops = {0U, & _nfs4_call_sync, & nfs4_match_stateid, & nfs4_find_root_sec, & nfs40_reboot_recovery_ops, & nfs40_nograce_recovery_ops, & nfs40_state_renewal_ops}; static struct nfs4_minor_version_ops const nfs_v4_1_minor_ops = {1U, & nfs4_call_sync_sequence, & nfs41_match_stateid, & nfs41_find_root_sec, & nfs41_reboot_recovery_ops, & nfs41_nograce_recovery_ops, & nfs41_state_renewal_ops}; struct nfs4_minor_version_ops const *nfs_v4_minor_ops[2U] = { & nfs_v4_0_minor_ops, & nfs_v4_1_minor_ops}; struct inode_operations const nfs4_dir_inode_operations = {& nfs_lookup, 0, & nfs_permission, 0, 0, 0, & nfs_create, & nfs_link, & nfs_unlink, & nfs_symlink, & nfs_mkdir, & nfs_rmdir, & nfs_mknod, & nfs_rename, & nfs_setattr, & nfs_getattr, & generic_setxattr, & generic_getxattr, & generic_listxattr, & generic_removexattr, 0, 0, & nfs_atomic_open}; static struct inode_operations const nfs4_file_inode_operations = {0, 0, & nfs_permission, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & nfs_setattr, & nfs_getattr, & generic_setxattr, & generic_getxattr, & generic_listxattr, & generic_removexattr, 0, 0, 0}; struct nfs_rpc_ops const nfs_v4_clientops = {4U, & nfs4_dentry_operations, & nfs4_dir_inode_operations, & nfs4_file_inode_operations, & nfs4_file_operations, & nfs4_proc_get_root, & nfs4_submount, & nfs4_try_mount, & nfs4_proc_getattr, & nfs4_proc_setattr, & nfs4_proc_lookup, & nfs4_proc_access, & nfs4_proc_readlink, & nfs4_proc_create, & nfs4_proc_remove, & nfs4_proc_unlink_setup, & nfs4_proc_unlink_rpc_prepare, & nfs4_proc_unlink_done, & nfs4_proc_rename, & nfs4_proc_rename_setup, & nfs4_proc_rename_rpc_prepare, & nfs4_proc_rename_done, & nfs4_proc_link, & nfs4_proc_symlink, & nfs4_proc_mkdir, & nfs4_proc_remove, & nfs4_proc_readdir, & nfs4_proc_mknod, & nfs4_proc_statfs, & nfs4_proc_fsinfo, & nfs4_proc_pathconf, & nfs4_server_capabilities, & nfs4_decode_dirent, & nfs4_proc_read_setup, & pnfs_pageio_init_read, & nfs4_proc_read_rpc_prepare, & nfs4_read_done, & nfs4_proc_write_setup, & pnfs_pageio_init_write, & nfs4_proc_write_rpc_prepare, & nfs4_write_done, & nfs4_proc_commit_setup, & nfs4_proc_commit_rpc_prepare, & nfs4_commit_done, & nfs4_proc_lock, 0, & nfs4_zap_acl_attr, & nfs4_close_context, & nfs4_atomic_open, & nfs4_have_delegation, & nfs4_inode_return_delegation, & nfs4_alloc_client, & nfs4_init_client, & nfs4_free_client, & nfs4_create_server, & nfs_clone_server}; static struct xattr_handler const nfs4_xattr_nfs4_acl_handler = {"system.nfs4_acl", 0, & nfs4_xattr_list_nfs4_acl, & nfs4_xattr_get_nfs4_acl, & nfs4_xattr_set_nfs4_acl}; struct xattr_handler const 
*nfs4_xattr_handlers[2U] = { & nfs4_xattr_nfs4_acl_handler, 0}; void ldv_check_final_state(void) ; void ldv_initialize(void) ; extern void ldv_handler_precall(void) ; extern int __VERIFIER_nondet_int(void) ; int LDV_IN_INTERRUPT ; void ldv_main0_sequence_infinite_withcheck_stateful(void) { struct rpc_task *var_group1 ; void *var_nfs41_call_sync_prepare_13_p1 ; void *var_nfs41_call_sync_done_14_p1 ; void *var_nfs4_open_confirm_done_49_p1 ; void *var_nfs4_open_confirm_release_50_p0 ; void *var_nfs4_open_prepare_52_p1 ; void *var_nfs4_open_done_53_p1 ; void *var_nfs4_open_release_54_p0 ; void *var_nfs4_close_prepare_74_p1 ; void *var_nfs4_close_done_73_p1 ; void *var_nfs4_free_closedata_71_p0 ; void *var_nfs4_renew_done_145_p1 ; void *var_nfs4_renew_release_144_p0 ; void *var_nfs4_delegreturn_prepare_167_p1 ; void *var_nfs4_delegreturn_done_165_p1 ; void *var_nfs4_delegreturn_release_166_p0 ; void *var_nfs4_locku_prepare_177_p1 ; void *var_nfs4_locku_done_176_p1 ; void *var_nfs4_locku_release_calldata_175_p0 ; void *var_nfs4_lock_prepare_181_p1 ; void *var_nfs4_lock_done_182_p1 ; void *var_nfs4_lock_release_183_p0 ; void *var_nfs4_release_lockowner_release_194_p0 ; void *var_nfs4_get_lease_time_prepare_211_p1 ; void *var_nfs4_get_lease_time_done_212_p1 ; void *var_nfs41_sequence_call_done_223_p1 ; void *var_nfs41_sequence_prepare_224_p1 ; void *var_nfs41_sequence_release_221_p0 ; void *var_nfs4_reclaim_complete_prepare_228_p1 ; void *var_nfs4_reclaim_complete_done_230_p1 ; void *var_nfs4_free_reclaim_complete_data_231_p0 ; void *var_nfs4_layoutget_prepare_233_p1 ; void *var_nfs4_layoutget_done_234_p1 ; void *var_nfs4_layoutget_release_237_p0 ; void *var_nfs4_layoutreturn_prepare_239_p1 ; void *var_nfs4_layoutreturn_done_240_p1 ; void *var_nfs4_layoutreturn_release_241_p0 ; void *var_nfs4_layoutcommit_prepare_247_p1 ; void *var_nfs4_layoutcommit_done_248_p1 ; void *var_nfs4_layoutcommit_release_249_p0 ; struct nfs4_state_owner *var_group2 ; struct nfs4_state *var_group3 ; struct file_lock *var_group4 ; struct nfs_client *var_group5 ; struct rpc_cred *var_group6 ; unsigned int var_nfs4_proc_async_renew_146_p2 ; unsigned int var_nfs41_proc_async_sequence_226_p2 ; struct rpc_clnt *var_group7 ; struct nfs_server *var_group8 ; struct rpc_message *var__nfs4_call_sync_19_p2 ; struct nfs4_sequence_args *var__nfs4_call_sync_19_p3 ; struct nfs4_sequence_res *var__nfs4_call_sync_19_p4 ; nfs4_stateid const *var_nfs4_match_stateid_259_p0 ; nfs4_stateid const *var_nfs4_match_stateid_259_p1 ; struct nfs_fh *var_group9 ; struct nfs_fsinfo *var_nfs4_find_root_sec_83_p2 ; struct rpc_message *var_nfs4_call_sync_sequence_15_p2 ; struct nfs4_sequence_args *var_nfs4_call_sync_sequence_15_p3 ; struct nfs4_sequence_res *var_nfs4_call_sync_sequence_15_p4 ; nfs4_stateid const *var_nfs41_match_stateid_258_p0 ; nfs4_stateid const *var_nfs41_match_stateid_258_p1 ; struct nfs_fsinfo *var_nfs41_find_root_sec_253_p2 ; struct nfs_fsinfo *var_nfs4_proc_get_root_85_p2 ; struct nfs_fattr *var_nfs4_proc_getattr_88_p2 ; struct dentry *var_group10 ; struct nfs_fattr *var_group11 ; struct iattr *var_nfs4_proc_setattr_89_p2 ; struct inode *var_group12 ; struct qstr *var_group13 ; struct nfs_fh *var_nfs4_proc_lookup_93_p2 ; struct nfs_fattr *var_nfs4_proc_lookup_93_p3 ; struct nfs_access_entry *var_group14 ; struct page *var_group15 ; unsigned int var_nfs4_proc_readlink_98_p2 ; unsigned int var_nfs4_proc_readlink_98_p3 ; struct iattr *var_nfs4_proc_create_99_p2 ; int var_nfs4_proc_create_99_p3 ; struct rpc_message *var_group16 ; 
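/* ldv_main0_sequence_infinite_withcheck_stateful(): the LDV-generated driver
   environment model. After ldv_initialize() it loops on __VERIFIER_nondet_int(),
   nondeterministically invoking the entry points registered above
   (cases 0-38: rpc_call_ops prepare/done/release callbacks; cases 39-57:
   recovery, renewal and minor-version ops; cases 58-97: nfs_rpc_ops methods and
   the nfs4_acl xattr handlers), and finally calls ldv_check_final_state() to
   check the lock model. */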
struct nfs_unlinkdata *var_group17 ; struct inode *var_nfs4_proc_rename_109_p2 ; struct qstr *var_nfs4_proc_rename_109_p3 ; struct nfs_renamedata *var_group18 ; struct inode *var_nfs4_proc_rename_done_107_p2 ; struct qstr *var_nfs4_proc_link_111_p2 ; struct page *var_nfs4_proc_symlink_116_p2 ; unsigned int var_nfs4_proc_symlink_116_p3 ; struct iattr *var_nfs4_proc_symlink_116_p4 ; struct iattr *var_nfs4_proc_mkdir_118_p2 ; u64 var_nfs4_proc_readdir_120_p2 ; struct page **var_nfs4_proc_readdir_120_p3 ; unsigned int var_nfs4_proc_readdir_120_p4 ; int var_nfs4_proc_readdir_120_p5 ; struct iattr *var_nfs4_proc_mknod_122_p2 ; dev_t var_nfs4_proc_mknod_122_p3 ; struct nfs_fsstat *var_nfs4_proc_statfs_124_p2 ; struct nfs_fsinfo *var_nfs4_proc_fsinfo_127_p2 ; struct nfs_pathconf *var_nfs4_proc_pathconf_129_p2 ; struct nfs_read_data *var_group19 ; struct nfs_write_data *var_group20 ; struct nfs_commit_data *var_group21 ; struct file *var_group22 ; int var_nfs4_proc_lock_192_p1 ; struct file_lock *var_nfs4_proc_lock_192_p2 ; struct nfs_open_context *var_group23 ; int var_nfs4_close_context_77_p1 ; int var_nfs4_atomic_open_76_p2 ; struct iattr *var_nfs4_atomic_open_76_p3 ; char *var_nfs4_xattr_list_nfs4_acl_198_p1 ; size_t var_nfs4_xattr_list_nfs4_acl_198_p2 ; char const *var_nfs4_xattr_list_nfs4_acl_198_p3 ; size_t var_nfs4_xattr_list_nfs4_acl_198_p4 ; int var_nfs4_xattr_list_nfs4_acl_198_p5 ; char const *var_nfs4_xattr_get_nfs4_acl_197_p1 ; void *var_nfs4_xattr_get_nfs4_acl_197_p2 ; size_t var_nfs4_xattr_get_nfs4_acl_197_p3 ; int var_nfs4_xattr_get_nfs4_acl_197_p4 ; char const *var_nfs4_xattr_set_nfs4_acl_196_p1 ; void const *var_nfs4_xattr_set_nfs4_acl_196_p2 ; size_t var_nfs4_xattr_set_nfs4_acl_196_p3 ; int var_nfs4_xattr_set_nfs4_acl_196_p4 ; int var_nfs4_xattr_set_nfs4_acl_196_p5 ; int ldv_s_nfs_v4_clientops_nfs_rpc_ops ; int tmp ; int tmp___0 ; { ldv_s_nfs_v4_clientops_nfs_rpc_ops = 0; LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_53632; ldv_53631: tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_handler_precall(); nfs41_call_sync_prepare(var_group1, var_nfs41_call_sync_prepare_13_p1); goto ldv_53532; case 1: ldv_handler_precall(); nfs41_call_sync_done(var_group1, var_nfs41_call_sync_done_14_p1); goto ldv_53532; case 2: ldv_handler_precall(); nfs4_open_confirm_done(var_group1, var_nfs4_open_confirm_done_49_p1); goto ldv_53532; case 3: ldv_handler_precall(); nfs4_open_confirm_release(var_nfs4_open_confirm_release_50_p0); goto ldv_53532; case 4: ldv_handler_precall(); nfs4_open_prepare(var_group1, var_nfs4_open_prepare_52_p1); goto ldv_53532; case 5: ldv_handler_precall(); nfs4_open_done(var_group1, var_nfs4_open_done_53_p1); goto ldv_53532; case 6: ldv_handler_precall(); nfs4_open_release(var_nfs4_open_release_54_p0); goto ldv_53532; case 7: ldv_handler_precall(); nfs4_close_prepare(var_group1, var_nfs4_close_prepare_74_p1); goto ldv_53532; case 8: ldv_handler_precall(); nfs4_close_done(var_group1, var_nfs4_close_done_73_p1); goto ldv_53532; case 9: ldv_handler_precall(); nfs4_free_closedata(var_nfs4_free_closedata_71_p0); goto ldv_53532; case 10: ldv_handler_precall(); nfs4_renew_done(var_group1, var_nfs4_renew_done_145_p1); goto ldv_53532; case 11: ldv_handler_precall(); nfs4_renew_release(var_nfs4_renew_release_144_p0); goto ldv_53532; case 12: ldv_handler_precall(); nfs4_delegreturn_prepare(var_group1, var_nfs4_delegreturn_prepare_167_p1); goto ldv_53532; case 13: ldv_handler_precall(); nfs4_delegreturn_done(var_group1, var_nfs4_delegreturn_done_165_p1); goto ldv_53532; case 14: 
ldv_handler_precall(); nfs4_delegreturn_release(var_nfs4_delegreturn_release_166_p0); goto ldv_53532; case 15: ldv_handler_precall(); nfs4_locku_prepare(var_group1, var_nfs4_locku_prepare_177_p1); goto ldv_53532; case 16: ldv_handler_precall(); nfs4_locku_done(var_group1, var_nfs4_locku_done_176_p1); goto ldv_53532; case 17: ldv_handler_precall(); nfs4_locku_release_calldata(var_nfs4_locku_release_calldata_175_p0); goto ldv_53532; case 18: ldv_handler_precall(); nfs4_lock_prepare(var_group1, var_nfs4_lock_prepare_181_p1); goto ldv_53532; case 19: ldv_handler_precall(); nfs4_lock_done(var_group1, var_nfs4_lock_done_182_p1); goto ldv_53532; case 20: ldv_handler_precall(); nfs4_lock_release(var_nfs4_lock_release_183_p0); goto ldv_53532; case 21: ldv_handler_precall(); nfs4_release_lockowner_release(var_nfs4_release_lockowner_release_194_p0); goto ldv_53532; case 22: ldv_handler_precall(); nfs4_get_lease_time_prepare(var_group1, var_nfs4_get_lease_time_prepare_211_p1); goto ldv_53532; case 23: ldv_handler_precall(); nfs4_get_lease_time_done(var_group1, var_nfs4_get_lease_time_done_212_p1); goto ldv_53532; case 24: ldv_handler_precall(); nfs41_sequence_call_done(var_group1, var_nfs41_sequence_call_done_223_p1); goto ldv_53532; case 25: ldv_handler_precall(); nfs41_sequence_prepare(var_group1, var_nfs41_sequence_prepare_224_p1); goto ldv_53532; case 26: ldv_handler_precall(); nfs41_sequence_release(var_nfs41_sequence_release_221_p0); goto ldv_53532; case 27: ldv_handler_precall(); nfs4_reclaim_complete_prepare(var_group1, var_nfs4_reclaim_complete_prepare_228_p1); goto ldv_53532; case 28: ldv_handler_precall(); nfs4_reclaim_complete_done(var_group1, var_nfs4_reclaim_complete_done_230_p1); goto ldv_53532; case 29: ldv_handler_precall(); nfs4_free_reclaim_complete_data(var_nfs4_free_reclaim_complete_data_231_p0); goto ldv_53532; case 30: ldv_handler_precall(); nfs4_layoutget_prepare(var_group1, var_nfs4_layoutget_prepare_233_p1); goto ldv_53532; case 31: ldv_handler_precall(); nfs4_layoutget_done(var_group1, var_nfs4_layoutget_done_234_p1); goto ldv_53532; case 32: ldv_handler_precall(); nfs4_layoutget_release(var_nfs4_layoutget_release_237_p0); goto ldv_53532; case 33: ldv_handler_precall(); nfs4_layoutreturn_prepare(var_group1, var_nfs4_layoutreturn_prepare_239_p1); goto ldv_53532; case 34: ldv_handler_precall(); nfs4_layoutreturn_done(var_group1, var_nfs4_layoutreturn_done_240_p1); goto ldv_53532; case 35: ldv_handler_precall(); nfs4_layoutreturn_release(var_nfs4_layoutreturn_release_241_p0); goto ldv_53532; case 36: ldv_handler_precall(); nfs4_layoutcommit_prepare(var_group1, var_nfs4_layoutcommit_prepare_247_p1); goto ldv_53532; case 37: ldv_handler_precall(); nfs4_layoutcommit_done(var_group1, var_nfs4_layoutcommit_done_248_p1); goto ldv_53532; case 38: ldv_handler_precall(); nfs4_layoutcommit_release(var_nfs4_layoutcommit_release_249_p0); goto ldv_53532; case 39: ldv_handler_precall(); nfs4_open_reclaim(var_group2, var_group3); goto ldv_53532; case 40: ldv_handler_precall(); nfs4_lock_reclaim(var_group3, var_group4); goto ldv_53532; case 41: ldv_handler_precall(); nfs4_open_reclaim(var_group2, var_group3); goto ldv_53532; case 42: ldv_handler_precall(); nfs4_lock_reclaim(var_group3, var_group4); goto ldv_53532; case 43: ldv_handler_precall(); nfs41_proc_reclaim_complete(var_group5); goto ldv_53532; case 44: ldv_handler_precall(); nfs4_open_expired(var_group2, var_group3); goto ldv_53532; case 45: ldv_handler_precall(); nfs4_lock_expired(var_group3, var_group4); goto ldv_53532; case 46: 
ldv_handler_precall(); nfs41_open_expired(var_group2, var_group3); goto ldv_53532; case 47: ldv_handler_precall(); nfs41_lock_expired(var_group3, var_group4); goto ldv_53532; case 48: ldv_handler_precall(); nfs4_proc_async_renew(var_group5, var_group6, var_nfs4_proc_async_renew_146_p2); goto ldv_53532; case 49: ldv_handler_precall(); nfs4_proc_renew(var_group5, var_group6); goto ldv_53532; case 50: ldv_handler_precall(); nfs41_proc_async_sequence(var_group5, var_group6, var_nfs41_proc_async_sequence_226_p2); goto ldv_53532; case 51: ldv_handler_precall(); nfs4_proc_sequence(var_group5, var_group6); goto ldv_53532; case 52: ldv_handler_precall(); _nfs4_call_sync(var_group7, var_group8, var__nfs4_call_sync_19_p2, var__nfs4_call_sync_19_p3, var__nfs4_call_sync_19_p4); goto ldv_53532; case 53: ldv_handler_precall(); nfs4_match_stateid(var_nfs4_match_stateid_259_p0, var_nfs4_match_stateid_259_p1); goto ldv_53532; case 54: ldv_handler_precall(); nfs4_find_root_sec(var_group8, var_group9, var_nfs4_find_root_sec_83_p2); goto ldv_53532; case 55: ldv_handler_precall(); nfs4_call_sync_sequence(var_group7, var_group8, var_nfs4_call_sync_sequence_15_p2, var_nfs4_call_sync_sequence_15_p3, var_nfs4_call_sync_sequence_15_p4); goto ldv_53532; case 56: ldv_handler_precall(); nfs41_match_stateid(var_nfs41_match_stateid_258_p0, var_nfs41_match_stateid_258_p1); goto ldv_53532; case 57: ldv_handler_precall(); nfs41_find_root_sec(var_group8, var_group9, var_nfs41_find_root_sec_253_p2); goto ldv_53532; case 58: ; if (ldv_s_nfs_v4_clientops_nfs_rpc_ops == 0) { ldv_handler_precall(); nfs4_proc_remove(var_group12, var_group13); ldv_s_nfs_v4_clientops_nfs_rpc_ops = 0; } else { } goto ldv_53532; case 59: ldv_handler_precall(); nfs4_proc_get_root(var_group8, var_group9, var_nfs4_proc_get_root_85_p2); goto ldv_53532; case 60: ldv_handler_precall(); nfs4_proc_getattr(var_group8, var_group9, var_nfs4_proc_getattr_88_p2); goto ldv_53532; case 61: ldv_handler_precall(); nfs4_proc_setattr(var_group10, var_group11, var_nfs4_proc_setattr_89_p2); goto ldv_53532; case 62: ldv_handler_precall(); nfs4_proc_lookup(var_group12, var_group13, var_nfs4_proc_lookup_93_p2, var_nfs4_proc_lookup_93_p3); goto ldv_53532; case 63: ldv_handler_precall(); nfs4_proc_access(var_group12, var_group14); goto ldv_53532; case 64: ldv_handler_precall(); nfs4_proc_readlink(var_group12, var_group15, var_nfs4_proc_readlink_98_p2, var_nfs4_proc_readlink_98_p3); goto ldv_53532; case 65: ldv_handler_precall(); nfs4_proc_create(var_group12, var_group10, var_nfs4_proc_create_99_p2, var_nfs4_proc_create_99_p3); goto ldv_53532; case 66: ldv_handler_precall(); nfs4_proc_unlink_setup(var_group16, var_group12); goto ldv_53532; case 67: ldv_handler_precall(); nfs4_proc_unlink_rpc_prepare(var_group1, var_group17); goto ldv_53532; case 68: ldv_handler_precall(); nfs4_proc_unlink_done(var_group1, var_group12); goto ldv_53532; case 69: ldv_handler_precall(); nfs4_proc_rename(var_group12, var_group13, var_nfs4_proc_rename_109_p2, var_nfs4_proc_rename_109_p3); goto ldv_53532; case 70: ldv_handler_precall(); nfs4_proc_rename_setup(var_group16, var_group12); goto ldv_53532; case 71: ldv_handler_precall(); nfs4_proc_rename_rpc_prepare(var_group1, var_group18); goto ldv_53532; case 72: ldv_handler_precall(); nfs4_proc_rename_done(var_group1, var_group12, var_nfs4_proc_rename_done_107_p2); goto ldv_53532; case 73: ldv_handler_precall(); nfs4_proc_link(var_group12, var_group12, var_nfs4_proc_link_111_p2); goto ldv_53532; case 74: ldv_handler_precall(); 
nfs4_proc_symlink(var_group12, var_group10, var_nfs4_proc_symlink_116_p2, var_nfs4_proc_symlink_116_p3, var_nfs4_proc_symlink_116_p4); goto ldv_53532; case 75: ldv_handler_precall(); nfs4_proc_mkdir(var_group12, var_group10, var_nfs4_proc_mkdir_118_p2); goto ldv_53532; case 76: ldv_handler_precall(); nfs4_proc_readdir(var_group10, var_group6, var_nfs4_proc_readdir_120_p2, var_nfs4_proc_readdir_120_p3, var_nfs4_proc_readdir_120_p4, var_nfs4_proc_readdir_120_p5); goto ldv_53532; case 77: ldv_handler_precall(); nfs4_proc_mknod(var_group12, var_group10, var_nfs4_proc_mknod_122_p2, var_nfs4_proc_mknod_122_p3); goto ldv_53532; case 78: ldv_handler_precall(); nfs4_proc_statfs(var_group8, var_group9, var_nfs4_proc_statfs_124_p2); goto ldv_53532; case 79: ldv_handler_precall(); nfs4_proc_fsinfo(var_group8, var_group9, var_nfs4_proc_fsinfo_127_p2); goto ldv_53532; case 80: ldv_handler_precall(); nfs4_proc_pathconf(var_group8, var_group9, var_nfs4_proc_pathconf_129_p2); goto ldv_53532; case 81: ldv_handler_precall(); nfs4_server_capabilities(var_group8, var_group9); goto ldv_53532; case 82: ldv_handler_precall(); nfs4_proc_read_setup(var_group19, var_group16); goto ldv_53532; case 83: ldv_handler_precall(); nfs4_proc_read_rpc_prepare(var_group1, var_group19); goto ldv_53532; case 84: ldv_handler_precall(); nfs4_read_done(var_group1, var_group19); goto ldv_53532; case 85: ldv_handler_precall(); nfs4_proc_write_setup(var_group20, var_group16); goto ldv_53532; case 86: ldv_handler_precall(); nfs4_proc_write_rpc_prepare(var_group1, var_group20); goto ldv_53532; case 87: ldv_handler_precall(); nfs4_write_done(var_group1, var_group20); goto ldv_53532; case 88: ldv_handler_precall(); nfs4_proc_commit_setup(var_group21, var_group16); goto ldv_53532; case 89: ldv_handler_precall(); nfs4_proc_commit_rpc_prepare(var_group1, var_group21); goto ldv_53532; case 90: ldv_handler_precall(); nfs4_commit_done(var_group1, var_group21); goto ldv_53532; case 91: ldv_handler_precall(); nfs4_proc_lock(var_group22, var_nfs4_proc_lock_192_p1, var_nfs4_proc_lock_192_p2); goto ldv_53532; case 92: ldv_handler_precall(); nfs4_zap_acl_attr(var_group12); goto ldv_53532; case 93: ldv_handler_precall(); nfs4_close_context(var_group23, var_nfs4_close_context_77_p1); goto ldv_53532; case 94: ldv_handler_precall(); nfs4_atomic_open(var_group12, var_group23, var_nfs4_atomic_open_76_p2, var_nfs4_atomic_open_76_p3); goto ldv_53532; case 95: ldv_handler_precall(); nfs4_xattr_list_nfs4_acl(var_group10, var_nfs4_xattr_list_nfs4_acl_198_p1, var_nfs4_xattr_list_nfs4_acl_198_p2, var_nfs4_xattr_list_nfs4_acl_198_p3, var_nfs4_xattr_list_nfs4_acl_198_p4, var_nfs4_xattr_list_nfs4_acl_198_p5); goto ldv_53532; case 96: ldv_handler_precall(); nfs4_xattr_get_nfs4_acl(var_group10, var_nfs4_xattr_get_nfs4_acl_197_p1, var_nfs4_xattr_get_nfs4_acl_197_p2, var_nfs4_xattr_get_nfs4_acl_197_p3, var_nfs4_xattr_get_nfs4_acl_197_p4); goto ldv_53532; case 97: ldv_handler_precall(); nfs4_xattr_set_nfs4_acl(var_group10, var_nfs4_xattr_set_nfs4_acl_196_p1, var_nfs4_xattr_set_nfs4_acl_196_p2, var_nfs4_xattr_set_nfs4_acl_196_p3, var_nfs4_xattr_set_nfs4_acl_196_p4, var_nfs4_xattr_set_nfs4_acl_196_p5); goto ldv_53532; default: ; goto ldv_53532; } ldv_53532: ; ldv_53632: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0 || ldv_s_nfs_v4_clientops_nfs_rpc_ops != 0) { goto ldv_53631; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_1(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void 
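/* ldv_mutex_lock_N()/ldv_mutex_unlock_N()/ldv_mutex_trylock_N(): LDV
   instrumentation wrappers that mirror every mutex call into the verifier's
   per-lock state model (ldv_mutex_*_lock, ldv_mutex_*_mutex,
   ldv_mutex_*_cred_guard_mutex) so ldv_check_final_state() can detect
   unbalanced locking; note that ldv_mutex_trylock_4() returns the model's
   answer and its second return statement is unreachable. */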
ldv_mutex_unlock_2(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_3(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_4(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_5(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_6(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_7(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } __inline static __u32 __swab32p(__u32 const *p ) { __u32 tmp ; { tmp = __fswab32(*p); return (tmp); } } __inline static __u64 __swab64p(__u64 const *p ) { __u64 tmp ; { tmp = __fswab64(*p); return (tmp); } } __inline static __u64 __be64_to_cpup(__be64 const *p ) { __u64 tmp ; { tmp = __swab64p(p); return (tmp); } } __inline static __u32 __be32_to_cpup(__be32 const *p ) { __u32 tmp ; { tmp = __swab32p(p); return (tmp); } } extern int snprintf(char * , size_t , char const * , ...) ; extern char *strcpy(char * , char const * ) ; extern void *kmemdup(void const * , size_t , gfp_t ) ; int ldv_mutex_trylock_20(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_16(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_18(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_21(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_15(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_17(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_19(struct mutex *ldv_func_arg1 ) ; __inline static struct new_utsname *utsname(void) { struct task_struct *tmp ; { tmp = get_current(); return (& ((tmp->nsproxy)->uts_ns)->name); } } __inline static u64 get_unaligned_be64(void const *p ) { __u64 tmp ; { tmp = __be64_to_cpup((__be64 const *)p); return (tmp); } } extern __be32 *xdr_encode_opaque_fixed(__be32 * , void const * , unsigned int ) ; extern __be32 *xdr_encode_opaque(__be32 * , void const * , unsigned int ) ; extern void xdr_inline_pages(struct xdr_buf * , unsigned int , struct page ** , unsigned int , unsigned int ) ; extern void xdr_terminate_string(struct xdr_buf * , u32 const ) ; __inline static __be32 *xdr_decode_hyper(__be32 *p , __u64 *valp ) { { *valp = get_unaligned_be64((void const *)p); return (p + 2UL); } } __inline static __be32 *xdr_decode_opaque_fixed(__be32 *p , void *ptr , unsigned int len ) { size_t __len ; void *__ret ; { __len = (size_t )len; __ret = memcpy(ptr, (void const *)p, __len); return (p + (unsigned long )((len + 3U) >> 2)); } } extern __be32 *xdr_reserve_space(struct xdr_stream * , size_t ) ; extern void xdr_write_pages(struct xdr_stream * , struct page ** , unsigned int , unsigned int ) ; extern unsigned int xdr_stream_pos(struct xdr_stream const * ) ; extern void xdr_set_scratch_buffer(struct xdr_stream * , void * , size_t ) ; extern __be32 *xdr_inline_decode(struct xdr_stream * , size_t ) ; extern unsigned int 
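/* This part of the file corresponds to the XDR encode/decode layer
   (fs/nfs/nfs4xdr.c according to the embedded BUG-table strings): byte-swap
   helpers, xdr_stream primitives and the id-mapper/seqid declarations used by
   the compound encoders below. */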
xdr_read_pages(struct xdr_stream * , unsigned int ) ; extern void xdr_enter_page(struct xdr_stream * , unsigned int ) ; struct rpc_version const nfs_version4 ; int nfs_map_name_to_uid(struct nfs_server const *server , char const *name , size_t namelen , __u32 *uid ) ; int nfs_map_group_to_gid(struct nfs_server const *server , char const *name , size_t namelen , __u32 *gid ) ; int nfs_map_uid_to_name(struct nfs_server const *server , __u32 uid , char *buf , size_t buflen ) ; int nfs_map_gid_to_group(struct nfs_server const *server , __u32 gid , char *buf , size_t buflen ) ; void nfs_increment_open_seqid(int status , struct nfs_seqid *seqid ) ; void nfs_increment_lock_seqid(int status , struct nfs_seqid *seqid ) ; extern unsigned short send_implementation_id ; unsigned int const nfs41_maxread_overhead ; unsigned int const nfs41_maxwrite_overhead ; __inline static unsigned char nfs_umode_to_dtype(umode_t mode ) { { return ((unsigned int )((unsigned char )((int )mode >> 12)) & 15U); } } __inline static int nfs4_has_session(struct nfs_client const *clp ) { { if ((unsigned long )clp->cl_session != (unsigned long )((struct nfs4_session */* const */)0)) { return (1); } else { } return (0); } } __inline static int nfs4_has_persistent_session(struct nfs_client const *clp ) { int tmp ; { tmp = nfs4_has_session(clp); if (tmp != 0) { return ((int )(clp->cl_session)->flags & 1); } else { } return (0); } } __inline static void *net_generic___0(struct net const *net , int id ) { struct net_generic *ng ; void *ptr ; struct net_generic *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { rcu_read_lock(); _________p1 = *((struct net_generic * const volatile *)(& net->gen)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("include/net/netns/generic.h", 40, "suspicious rcu_dereference_check() usage"); } else { } } else { } ng = _________p1; tmp___1 = ldv__builtin_expect(id == 0, 0L); if (tmp___1 != 0L) { goto _L; } else { tmp___2 = ldv__builtin_expect((unsigned int )id > ng->len, 0L); if (tmp___2 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (41), "i" (12UL)); ldv_49403: ; goto ldv_49403; } else { } } ptr = ng->ptr[id + -1]; rcu_read_unlock(); tmp___3 = ldv__builtin_expect((unsigned long )ptr == (unsigned long )((void *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (45), "i" (12UL)); ldv_49404: ; goto ldv_49404; } else { } return (ptr); } } static int nfs4_stat_to_errno(int stat ) ; unsigned int const nfs41_maxwrite_overhead = 1044U; unsigned int const nfs41_maxread_overhead = 904U; static umode_t const nfs_type2fmt[10U] = { 0U, 32768U, 16384U, 24576U, 8192U, 40960U, 49152U, 4096U, 0U, 0U}; static __be32 *reserve_space(struct xdr_stream *xdr , size_t nbytes ) { __be32 *p ; __be32 *tmp ; long tmp___0 ; { tmp = xdr_reserve_space(xdr, nbytes); p = tmp; tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4xdr.c.prepared"), "i" (952), "i" (12UL)); ldv_49443: ; goto ldv_49443; } else { } return (p); } } static void encode_opaque_fixed(struct xdr_stream *xdr , void const *buf , size_t len ) { __be32 *p ; { p = xdr_reserve_space(xdr, len); xdr_encode_opaque_fixed(p, buf, (unsigned int )len); return; } } static void encode_string(struct xdr_stream *xdr , unsigned int len , char const *str ) { __be32 *p ; { p = reserve_space(xdr, (size_t )(len + 4U)); xdr_encode_opaque(p, (void const *)str, len); return; } } static void encode_uint32(struct xdr_stream *xdr , u32 n ) { __be32 *p ; __u32 tmp ; { p = reserve_space(xdr, 4UL); tmp = __fswab32(n); *p = tmp; return; } } static void encode_uint64(struct xdr_stream *xdr , u64 n ) { __be32 *p ; { p = reserve_space(xdr, 8UL); xdr_encode_hyper(p, n); return; } } static void encode_nfs4_seqid(struct xdr_stream *xdr , struct nfs_seqid const *seqid ) { { encode_uint32(xdr, (seqid->sequence)->counter); return; } } static void encode_compound_hdr(struct xdr_stream *xdr , struct rpc_rqst *req , struct compound_hdr *hdr ) { __be32 *p ; struct rpc_auth *auth ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; { auth = (req->rq_cred)->cr_auth; hdr->replen = (auth->au_rslack + hdr->taglen) + 7U; __ret_warn_once = hdr->taglen != 0U; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
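/* reserve_space()/encode_compound_hdr()/encode_nops(): the original BUG_ON()
   and WARN_ON_ONCE() macros appear here as expanded ud2/ldv__builtin_expect
   sequences; reserve_space() traps if xdr_reserve_space() returns NULL, and
   the compound header saves nops_p so encode_nops() can back-patch the final
   operation count. */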
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4xdr.c.prepared", 1006); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); encode_string(xdr, hdr->taglen, (char const *)hdr->tag); p = reserve_space(xdr, 8UL); tmp___2 = p; p = p + 1; tmp___3 = __fswab32(hdr->minorversion); *tmp___2 = tmp___3; hdr->nops_p = p; tmp___4 = __fswab32(hdr->nops); *p = tmp___4; return; } } static void encode_op_hdr(struct xdr_stream *xdr , enum nfs_opnum4 op , uint32_t replen , struct compound_hdr *hdr ) { { encode_uint32(xdr, (u32 )op); hdr->nops = hdr->nops + 1U; hdr->replen = hdr->replen + replen; return; } } static void encode_nops(struct compound_hdr *hdr ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; __u32 tmp___2 ; { __ret_warn_once = hdr->nops > 8U; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4xdr.c.prepared", 1025); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); tmp___2 = __fswab32(hdr->nops); *(hdr->nops_p) = tmp___2; return; } } static void encode_nfs4_stateid(struct xdr_stream *xdr , nfs4_stateid const *stateid ) { { encode_opaque_fixed(xdr, (void const *)stateid, 16UL); return; } } static void encode_nfs4_verifier(struct xdr_stream *xdr , nfs4_verifier const *verf ) { { encode_opaque_fixed(xdr, (void const *)(& verf->data), 8UL); return; } } static void encode_attrs(struct xdr_stream *xdr , struct iattr const *iap , struct nfs_server const *server ) { char owner_name[128U] ; char owner_group[128U] ; int owner_namelen ; int owner_grouplen ; __be32 *p ; __be32 *q ; int len ; uint32_t bmval0 ; uint32_t bmval1 ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; __be32 *tmp___4 ; __be32 *tmp___5 ; __be32 *tmp___6 ; __u32 tmp___7 ; __be32 *tmp___8 ; __u32 tmp___9 ; __be32 *tmp___10 ; __be32 *tmp___11 ; __be32 *tmp___12 ; __be32 *tmp___13 ; __u32 tmp___14 ; __be32 *tmp___15 ; __u32 tmp___16 ; __be32 *tmp___17 ; __be32 *tmp___18 ; __u32 tmp___19 ; __be32 *tmp___20 ; __u32 tmp___21 ; __u32 tmp___22 ; { owner_namelen = 0; owner_grouplen = 0; bmval0 = 0U; bmval1 = 0U; len = 16; if (((unsigned int )iap->ia_valid & 8U) != 0U) { len = len + 8; } else { } if ((int )iap->ia_valid & 1) { len = len + 4; } else { } if (((unsigned int )iap->ia_valid & 2U) != 0U) { owner_namelen = nfs_map_uid_to_name(server, iap->ia_uid, (char *)(& owner_name), 128UL); if (owner_namelen < 0) { tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001dnfs: couldn\'t resolve uid %d to string\n", iap->ia_uid); } else { } strcpy((char *)(& owner_name), "nobody"); owner_namelen = 6; } else { } len = (((owner_namelen 
+ 3) & -4) + 4) + len; } else { } if (((unsigned int )iap->ia_valid & 4U) != 0U) { owner_grouplen = nfs_map_gid_to_group(server, iap->ia_gid, (char *)(& owner_group), 128UL); if (owner_grouplen < 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs: couldn\'t resolve gid %d to string\n", iap->ia_gid); } else { } strcpy((char *)(& owner_group), "nobody"); owner_grouplen = 6; } else { } len = (((owner_grouplen + 3) & -4) + 4) + len; } else { } if (((unsigned int )iap->ia_valid & 128U) != 0U) { len = len + 16; } else if (((unsigned int )iap->ia_valid & 16U) != 0U) { len = len + 4; } else { } if (((unsigned int )iap->ia_valid & 256U) != 0U) { len = len + 16; } else if (((unsigned int )iap->ia_valid & 32U) != 0U) { len = len + 4; } else { } p = reserve_space(xdr, (size_t )len); tmp___1 = p; p = p + 1; *tmp___1 = 33554432U; q = p; p = p + 3UL; if (((unsigned int )iap->ia_valid & 8U) != 0U) { bmval0 = bmval0 | 16U; p = xdr_encode_hyper(p, (__u64 )iap->ia_size); } else { } if ((int )iap->ia_valid & 1) { bmval1 = bmval1 | 2U; tmp___2 = p; p = p + 1; tmp___3 = __fswab32((__u32 )iap->ia_mode & 4095U); *tmp___2 = tmp___3; } else { } if (((unsigned int )iap->ia_valid & 2U) != 0U) { bmval1 = bmval1 | 16U; p = xdr_encode_opaque(p, (void const *)(& owner_name), (unsigned int )owner_namelen); } else { } if (((unsigned int )iap->ia_valid & 4U) != 0U) { bmval1 = bmval1 | 32U; p = xdr_encode_opaque(p, (void const *)(& owner_group), (unsigned int )owner_grouplen); } else { } if (((unsigned int )iap->ia_valid & 128U) != 0U) { bmval1 = bmval1 | 65536U; tmp___4 = p; p = p + 1; *tmp___4 = 16777216U; tmp___5 = p; p = p + 1; *tmp___5 = 0U; tmp___6 = p; p = p + 1; tmp___7 = __fswab32((__u32 )iap->ia_atime.tv_sec); *tmp___6 = tmp___7; tmp___8 = p; p = p + 1; tmp___9 = __fswab32((__u32 )iap->ia_atime.tv_nsec); *tmp___8 = tmp___9; } else if (((unsigned int )iap->ia_valid & 16U) != 0U) { bmval1 = bmval1 | 65536U; tmp___10 = p; p = p + 1; *tmp___10 = 0U; } else { } if (((unsigned int )iap->ia_valid & 256U) != 0U) { bmval1 = bmval1 | 4194304U; tmp___11 = p; p = p + 1; *tmp___11 = 16777216U; tmp___12 = p; p = p + 1; *tmp___12 = 0U; tmp___13 = p; p = p + 1; tmp___14 = __fswab32((__u32 )iap->ia_mtime.tv_sec); *tmp___13 = tmp___14; tmp___15 = p; p = p + 1; tmp___16 = __fswab32((__u32 )iap->ia_mtime.tv_nsec); *tmp___15 = tmp___16; } else if (((unsigned int )iap->ia_valid & 32U) != 0U) { bmval1 = bmval1 | 4194304U; tmp___17 = p; p = p + 1; *tmp___17 = 0U; } else { } if ((long )len != ((long )p - (long )q) + 4L) { printk("\vNFS: Attr length error, %u != %Zu\n", len, ((long )p - (long )q) + 4L); __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4xdr.c.prepared"), "i" (1151), "i" (12UL)); ldv_49518: ; goto ldv_49518; } else { } len = (int )(((unsigned int )((long )p) - (unsigned int )((long )q)) + 4294967284U); tmp___18 = q; q = q + 1; tmp___19 = __fswab32(bmval0); *tmp___18 = tmp___19; tmp___20 = q; q = q + 1; tmp___21 = __fswab32(bmval1); *tmp___20 = tmp___21; tmp___22 = __fswab32((__u32 )len); *q = tmp___22; return; } } static void encode_access(struct xdr_stream *xdr , u32 access , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 3, 4U, 
hdr); encode_uint32(xdr, access); return; } } static void encode_close(struct xdr_stream *xdr , struct nfs_closeargs const *arg , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 4, 6U, hdr); encode_nfs4_seqid(xdr, (struct nfs_seqid const *)arg->seqid); encode_nfs4_stateid(xdr, (nfs4_stateid const *)arg->stateid); return; } } static void encode_commit(struct xdr_stream *xdr , struct nfs_commitargs const *args , struct compound_hdr *hdr ) { __be32 *p ; __u32 tmp ; { encode_op_hdr(xdr, 5, 4U, hdr); p = reserve_space(xdr, 12UL); p = xdr_encode_hyper(p, args->offset); tmp = __fswab32(args->count); *p = tmp; return; } } static void encode_create(struct xdr_stream *xdr , struct nfs4_create_arg const *create , struct compound_hdr *hdr ) { __be32 *p ; __u32 tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; { encode_op_hdr(xdr, 6, 11U, hdr); encode_uint32(xdr, create->ftype); switch (create->ftype) { case 5U: p = reserve_space(xdr, 4UL); tmp = __fswab32(create->u.symlink.len); *p = tmp; xdr_write_pages(xdr, create->u.symlink.pages, 0U, create->u.symlink.len); goto ldv_49542; case 3U: ; case 4U: p = reserve_space(xdr, 8UL); tmp___0 = p; p = p + 1; tmp___1 = __fswab32(create->u.device.specdata1); *tmp___0 = tmp___1; tmp___2 = __fswab32(create->u.device.specdata2); *p = tmp___2; goto ldv_49542; default: ; goto ldv_49542; } ldv_49542: encode_string(xdr, (create->name)->ldv_17265.ldv_17263.len, (char const *)(create->name)->name); encode_attrs(xdr, create->attrs, create->server); return; } } static void encode_getattr_one(struct xdr_stream *xdr , uint32_t bitmap , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __u32 tmp___0 ; { encode_op_hdr(xdr, 9, 114U, hdr); p = reserve_space(xdr, 8UL); tmp = p; p = p + 1; *tmp = 16777216U; tmp___0 = __fswab32(bitmap); *p = tmp___0; return; } } static void encode_getattr_two(struct xdr_stream *xdr , uint32_t bm0 , uint32_t bm1 , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; { encode_op_hdr(xdr, 9, 114U, hdr); p = reserve_space(xdr, 12UL); tmp = p; p = p + 1; *tmp = 33554432U; tmp___0 = p; p = p + 1; tmp___1 = __fswab32(bm0); *tmp___0 = tmp___1; tmp___2 = __fswab32(bm1); *p = tmp___2; return; } } static void encode_getattr_three(struct xdr_stream *xdr , uint32_t bm0 , uint32_t bm1 , uint32_t bm2 , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __be32 *tmp___5 ; __be32 *tmp___6 ; __u32 tmp___7 ; __u32 tmp___8 ; __be32 *tmp___9 ; __u32 tmp___10 ; { encode_op_hdr(xdr, 9, 114U, hdr); if (bm2 != 0U) { p = reserve_space(xdr, 16UL); tmp = p; p = p + 1; *tmp = 50331648U; tmp___0 = p; p = p + 1; tmp___1 = __fswab32(bm0); *tmp___0 = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32(bm1); *tmp___2 = tmp___3; tmp___4 = __fswab32(bm2); *p = tmp___4; } else if (bm1 != 0U) { p = reserve_space(xdr, 12UL); tmp___5 = p; p = p + 1; *tmp___5 = 33554432U; tmp___6 = p; p = p + 1; tmp___7 = __fswab32(bm0); *tmp___6 = tmp___7; tmp___8 = __fswab32(bm1); *p = tmp___8; } else { p = reserve_space(xdr, 8UL); tmp___9 = p; p = p + 1; *tmp___9 = 16777216U; tmp___10 = __fswab32(bm0); *p = tmp___10; } return; } } static void encode_getfattr(struct xdr_stream *xdr , u32 const *bitmask , struct compound_hdr *hdr ) { { encode_getattr_two(xdr, (unsigned int )*bitmask & (unsigned int )nfs4_fattr_bitmap[0], (unsigned int )*(bitmask + 1UL) & (unsigned int )nfs4_fattr_bitmap[1], hdr); return; } } static void encode_getfattr_open(struct 
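/* encode_getattr_one/_two/_three(): emit a GETATTR attribute mask of one, two
   or three 32-bit words (the leading constants 0x01000000/0x02000000/0x03000000
   are the big-endian word counts); the getfattr/fsinfo/fs_locations wrappers
   below AND the caller's bitmask with the static nfs4_*_bitmap tables. */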
xdr_stream *xdr , u32 const *bitmask , u32 const *open_bitmap , struct compound_hdr *hdr ) { { encode_getattr_three(xdr, (unsigned int )*bitmask & (unsigned int )*open_bitmap, (unsigned int )*(bitmask + 1UL) & (unsigned int )*(open_bitmap + 1UL), (unsigned int )*(bitmask + 2UL) & (unsigned int )*(open_bitmap + 2UL), hdr); return; } } static void encode_fsinfo(struct xdr_stream *xdr , u32 const *bitmask , struct compound_hdr *hdr ) { { encode_getattr_three(xdr, (unsigned int )*bitmask & (unsigned int )nfs4_fsinfo_bitmap[0], (unsigned int )*(bitmask + 1UL) & (unsigned int )nfs4_fsinfo_bitmap[1], (unsigned int )*(bitmask + 2UL) & (unsigned int )nfs4_fsinfo_bitmap[2], hdr); return; } } static void encode_fs_locations(struct xdr_stream *xdr , u32 const *bitmask , struct compound_hdr *hdr ) { { encode_getattr_two(xdr, (unsigned int )*bitmask & (unsigned int )nfs4_fs_locations_bitmap[0], (unsigned int )*(bitmask + 1UL) & (unsigned int )nfs4_fs_locations_bitmap[1], hdr); return; } } static void encode_getfh(struct xdr_stream *xdr , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 10, 35U, hdr); return; } } static void encode_link(struct xdr_stream *xdr , struct qstr const *name , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 11, 7U, hdr); encode_string(xdr, name->ldv_17265.ldv_17263.len, (char const *)name->name); return; } } __inline static int nfs4_lock_type(struct file_lock *fl , int block ) { { if ((unsigned int )fl->fl_type == 0U) { return (block != 0 ? 3 : 1); } else { } return (block != 0 ? 4 : 2); } } __inline static uint64_t nfs4_lock_length(struct file_lock *fl ) { { if (fl->fl_end == 9223372036854775807LL) { return (0xffffffffffffffffULL); } else { } return ((uint64_t )((fl->fl_end - fl->fl_start) + 1LL)); } } static void encode_lockowner(struct xdr_stream *xdr , struct nfs_lowner const *lowner ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; { p = reserve_space(xdr, 32UL); p = xdr_encode_hyper(p, lowner->clientid); tmp = p; p = p + 1; *tmp = 335544320U; p = xdr_encode_opaque_fixed(p, (void const *)"lock id:", 8U); tmp___0 = p; p = p + 1; tmp___1 = __fswab32(lowner->s_dev); *tmp___0 = tmp___1; xdr_encode_hyper(p, lowner->id); return; } } static void encode_lock(struct xdr_stream *xdr , struct nfs_lock_args const *args , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; int tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; uint64_t tmp___4 ; __u32 tmp___5 ; { encode_op_hdr(xdr, 12, 43U, hdr); p = reserve_space(xdr, 28UL); tmp = p; p = p + 1; tmp___0 = nfs4_lock_type(args->fl, (int )args->block); tmp___1 = __fswab32((__u32 )tmp___0); *tmp = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32((__u32 )args->reclaim); *tmp___2 = tmp___3; p = xdr_encode_hyper(p, (__u64 )(args->fl)->fl_start); tmp___4 = nfs4_lock_length(args->fl); p = xdr_encode_hyper(p, tmp___4); tmp___5 = __fswab32((__u32 )args->new_lock_owner); *p = tmp___5; if ((unsigned int )*((unsigned char *)args + 88UL) != 0U) { encode_nfs4_seqid(xdr, (struct nfs_seqid const *)args->open_seqid); encode_nfs4_stateid(xdr, (nfs4_stateid const *)args->open_stateid); encode_nfs4_seqid(xdr, (struct nfs_seqid const *)args->lock_seqid); encode_lockowner(xdr, & args->lock_owner); } else { encode_nfs4_stateid(xdr, (nfs4_stateid const *)args->lock_stateid); encode_nfs4_seqid(xdr, (struct nfs_seqid const *)args->lock_seqid); } return; } } static void encode_lockt(struct xdr_stream *xdr , struct nfs_lockt_args const *args , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; int tmp___0 ; __u32 tmp___1 ; 
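/* The tmp___N temporaries here and below hold intermediate results of CIL's expansion of big-endian stores: each host-to-wire conversion (presumably cpu_to_be32() in the original nfs4xdr.c) becomes an explicit __fswab32() call followed by a store through a saved pointer. */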
uint64_t tmp___2 ; { encode_op_hdr(xdr, 13, 43U, hdr); p = reserve_space(xdr, 20UL); tmp = p; p = p + 1; tmp___0 = nfs4_lock_type(args->fl, 0); tmp___1 = __fswab32((__u32 )tmp___0); *tmp = tmp___1; p = xdr_encode_hyper(p, (__u64 )(args->fl)->fl_start); tmp___2 = nfs4_lock_length(args->fl); p = xdr_encode_hyper(p, tmp___2); encode_lockowner(xdr, & args->lock_owner); return; } } static void encode_locku(struct xdr_stream *xdr , struct nfs_locku_args const *args , struct compound_hdr *hdr ) { __be32 *p ; int tmp ; uint64_t tmp___0 ; { encode_op_hdr(xdr, 14, 6U, hdr); tmp = nfs4_lock_type(args->fl, 0); encode_uint32(xdr, (u32 )tmp); encode_nfs4_seqid(xdr, (struct nfs_seqid const *)args->seqid); encode_nfs4_stateid(xdr, (nfs4_stateid const *)args->stateid); p = reserve_space(xdr, 16UL); p = xdr_encode_hyper(p, (__u64 )(args->fl)->fl_start); tmp___0 = nfs4_lock_length(args->fl); xdr_encode_hyper(p, tmp___0); return; } } static void encode_release_lockowner(struct xdr_stream *xdr , struct nfs_lowner const *lowner , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 39, 2U, hdr); encode_lockowner(xdr, lowner); return; } } static void encode_lookup(struct xdr_stream *xdr , struct qstr const *name , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 15, 2U, hdr); encode_string(xdr, name->ldv_17265.ldv_17263.len, (char const *)name->name); return; } } static void encode_share_access(struct xdr_stream *xdr , fmode_t fmode ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; __be32 *tmp___1 ; __be32 *tmp___2 ; { p = reserve_space(xdr, 8UL); switch (fmode & 3U) { case 1U: tmp = p; p = p + 1; *tmp = 16777216U; goto ldv_49643; case 2U: tmp___0 = p; p = p + 1; *tmp___0 = 33554432U; goto ldv_49643; case 3U: tmp___1 = p; p = p + 1; *tmp___1 = 50331648U; goto ldv_49643; default: tmp___2 = p; p = p + 1; *tmp___2 = 0U; } ldv_49643: *p = 0U; return; } } __inline static void encode_openhdr(struct xdr_stream *xdr , struct nfs_openargs const *arg ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; { encode_nfs4_seqid(xdr, (struct nfs_seqid const *)arg->seqid); encode_share_access(xdr, arg->fmode); p = reserve_space(xdr, 36UL); p = xdr_encode_hyper(p, arg->clientid); tmp = p; p = p + 1; *tmp = 402653184U; p = xdr_encode_opaque_fixed(p, (void const *)"open id:", 8U); tmp___0 = p; p = p + 1; tmp___1 = __fswab32((arg->server)->s_dev); *tmp___0 = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32(arg->id.uniquifier); *tmp___2 = tmp___3; xdr_encode_hyper(p, arg->id.create_time); return; } } __inline static void encode_createmode(struct xdr_stream *xdr , struct nfs_openargs const *arg ) { __be32 *p ; struct nfs_client *clp ; struct iattr dummy ; int tmp ; { p = reserve_space(xdr, 4UL); switch ((int )arg->open_flags & 128) { case 0: *p = 0U; encode_attrs(xdr, (struct iattr const *)arg->u.ldv_45036.attrs, arg->server); goto ldv_49659; default: clp = (arg->server)->nfs_client; if ((unsigned int )(clp->cl_mvops)->minor_version != 0U) { tmp = nfs4_has_persistent_session((struct nfs_client const *)clp); if (tmp != 0) { *p = 16777216U; encode_attrs(xdr, (struct iattr const *)arg->u.ldv_45036.attrs, arg->server); } else { *p = 50331648U; encode_nfs4_verifier(xdr, & arg->u.ldv_45036.verifier); dummy.ia_valid = 0U; encode_attrs(xdr, (struct iattr const *)(& dummy), arg->server); } } else { *p = 33554432U; encode_nfs4_verifier(xdr, & arg->u.ldv_45036.verifier); } } ldv_49659: ; return; } } static void encode_opentype(struct xdr_stream *xdr , struct nfs_openargs const *arg ) { __be32 *p ; { p 
= reserve_space(xdr, 4UL); switch ((int )arg->open_flags & 64) { case 0: *p = 0U; goto ldv_49668; default: *p = 16777216U; encode_createmode(xdr, arg); } ldv_49668: ; return; } } __inline static void encode_delegation_type(struct xdr_stream *xdr , fmode_t delegation_type ) { __be32 *p ; { p = reserve_space(xdr, 4UL); switch (delegation_type) { case 0U: *p = 0U; goto ldv_49676; case 1U: *p = 16777216U; goto ldv_49676; case 3U: *p = 33554432U; goto ldv_49676; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4xdr.c.prepared"), "i" (1494), "i" (12UL)); ldv_49680: ; goto ldv_49680; } ldv_49676: ; return; } } __inline static void encode_claim_null(struct xdr_stream *xdr , struct qstr const *name ) { __be32 *p ; { p = reserve_space(xdr, 4UL); *p = 0U; encode_string(xdr, name->ldv_17265.ldv_17263.len, (char const *)name->name); return; } } __inline static void encode_claim_previous(struct xdr_stream *xdr , fmode_t type ) { __be32 *p ; { p = reserve_space(xdr, 4UL); *p = 16777216U; encode_delegation_type(xdr, type); return; } } __inline static void encode_claim_delegate_cur(struct xdr_stream *xdr , struct qstr const *name , nfs4_stateid const *stateid ) { __be32 *p ; { p = reserve_space(xdr, 4UL); *p = 33554432U; encode_nfs4_stateid(xdr, stateid); encode_string(xdr, name->ldv_17265.ldv_17263.len, (char const *)name->name); return; } } static void encode_open(struct xdr_stream *xdr , struct nfs_openargs const *arg , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 18, 58U, hdr); encode_openhdr(xdr, arg); encode_opentype(xdr, arg); switch (arg->claim) { case 0U: encode_claim_null(xdr, arg->name); goto ldv_49703; case 1U: encode_claim_previous(xdr, arg->u.delegation_type); goto ldv_49703; case 2U: encode_claim_delegate_cur(xdr, arg->name, & arg->u.delegation); goto ldv_49703; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4xdr.c.prepared"), "i" (1542), "i" (12UL)); ldv_49707: ; goto ldv_49707; } ldv_49703: ; return; } } static void encode_open_confirm(struct xdr_stream *xdr , struct nfs_open_confirmargs const *arg , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 20, 6U, hdr); encode_nfs4_stateid(xdr, (nfs4_stateid const *)arg->stateid); encode_nfs4_seqid(xdr, (struct nfs_seqid const *)arg->seqid); return; } } static void encode_open_downgrade(struct xdr_stream *xdr , struct nfs_closeargs const *arg , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 21, 6U, hdr); encode_nfs4_stateid(xdr, (nfs4_stateid const *)arg->stateid); encode_nfs4_seqid(xdr, (struct nfs_seqid const *)arg->seqid); encode_share_access(xdr, arg->fmode); return; } } static void encode_putfh(struct xdr_stream *xdr , struct nfs_fh const *fh , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 22, 2U, hdr); encode_string(xdr, (unsigned int )fh->size, (char const *)(& fh->data)); return; } } static void encode_putrootfh(struct 
xdr_stream *xdr , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 24, 2U, hdr); return; } } static void encode_open_stateid(struct xdr_stream *xdr , struct nfs_open_context const *ctx , struct nfs_lock_context const *l_ctx , fmode_t fmode , int zero_seqid ) { nfs4_stateid stateid ; struct nfs_lockowner const *lockowner ; { if ((unsigned long )ctx->state != (unsigned long )((struct nfs4_state */* const */)0)) { lockowner = 0; if ((unsigned long )l_ctx != (unsigned long )((struct nfs_lock_context const *)0)) { lockowner = & l_ctx->lockowner; } else { } nfs4_select_rw_stateid(& stateid, ctx->state, fmode, lockowner); if (zero_seqid != 0) { stateid.seqid = 0U; } else { } encode_nfs4_stateid(xdr, (nfs4_stateid const *)(& stateid)); } else { encode_nfs4_stateid(xdr, & zero_stateid); } return; } } static void encode_read(struct xdr_stream *xdr , struct nfs_readargs const *args , struct compound_hdr *hdr ) { __be32 *p ; __u32 tmp ; { encode_op_hdr(xdr, 25, 4U, hdr); encode_open_stateid(xdr, (struct nfs_open_context const *)args->context, (struct nfs_lock_context const *)args->lock_context, 1U, (int )hdr->minorversion); p = reserve_space(xdr, 12UL); p = xdr_encode_hyper(p, args->offset); tmp = __fswab32(args->count); *p = tmp; return; } } static void encode_readdir(struct xdr_stream *xdr , struct nfs4_readdir_arg const *readdir , struct rpc_rqst *req , struct compound_hdr *hdr ) { uint32_t attrs[2U] ; uint32_t dircount ; __be32 *p ; __be32 verf[2U] ; __be32 *tmp ; __u32 tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; __be32 *tmp___3 ; __be32 *tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; size_t __len ; void *__ret ; long tmp___7 ; { attrs[0] = 2048U; attrs[1] = 8388608U; dircount = readdir->count >> 1; if ((int )readdir->plus != 0) { attrs[0] = attrs[0] | 1573146U; attrs[1] = attrs[1] | 3187258U; dircount = dircount >> 1; } else { } if (((unsigned long )*(readdir->bitmask + 1UL) & 8388608UL) == 0UL) { attrs[0] = attrs[0] | 1048576U; } else { } encode_op_hdr(xdr, 26, 4U, hdr); encode_uint64(xdr, readdir->cookie); encode_nfs4_verifier(xdr, & readdir->verifier); p = reserve_space(xdr, 20UL); tmp = p; p = p + 1; tmp___0 = __fswab32(dircount); *tmp = tmp___0; tmp___1 = p; p = p + 1; tmp___2 = __fswab32(readdir->count); *tmp___1 = tmp___2; tmp___3 = p; p = p + 1; *tmp___3 = 33554432U; tmp___4 = p; p = p + 1; tmp___5 = __fswab32(attrs[0] & (uint32_t )*(readdir->bitmask)); *tmp___4 = tmp___5; tmp___6 = __fswab32(attrs[1] & (uint32_t )*(readdir->bitmask + 1UL)); *p = tmp___6; __len = 8UL; if (__len > 63UL) { __ret = memcpy((void *)(& verf), (void const *)(& readdir->verifier.data), __len); } else { __ret = memcpy((void *)(& verf), (void const *)(& readdir->verifier.data), __len); } tmp___7 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n", "encode_readdir", readdir->cookie, verf[0], verf[1], attrs[0] & (uint32_t )*(readdir->bitmask), attrs[1] & (uint32_t )*(readdir->bitmask + 1UL)); } else { } return; } } static void encode_readlink(struct xdr_stream *xdr , struct nfs4_readlink const *readlink , struct rpc_rqst *req , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 27, 3U, hdr); return; } } static void encode_remove(struct xdr_stream *xdr , struct qstr const *name , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 28, 7U, hdr); encode_string(xdr, name->ldv_17265.ldv_17263.len, (char const *)name->name); return; } } static void encode_rename(struct xdr_stream *xdr , struct qstr const *oldname , struct qstr const 
*newname , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 29, 12U, hdr); encode_string(xdr, oldname->ldv_17265.ldv_17263.len, (char const *)oldname->name); encode_string(xdr, newname->ldv_17265.ldv_17263.len, (char const *)newname->name); return; } } static void encode_renew(struct xdr_stream *xdr , clientid4 clid , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 30, 2U, hdr); encode_uint64(xdr, clid); return; } } static void encode_restorefh(struct xdr_stream *xdr , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 31, 2U, hdr); return; } } static void encode_setacl(struct xdr_stream *xdr , struct nfs_setaclargs *arg , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __u32 tmp___0 ; { encode_op_hdr(xdr, 34, 6U, hdr); encode_nfs4_stateid(xdr, & zero_stateid); p = reserve_space(xdr, 8UL); tmp = p; p = p + 1; *tmp = 16777216U; *p = 1048576U; p = reserve_space(xdr, 4UL); tmp___0 = __fswab32((__u32 )arg->acl_len); *p = tmp___0; xdr_write_pages(xdr, arg->acl_pages, arg->acl_pgbase, (unsigned int )arg->acl_len); return; } } static void encode_savefh(struct xdr_stream *xdr , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 32, 2U, hdr); return; } } static void encode_setattr(struct xdr_stream *xdr , struct nfs_setattrargs const *arg , struct nfs_server const *server , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 34, 6U, hdr); encode_nfs4_stateid(xdr, & arg->stateid); encode_attrs(xdr, (struct iattr const *)arg->iap, server); return; } } static void encode_setclientid(struct xdr_stream *xdr , struct nfs4_setclientid const *setclientid , struct compound_hdr *hdr ) { __be32 *p ; __u32 tmp ; __u32 tmp___0 ; { encode_op_hdr(xdr, 35, 1028U, hdr); encode_nfs4_verifier(xdr, setclientid->sc_verifier); encode_string(xdr, setclientid->sc_name_len, (char const *)(& setclientid->sc_name)); p = reserve_space(xdr, 4UL); tmp = __fswab32(setclientid->sc_prog); *p = tmp; encode_string(xdr, setclientid->sc_netid_len, (char const *)(& setclientid->sc_netid)); encode_string(xdr, setclientid->sc_uaddr_len, (char const *)(& setclientid->sc_uaddr)); p = reserve_space(xdr, 4UL); tmp___0 = __fswab32(setclientid->sc_cb_ident); *p = tmp___0; return; } } static void encode_setclientid_confirm(struct xdr_stream *xdr , struct nfs4_setclientid_res const *arg , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 36, 2U, hdr); encode_uint64(xdr, arg->clientid); encode_nfs4_verifier(xdr, & arg->confirm); return; } } static void encode_write(struct xdr_stream *xdr , struct nfs_writeargs const *args , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __u32 tmp___0 ; __u32 tmp___1 ; { encode_op_hdr(xdr, 38, 6U, hdr); encode_open_stateid(xdr, (struct nfs_open_context const *)args->context, (struct nfs_lock_context const *)args->lock_context, 2U, (int )hdr->minorversion); p = reserve_space(xdr, 16UL); p = xdr_encode_hyper(p, args->offset); tmp = p; p = p + 1; tmp___0 = __fswab32((__u32 )args->stable); *tmp = tmp___0; tmp___1 = __fswab32(args->count); *p = tmp___1; xdr_write_pages(xdr, args->pages, args->pgbase, args->count); return; } } static void encode_delegreturn(struct xdr_stream *xdr , nfs4_stateid const *stateid , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 8, 2U, hdr); encode_nfs4_stateid(xdr, stateid); return; } } static void encode_secinfo(struct xdr_stream *xdr , struct qstr const *name , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 33, 147U, hdr); encode_string(xdr, name->ldv_17265.ldv_17263.len, (char const *)name->name); return; } } static void encode_bind_conn_to_session(struct xdr_stream 
*xdr , struct nfs4_session *session , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; { encode_op_hdr(xdr, 41, 8U, hdr); encode_opaque_fixed(xdr, (void const *)(& session->sess_id.data), 16UL); p = xdr_reserve_space(xdr, 8UL); tmp = p; p = p + 1; *tmp = 117440512U; *p = 0U; return; } } static void encode_exchange_id(struct xdr_stream *xdr , struct nfs41_exchange_id_args *args , struct compound_hdr *hdr ) { __be32 *p ; char impl_name[268U] ; int len ; __be32 *tmp ; __u32 tmp___0 ; __be32 *tmp___1 ; struct new_utsname *tmp___2 ; struct new_utsname *tmp___3 ; struct new_utsname *tmp___4 ; struct new_utsname *tmp___5 ; { len = 0; encode_op_hdr(xdr, 42, 1041U, hdr); encode_nfs4_verifier(xdr, (nfs4_verifier const *)args->verifier); encode_string(xdr, args->id_len, (char const *)(& args->id)); p = reserve_space(xdr, 12UL); tmp = p; p = p + 1; tmp___0 = __fswab32(args->flags); *tmp = tmp___0; tmp___1 = p; p = p + 1; *tmp___1 = 0U; if ((unsigned int )send_implementation_id != 0U) { tmp___2 = utsname(); tmp___3 = utsname(); tmp___4 = utsname(); tmp___5 = utsname(); len = snprintf((char *)(& impl_name), 268UL, "%s %s %s %s", (char *)(& tmp___5->sysname), (char *)(& tmp___4->release), (char *)(& tmp___3->version), (char *)(& tmp___2->machine)); } else { } if (len > 0) { *p = 16777216U; encode_string(xdr, 10U, "kernel.org"); encode_string(xdr, (unsigned int )len, (char const *)(& impl_name)); p = reserve_space(xdr, 12UL); p = xdr_encode_hyper(p, 0ULL); *p = 0U; } else { *p = 0U; } return; } } static void encode_create_session(struct xdr_stream *xdr , struct nfs41_create_session_args *args , struct compound_hdr *hdr ) { __be32 *p ; char machine_name[64U] ; uint32_t len ; struct nfs_client *clp ; struct nfs_net *nn ; void *tmp ; u32 max_resp_sz_cached ; int tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; __be32 *tmp___3 ; __u32 tmp___4 ; __be32 *tmp___5 ; __be32 *tmp___6 ; __u32 tmp___7 ; __be32 *tmp___8 ; __u32 tmp___9 ; __be32 *tmp___10 ; __u32 tmp___11 ; __be32 *tmp___12 ; __u32 tmp___13 ; __be32 *tmp___14 ; __u32 tmp___15 ; __be32 *tmp___16 ; __be32 *tmp___17 ; __be32 *tmp___18 ; __u32 tmp___19 ; __be32 *tmp___20 ; __u32 tmp___21 ; __be32 *tmp___22 ; __u32 tmp___23 ; __be32 *tmp___24 ; __u32 tmp___25 ; __be32 *tmp___26 ; __u32 tmp___27 ; __be32 *tmp___28 ; __be32 *tmp___29 ; __u32 tmp___30 ; __be32 *tmp___31 ; __be32 *tmp___32 ; __be32 *tmp___33 ; __be32 *tmp___34 ; __be32 *tmp___35 ; { clp = args->client; tmp = net_generic___0((struct net const *)clp->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; max_resp_sz_cached = 2532U; tmp___0 = scnprintf((char *)(& machine_name), 64UL, "%s", (char *)(& clp->cl_ipaddr)); len = (uint32_t )tmp___0; encode_op_hdr(xdr, 43, 24U, hdr); p = reserve_space(xdr, (size_t )(len + 104U)); p = xdr_encode_hyper(p, clp->cl_clientid); tmp___1 = p; p = p + 1; tmp___2 = __fswab32(clp->cl_seqid); *tmp___1 = tmp___2; tmp___3 = p; p = p + 1; tmp___4 = __fswab32(args->flags); *tmp___3 = tmp___4; tmp___5 = p; p = p + 1; *tmp___5 = 0U; tmp___6 = p; p = p + 1; tmp___7 = __fswab32(args->fc_attrs.max_rqst_sz); *tmp___6 = tmp___7; tmp___8 = p; p = p + 1; tmp___9 = __fswab32(args->fc_attrs.max_resp_sz); *tmp___8 = tmp___9; tmp___10 = p; p = p + 1; tmp___11 = __fswab32(max_resp_sz_cached); *tmp___10 = tmp___11; tmp___12 = p; p = p + 1; tmp___13 = __fswab32(args->fc_attrs.max_ops); *tmp___12 = tmp___13; tmp___14 = p; p = p + 1; tmp___15 = __fswab32(args->fc_attrs.max_reqs); *tmp___14 = tmp___15; tmp___16 = p; p = p + 1; *tmp___16 = 0U; tmp___17 = p; p = p + 1; *tmp___17 = 0U; tmp___18 = 
p; p = p + 1; tmp___19 = __fswab32(args->bc_attrs.max_rqst_sz); *tmp___18 = tmp___19; tmp___20 = p; p = p + 1; tmp___21 = __fswab32(args->bc_attrs.max_resp_sz); *tmp___20 = tmp___21; tmp___22 = p; p = p + 1; tmp___23 = __fswab32(args->bc_attrs.max_resp_sz_cached); *tmp___22 = tmp___23; tmp___24 = p; p = p + 1; tmp___25 = __fswab32(args->bc_attrs.max_ops); *tmp___24 = tmp___25; tmp___26 = p; p = p + 1; tmp___27 = __fswab32(args->bc_attrs.max_reqs); *tmp___26 = tmp___27; tmp___28 = p; p = p + 1; *tmp___28 = 0U; tmp___29 = p; p = p + 1; tmp___30 = __fswab32(args->cb_program); *tmp___29 = tmp___30; tmp___31 = p; p = p + 1; *tmp___31 = 16777216U; tmp___32 = p; p = p + 1; *tmp___32 = 16777216U; tmp___33 = p; p = p + 1; *tmp___33 = (unsigned int )nn->boot_time.tv_nsec; p = xdr_encode_opaque(p, (void const *)(& machine_name), len); tmp___34 = p; p = p + 1; *tmp___34 = 0U; tmp___35 = p; p = p + 1; *tmp___35 = 0U; *p = 0U; return; } } static void encode_destroy_session(struct xdr_stream *xdr , struct nfs4_session *session , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 44, 2U, hdr); encode_opaque_fixed(xdr, (void const *)(& session->sess_id.data), 16UL); return; } } static void encode_destroy_clientid(struct xdr_stream *xdr , uint64_t clientid , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 57, 2U, hdr); encode_uint64(xdr, clientid); return; } } static void encode_reclaim_complete(struct xdr_stream *xdr , struct nfs41_reclaim_complete_args *args , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 58, 6U, hdr); encode_uint32(xdr, (u32 )args->one_fs); return; } } static void encode_sequence(struct xdr_stream *xdr , struct nfs4_sequence_args const *args , struct compound_hdr *hdr ) { struct nfs4_session *session ; struct nfs4_slot_table *tp ; struct nfs4_slot *slot ; __be32 *p ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; __be32 *tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; { slot = args->sa_slot; if ((unsigned long )slot == (unsigned long )((struct nfs4_slot *)0)) { return; } else { } tp = slot->table; session = tp->session; encode_op_hdr(xdr, 53, 11U, hdr); tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: sessionid=%u:%u:%u:%u seqid=%d slotid=%d max_slotid=%d cache_this=%d\n", "encode_sequence", *((u32 *)(& session->sess_id.data)), *((u32 *)(& session->sess_id.data) + 1UL), *((u32 *)(& session->sess_id.data) + 2UL), *((u32 *)(& session->sess_id.data) + 3UL), slot->seq_nr, slot->slot_nr, tp->highest_used_slotid, (int )args->sa_cache_this); } else { } p = reserve_space(xdr, 32UL); p = xdr_encode_opaque_fixed(p, (void const *)(& session->sess_id.data), 16U); tmp___0 = p; p = p + 1; tmp___1 = __fswab32(slot->seq_nr); *tmp___0 = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32(slot->slot_nr); *tmp___2 = tmp___3; tmp___4 = p; p = p + 1; tmp___5 = __fswab32(tp->highest_used_slotid); *tmp___4 = tmp___5; tmp___6 = __fswab32((__u32 )args->sa_cache_this); *p = tmp___6; return; } } static void encode_getdevicelist(struct xdr_stream *xdr , struct nfs4_getdevicelist_args const *args , struct compound_hdr *hdr ) { __be32 *p ; nfs4_verifier dummy ; __be32 *tmp ; __u32 tmp___0 ; __be32 *tmp___1 ; { dummy.data[0] = 'd'; dummy.data[1] = 'u'; dummy.data[2] = 'm'; dummy.data[3] = 'm'; dummy.data[4] = 'm'; dummy.data[5] = 'm'; dummy.data[6] = 'm'; dummy.data[7] = 'y'; encode_op_hdr(xdr, 48, 72U, hdr); p = reserve_space(xdr, 16UL); tmp = p; p = p + 1; tmp___0 = __fswab32(args->layoutclass); *tmp = tmp___0; tmp___1 = p; p = p + 1; *tmp___1 
= 268435456U; xdr_encode_hyper(p, 0ULL); encode_nfs4_verifier(xdr, (nfs4_verifier const *)(& dummy)); return; } } static void encode_getdeviceinfo(struct xdr_stream *xdr , struct nfs4_getdeviceinfo_args const *args , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __u32 tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; __be32 *tmp___3 ; { encode_op_hdr(xdr, 47, 6U, hdr); p = reserve_space(xdr, 28UL); p = xdr_encode_opaque_fixed(p, (void const *)(& (args->pdev)->dev_id.data), 16U); tmp = p; p = p + 1; tmp___0 = __fswab32((args->pdev)->layout_type); *tmp = tmp___0; tmp___1 = p; p = p + 1; tmp___2 = __fswab32((args->pdev)->pglen); *tmp___1 = tmp___2; tmp___3 = p; p = p + 1; *tmp___3 = 0U; return; } } static void encode_layoutget(struct xdr_stream *xdr , struct nfs4_layoutget_args const *args , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; long tmp___4 ; { encode_op_hdr(xdr, 50, 1038U, hdr); p = reserve_space(xdr, 36UL); tmp = p; p = p + 1; *tmp = 0U; tmp___0 = p; p = p + 1; tmp___1 = __fswab32(args->type); *tmp___0 = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32(args->range.iomode); *tmp___2 = tmp___3; p = xdr_encode_hyper(p, args->range.offset); p = xdr_encode_hyper(p, args->range.length); p = xdr_encode_hyper(p, args->minlength); encode_nfs4_stateid(xdr, & args->stateid); encode_uint32(xdr, args->maxcount); tmp___4 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s: 1st type:0x%x iomode:%d off:%lu len:%lu mc:%d\n", "encode_layoutget", args->type, args->range.iomode, (unsigned long )args->range.offset, (unsigned long )args->range.length, args->maxcount); } else { } return; } } static int encode_layoutcommit(struct xdr_stream *xdr , struct inode *inode , struct nfs4_layoutcommit_args const *args , struct compound_hdr *hdr ) { __be32 *p ; struct nfs_server *tmp ; long tmp___0 ; __be32 *tmp___1 ; __be32 *tmp___2 ; __be32 *tmp___3 ; struct nfs_server *tmp___4 ; __u32 tmp___5 ; struct nfs_server *tmp___6 ; struct nfs_inode *tmp___7 ; struct nfs_server *tmp___8 ; { tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { tmp = NFS_SERVER((struct inode const *)args->inode); printk("\001d%s: lbw: %llu type: %d\n", "encode_layoutcommit", args->lastbytewritten, (tmp->pnfs_curr_ld)->id); } else { } encode_op_hdr(xdr, 49, 5U, hdr); p = reserve_space(xdr, 20UL); p = xdr_encode_hyper(p, 0ULL); p = xdr_encode_hyper(p, (unsigned long long )args->lastbytewritten + 1ULL); *p = 0U; encode_nfs4_stateid(xdr, & args->stateid); p = reserve_space(xdr, 20UL); tmp___1 = p; p = p + 1; *tmp___1 = 16777216U; p = xdr_encode_hyper(p, args->lastbytewritten); tmp___2 = p; p = p + 1; *tmp___2 = 0U; tmp___3 = p; p = p + 1; tmp___4 = NFS_SERVER((struct inode const *)args->inode); tmp___5 = __fswab32((tmp___4->pnfs_curr_ld)->id); *tmp___3 = tmp___5; tmp___8 = NFS_SERVER((struct inode const *)inode); if ((unsigned long )(tmp___8->pnfs_curr_ld)->encode_layoutcommit != (unsigned long )((void (*)(struct pnfs_layout_hdr * , struct xdr_stream * , struct nfs4_layoutcommit_args const * ))0)) { tmp___6 = NFS_SERVER((struct inode const *)inode); tmp___7 = NFS_I((struct inode const *)inode); (*((tmp___6->pnfs_curr_ld)->encode_layoutcommit))(tmp___7->layout, xdr, args); } else { encode_uint32(xdr, 0U); } return (0); } } static void encode_layoutreturn(struct xdr_stream *xdr , struct nfs4_layoutreturn_args const *args , struct compound_hdr *hdr ) { __be32 *p ; __be32 *tmp ; __be32 *tmp___0 ; 
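/* encode_layoutcommit() above and this encode_layoutreturn() both defer to the layout driver's hook (pnfs_curr_ld->encode_layoutcommit / ->encode_layoutreturn) when one is registered; otherwise they emit a single zero word via encode_uint32(xdr, 0U) as an empty layout-type-specific body. */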
__u32 tmp___1 ; __be32 *tmp___2 ; struct nfs_server *tmp___3 ; struct nfs_inode *tmp___4 ; struct nfs_server *tmp___5 ; { encode_op_hdr(xdr, 51, 7U, hdr); p = reserve_space(xdr, 16UL); tmp = p; p = p + 1; *tmp = 0U; tmp___0 = p; p = p + 1; tmp___1 = __fswab32(args->layout_type); *tmp___0 = tmp___1; tmp___2 = p; p = p + 1; *tmp___2 = 50331648U; *p = 16777216U; p = reserve_space(xdr, 16UL); p = xdr_encode_hyper(p, 0ULL); p = xdr_encode_hyper(p, 0xffffffffffffffffULL); spin_lock(& (args->inode)->i_lock); encode_nfs4_stateid(xdr, & args->stateid); spin_unlock(& (args->inode)->i_lock); tmp___5 = NFS_SERVER((struct inode const *)args->inode); if ((unsigned long )(tmp___5->pnfs_curr_ld)->encode_layoutreturn != (unsigned long )((void (*)(struct pnfs_layout_hdr * , struct xdr_stream * , struct nfs4_layoutreturn_args const * ))0)) { tmp___3 = NFS_SERVER((struct inode const *)args->inode); tmp___4 = NFS_I((struct inode const *)args->inode); (*((tmp___3->pnfs_curr_ld)->encode_layoutreturn))(tmp___4->layout, xdr, args); } else { encode_uint32(xdr, 0U); } return; } } static int encode_secinfo_no_name(struct xdr_stream *xdr , struct nfs41_secinfo_no_name_args const *args , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 52, 147U, hdr); encode_uint32(xdr, (u32 )args->style); return (0); } } static void encode_test_stateid(struct xdr_stream *xdr , struct nfs41_test_stateid_args *args , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 55, 5U, hdr); encode_uint32(xdr, 1U); encode_nfs4_stateid(xdr, (nfs4_stateid const *)args->stateid); return; } } static void encode_free_stateid(struct xdr_stream *xdr , struct nfs41_free_stateid_args *args , struct compound_hdr *hdr ) { { encode_op_hdr(xdr, 45, 3U, hdr); encode_nfs4_stateid(xdr, (nfs4_stateid const *)args->stateid); return; } } static u32 nfs4_xdr_minorversion(struct nfs4_sequence_args const *args ) { { if ((unsigned long )args->sa_slot != (unsigned long )((struct nfs4_slot */* const */)0)) { return ((u32 )(((((args->sa_slot)->table)->session)->clp)->cl_mvops)->minor_version); } else { } return (0U); } } static void nfs4_xdr_enc_access(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_accessargs const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_access(xdr, args->access, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_lookup(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_lookup_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->dir_fh, & hdr); encode_lookup(xdr, args->name, & hdr); encode_getfh(xdr, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_lookup_root(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_lookup_root_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; 
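/* Root lookup compound: SEQUENCE (emitted only when a v4.1 slot is attached), PUTROOTFH, GETFH, GETFATTR; encode_nops() then patches the final operation count back into the compound header. */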
encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putrootfh(xdr, & hdr); encode_getfh(xdr, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_remove(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_removeargs const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_remove(xdr, & args->name, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_rename(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_renameargs const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->old_dir, & hdr); encode_savefh(xdr, & hdr); encode_putfh(xdr, args->new_dir, & hdr); encode_rename(xdr, args->old_name, args->new_name, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_link(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_link_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_savefh(xdr, & hdr); encode_putfh(xdr, args->dir_fh, & hdr); encode_link(xdr, args->name, & hdr); encode_restorefh(xdr, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_create(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_create_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->dir_fh, & hdr); encode_create(xdr, args, & hdr); encode_getfh(xdr, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_symlink(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_create_arg const *args ) { { nfs4_xdr_enc_create(req, xdr, args); return; } } static void nfs4_xdr_enc_getattr(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_getattr_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_close(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_closeargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 
0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_close(xdr, (struct nfs_closeargs const *)args, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_open(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_openargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, args->fh, & hdr); encode_open(xdr, (struct nfs_openargs const *)args, & hdr); encode_getfh(xdr, & hdr); if (args->access != 0U) { encode_access(xdr, args->access, & hdr); } else { } encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_open_confirm(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_open_confirmargs *args ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = 0U; encode_compound_hdr(xdr, req, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_open_confirm(xdr, (struct nfs_open_confirmargs const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_open_noattr(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_openargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, args->fh, & hdr); encode_open(xdr, (struct nfs_openargs const *)args, & hdr); if (args->access != 0U) { encode_access(xdr, args->access, & hdr); } else { } encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_open_downgrade(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_closeargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_open_downgrade(xdr, (struct nfs_closeargs const *)args, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_lock(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_lock_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const 
*)args->fh, & hdr); encode_lock(xdr, (struct nfs_lock_args const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_lockt(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_lockt_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_lockt(xdr, (struct nfs_lockt_args const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_locku(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_locku_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_locku(xdr, (struct nfs_locku_args const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_release_lockowner(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_release_lockowner_args *args ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = 0U; encode_compound_hdr(xdr, req, & hdr); encode_release_lockowner(xdr, (struct nfs_lowner const *)(& args->lock_owner), & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_readlink(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_readlink const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_readlink(xdr, args, req, & hdr); xdr_inline_pages(& req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->pglen); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_readdir(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_readdir_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; long tmp___0 ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_readdir(xdr, args, req, & hdr); xdr_inline_pages(& req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: inlined page args = (%u, %p, %u, %u)\n", "nfs4_xdr_enc_readdir", hdr.replen << 2, args->pages, args->pgbase, args->count); } else { } encode_nops(& hdr); return; } } static void nfs4_xdr_enc_read(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_readargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; 
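/* The field-by-field zeroing of hdr in these nfs4_xdr_enc_* routines is CIL's expansion of what is presumably a designated initializer in the original source; only minorversion receives a non-constant value. */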
hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_read(xdr, (struct nfs_readargs const *)args, & hdr); xdr_inline_pages(& req->rq_rcv_buf, hdr.replen << 2, args->pages, args->pgbase, args->count); req->rq_rcv_buf.flags = req->rq_rcv_buf.flags | 1U; encode_nops(& hdr); return; } } static void nfs4_xdr_enc_setattr(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_setattrargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_setattr(xdr, (struct nfs_setattrargs const *)args, args->server, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_getacl(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_getaclargs *args ) { struct compound_hdr hdr ; u32 tmp ; uint32_t replen ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); replen = hdr.replen + 3U; encode_getattr_two(xdr, 4096U, 0U, & hdr); xdr_inline_pages(& req->rq_rcv_buf, replen << 2, args->acl_pages, args->acl_pgbase, (unsigned int )args->acl_len); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_write(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_writeargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_write(xdr, (struct nfs_writeargs const *)args, & hdr); req->rq_snd_buf.flags = req->rq_snd_buf.flags | 2U; if ((unsigned long )args->bitmask != (unsigned long )((u32 const *)0)) { encode_getfattr(xdr, args->bitmask, & hdr); } else { } encode_nops(& hdr); return; } } static void nfs4_xdr_enc_commit(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_commitargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_commit(xdr, (struct nfs_commitargs const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_fsinfo(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_fsinfo_arg *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = 
nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, args->fh, & hdr); encode_fsinfo(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_pathconf(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_pathconf_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_getattr_one(xdr, (unsigned int )*(args->bitmask) & (unsigned int )nfs4_pathconf_bitmap[0], & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_statfs(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_statfs_arg const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fh, & hdr); encode_getattr_two(xdr, (unsigned int )*(args->bitmask) & (unsigned int )nfs4_statfs_bitmap[0], (unsigned int )*(args->bitmask + 1UL) & (unsigned int )nfs4_statfs_bitmap[1], & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_server_caps(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_server_caps_arg *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fhandle, & hdr); encode_getattr_one(xdr, 8293U, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_renew(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_client *clp ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = 0U; encode_compound_hdr(xdr, req, & hdr); encode_renew(xdr, clp->cl_clientid, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_setclientid(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_setclientid *sc ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = 0U; encode_compound_hdr(xdr, req, & hdr); encode_setclientid(xdr, (struct nfs4_setclientid const *)sc, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_setclientid_confirm(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_setclientid_res *arg ) { struct compound_hdr hdr ; u32 lease_bitmap[3U] ; unsigned int tmp ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = 0U; lease_bitmap[0] = 1024U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } lease_bitmap[tmp] = 0U; tmp = tmp + 1U; } encode_compound_hdr(xdr, req, & hdr); 
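/* SETCLIENTID_CONFIRM is followed by PUTROOTFH and FSINFO restricted to lease_bitmap[0] = 1024U (bit 10, which appears to be the lease-time attribute), so the same reply also refreshes the lease period. */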
encode_setclientid_confirm(xdr, (struct nfs4_setclientid_res const *)arg, & hdr); encode_putrootfh(xdr, & hdr); encode_fsinfo(xdr, (u32 const *)(& lease_bitmap), & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_delegreturnargs const *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion(& args->seq_args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, & args->seq_args, & hdr); encode_putfh(xdr, args->fhandle, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_delegreturn(xdr, args->stateid, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_fs_locations(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_fs_locations_arg *args ) { struct compound_hdr hdr ; u32 tmp ; uint32_t replen ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, args->dir_fh, & hdr); encode_lookup(xdr, args->name, & hdr); replen = hdr.replen; encode_fs_locations(xdr, args->bitmask, & hdr); xdr_inline_pages(& req->rq_rcv_buf, replen << 2, & args->page, 0U, 4096U); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_secinfo_arg *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, args->dir_fh, & hdr); encode_secinfo(xdr, args->name, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_client *clp ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = (clp->cl_mvops)->minor_version; encode_compound_hdr(xdr, req, & hdr); encode_bind_conn_to_session(xdr, clp->cl_session, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs41_exchange_id_args *args ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = ((args->client)->cl_mvops)->minor_version; encode_compound_hdr(xdr, req, & hdr); encode_exchange_id(xdr, args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_create_session(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs41_create_session_args *args ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = ((args->client)->cl_mvops)->minor_version; encode_compound_hdr(xdr, req, & hdr); encode_create_session(xdr, args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_session *session ) { struct compound_hdr hdr ; { 
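/* The session and clientid management encoders (bind_conn_to_session, exchange_id, create_session, destroy_session, destroy_clientid) carry no nfs4_sequence_args, so hdr.minorversion is taken directly from the client's cl_mvops rather than from nfs4_xdr_minorversion(). */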
hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = ((session->clp)->cl_mvops)->minor_version; encode_compound_hdr(xdr, req, & hdr); encode_destroy_session(xdr, session, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_client *clp ) { struct compound_hdr hdr ; { hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = (clp->cl_mvops)->minor_version; encode_compound_hdr(xdr, req, & hdr); encode_destroy_clientid(xdr, clp->cl_clientid, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_sequence(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_sequence_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)args); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_get_lease_time(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_get_lease_time_args *args ) { struct compound_hdr hdr ; u32 tmp ; u32 lease_bitmap[3U] ; unsigned int tmp___0 ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->la_seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; lease_bitmap[0] = 1024U; tmp___0 = 1U; while (1) { if (tmp___0 >= 3U) { break; } else { } lease_bitmap[tmp___0] = 0U; tmp___0 = tmp___0 + 1U; } encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->la_seq_args), & hdr); encode_putrootfh(xdr, & hdr); encode_fsinfo(xdr, (u32 const *)(& lease_bitmap), & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_reclaim_complete(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs41_reclaim_complete_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_reclaim_complete(xdr, args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_getdevicelist(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_getdevicelist_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, args->fh, & hdr); encode_getdevicelist(xdr, (struct nfs4_getdevicelist_args const *)args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_getdeviceinfo(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_getdeviceinfo_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = 
tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_getdeviceinfo(xdr, (struct nfs4_getdeviceinfo_args const *)args, & hdr); xdr_inline_pages(& req->rq_rcv_buf, (hdr.replen - 2U) << 2, (args->pdev)->pages, (args->pdev)->pgbase, (args->pdev)->pglen); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_layoutget(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_layoutget_args *args ) { struct compound_hdr hdr ; u32 tmp ; struct nfs_fh *tmp___0 ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); tmp___0 = NFS_FH((struct inode const *)args->inode); encode_putfh(xdr, (struct nfs_fh const *)tmp___0, & hdr); encode_layoutget(xdr, (struct nfs4_layoutget_args const *)args, & hdr); xdr_inline_pages(& req->rq_rcv_buf, hdr.replen << 2, args->layout.pages, 0U, args->layout.pglen); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_layoutcommit(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_layoutcommit_args *args ) { struct nfs4_layoutcommit_data *data ; struct nfs4_layoutcommit_args const *__mptr ; struct compound_hdr hdr ; u32 tmp ; struct nfs_fh *tmp___0 ; { __mptr = (struct nfs4_layoutcommit_args const *)args; data = (struct nfs4_layoutcommit_data *)__mptr + 0xfffffffffffffe20UL; tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); tmp___0 = NFS_FH((struct inode const *)args->inode); encode_putfh(xdr, (struct nfs_fh const *)tmp___0, & hdr); encode_layoutcommit(xdr, data->args.inode, (struct nfs4_layoutcommit_args const *)args, & hdr); encode_getfattr(xdr, args->bitmask, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_layoutreturn(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_layoutreturn_args *args ) { struct compound_hdr hdr ; u32 tmp ; struct nfs_fh *tmp___0 ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); tmp___0 = NFS_FH((struct inode const *)args->inode); encode_putfh(xdr, (struct nfs_fh const *)tmp___0, & hdr); encode_layoutreturn(xdr, (struct nfs4_layoutreturn_args const *)args, & hdr); encode_nops(& hdr); return; } } static int nfs4_xdr_enc_secinfo_no_name(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs41_secinfo_no_name_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putrootfh(xdr, & hdr); encode_secinfo_no_name(xdr, (struct nfs41_secinfo_no_name_args const *)args, & hdr); encode_nops(& hdr); 
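/* Unlike the void nfs4_xdr_enc_* routines above, the secinfo_no_name pair is declared to return an int status, though it is unconditionally 0 here. */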
return (0); } } static void nfs4_xdr_enc_test_stateid(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs41_test_stateid_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_test_stateid(xdr, args, & hdr); encode_nops(& hdr); return; } } static void nfs4_xdr_enc_free_stateid(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs41_free_stateid_args *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_free_stateid(xdr, args, & hdr); encode_nops(& hdr); return; } } static void print_overflow_msg(char const *func , struct xdr_stream const *xdr ) { long tmp ; { tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001dnfs: %s: prematurely hit end of receive buffer. Remaining buffer length is %tu words.\n", func, ((long )xdr->end - (long )xdr->p) / 4L); } else { } return; } } static int decode_opaque_inline(struct xdr_stream *xdr , unsigned int *len , char **string ) { __be32 *p ; long tmp ; long tmp___0 ; { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } *len = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )*len); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *string = (char *)p; return (0); out_overflow: print_overflow_msg("decode_opaque_inline", (struct xdr_stream const *)xdr); return (-5); } } static int decode_compound_hdr(struct xdr_stream *xdr , struct compound_hdr *hdr ) { __be32 *p ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; long tmp___2 ; int tmp___3 ; long tmp___4 ; { p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; tmp___1 = __be32_to_cpup((__be32 const *)tmp___0); hdr->status = (int32_t )tmp___1; hdr->taglen = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )(hdr->taglen + 4U)); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } hdr->tag = (char *)p; p = p + (unsigned long )((hdr->taglen + 3U) >> 2); hdr->nops = __be32_to_cpup((__be32 const *)p); tmp___4 = ldv__builtin_expect(hdr->nops == 0U, 0L); if (tmp___4 != 0L) { tmp___3 = nfs4_stat_to_errno(hdr->status); return (tmp___3); } else { } return (0); out_overflow: print_overflow_msg("decode_compound_hdr", (struct xdr_stream const *)xdr); return (-5); } } static int decode_op_hdr(struct xdr_stream *xdr , enum nfs_opnum4 expected ) { __be32 *p ; uint32_t opnum ; int32_t nfserr ; long tmp ; __be32 *tmp___0 ; long tmp___1 ; __u32 tmp___2 ; int tmp___3 ; { p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 
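/*
 * Annotation: every ldv__builtin_expect() in these decoders is CIL's expansion of
 * the kernel's likely()/unlikely() hints.  A NULL return from xdr_inline_decode()
 * means the receive buffer was exhausted, which each decoder maps to -5 (-EIO)
 * through its out_overflow path after print_overflow_msg().
 */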
0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; opnum = __be32_to_cpup((__be32 const *)tmp___0); if (opnum != (uint32_t )expected) { tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dnfs: Server returned operation %d but we issued a request for %d\n", opnum, (unsigned int )expected); } else { } return (-5); } else { } tmp___2 = __be32_to_cpup((__be32 const *)p); nfserr = (int32_t )tmp___2; if (nfserr != 0) { tmp___3 = nfs4_stat_to_errno(nfserr); return (tmp___3); } else { } return (0); out_overflow: print_overflow_msg("decode_op_hdr", (struct xdr_stream const *)xdr); return (-5); } } static int decode_ace(struct xdr_stream *xdr , void *ace , struct nfs_client *clp ) { __be32 *p ; unsigned int strlen___0 ; char *str ; int tmp ; long tmp___0 ; { p = xdr_inline_decode(xdr, 12UL); tmp___0 = ldv__builtin_expect((unsigned long )p != (unsigned long )((__be32 *)0), 1L); if (tmp___0 != 0L) { tmp = decode_opaque_inline(xdr, & strlen___0, & str); return (tmp); } else { } print_overflow_msg("decode_ace", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_bitmap(struct xdr_stream *xdr , uint32_t *bitmap ) { uint32_t bmlen ; __be32 *p ; long tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; __be32 *tmp___3 ; __be32 *tmp___4 ; { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } bmlen = __be32_to_cpup((__be32 const *)p); tmp___1 = 0U; *(bitmap + 2UL) = tmp___1; tmp___0 = tmp___1; *(bitmap + 1UL) = tmp___0; *bitmap = tmp___0; p = xdr_inline_decode(xdr, (size_t )(bmlen << 2)); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } if (bmlen != 0U) { tmp___3 = p; p = p + 1; *bitmap = __be32_to_cpup((__be32 const *)tmp___3); if (bmlen > 1U) { tmp___4 = p; p = p + 1; *(bitmap + 1UL) = __be32_to_cpup((__be32 const *)tmp___4); if (bmlen > 2U) { *(bitmap + 2UL) = __be32_to_cpup((__be32 const *)p); } else { } } else { } } else { } return (0); out_overflow: print_overflow_msg("decode_attr_bitmap", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_length(struct xdr_stream *xdr , uint32_t *attrlen , unsigned int *savep ) { __be32 *p ; long tmp ; { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } *attrlen = __be32_to_cpup((__be32 const *)p); *savep = xdr_stream_pos((struct xdr_stream const *)xdr); return (0); out_overflow: print_overflow_msg("decode_attr_length", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_supported(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *bitmask ) { int ret ; long tmp ; uint32_t tmp___0 ; uint32_t tmp___1 ; long tmp___2 ; long tmp___3 ; { tmp___2 = ldv__builtin_expect((long )((int )*bitmap) & 1L, 1L); if (tmp___2 != 0L) { ret = decode_attr_bitmap(xdr, bitmask); tmp = ldv__builtin_expect(ret < 0, 0L); if (tmp != 0L) { return (ret); } else { } *bitmap = *bitmap & 4294967294U; } else { tmp___1 = 0U; *(bitmask + 2UL) = tmp___1; tmp___0 = tmp___1; *(bitmask + 1UL) = tmp___0; *bitmask = tmp___0; } tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: bitmask=%08x:%08x:%08x\n", "decode_attr_supported", *bitmask, *(bitmask + 1UL), *(bitmask + 2UL)); } else { } return (0); } } static int decode_attr_type(struct 
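/*
 * Annotation: the decode_attr_*() helpers that follow share a pattern: return -5
 * if any lower-numbered attribute bit is still set (attributes must be consumed in
 * bitmask order), decode the value only when this attribute's bit is present, clear
 * the bit, and return either 0 or a positive value corresponding to the kernel's
 * NFS_ATTR_FATTR_* validity flags (e.g. 64 for size, 2048 for fileid) that the
 * caller ORs into fattr->valid.
 */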
xdr_stream *xdr , uint32_t *bitmap , uint32_t *type ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { ret = 0; *type = 0U; tmp = ldv__builtin_expect((long )((int )*bitmap) & 1L, 0L); if (tmp != 0L) { return (-5); } else { } tmp___2 = ldv__builtin_expect(((unsigned long )*bitmap & 2UL) != 0UL, 1L); if (tmp___2 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *type = __be32_to_cpup((__be32 const *)p); if (*type == 0U || *type > 9U) { tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: bad type %d\n", "decode_attr_type", *type); } else { } return (-5); } else { } *bitmap = *bitmap & 4294967293U; ret = 1; } else { } tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: type=0%o\n", "decode_attr_type", (int )nfs_type2fmt[*type]); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_type", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_fh_expire_type(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *type ) { __be32 *p ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { *type = 0U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 3UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 4UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *type = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 4294967291U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: expire type=0x%x\n", "decode_attr_fh_expire_type", *type); } else { } return (0); out_overflow: print_overflow_msg("decode_attr_fh_expire_type", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_change(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *change ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; *change = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 7UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 8UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, change); *bitmap = *bitmap & 4294967287U; ret = 131072; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: change attribute=%Lu\n", "decode_attr_change", *change); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_change", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_size(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *size ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; *size = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 15UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 16UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if 
(tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, size); *bitmap = *bitmap & 4294967279U; ret = 64; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: file size=%Lu\n", "decode_attr_size", *size); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_size", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_link_support(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { *res = 0U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 31UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 32UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *res = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 4294967263U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: link support=%s\n", "decode_attr_link_support", *res == 0U ? (char *)"false" : (char *)"true"); } else { } return (0); out_overflow: print_overflow_msg("decode_attr_link_support", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_symlink_support(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { *res = 0U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 63UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 64UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *res = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 4294967231U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: symlink support=%s\n", "decode_attr_symlink_support", *res == 0U ? 
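/*
 * Annotation: the "\001d" prefix on these format strings is the kernel's KERN_SOH
 * log-level marker that dprintk() prepends, and the (nfs_debug & 32U) guard is the
 * expanded test for the NFS XDR debugging facility; the messages are diagnostics
 * only and never alter the decode result.
 */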
(char *)"false" : (char *)"true"); } else { } return (0); out_overflow: print_overflow_msg("decode_attr_symlink_support", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_fsid(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs_fsid *fsid ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; fsid->major = 0ULL; fsid->minor = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 255UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 256UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 16UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & fsid->major); xdr_decode_hyper(p, & fsid->minor); *bitmap = *bitmap & 4294967039U; ret = 1024; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: fsid=(0x%Lx/0x%Lx)\n", "decode_attr_fsid", fsid->major, fsid->minor); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_fsid", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_lease_time(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { *res = 60U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 1023UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 1024UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *res = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 4294966271U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: file size=%u\n", "decode_attr_lease_time", *res); } else { } return (0); out_overflow: print_overflow_msg("decode_attr_lease_time", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_error(struct xdr_stream *xdr , uint32_t *bitmap , int32_t *res ) { __be32 *p ; long tmp ; long tmp___0 ; __u32 tmp___1 ; long tmp___2 ; { tmp = ldv__builtin_expect(((unsigned long )*bitmap & 2047UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___2 = ldv__builtin_expect(((unsigned long )*bitmap & 2048UL) != 0UL, 1L); if (tmp___2 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *bitmap = *bitmap & 4294965247U; tmp___1 = __be32_to_cpup((__be32 const *)p); *res = (int32_t )(- tmp___1); } else { } return (0); out_overflow: print_overflow_msg("decode_attr_error", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_filehandle(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs_fh *fh ) { __be32 *p ; int len ; long tmp ; long tmp___0 ; __u32 tmp___1 ; long tmp___2 ; size_t __len ; void *__ret ; long tmp___3 ; { if ((unsigned long )fh != (unsigned long )((struct nfs_fh *)0)) { memset((void *)fh, 0, 130UL); } else { } tmp = ldv__builtin_expect(((unsigned long )*bitmap & 524287UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___3 = ldv__builtin_expect(((unsigned long )*bitmap & 524288UL) != 0UL, 1L); if (tmp___3 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == 
(unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } tmp___1 = __be32_to_cpup((__be32 const *)p); len = (int )tmp___1; if (len > 128) { return (-5); } else { } p = xdr_inline_decode(xdr, (size_t )len); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } if ((unsigned long )fh != (unsigned long )((struct nfs_fh *)0)) { __len = (size_t )len; __ret = memcpy((void *)(& fh->data), (void const *)p, __len); fh->size = (unsigned short )len; } else { } *bitmap = *bitmap & 4294443007U; } else { } return (0); out_overflow: print_overflow_msg("decode_attr_filehandle", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_aclsupport(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { *res = 3U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 8191UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 8192UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *res = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 4294959103U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: ACLs supported=%u\n", "decode_attr_aclsupport", *res); } else { } return (0); out_overflow: print_overflow_msg("decode_attr_aclsupport", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_fileid(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *fileid ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; *fileid = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 1048575UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 1048576UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, fileid); *bitmap = *bitmap & 4293918719U; ret = 2048; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: fileid=%Lu\n", "decode_attr_fileid", *fileid); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_fileid", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *fileid ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; *fileid = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 8388607UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 8388608UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, fileid); *(bitmap + 1UL) = *(bitmap + 1UL) & 4286578687U; ret = 4194304; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: fileid=%Lu\n", "decode_attr_mounted_on_fileid", *fileid); } else { } return (ret); out_overflow: 
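/*
 * Annotation: from decode_attr_mounted_on_fileid() onward the helpers test
 * *(bitmap + 1UL) or *(bitmap + 2UL): those attributes live in the second and third
 * words of the FATTR4 bitmask (attribute numbers 32-63 and 64 up), and the same
 * decode-in-order discipline applies within each word.
 */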
print_overflow_msg("decode_attr_mounted_on_fileid", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_files_avail(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 2097151UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 2097152UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *bitmap = *bitmap & 4292870143U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: files avail=%Lu\n", "decode_attr_files_avail", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_files_avail", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_files_free(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 4194303UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 4194304UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *bitmap = *bitmap & 4290772991U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: files free=%Lu\n", "decode_attr_files_free", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_files_free", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_files_total(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 8388607UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 8388608UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *bitmap = *bitmap & 4286578687U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: files total=%Lu\n", "decode_attr_files_total", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_files_total", (struct xdr_stream const *)xdr); return (-5); } } static int decode_pathname(struct xdr_stream *xdr , struct nfs4_pathname *path ) { u32 n ; __be32 *p ; int status ; long tmp ; long tmp___0 ; struct nfs4_string *component ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { status = 0; p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } n = __be32_to_cpup((__be32 const *)p); if (n == 0U) { goto root_path; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { 
printk("\001dpathname4: "); } else { } path->ncomponents = 0U; goto ldv_50453; ldv_50452: component = (struct nfs4_string *)(& path->components) + (unsigned long )path->ncomponents; status = decode_opaque_inline(xdr, & component->len, & component->data); tmp___1 = ldv__builtin_expect(status != 0, 0L); if (tmp___1 != 0L) { goto out_eio; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("%s%.*s ", path->ncomponents != n ? (char *)"/ " : (char *)"", component->len, component->data); } else { } if (path->ncomponents <= 511U) { path->ncomponents = path->ncomponents + 1U; } else { tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001dcannot parse %d components in path\n", n); } else { } goto out_eio; } ldv_50453: ; if (path->ncomponents < n) { goto ldv_50452; } else { } out: ; return (status); root_path: path->ncomponents = 1U; path->components[0].len = 0U; path->components[0].data = 0; tmp___4 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001dpathname4: /\n"); } else { } goto out; out_eio: tmp___5 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d status %d", status); } else { } status = -5; goto out; out_overflow: print_overflow_msg("decode_pathname", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_fs_locations(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs4_fs_locations *res ) { int n ; __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; __u32 tmp___5 ; u32 m ; struct nfs4_fs_location *loc ; long tmp___6 ; long tmp___7 ; struct nfs4_string *server ; long tmp___8 ; long tmp___9 ; unsigned int i ; long tmp___10 ; unsigned int len ; char *data ; long tmp___11 ; long tmp___12 ; long tmp___13 ; { status = -5; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 16777215UL) != 0UL, 0L); if (tmp != 0L) { goto out; } else { } status = 0; tmp___0 = ldv__builtin_expect(((unsigned long )*bitmap & 16777216UL) == 0UL, 0L); if (tmp___0 != 0L) { goto out; } else { } status = -5; tmp___1 = ldv__builtin_expect((unsigned long )res == (unsigned long )((struct nfs4_fs_locations *)0), 0L); if (tmp___1 != 0L) { goto out; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: fsroot:\n", "decode_attr_fs_locations"); } else { } status = decode_pathname(xdr, & res->fs_path); tmp___3 = ldv__builtin_expect(status != 0, 0L); if (tmp___3 != 0L) { goto out; } else { } p = xdr_inline_decode(xdr, 4UL); tmp___4 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___4 != 0L) { goto out_overflow; } else { } tmp___5 = __be32_to_cpup((__be32 const *)p); n = (int )tmp___5; if (n <= 0) { goto out_eio; } else { } res->nlocations = 0; goto ldv_50482; ldv_50481: loc = (struct nfs4_fs_location *)(& res->locations) + (unsigned long )res->nlocations; p = xdr_inline_decode(xdr, 4UL); tmp___6 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___6 != 0L) { goto out_overflow; } else { } m = __be32_to_cpup((__be32 const *)p); loc->nservers = 0U; tmp___7 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: servers:\n", "decode_attr_fs_locations"); } else { } goto ldv_50479; ldv_50478: server = (struct nfs4_string *)(& loc->servers) + (unsigned long )loc->nservers; status = decode_opaque_inline(xdr, & server->len, & server->data); 
tmp___8 = ldv__builtin_expect(status != 0, 0L); if (tmp___8 != 0L) { goto out_eio; } else { } tmp___9 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___9 != 0L) { printk("\001d%s ", server->data); } else { } if (loc->nservers <= 9U) { loc->nservers = loc->nservers + 1U; } else { tmp___10 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___10 != 0L) { printk("\001d%s: using first %u of %u servers returned for location %u\n", "decode_attr_fs_locations", 10, m, res->nlocations); } else { } i = loc->nservers; goto ldv_50476; ldv_50475: status = decode_opaque_inline(xdr, & len, & data); tmp___11 = ldv__builtin_expect(status != 0, 0L); if (tmp___11 != 0L) { goto out_eio; } else { } i = i + 1U; ldv_50476: ; if (i < m) { goto ldv_50475; } else { } } ldv_50479: ; if (loc->nservers < m) { goto ldv_50478; } else { } status = decode_pathname(xdr, & loc->rootpath); tmp___12 = ldv__builtin_expect(status != 0, 0L); if (tmp___12 != 0L) { goto out_eio; } else { } if (res->nlocations <= 9) { res->nlocations = res->nlocations + 1; } else { } ldv_50482: ; if (res->nlocations < n) { goto ldv_50481; } else { } if (res->nlocations != 0) { status = 524288; } else { } out: tmp___13 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___13 != 0L) { printk("\001d%s: fs_locations done, error = %d\n", "decode_attr_fs_locations", status); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_fs_locations", (struct xdr_stream const *)xdr); out_eio: status = -5; goto out; } } static int decode_attr_maxfilesize(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 134217727UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 134217728UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *bitmap = *bitmap & 4160749567U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: maxfilesize=%Lu\n", "decode_attr_maxfilesize", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_maxfilesize", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_maxlink(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *maxlink ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *maxlink = 1U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 268435455UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 268435456UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *maxlink = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 4026531839U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: maxlink=%u\n", "decode_attr_maxlink", *maxlink); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_maxlink", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_maxname(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *maxname ) { __be32 
*p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *maxname = 1024U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 536870911UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 536870912UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *maxname = __be32_to_cpup((__be32 const *)p); *bitmap = *bitmap & 3758096383U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: maxname=%u\n", "decode_attr_maxname", *maxname); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_maxname", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_maxread(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; int status ; long tmp ; uint64_t maxread ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 1024U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 1073741823UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*bitmap & 1073741824UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, & maxread); if (maxread > 2147483647ULL) { maxread = 2147483647ULL; } else { } *res = (unsigned int )maxread; *bitmap = *bitmap & 3221225471U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: maxread=%lu\n", "decode_attr_maxread", (unsigned long )*res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_maxread", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_maxwrite(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; int status ; long tmp ; uint64_t maxwrite ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 1024U; tmp = ldv__builtin_expect(((unsigned long )*bitmap & 2147483647UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect((int )*bitmap < 0, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, & maxwrite); if (maxwrite > 2147483647ULL) { maxwrite = 2147483647ULL; } else { } *res = (unsigned int )maxwrite; *bitmap = *bitmap & 2147483647U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: maxwrite=%lu\n", "decode_attr_maxwrite", (unsigned long )*res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_maxwrite", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_mode(struct xdr_stream *xdr , uint32_t *bitmap , umode_t *mode ) { uint32_t tmp ; __be32 *p ; int ret ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { ret = 0; *mode = 0U; tmp___0 = ldv__builtin_expect((long )((int )*(bitmap + 1UL)) & 1L, 0L); if (tmp___0 != 0L) { return (-5); } else { } tmp___2 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 2UL) != 0UL, 1L); if (tmp___2 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long 
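/*
 * Annotation: decode_attr_maxread()/decode_attr_maxwrite() above clamp the 64-bit
 * hint to 2147483647 (INT_MAX) before narrowing it to u32; decode_attr_mode() here
 * keeps only the low twelve permission bits (mode & 4095U, i.e. 07777).
 */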
)((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } tmp = __be32_to_cpup((__be32 const *)p); *mode = (unsigned int )((umode_t )tmp) & 4095U; *(bitmap + 1UL) = *(bitmap + 1UL) & 4294967293U; ret = 2; } else { } tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: file mode=0%o\n", "decode_attr_mode", (unsigned int )*mode); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_mode", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_nlink(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *nlink ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; *nlink = 1U; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 7UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 8UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } *nlink = __be32_to_cpup((__be32 const *)p); *(bitmap + 1UL) = *(bitmap + 1UL) & 4294967287U; ret = 4; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: nlink=%u\n", "decode_attr_nlink", *nlink); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_nlink", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_owner(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs_server const *server , uint32_t *uid , struct nfs4_string *owner_name ) { uint32_t len ; __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; void *tmp___2 ; long tmp___3 ; long tmp___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; { ret = 0; *uid = 4294967294U; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 15UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___6 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 16UL) != 0UL, 1L); if (tmp___6 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )len); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } if ((unsigned long )owner_name != (unsigned long )((struct nfs4_string *)0)) { tmp___2 = kmemdup((void const *)p, (size_t )len, 0U); owner_name->data = (char *)tmp___2; if ((unsigned long )owner_name->data != (unsigned long )((char *)0)) { owner_name->len = len; ret = 8388608; } else { } } else if (len <= 1023U) { tmp___5 = nfs_map_name_to_uid(server, (char const *)p, (size_t )len, uid); if (tmp___5 == 0) { ret = 8; } else { tmp___4 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s: nfs_map_name_to_uid failed!\n", "decode_attr_owner"); } else { tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: name too long (%u)!\n", "decode_attr_owner", len); } else { } } } } else { } *(bitmap + 1UL) = *(bitmap + 1UL) & 4294967279U; } else { } tmp___7 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: uid=%d\n", "decode_attr_owner", (int )*uid); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_owner", (struct xdr_stream const *)xdr); return 
(-5); } } static int decode_attr_group(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs_server const *server , uint32_t *gid , struct nfs4_string *group_name ) { uint32_t len ; __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; void *tmp___2 ; long tmp___3 ; long tmp___4 ; int tmp___5 ; long tmp___6 ; long tmp___7 ; { ret = 0; *gid = 4294967294U; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 31UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___6 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 32UL) != 0UL, 1L); if (tmp___6 != 0L) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )len); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } if ((unsigned long )group_name != (unsigned long )((struct nfs4_string *)0)) { tmp___2 = kmemdup((void const *)p, (size_t )len, 0U); group_name->data = (char *)tmp___2; if ((unsigned long )group_name->data != (unsigned long )((char *)0)) { group_name->len = len; ret = 16777216; } else { } } else if (len <= 1023U) { tmp___5 = nfs_map_group_to_gid(server, (char const *)p, (size_t )len, gid); if (tmp___5 == 0) { ret = 16; } else { tmp___4 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s: nfs_map_group_to_gid failed!\n", "decode_attr_group"); } else { tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: name too long (%u)!\n", "decode_attr_group", len); } else { } } } } else { } *(bitmap + 1UL) = *(bitmap + 1UL) & 4294967263U; } else { } tmp___7 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: gid=%d\n", "decode_attr_group", (int )*gid); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_group", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_rdev(struct xdr_stream *xdr , uint32_t *bitmap , dev_t *rdev ) { uint32_t major ; uint32_t minor ; __be32 *p ; int ret ; long tmp ; dev_t tmp___0 ; long tmp___1 ; __be32 *tmp___2 ; long tmp___3 ; long tmp___4 ; { major = 0U; minor = 0U; ret = 0; *rdev = 0U; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 511UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___3 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 512UL) != 0UL, 1L); if (tmp___3 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } tmp___2 = p; p = p + 1; major = __be32_to_cpup((__be32 const *)tmp___2); minor = __be32_to_cpup((__be32 const *)p); tmp___0 = (major << 20) | minor; if (tmp___0 >> 20 == major && (tmp___0 & 1048575U) == minor) { *rdev = tmp___0; } else { } *(bitmap + 1UL) = *(bitmap + 1UL) & 4294966783U; ret = 32; } else { } tmp___4 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s: rdev=(0x%x:0x%x)\n", "decode_attr_rdev", major, minor); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_rdev", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_space_avail(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res 
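/*
 * Annotation: decode_attr_rdev() above re-packs the on-the-wire major/minor pair as
 * (major << 20) | minor and only stores the result when both halves survive the
 * round trip, so out-of-range device numbers silently decode as 0.
 */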
= 0ULL; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 1023UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 1024UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *(bitmap + 1UL) = *(bitmap + 1UL) & 4294966271U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: space avail=%Lu\n", "decode_attr_space_avail", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_space_avail", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_space_free(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 2047UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 2048UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *(bitmap + 1UL) = *(bitmap + 1UL) & 4294965247U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: space free=%Lu\n", "decode_attr_space_free", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_space_free", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_space_total(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { status = 0; *res = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 4095UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 4096UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); *(bitmap + 1UL) = *(bitmap + 1UL) & 4294963199U; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: space total=%Lu\n", "decode_attr_space_total", *res); } else { } return (status); out_overflow: print_overflow_msg("decode_attr_space_total", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_space_used(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *used ) { __be32 *p ; int ret ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { ret = 0; *used = 0ULL; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 8191UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___1 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 8192UL) != 0UL, 1L); if (tmp___1 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, used); *(bitmap + 1UL) = *(bitmap + 1UL) & 4294959103U; ret = 512; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: space 
used=%Lu\n", "decode_attr_space_used", *used); } else { } return (ret); out_overflow: print_overflow_msg("decode_attr_space_used", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_time(struct xdr_stream *xdr , struct timespec *time ) { __be32 *p ; uint64_t sec ; uint32_t nsec ; long tmp ; { p = xdr_inline_decode(xdr, 12UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & sec); nsec = __be32_to_cpup((__be32 const *)p); time->tv_sec = (long )sec; time->tv_nsec = (long )nsec; return (0); out_overflow: print_overflow_msg("decode_attr_time", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_time_access(struct xdr_stream *xdr , uint32_t *bitmap , struct timespec *time ) { int status ; long tmp ; long tmp___0 ; long tmp___1 ; { status = 0; time->tv_sec = 0L; time->tv_nsec = 0L; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 32767UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___0 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 32768UL) != 0UL, 1L); if (tmp___0 != 0L) { status = decode_attr_time(xdr, time); if (status == 0) { status = 4096; } else { } *(bitmap + 1UL) = *(bitmap + 1UL) & 4294934527U; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: atime=%ld\n", "decode_attr_time_access", time->tv_sec); } else { } return (status); } } static int decode_attr_time_metadata(struct xdr_stream *xdr , uint32_t *bitmap , struct timespec *time ) { int status ; long tmp ; long tmp___0 ; long tmp___1 ; { status = 0; time->tv_sec = 0L; time->tv_nsec = 0L; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 1048575UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___0 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 1048576UL) != 0UL, 1L); if (tmp___0 != 0L) { status = decode_attr_time(xdr, time); if (status == 0) { status = 16384; } else { } *(bitmap + 1UL) = *(bitmap + 1UL) & 4293918719U; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: ctime=%ld\n", "decode_attr_time_metadata", time->tv_sec); } else { } return (status); } } static int decode_attr_time_delta(struct xdr_stream *xdr , uint32_t *bitmap , struct timespec *time ) { int status ; long tmp ; long tmp___0 ; long tmp___1 ; { status = 0; time->tv_sec = 0L; time->tv_nsec = 0L; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 524287UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___0 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 524288UL) != 0UL, 1L); if (tmp___0 != 0L) { status = decode_attr_time(xdr, time); *(bitmap + 1UL) = *(bitmap + 1UL) & 4294443007U; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: time_delta=%ld %ld\n", "decode_attr_time_delta", time->tv_sec, time->tv_nsec); } else { } return (status); } } static int decode_attr_time_modify(struct xdr_stream *xdr , uint32_t *bitmap , struct timespec *time ) { int status ; long tmp ; long tmp___0 ; long tmp___1 ; { status = 0; time->tv_sec = 0L; time->tv_nsec = 0L; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 2097151UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } tmp___0 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 2097152UL) != 0UL, 1L); if (tmp___0 != 0L) { status = decode_attr_time(xdr, time); if (status == 0) { status = 8192; } 
else { } *(bitmap + 1UL) = *(bitmap + 1UL) & 4292870143U; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: mtime=%ld\n", "decode_attr_time_modify", time->tv_sec); } else { } return (status); } } static int verify_attr_len(struct xdr_stream *xdr , unsigned int savep , uint32_t attrlen ) { unsigned int attrwords ; unsigned int nwords ; unsigned int tmp ; long tmp___0 ; long tmp___1 ; { attrwords = (attrlen + 3U) >> 2; tmp = xdr_stream_pos((struct xdr_stream const *)xdr); nwords = (tmp - savep) >> 2; tmp___1 = ldv__builtin_expect(attrwords != nwords, 0L); if (tmp___1 != 0L) { tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: server returned incorrect attribute length: %u %c %u\n", "verify_attr_len", attrwords << 2, attrwords < nwords ? 60 : 62, nwords << 2); } else { } return (-5); } else { } return (0); } } static int decode_change_info(struct xdr_stream *xdr , struct nfs4_change_info *cinfo ) { __be32 *p ; long tmp ; __be32 *tmp___0 ; { p = xdr_inline_decode(xdr, 20UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; cinfo->atomic = __be32_to_cpup((__be32 const *)tmp___0); p = xdr_decode_hyper(p, & cinfo->before); xdr_decode_hyper(p, & cinfo->after); return (0); out_overflow: print_overflow_msg("decode_change_info", (struct xdr_stream const *)xdr); return (-5); } } static int decode_access(struct xdr_stream *xdr , u32 *supported , u32 *access ) { __be32 *p ; uint32_t supp ; uint32_t acc ; int status ; long tmp ; __be32 *tmp___0 ; { status = decode_op_hdr(xdr, 3); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; supp = __be32_to_cpup((__be32 const *)tmp___0); acc = __be32_to_cpup((__be32 const *)p); *supported = supp; *access = acc; return (0); out_overflow: print_overflow_msg("decode_access", (struct xdr_stream const *)xdr); return (-5); } } static int decode_opaque_fixed(struct xdr_stream *xdr , void *buf , size_t len ) { __be32 *p ; size_t __len ; void *__ret ; long tmp ; { p = xdr_inline_decode(xdr, len); tmp = ldv__builtin_expect((unsigned long )p != (unsigned long )((__be32 *)0), 1L); if (tmp != 0L) { __len = len; __ret = memcpy(buf, (void const *)p, __len); return (0); } else { } print_overflow_msg("decode_opaque_fixed", (struct xdr_stream const *)xdr); return (-5); } } static int decode_stateid(struct xdr_stream *xdr , nfs4_stateid *stateid ) { int tmp ; { tmp = decode_opaque_fixed(xdr, (void *)stateid, 16UL); return (tmp); } } static int decode_close(struct xdr_stream *xdr , struct nfs_closeres *res ) { int status ; { status = decode_op_hdr(xdr, 4); if (status != -5) { nfs_increment_open_seqid(status, res->seqid); } else { } if (status == 0) { status = decode_stateid(xdr, & res->stateid); } else { } return (status); } } static int decode_verifier(struct xdr_stream *xdr , void *verifier ) { int tmp ; { tmp = decode_opaque_fixed(xdr, verifier, 8UL); return (tmp); } } static int decode_write_verifier(struct xdr_stream *xdr , struct nfs_write_verifier *verifier ) { int tmp ; { tmp = decode_opaque_fixed(xdr, (void *)(& verifier->data), 8UL); return (tmp); } } static int decode_commit(struct xdr_stream *xdr , struct nfs_commitres *res ) { int status ; { status = decode_op_hdr(xdr, 5); if (status 
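/*
 * Annotation: verify_attr_len() above cross-checks the advertised attribute length
 * against how far the stream actually advanced.  The decoders here hand
 * decode_op_hdr() bare nfs_opnum4 values: 3 is ACCESS, 4 CLOSE, 5 COMMIT, 6 CREATE
 * and 9 GETATTR.
 */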
== 0) { status = decode_write_verifier(xdr, & (res->verf)->verifier); } else { } return (status); } } static int decode_create(struct xdr_stream *xdr , struct nfs4_change_info *cinfo ) { __be32 *p ; uint32_t bmlen ; int status ; long tmp ; long tmp___0 ; { status = decode_op_hdr(xdr, 6); if (status != 0) { return (status); } else { } status = decode_change_info(xdr, cinfo); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } bmlen = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )(bmlen << 2)); tmp___0 = ldv__builtin_expect((unsigned long )p != (unsigned long )((__be32 *)0), 1L); if (tmp___0 != 0L) { return (0); } else { } out_overflow: print_overflow_msg("decode_create", (struct xdr_stream const *)xdr); return (-5); } } static int decode_server_caps(struct xdr_stream *xdr , struct nfs4_server_caps_res *res ) { unsigned int savep ; uint32_t attrlen ; uint32_t bitmap[3U] ; unsigned int tmp ; int status ; long tmp___0 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } status = decode_op_hdr(xdr, 9); if (status != 0) { goto xdr_error; } else { } status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status != 0) { goto xdr_error; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status != 0) { goto xdr_error; } else { } status = decode_attr_supported(xdr, (uint32_t *)(& bitmap), (uint32_t *)(& res->attr_bitmask)); if (status != 0) { goto xdr_error; } else { } status = decode_attr_fh_expire_type(xdr, (uint32_t *)(& bitmap), & res->fh_expire_type); if (status != 0) { goto xdr_error; } else { } status = decode_attr_link_support(xdr, (uint32_t *)(& bitmap), & res->has_links); if (status != 0) { goto xdr_error; } else { } status = decode_attr_symlink_support(xdr, (uint32_t *)(& bitmap), & res->has_symlinks); if (status != 0) { goto xdr_error; } else { } status = decode_attr_aclsupport(xdr, (uint32_t *)(& bitmap), & res->acl_bitmask); if (status != 0) { goto xdr_error; } else { } status = verify_attr_len(xdr, savep, attrlen); xdr_error: tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: xdr returned %d!\n", "decode_server_caps", - status); } else { } return (status); } } static int decode_statfs(struct xdr_stream *xdr , struct nfs_fsstat *fsstat ) { unsigned int savep ; uint32_t attrlen ; uint32_t bitmap[3U] ; unsigned int tmp ; int status ; long tmp___0 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } status = decode_op_hdr(xdr, 9); if (status != 0) { goto xdr_error; } else { } status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status != 0) { goto xdr_error; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status != 0) { goto xdr_error; } else { } status = decode_attr_files_avail(xdr, (uint32_t *)(& bitmap), & fsstat->afiles); if (status != 0) { goto xdr_error; } else { } status = decode_attr_files_free(xdr, (uint32_t *)(& bitmap), & fsstat->ffiles); if (status != 0) { goto xdr_error; } else { } status = decode_attr_files_total(xdr, (uint32_t *)(& bitmap), & fsstat->tfiles); if (status != 0) { goto xdr_error; } else { } status = decode_attr_space_avail(xdr, (uint32_t *)(& bitmap), & fsstat->abytes); if (status != 0) { goto xdr_error; } else { } status = decode_attr_space_free(xdr, (uint32_t 
*)(& bitmap), & fsstat->fbytes); if (status != 0) { goto xdr_error; } else { } status = decode_attr_space_total(xdr, (uint32_t *)(& bitmap), & fsstat->tbytes); if (status != 0) { goto xdr_error; } else { } status = verify_attr_len(xdr, savep, attrlen); xdr_error: tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: xdr returned %d!\n", "decode_statfs", - status); } else { } return (status); } } static int decode_pathconf(struct xdr_stream *xdr , struct nfs_pathconf *pathconf ) { unsigned int savep ; uint32_t attrlen ; uint32_t bitmap[3U] ; unsigned int tmp ; int status ; long tmp___0 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } status = decode_op_hdr(xdr, 9); if (status != 0) { goto xdr_error; } else { } status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status != 0) { goto xdr_error; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status != 0) { goto xdr_error; } else { } status = decode_attr_maxlink(xdr, (uint32_t *)(& bitmap), & pathconf->max_link); if (status != 0) { goto xdr_error; } else { } status = decode_attr_maxname(xdr, (uint32_t *)(& bitmap), & pathconf->max_namelen); if (status != 0) { goto xdr_error; } else { } status = verify_attr_len(xdr, savep, attrlen); xdr_error: tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: xdr returned %d!\n", "decode_pathconf", - status); } else { } return (status); } } static int decode_threshold_hint(struct xdr_stream *xdr , uint32_t *bitmap , uint64_t *res , uint32_t hint_bit ) { __be32 *p ; long tmp ; long tmp___0 ; { *res = 0ULL; tmp___0 = ldv__builtin_expect((*bitmap & hint_bit) != 0U, 1L); if (tmp___0 != 0L) { p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, res); } else { } return (0); out_overflow: print_overflow_msg("decode_threshold_hint", (struct xdr_stream const *)xdr); return (-5); } } static int decode_first_threshold_item4(struct xdr_stream *xdr , struct nfs4_threshold *res ) { __be32 *p ; unsigned int savep ; uint32_t bitmap[3U] ; unsigned int tmp ; uint32_t attrlen ; int status ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { print_overflow_msg("decode_first_threshold_item4", (struct xdr_stream const *)xdr); return (-5); } else { } res->l_type = __be32_to_cpup((__be32 const *)p); status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status < 0) { goto xdr_error; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status < 0) { goto xdr_error; } else { } status = decode_threshold_hint(xdr, (uint32_t *)(& bitmap), & res->rd_sz, 1U); if (status < 0) { goto xdr_error; } else { } status = decode_threshold_hint(xdr, (uint32_t *)(& bitmap), & res->wr_sz, 2U); if (status < 0) { goto xdr_error; } else { } status = decode_threshold_hint(xdr, (uint32_t *)(& bitmap), & res->rd_io_sz, 4U); if (status < 0) { goto xdr_error; } else { } status = decode_threshold_hint(xdr, (uint32_t *)(& bitmap), & res->wr_io_sz, 8U); if (status < 0) { goto xdr_error; } else { } status = verify_attr_len(xdr, savep, attrlen); res->bm = bitmap[0]; tmp___1 = 
ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n", "decode_first_threshold_item4", res->bm, res->rd_sz, res->wr_sz, res->rd_io_sz, res->wr_io_sz); } else { } xdr_error: tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s ret=%d!\n", "decode_first_threshold_item4", status); } else { } return (status); } } static int decode_attr_mdsthreshold(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs4_threshold *res ) { __be32 *p ; int status ; uint32_t num ; long tmp ; long tmp___0 ; long tmp___1 ; { status = 0; tmp = ldv__builtin_expect(((unsigned long )*(bitmap + 2UL) & 15UL) != 0UL, 0L); if (tmp != 0L) { return (-5); } else { } if (((unsigned long )*(bitmap + 2UL) & 16UL) != 0UL) { tmp___0 = ldv__builtin_expect((unsigned long )res == (unsigned long )((struct nfs4_threshold *)0), 0L); if (tmp___0 != 0L) { return (-121); } else { } p = xdr_inline_decode(xdr, 4UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } num = __be32_to_cpup((__be32 const *)p); if (num == 0U) { return (0); } else { } if (num > 1U) { printk("\016%s: Warning: Multiple pNFS layout drivers per filesystem not supported\n", "decode_attr_mdsthreshold"); } else { } status = decode_first_threshold_item4(xdr, res); *(bitmap + 2UL) = *(bitmap + 2UL) & 4294967279U; } else { } return (status); out_overflow: print_overflow_msg("decode_attr_mdsthreshold", (struct xdr_stream const *)xdr); return (-5); } } static int decode_getfattr_attrs(struct xdr_stream *xdr , uint32_t *bitmap , struct nfs_fattr *fattr , struct nfs_fh *fh , struct nfs4_fs_locations *fs_loc , struct nfs_server const *server ) { int status ; umode_t fmode ; uint32_t type ; int32_t err ; long tmp ; { fmode = 0U; status = decode_attr_type(xdr, bitmap, & type); if (status < 0) { goto xdr_error; } else { } fattr->mode = 0U; if (status != 0) { fattr->mode = (umode_t )((int )fattr->mode | (int )((unsigned short )nfs_type2fmt[type])); fattr->valid = fattr->valid | (unsigned int )status; } else { } status = decode_attr_change(xdr, bitmap, & fattr->change_attr); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_size(xdr, bitmap, & fattr->size); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_fsid(xdr, bitmap, & fattr->fsid); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; err = 0; status = decode_attr_error(xdr, bitmap, & err); if (status < 0) { goto xdr_error; } else { } status = decode_attr_filehandle(xdr, bitmap, fh); if (status < 0) { goto xdr_error; } else { } status = decode_attr_fileid(xdr, bitmap, & fattr->fileid); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_fs_locations(xdr, bitmap, fs_loc); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_mode(xdr, bitmap, & fmode); if (status < 0) { goto xdr_error; } else { } if (status != 0) { fattr->mode = (umode_t )((int )fattr->mode | (int )fmode); fattr->valid = fattr->valid | (unsigned int )status; } else { } status = decode_attr_nlink(xdr, bitmap, & fattr->nlink); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = 
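/*
 * In decode_getfattr_attrs() each decode_attr_*() helper returns a
 * negative errno on failure or a validity flag on success; the flag is
 * OR'ed into fattr->valid, so a zero return simply means the attribute
 * was absent from the server's bitmap and fattr->valid is left unchanged.
 */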
decode_attr_owner(xdr, bitmap, server, & fattr->uid, fattr->owner_name); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_group(xdr, bitmap, server, & fattr->gid, fattr->group_name); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_rdev(xdr, bitmap, & fattr->rdev); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_space_used(xdr, bitmap, & fattr->du.nfs3.used); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_time_access(xdr, bitmap, & fattr->atime); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_time_metadata(xdr, bitmap, & fattr->ctime); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_time_modify(xdr, bitmap, & fattr->mtime); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_mounted_on_fileid(xdr, bitmap, & fattr->mounted_on_fileid); if (status < 0) { goto xdr_error; } else { } fattr->valid = fattr->valid | (unsigned int )status; status = decode_attr_mdsthreshold(xdr, bitmap, fattr->mdsthreshold); if (status < 0) { } else { } xdr_error: tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: xdr returned %d\n", "decode_getfattr_attrs", - status); } else { } return (status); } } static int decode_getfattr_generic(struct xdr_stream *xdr , struct nfs_fattr *fattr , struct nfs_fh *fh , struct nfs4_fs_locations *fs_loc , struct nfs_server const *server ) { unsigned int savep ; uint32_t attrlen ; uint32_t bitmap[3U] ; unsigned int tmp ; int status ; long tmp___0 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } status = decode_op_hdr(xdr, 9); if (status < 0) { goto xdr_error; } else { } status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status < 0) { goto xdr_error; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status < 0) { goto xdr_error; } else { } status = decode_getfattr_attrs(xdr, (uint32_t *)(& bitmap), fattr, fh, fs_loc, server); if (status < 0) { goto xdr_error; } else { } status = verify_attr_len(xdr, savep, attrlen); xdr_error: tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: xdr returned %d\n", "decode_getfattr_generic", - status); } else { } return (status); } } static int decode_getfattr(struct xdr_stream *xdr , struct nfs_fattr *fattr , struct nfs_server const *server ) { int tmp ; { tmp = decode_getfattr_generic(xdr, fattr, 0, 0, server); return (tmp); } } static int decode_first_pnfs_layout_type(struct xdr_stream *xdr , uint32_t *layouttype ) { uint32_t *p ; int num ; __be32 *tmp ; long tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; long tmp___3 ; { tmp = xdr_inline_decode(xdr, 4UL); p = tmp; tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((uint32_t *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } tmp___1 = __be32_to_cpup((__be32 const *)p); num = (int )tmp___1; if (num == 0) { *layouttype = 0U; return (0); } else { } if (num > 1) { printk("\016NFS: %s: Warning: Multiple pNFS layout drivers per filesystem not supported\n", "decode_first_pnfs_layout_type"); } else { } tmp___2 = 
xdr_inline_decode(xdr, (size_t )(num * 4)); p = tmp___2; tmp___3 = ldv__builtin_expect((unsigned long )p == (unsigned long )((uint32_t *)0), 0L); if (tmp___3 != 0L) { goto out_overflow; } else { } *layouttype = __be32_to_cpup((__be32 const *)p); return (0); out_overflow: print_overflow_msg("decode_first_pnfs_layout_type", (struct xdr_stream const *)xdr); return (-5); } } static int decode_attr_pnfstype(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *layouttype ) { int status ; long tmp ; long tmp___0 ; { status = 0; tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: bitmap is %x\n", "decode_attr_pnfstype", *(bitmap + 1UL)); } else { } tmp___0 = ldv__builtin_expect(((unsigned long )*(bitmap + 1UL) & 1073741823UL) != 0UL, 0L); if (tmp___0 != 0L) { return (-5); } else { } if (((unsigned long )*(bitmap + 1UL) & 1073741824UL) != 0UL) { status = decode_first_pnfs_layout_type(xdr, layouttype); *(bitmap + 1UL) = *(bitmap + 1UL) & 3221225471U; } else { *layouttype = 0U; } return (status); } } static int decode_attr_layout_blksize(struct xdr_stream *xdr , uint32_t *bitmap , uint32_t *res ) { __be32 *p ; long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: bitmap is %x\n", "decode_attr_layout_blksize", *(bitmap + 2UL)); } else { } *res = 0U; if (((unsigned long )*(bitmap + 2UL) & 2UL) != 0UL) { p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { print_overflow_msg("decode_attr_layout_blksize", (struct xdr_stream const *)xdr); return (-5); } else { } *res = __be32_to_cpup((__be32 const *)p); *(bitmap + 2UL) = *(bitmap + 2UL) & 4294967293U; } else { } return (0); } } static int decode_fsinfo(struct xdr_stream *xdr , struct nfs_fsinfo *fsinfo ) { unsigned int savep ; uint32_t attrlen ; uint32_t bitmap[3U] ; int status ; __u32 tmp ; __u32 tmp___0 ; long tmp___1 ; { status = decode_op_hdr(xdr, 9); if (status != 0) { goto xdr_error; } else { } status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status != 0) { goto xdr_error; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status != 0) { goto xdr_error; } else { } tmp = 512U; fsinfo->wtmult = tmp; fsinfo->rtmult = tmp; status = decode_attr_lease_time(xdr, (uint32_t *)(& bitmap), & fsinfo->lease_time); if (status != 0) { goto xdr_error; } else { } status = decode_attr_maxfilesize(xdr, (uint32_t *)(& bitmap), & fsinfo->maxfilesize); if (status != 0) { goto xdr_error; } else { } status = decode_attr_maxread(xdr, (uint32_t *)(& bitmap), & fsinfo->rtmax); if (status != 0) { goto xdr_error; } else { } tmp___0 = fsinfo->rtmax; fsinfo->dtpref = tmp___0; fsinfo->rtpref = tmp___0; status = decode_attr_maxwrite(xdr, (uint32_t *)(& bitmap), & fsinfo->wtmax); if (status != 0) { goto xdr_error; } else { } fsinfo->wtpref = fsinfo->wtmax; status = decode_attr_time_delta(xdr, (uint32_t *)(& bitmap), & fsinfo->time_delta); if (status != 0) { goto xdr_error; } else { } status = decode_attr_pnfstype(xdr, (uint32_t *)(& bitmap), & fsinfo->layouttype); if (status != 0) { goto xdr_error; } else { } status = decode_attr_layout_blksize(xdr, (uint32_t *)(& bitmap), & fsinfo->blksize); if (status != 0) { goto xdr_error; } else { } status = verify_attr_len(xdr, savep, attrlen); xdr_error: tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: xdr returned %d!\n", "decode_fsinfo", - status); } else { } return 
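/*
 * decode_fsinfo() above defaults wtmult/rtmult to 512, derives
 * rtpref/dtpref from the decoded rtmax and wtpref from wtmax, and also
 * picks up the pNFS layout type and layout block size attributes.
 * decode_getfh() below rejects file handles longer than 128 bytes
 * (returning -5, likely -EIO) before copying the opaque data into fh->data.
 */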
(status); } } static int decode_getfh(struct xdr_stream *xdr , struct nfs_fh *fh ) { __be32 *p ; uint32_t len ; int status ; long tmp ; long tmp___0 ; size_t __len ; void *__ret ; { memset((void *)fh, 0, 130UL); status = decode_op_hdr(xdr, 10); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); if (len > 128U) { return (-5); } else { } fh->size = (unsigned short )len; p = xdr_inline_decode(xdr, (size_t )len); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } __len = (size_t )len; __ret = memcpy((void *)(& fh->data), (void const *)p, __len); return (0); out_overflow: print_overflow_msg("decode_getfh", (struct xdr_stream const *)xdr); return (-5); } } static int decode_link(struct xdr_stream *xdr , struct nfs4_change_info *cinfo ) { int status ; int tmp ; { status = decode_op_hdr(xdr, 11); if (status != 0) { return (status); } else { } tmp = decode_change_info(xdr, cinfo); return (tmp); } } static int decode_lock_denied(struct xdr_stream *xdr , struct file_lock *fl ) { uint64_t offset ; uint64_t length ; uint64_t clientid ; __be32 *p ; uint32_t namelen ; uint32_t type ; long tmp ; __be32 *tmp___0 ; long tmp___1 ; { p = xdr_inline_decode(xdr, 32UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & offset); p = xdr_decode_hyper(p, & length); tmp___0 = p; p = p + 1; type = __be32_to_cpup((__be32 const *)tmp___0); if ((unsigned long )fl != (unsigned long )((struct file_lock *)0)) { fl->fl_start = (long long )offset; fl->fl_end = (fl->fl_start + (long long )length) + -1LL; if (length == 0xffffffffffffffffULL) { fl->fl_end = 9223372036854775807LL; } else { } fl->fl_type = 1U; if ((int )type & 1) { fl->fl_type = 0U; } else { } fl->fl_pid = 0U; } else { } p = xdr_decode_hyper(p, & clientid); namelen = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )namelen); tmp___1 = ldv__builtin_expect((unsigned long )p != (unsigned long )((__be32 *)0), 1L); if (tmp___1 != 0L) { return (-10010); } else { } out_overflow: print_overflow_msg("decode_lock_denied", (struct xdr_stream const *)xdr); return (-5); } } static int decode_lock(struct xdr_stream *xdr , struct nfs_lock_res *res ) { int status ; long tmp ; { status = decode_op_hdr(xdr, 12); if (status == -5) { goto out; } else { } if (status == 0) { status = decode_stateid(xdr, & res->stateid); tmp = ldv__builtin_expect(status != 0, 0L); if (tmp != 0L) { goto out; } else { } } else if (status == -10010) { status = decode_lock_denied(xdr, 0); } else { } if ((unsigned long )res->open_seqid != (unsigned long )((struct nfs_seqid *)0)) { nfs_increment_open_seqid(status, res->open_seqid); } else { } nfs_increment_lock_seqid(status, res->lock_seqid); out: ; return (status); } } static int decode_lockt(struct xdr_stream *xdr , struct nfs_lockt_res *res ) { int status ; int tmp ; { status = decode_op_hdr(xdr, 13); if (status == -10010) { tmp = decode_lock_denied(xdr, res->denied); return (tmp); } else { } return (status); } } static int decode_locku(struct xdr_stream *xdr , struct nfs_locku_res *res ) { int status ; { status = decode_op_hdr(xdr, 14); if (status != -5) { nfs_increment_lock_seqid(status, res->seqid); } else { } if (status == 0) { status = 
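/*
 * The lock-family decoders here bump the open/lock sequence ids via
 * nfs_increment_open_seqid()/nfs_increment_lock_seqid() for every reply
 * except a short read (-5, likely -EIO); decode_lock() and decode_lockt()
 * fall back to decode_lock_denied() when the server answers with
 * NFS4ERR_DENIED (-10010).
 */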
decode_stateid(xdr, & res->stateid); } else { } return (status); } } static int decode_release_lockowner(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 39); return (tmp); } } static int decode_lookup(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 15); return (tmp); } } static int decode_space_limit(struct xdr_stream *xdr , u64 *maxsize ) { __be32 *p ; uint32_t limit_type ; uint32_t nblocks ; uint32_t blocksize ; long tmp ; __be32 *tmp___0 ; __be32 *tmp___1 ; { p = xdr_inline_decode(xdr, 12UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; limit_type = __be32_to_cpup((__be32 const *)tmp___0); switch (limit_type) { case 1U: xdr_decode_hyper(p, maxsize); goto ldv_50911; case 2U: tmp___1 = p; p = p + 1; nblocks = __be32_to_cpup((__be32 const *)tmp___1); blocksize = __be32_to_cpup((__be32 const *)p); *maxsize = (unsigned long long )nblocks * (unsigned long long )blocksize; } ldv_50911: ; return (0); out_overflow: print_overflow_msg("decode_space_limit", (struct xdr_stream const *)xdr); return (-5); } } static int decode_delegation(struct xdr_stream *xdr , struct nfs_openres *res ) { __be32 *p ; uint32_t delegation_type ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; int tmp___2 ; int tmp___3 ; { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } delegation_type = __be32_to_cpup((__be32 const *)p); if (delegation_type == 0U) { res->delegation_type = 0U; return (0); } else { } status = decode_stateid(xdr, & res->delegation); tmp___0 = ldv__builtin_expect(status != 0, 0L); if (tmp___0 != 0L) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } res->do_recall = __be32_to_cpup((__be32 const *)p); switch (delegation_type) { case 1U: res->delegation_type = 1U; goto ldv_50923; case 2U: res->delegation_type = 3U; tmp___2 = decode_space_limit(xdr, & res->maxsize); if (tmp___2 < 0) { return (-5); } else { } } ldv_50923: tmp___3 = decode_ace(xdr, 0, (res->server)->nfs_client); return (tmp___3); out_overflow: print_overflow_msg("decode_delegation", (struct xdr_stream const *)xdr); return (-5); } } static int decode_open(struct xdr_stream *xdr , struct nfs_openres *res ) { __be32 *p ; uint32_t savewords ; uint32_t bmlen ; uint32_t i ; int status ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; long tmp___2 ; uint32_t __min1 ; uint32_t __min2 ; __be32 *tmp___3 ; int tmp___4 ; long tmp___5 ; { status = decode_op_hdr(xdr, 18); if (status != -5) { nfs_increment_open_seqid(status, res->seqid); } else { } if (status == 0) { status = decode_stateid(xdr, & res->stateid); } else { } tmp = ldv__builtin_expect(status != 0, 0L); if (tmp != 0L) { return (status); } else { } decode_change_info(xdr, & res->cinfo); p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } tmp___1 = p; p = p + 1; res->rflags = __be32_to_cpup((__be32 const *)tmp___1); bmlen = __be32_to_cpup((__be32 const *)p); if (bmlen > 10U) { goto xdr_error; } else { } p = xdr_inline_decode(xdr, (size_t )(bmlen << 2)); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } 
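/*
 * decode_delegation() above maps the on-the-wire delegation type: 0 means
 * no delegation, 1 a read delegation, and 2 a write delegation whose
 * space limit is folded into res->maxsize by decode_space_limit()
 * (limit type 1 is an absolute size, type 2 is nblocks * blocksize).
 * decode_open() rejects attrset bitmaps longer than 10 words, copies at
 * most two words of the bitmap and zero-fills the rest before decoding
 * the delegation.
 */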
else { } __min1 = bmlen; __min2 = 2U; savewords = __min1 < __min2 ? __min1 : __min2; i = 0U; goto ldv_50941; ldv_50940: tmp___3 = p; p = p + 1; res->attrset[i] = __be32_to_cpup((__be32 const *)tmp___3); i = i + 1U; ldv_50941: ; if (i < savewords) { goto ldv_50940; } else { } goto ldv_50944; ldv_50943: res->attrset[i] = 0U; i = i + 1U; ldv_50944: ; if (i <= 1U) { goto ldv_50943; } else { } tmp___4 = decode_delegation(xdr, res); return (tmp___4); xdr_error: tmp___5 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d%s: Bitmap too large! Length = %u\n", "decode_open", bmlen); } else { } return (-5); out_overflow: print_overflow_msg("decode_open", (struct xdr_stream const *)xdr); return (-5); } } static int decode_open_confirm(struct xdr_stream *xdr , struct nfs_open_confirmres *res ) { int status ; { status = decode_op_hdr(xdr, 20); if (status != -5) { nfs_increment_open_seqid(status, res->seqid); } else { } if (status == 0) { status = decode_stateid(xdr, & res->stateid); } else { } return (status); } } static int decode_open_downgrade(struct xdr_stream *xdr , struct nfs_closeres *res ) { int status ; { status = decode_op_hdr(xdr, 21); if (status != -5) { nfs_increment_open_seqid(status, res->seqid); } else { } if (status == 0) { status = decode_stateid(xdr, & res->stateid); } else { } return (status); } } static int decode_putfh(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 22); return (tmp); } } static int decode_putrootfh(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 24); return (tmp); } } static int decode_read(struct xdr_stream *xdr , struct rpc_rqst *req , struct nfs_readres *res ) { __be32 *p ; uint32_t count ; uint32_t eof ; uint32_t recvd ; int status ; long tmp ; __be32 *tmp___0 ; long tmp___1 ; { status = decode_op_hdr(xdr, 25); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; eof = __be32_to_cpup((__be32 const *)tmp___0); count = __be32_to_cpup((__be32 const *)p); recvd = xdr_read_pages(xdr, count); if (count > recvd) { tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: server cheating in read reply: count %u > recvd %u\n", count, recvd); } else { } count = recvd; eof = 0U; } else { } res->eof = (int )eof; res->count = count; return (0); out_overflow: print_overflow_msg("decode_read", (struct xdr_stream const *)xdr); return (-5); } } static int decode_readdir(struct xdr_stream *xdr , struct rpc_rqst *req , struct nfs4_readdir_res *readdir ) { int status ; __be32 verf[2U] ; long tmp ; size_t __len ; void *__ret ; long tmp___0 ; unsigned int tmp___1 ; { status = decode_op_hdr(xdr, 26); if (status == 0) { status = decode_verifier(xdr, (void *)(& readdir->verifier.data)); } else { } tmp = ldv__builtin_expect(status != 0, 0L); if (tmp != 0L) { return (status); } else { } __len = 8UL; if (__len > 63UL) { __ret = memcpy((void *)(& verf), (void const *)(& readdir->verifier.data), __len); } else { __ret = memcpy((void *)(& verf), (void const *)(& readdir->verifier.data), __len); } tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: verifier = %08x:%08x\n", "decode_readdir", verf[0], verf[1]); } else { } tmp___1 = xdr_read_pages(xdr, (xdr->buf)->page_len); return ((int )tmp___1); } } static int decode_readlink(struct xdr_stream *xdr , struct 
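/*
 * decode_read() above clamps the returned byte count to what was actually
 * received (logging a "server cheating" message) and clears the EOF flag
 * when it does so. decode_readlink() below rejects symlink targets that
 * are empty or do not fit in the receive buffer (-36, likely
 * -ENAMETOOLONG) and NUL-terminates the target via xdr_terminate_string().
 */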
rpc_rqst *req ) { struct xdr_buf *rcvbuf ; u32 len ; u32 recvd ; __be32 *p ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; { rcvbuf = & req->rq_rcv_buf; status = decode_op_hdr(xdr, 27); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); if (rcvbuf->page_len <= len || len == 0U) { tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs: server returned giant symlink!\n"); } else { } return (-36); } else { } recvd = xdr_read_pages(xdr, len); if (recvd < len) { tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: server cheating in readlink reply: count %u > recvd %u\n", len, recvd); } else { } return (-5); } else { } xdr_terminate_string(rcvbuf, len); return (0); out_overflow: print_overflow_msg("decode_readlink", (struct xdr_stream const *)xdr); return (-5); } } static int decode_remove(struct xdr_stream *xdr , struct nfs4_change_info *cinfo ) { int status ; { status = decode_op_hdr(xdr, 28); if (status != 0) { goto out; } else { } status = decode_change_info(xdr, cinfo); out: ; return (status); } } static int decode_rename(struct xdr_stream *xdr , struct nfs4_change_info *old_cinfo , struct nfs4_change_info *new_cinfo ) { int status ; { status = decode_op_hdr(xdr, 29); if (status != 0) { goto out; } else { } status = decode_change_info(xdr, old_cinfo); if (status != 0) { goto out; } else { } status = decode_change_info(xdr, new_cinfo); out: ; return (status); } } static int decode_renew(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 30); return (tmp); } } static int decode_restorefh(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 31); return (tmp); } } static int decode_getacl(struct xdr_stream *xdr , struct rpc_rqst *req , struct nfs_getaclres *res ) { unsigned int savep ; uint32_t attrlen ; uint32_t bitmap[3U] ; unsigned int tmp ; int status ; unsigned int pg_offset ; long tmp___0 ; unsigned int tmp___1 ; long tmp___2 ; long tmp___3 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } res->acl_len = 0UL; status = decode_op_hdr(xdr, 9); if (status != 0) { goto out; } else { } xdr_enter_page(xdr, (xdr->buf)->page_len); pg_offset = (unsigned int )(xdr->buf)->head[0].iov_len; status = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (status != 0) { goto out; } else { } status = decode_attr_length(xdr, & attrlen, & savep); if (status != 0) { goto out; } else { } tmp___0 = ldv__builtin_expect(((unsigned long )bitmap[0] & 4095UL) != 0UL, 0L); if (tmp___0 != 0L) { return (-5); } else { } tmp___3 = ldv__builtin_expect(((unsigned long )bitmap[0] & 4096UL) != 0UL, 1L); if (tmp___3 != 0L) { tmp___1 = xdr_stream_pos((struct xdr_stream const *)xdr); res->acl_data_offset = (size_t )(tmp___1 - pg_offset); res->acl_len = (size_t )attrlen; if (res->acl_len > (size_t )(xdr->nwords << 2) || res->acl_len + res->acl_data_offset > (size_t )(xdr->buf)->page_len) { res->acl_flags = res->acl_flags | 1; tmp___2 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001dNFS: acl reply: attrlen %u > page_len %u\n", attrlen, xdr->nwords << 2); } else { } } else { } } else { status = -95; } out: ; return (status); } } static int decode_savefh(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 
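/*
 * decode_getacl() above records where the ACL data starts relative to the
 * page data (acl_data_offset) and its length; when the ACL does not fit
 * in the reply pages it sets bit 0 of acl_flags (likely a truncation
 * flag) instead of failing, and a reply without the ACL attribute bit
 * yields -95 (likely -EOPNOTSUPP).
 */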
32); return (tmp); } } static int decode_setattr(struct xdr_stream *xdr ) { __be32 *p ; uint32_t bmlen ; int status ; long tmp ; long tmp___0 ; { status = decode_op_hdr(xdr, 34); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } bmlen = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )(bmlen << 2)); tmp___0 = ldv__builtin_expect((unsigned long )p != (unsigned long )((__be32 *)0), 1L); if (tmp___0 != 0L) { return (0); } else { } out_overflow: print_overflow_msg("decode_setattr", (struct xdr_stream const *)xdr); return (-5); } } static int decode_setclientid(struct xdr_stream *xdr , struct nfs4_setclientid_res *res ) { __be32 *p ; uint32_t opnum ; int32_t nfserr ; long tmp ; __be32 *tmp___0 ; long tmp___1 ; __u32 tmp___2 ; long tmp___3 ; size_t __len ; void *__ret ; uint32_t len ; long tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; int tmp___8 ; { p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; opnum = __be32_to_cpup((__be32 const *)tmp___0); if (opnum != 35U) { tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dnfs: decode_setclientid: Server returned operation %d\n", opnum); } else { } return (-5); } else { } tmp___2 = __be32_to_cpup((__be32 const *)p); nfserr = (int32_t )tmp___2; if (nfserr == 0) { p = xdr_inline_decode(xdr, 16UL); tmp___3 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___3 != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & res->clientid); __len = 8UL; if (__len > 63UL) { __ret = memcpy((void *)(& res->confirm.data), (void const *)p, __len); } else { __ret = memcpy((void *)(& res->confirm.data), (void const *)p, __len); } } else if (nfserr == 10017) { p = xdr_inline_decode(xdr, 4UL); tmp___4 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___4 != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )len); tmp___5 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___5 != 0L) { goto out_overflow; } else { } p = xdr_inline_decode(xdr, 4UL); tmp___6 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___6 != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t )len); tmp___7 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___7 != 0L) { goto out_overflow; } else { } return (-10017); } else { tmp___8 = nfs4_stat_to_errno(nfserr); return (tmp___8); } return (0); out_overflow: print_overflow_msg("decode_setclientid", (struct xdr_stream const *)xdr); return (-5); } } static int decode_setclientid_confirm(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 36); return (tmp); } } static int decode_write(struct xdr_stream *xdr , struct nfs_writeres *res ) { __be32 *p ; int status ; long tmp ; __be32 *tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; int tmp___3 ; { status = decode_op_hdr(xdr, 38); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { 
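/*
 * decode_setclientid() above parses its op header by hand: the opnum must
 * be 35 (likely OP_SETCLIENTID). On success it reads the client id and the
 * 8-byte confirm verifier; an nfserr of 10017 (likely NFS4ERR_CLID_INUSE)
 * skips the two opaque strings that identify the conflicting client and
 * returns -10017; any other error is mapped through nfs4_stat_to_errno().
 */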
} tmp___0 = p; p = p + 1; res->count = __be32_to_cpup((__be32 const *)tmp___0); tmp___1 = p; p = p + 1; tmp___2 = __be32_to_cpup((__be32 const *)tmp___1); (res->verf)->committed = (enum nfs3_stable_how )tmp___2; tmp___3 = decode_write_verifier(xdr, & (res->verf)->verifier); return (tmp___3); out_overflow: print_overflow_msg("decode_write", (struct xdr_stream const *)xdr); return (-5); } } static int decode_delegreturn(struct xdr_stream *xdr ) { int tmp ; { tmp = decode_op_hdr(xdr, 8); return (tmp); } } static int decode_secinfo_gss(struct xdr_stream *xdr , struct nfs4_secinfo_flavor *flavor ) { __be32 *p ; long tmp ; long tmp___0 ; size_t __len ; void *__ret ; long tmp___1 ; __be32 *tmp___2 ; { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } flavor->gss.sec_oid4.len = __be32_to_cpup((__be32 const *)p); if (flavor->gss.sec_oid4.len > 32U) { goto out_err; } else { } p = xdr_inline_decode(xdr, (size_t )flavor->gss.sec_oid4.len); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } __len = (size_t )flavor->gss.sec_oid4.len; __ret = memcpy((void *)(& flavor->gss.sec_oid4.data), (void const *)p, __len); p = xdr_inline_decode(xdr, 8UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } tmp___2 = p; p = p + 1; flavor->gss.qop4 = __be32_to_cpup((__be32 const *)tmp___2); flavor->gss.service = __be32_to_cpup((__be32 const *)p); return (0); out_overflow: print_overflow_msg("decode_secinfo_gss", (struct xdr_stream const *)xdr); return (-5); out_err: ; return (-22); } } static int decode_secinfo_common(struct xdr_stream *xdr , struct nfs4_secinfo_res *res ) { struct nfs4_secinfo_flavor *sec_flavor ; int status ; __be32 *p ; int i ; int num_flavors ; long tmp ; __u32 tmp___0 ; long tmp___1 ; { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } (res->flavors)->num_flavors = 0U; tmp___0 = __be32_to_cpup((__be32 const *)p); num_flavors = (int )tmp___0; i = 0; goto ldv_51089; ldv_51088: sec_flavor = (struct nfs4_secinfo_flavor *)(& (res->flavors)->flavors) + (unsigned long )i; if ((unsigned long )((long )((char *)sec_flavor + 1U) - (long )res->flavors) > 4096UL) { goto ldv_51086; } else { } p = xdr_inline_decode(xdr, 4UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } sec_flavor->flavor = __be32_to_cpup((__be32 const *)p); if (sec_flavor->flavor == 6U) { status = decode_secinfo_gss(xdr, sec_flavor); if (status != 0) { goto out; } else { } } else { } (res->flavors)->num_flavors = (res->flavors)->num_flavors + 1U; i = i + 1; ldv_51089: ; if (i < num_flavors) { goto ldv_51088; } else { } ldv_51086: status = 0; out: ; return (status); out_overflow: print_overflow_msg("decode_secinfo_common", (struct xdr_stream const *)xdr); return (-5); } } static int decode_secinfo(struct xdr_stream *xdr , struct nfs4_secinfo_res *res ) { int status ; int tmp ; int tmp___0 ; { tmp = decode_op_hdr(xdr, 33); status = tmp; if (status != 0) { return (status); } else { } tmp___0 = decode_secinfo_common(xdr, res); return (tmp___0); } } static int decode_secinfo_no_name(struct xdr_stream *xdr , struct nfs4_secinfo_res *res ) { int status ; int tmp ; 
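/*
 * decode_secinfo() (op 33) and decode_secinfo_no_name() (op 52, which
 * appears to be the NFSv4.1 SECINFO_NO_NAME variant) differ only in the
 * op header they expect; both feed the flavor list to
 * decode_secinfo_common(), which stops early rather than overflowing the
 * page backing res->flavors and parses the GSS triple (oid, qop, service)
 * only for flavor 6 (RPC_AUTH_GSS).
 */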
int tmp___0 ; { tmp = decode_op_hdr(xdr, 52); status = tmp; if (status != 0) { return (status); } else { } tmp___0 = decode_secinfo_common(xdr, res); return (tmp___0); } } static int decode_exchange_id(struct xdr_stream *xdr , struct nfs41_exchange_id_res *res ) { __be32 *p ; uint32_t dummy ; char *dummy_str ; int status ; uint32_t impl_id_count ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; __be32 *tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; size_t __len ; void *__ret ; long tmp___6 ; long tmp___7 ; size_t __len___0 ; void *__ret___0 ; long tmp___8 ; __be32 *tmp___9 ; long tmp___10 ; long tmp___11 ; size_t __len___1 ; void *__ret___1 ; long tmp___12 ; long tmp___13 ; size_t __len___2 ; void *__ret___2 ; long tmp___14 ; { status = decode_op_hdr(xdr, 42); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } xdr_decode_hyper(p, & res->clientid); p = xdr_inline_decode(xdr, 12UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } tmp___1 = p; p = p + 1; res->seqid = __be32_to_cpup((__be32 const *)tmp___1); tmp___2 = p; p = p + 1; res->flags = __be32_to_cpup((__be32 const *)tmp___2); dummy = __be32_to_cpup((__be32 const *)p); if (dummy != 0U) { return (-5); } else { } p = xdr_inline_decode(xdr, 8UL); tmp___3 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___3 != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & (res->server_owner)->minor_id); status = decode_opaque_inline(xdr, & dummy, & dummy_str); tmp___4 = ldv__builtin_expect(status != 0, 0L); if (tmp___4 != 0L) { return (status); } else { } tmp___5 = ldv__builtin_expect(dummy > 1024U, 0L); if (tmp___5 != 0L) { return (-5); } else { } __len = (size_t )dummy; __ret = memcpy((void *)(& (res->server_owner)->major_id), (void const *)dummy_str, __len); (res->server_owner)->major_id_sz = dummy; status = decode_opaque_inline(xdr, & dummy, & dummy_str); tmp___6 = ldv__builtin_expect(status != 0, 0L); if (tmp___6 != 0L) { return (status); } else { } tmp___7 = ldv__builtin_expect(dummy > 1024U, 0L); if (tmp___7 != 0L) { return (-5); } else { } __len___0 = (size_t )dummy; __ret___0 = memcpy((void *)(& (res->server_scope)->server_scope), (void const *)dummy_str, __len___0); (res->server_scope)->server_scope_sz = dummy; p = xdr_inline_decode(xdr, 4UL); tmp___8 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___8 != 0L) { goto out_overflow; } else { } tmp___9 = p; p = p + 1; impl_id_count = __be32_to_cpup((__be32 const *)tmp___9); if (impl_id_count != 0U) { status = decode_opaque_inline(xdr, & dummy, & dummy_str); tmp___10 = ldv__builtin_expect(status != 0, 0L); if (tmp___10 != 0L) { return (status); } else { } tmp___11 = ldv__builtin_expect(dummy > 1024U, 0L); if (tmp___11 != 0L) { return (-5); } else { } __len___1 = (size_t )dummy; __ret___1 = memcpy((void *)(& (res->impl_id)->domain), (void const *)dummy_str, __len___1); status = decode_opaque_inline(xdr, & dummy, & dummy_str); tmp___12 = ldv__builtin_expect(status != 0, 0L); if (tmp___12 != 0L) { return (status); } else { } tmp___13 = ldv__builtin_expect(dummy > 1024U, 0L); if (tmp___13 != 0L) { return (-5); } else { } __len___2 = (size_t )dummy; __ret___2 = memcpy((void *)(& (res->impl_id)->name), (void const *)dummy_str, __len___2); p = xdr_inline_decode(xdr, 
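/*
 * decode_exchange_id() reads the client id, sequence id and flags,
 * rejects a non-zero value for what appears to be the state_protect
 * discriminator, and copies the server owner major id and server scope
 * (each capped at 1024 bytes). An optional implementation id follows,
 * with domain, name and a seconds/nanoseconds build date.
 */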
12UL); tmp___14 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___14 != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & (res->impl_id)->date.seconds); (res->impl_id)->date.nseconds = __be32_to_cpup((__be32 const *)p); } else { } return (0); out_overflow: print_overflow_msg("decode_exchange_id", (struct xdr_stream const *)xdr); return (-5); } } static int decode_chan_attrs(struct xdr_stream *xdr , struct nfs4_channel_attrs *attrs ) { __be32 *p ; u32 nr_attrs ; u32 val ; long tmp ; __be32 *tmp___0 ; __be32 *tmp___1 ; __be32 *tmp___2 ; __be32 *tmp___3 ; __be32 *tmp___4 ; __be32 *tmp___5 ; long tmp___6 ; long tmp___7 ; { p = xdr_inline_decode(xdr, 28UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; val = __be32_to_cpup((__be32 const *)tmp___0); if (val != 0U) { return (-22); } else { } tmp___1 = p; p = p + 1; attrs->max_rqst_sz = __be32_to_cpup((__be32 const *)tmp___1); tmp___2 = p; p = p + 1; attrs->max_resp_sz = __be32_to_cpup((__be32 const *)tmp___2); tmp___3 = p; p = p + 1; attrs->max_resp_sz_cached = __be32_to_cpup((__be32 const *)tmp___3); tmp___4 = p; p = p + 1; attrs->max_ops = __be32_to_cpup((__be32 const *)tmp___4); tmp___5 = p; p = p + 1; attrs->max_reqs = __be32_to_cpup((__be32 const *)tmp___5); nr_attrs = __be32_to_cpup((__be32 const *)p); tmp___6 = ldv__builtin_expect(nr_attrs > 1U, 0L); if (tmp___6 != 0L) { printk("\fNFS: %s: Invalid rdma channel attrs count %u\n", "decode_chan_attrs", nr_attrs); return (-22); } else { } if (nr_attrs == 1U) { p = xdr_inline_decode(xdr, 4UL); tmp___7 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___7 != 0L) { goto out_overflow; } else { } } else { } return (0); out_overflow: print_overflow_msg("decode_chan_attrs", (struct xdr_stream const *)xdr); return (-5); } } static int decode_sessionid(struct xdr_stream *xdr , struct nfs4_sessionid *sid ) { int tmp ; { tmp = decode_opaque_fixed(xdr, (void *)(& sid->data), 16UL); return (tmp); } } static int decode_bind_conn_to_session(struct xdr_stream *xdr , struct nfs41_bind_conn_to_session_res *res ) { __be32 *p ; int status ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; { status = decode_op_hdr(xdr, 41); if (status == 0) { status = decode_sessionid(xdr, & (res->session)->sess_id); } else { } tmp = ldv__builtin_expect(status != 0, 0L); if (tmp != 0L) { return (status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } tmp___1 = p; p = p + 1; res->dir = __be32_to_cpup((__be32 const *)tmp___1); if (res->dir == 0U || res->dir > 3U) { return (-5); } else { } tmp___2 = __be32_to_cpup((__be32 const *)p); if (tmp___2 == 0U) { res->use_conn_in_rdma_mode = 0; } else { res->use_conn_in_rdma_mode = 1; } return (0); out_overflow: print_overflow_msg("decode_bind_conn_to_session", (struct xdr_stream const *)xdr); return (-5); } } static int decode_create_session(struct xdr_stream *xdr , struct nfs41_create_session_res *res ) { __be32 *p ; int status ; struct nfs_client *clp ; struct nfs4_session *session ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; { clp = res->client; session = clp->cl_session; status = decode_op_hdr(xdr, 43); if (status == 0) { status = decode_sessionid(xdr, & session->sess_id); } else { } tmp = ldv__builtin_expect(status != 0, 0L); if (tmp != 0L) { return 
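/*
 * decode_chan_attrs() above reads a fixed 28-byte block: a header pad
 * size that must be zero, then max_rqst_sz, max_resp_sz,
 * max_resp_sz_cached, max_ops and max_reqs, followed by an rdma-attrs
 * count that may only be 0 or 1 (the single attribute, if present, is
 * read and discarded). decode_create_session() uses it for both the
 * fore- and back-channel attributes after recording the session id,
 * cl_seqid and session flags.
 */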
(status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } tmp___1 = p; p = p + 1; clp->cl_seqid = __be32_to_cpup((__be32 const *)tmp___1); session->flags = __be32_to_cpup((__be32 const *)p); status = decode_chan_attrs(xdr, & session->fc_attrs); if (status == 0) { status = decode_chan_attrs(xdr, & session->bc_attrs); } else { } return (status); out_overflow: print_overflow_msg("decode_create_session", (struct xdr_stream const *)xdr); return (-5); } } static int decode_destroy_session(struct xdr_stream *xdr , void *dummy ) { int tmp ; { tmp = decode_op_hdr(xdr, 44); return (tmp); } } static int decode_destroy_clientid(struct xdr_stream *xdr , void *dummy ) { int tmp ; { tmp = decode_op_hdr(xdr, 57); return (tmp); } } static int decode_reclaim_complete(struct xdr_stream *xdr , void *dummy ) { int tmp ; { tmp = decode_op_hdr(xdr, 58); return (tmp); } } static int decode_sequence(struct xdr_stream *xdr , struct nfs4_sequence_res *res , struct rpc_rqst *rqstp ) { struct nfs4_session *session ; struct nfs4_sessionid id ; u32 dummy ; int status ; __be32 *p ; long tmp ; long tmp___0 ; int tmp___1 ; long tmp___2 ; __be32 *tmp___3 ; long tmp___4 ; __be32 *tmp___5 ; long tmp___6 ; __be32 *tmp___7 ; __be32 *tmp___8 ; { if ((unsigned long )res->sr_slot == (unsigned long )((struct nfs4_slot *)0)) { return (0); } else { } status = decode_op_hdr(xdr, 53); if (status == 0) { status = decode_sessionid(xdr, & id); } else { } tmp = ldv__builtin_expect(status != 0, 0L); if (tmp != 0L) { goto out_err; } else { } status = -121; session = ((res->sr_slot)->table)->session; tmp___1 = memcmp((void const *)(& id.data), (void const *)(& session->sess_id.data), 16UL); if (tmp___1 != 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s Invalid session id\n", "decode_sequence"); } else { } goto out_err; } else { } p = xdr_inline_decode(xdr, 20UL); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } tmp___3 = p; p = p + 1; dummy = __be32_to_cpup((__be32 const *)tmp___3); if ((res->sr_slot)->seq_nr != dummy) { tmp___4 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s Invalid sequence number\n", "decode_sequence"); } else { } goto out_err; } else { } tmp___5 = p; p = p + 1; dummy = __be32_to_cpup((__be32 const *)tmp___5); if ((res->sr_slot)->slot_nr != dummy) { tmp___6 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001d%s Invalid slot id\n", "decode_sequence"); } else { } goto out_err; } else { } tmp___7 = p; p = p + 1; res->sr_highest_slotid = __be32_to_cpup((__be32 const *)tmp___7); tmp___8 = p; p = p + 1; res->sr_target_highest_slotid = __be32_to_cpup((__be32 const *)tmp___8); res->sr_status_flags = __be32_to_cpup((__be32 const *)p); status = 0; out_err: res->sr_status = status; return (status); out_overflow: print_overflow_msg("decode_sequence", (struct xdr_stream const *)xdr); status = -5; goto out_err; } } static int decode_getdevicelist(struct xdr_stream *xdr , struct pnfs_devicelist *res ) { __be32 *p ; int status ; int i ; nfs4_verifier verftemp ; long tmp ; long tmp___0 ; long tmp___1 ; { status = decode_op_hdr(xdr, 48); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 20UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 
0L); if (tmp != 0L) { goto out_overflow; } else { } p = p + 2UL; p = xdr_decode_opaque_fixed(p, (void *)(& verftemp.data), 8U); res->num_devs = __be32_to_cpup((__be32 const *)p); tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: num_dev %d\n", "decode_getdevicelist", res->num_devs); } else { } if (res->num_devs > 16U) { printk("\vNFS: %s too many result dev_num %u\n", "decode_getdevicelist", res->num_devs); return (-5); } else { } p = xdr_inline_decode(xdr, (size_t )(res->num_devs * 16U + 4U)); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } i = 0; goto ldv_51191; ldv_51190: p = xdr_decode_opaque_fixed(p, (void *)(& res->dev_id[i].data), 16U); i = i + 1; ldv_51191: ; if ((unsigned int )i < res->num_devs) { goto ldv_51190; } else { } res->eof = __be32_to_cpup((__be32 const *)p); return (0); out_overflow: print_overflow_msg("decode_getdevicelist", (struct xdr_stream const *)xdr); return (-5); } } static int decode_getdeviceinfo(struct xdr_stream *xdr , struct pnfs_device *pdev ) { __be32 *p ; uint32_t len ; uint32_t type ; int status ; long tmp ; long tmp___0 ; long tmp___1 ; __be32 *tmp___2 ; long tmp___3 ; unsigned int tmp___4 ; long tmp___5 ; uint32_t i ; long tmp___6 ; long tmp___7 ; __u32 tmp___8 ; { status = decode_op_hdr(xdr, 47); if (status != 0) { if (status == -525) { p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } pdev->mincount = __be32_to_cpup((__be32 const *)p); tmp___0 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: Min count too small. mincnt = %u\n", "decode_getdeviceinfo", pdev->mincount); } else { } } else { } return (status); } else { } p = xdr_inline_decode(xdr, 8UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } tmp___2 = p; p = p + 1; type = __be32_to_cpup((__be32 const *)tmp___2); if (pdev->layout_type != type) { tmp___3 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: layout mismatch req: %u pdev: %u\n", "decode_getdeviceinfo", pdev->layout_type, type); } else { } return (-22); } else { } pdev->mincount = __be32_to_cpup((__be32 const *)p); tmp___4 = xdr_read_pages(xdr, pdev->mincount); if (tmp___4 != pdev->mincount) { goto out_overflow; } else { } p = xdr_inline_decode(xdr, 4UL); tmp___5 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___5 != 0L) { goto out_overflow; } else { } len = __be32_to_cpup((__be32 const *)p); if (len != 0U) { p = xdr_inline_decode(xdr, (size_t )(len * 4U)); tmp___6 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___6 != 0L) { goto out_overflow; } else { } i = 0U; goto ldv_51205; ldv_51204: tmp___8 = __be32_to_cpup((__be32 const *)p); if (tmp___8 != 0U) { tmp___7 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: notifications not supported\n", "decode_getdeviceinfo"); } else { } return (-5); } else { } i = i + 1U; p = p + 1; ldv_51205: ; if (i < len) { goto ldv_51204; } else { } } else { } return (0); out_overflow: print_overflow_msg("decode_getdeviceinfo", (struct xdr_stream const *)xdr); return (-5); } } static int decode_layoutget(struct xdr_stream *xdr , struct rpc_rqst *req , struct nfs4_layoutget_res 
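/*
 * decode_getdeviceinfo() above handles the -525 (likely -ETOOSMALL) case
 * by reporting the server's minimum count in pdev->mincount, and rejects
 * replies whose layout type does not match the request or that carry any
 * non-zero notification bitmap words. decode_layoutget() below requires
 * at least one layout segment, checks the opaque layout body against what
 * was actually received, and only warns when the server returns more than
 * one segment (the tail is dropped).
 */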
*res ) { __be32 *p ; int status ; u32 layout_count ; u32 recvd ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; __be32 *tmp___3 ; __be32 *tmp___4 ; long tmp___5 ; long tmp___6 ; long tmp___7 ; { status = decode_op_hdr(xdr, 50); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } res->return_on_close = __be32_to_cpup((__be32 const *)p); decode_stateid(xdr, & res->stateid); p = xdr_inline_decode(xdr, 4UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } layout_count = __be32_to_cpup((__be32 const *)p); if (layout_count == 0U) { tmp___1 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: server responded with empty layout array\n", "decode_layoutget"); } else { } return (-22); } else { } p = xdr_inline_decode(xdr, 28UL); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } p = xdr_decode_hyper(p, & res->range.offset); p = xdr_decode_hyper(p, & res->range.length); tmp___3 = p; p = p + 1; res->range.iomode = __be32_to_cpup((__be32 const *)tmp___3); tmp___4 = p; p = p + 1; res->type = __be32_to_cpup((__be32 const *)tmp___4); (res->layoutp)->len = __be32_to_cpup((__be32 const *)p); tmp___5 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d%s roff:%lu rlen:%lu riomode:%d, lo_type:0x%x, lo.len:%d\n", "decode_layoutget", (unsigned long )res->range.offset, (unsigned long )res->range.length, res->range.iomode, res->type, (res->layoutp)->len); } else { } recvd = xdr_read_pages(xdr, (res->layoutp)->len); if ((res->layoutp)->len > recvd) { tmp___6 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001dNFS: server cheating in layoutget reply: layout len %u > recvd %u\n", (res->layoutp)->len, recvd); } else { } return (-22); } else { } if (layout_count > 1U) { tmp___7 = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: server responded with %d layouts, dropping tail\n", "decode_layoutget", layout_count); } else { } } else { } return (0); out_overflow: print_overflow_msg("decode_layoutget", (struct xdr_stream const *)xdr); return (-5); } } static int decode_layoutreturn(struct xdr_stream *xdr , struct nfs4_layoutreturn_res *res ) { __be32 *p ; int status ; long tmp ; { status = decode_op_hdr(xdr, 51); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } res->lrs_present = __be32_to_cpup((__be32 const *)p); if (res->lrs_present != 0U) { status = decode_stateid(xdr, & res->stateid); } else { } return (status); out_overflow: print_overflow_msg("decode_layoutreturn", (struct xdr_stream const *)xdr); return (-5); } } static int decode_layoutcommit(struct xdr_stream *xdr , struct rpc_rqst *req , struct nfs4_layoutcommit_res *res ) { __be32 *p ; __u32 sizechanged ; int status ; long tmp ; long tmp___0 ; { status = decode_op_hdr(xdr, 49); res->status = status; if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } sizechanged = 
__be32_to_cpup((__be32 const *)p); if (sizechanged != 0U) { p = xdr_inline_decode(xdr, 8UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { goto out_overflow; } else { } } else { } return (0); out_overflow: print_overflow_msg("decode_layoutcommit", (struct xdr_stream const *)xdr); return (-5); } } static int decode_test_stateid(struct xdr_stream *xdr , struct nfs41_test_stateid_res *res ) { __be32 *p ; int status ; int num_res ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; long tmp___2 ; __be32 *tmp___3 ; { status = decode_op_hdr(xdr, 55); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; tmp___1 = __be32_to_cpup((__be32 const *)tmp___0); num_res = (int )tmp___1; if (num_res != 1) { goto out; } else { } p = xdr_inline_decode(xdr, 4UL); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } tmp___3 = p; p = p + 1; res->status = __be32_to_cpup((__be32 const *)tmp___3); return (status); out_overflow: print_overflow_msg("decode_test_stateid", (struct xdr_stream const *)xdr); out: ; return (-5); } } static int decode_free_stateid(struct xdr_stream *xdr , struct nfs41_free_stateid_res *res ) { __be32 *p ; int status ; long tmp ; __be32 *tmp___0 ; { status = decode_op_hdr(xdr, 45); if (status != 0) { return (status); } else { } p = xdr_inline_decode(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out_overflow; } else { } tmp___0 = p; p = p + 1; res->status = __be32_to_cpup((__be32 const *)tmp___0); return ((int )res->status); out_overflow: print_overflow_msg("decode_free_stateid", (struct xdr_stream const *)xdr); return (-5); } } static int nfs4_xdr_dec_open_downgrade(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_closeres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_open_downgrade(xdr, res); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_access(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_accessres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_access(xdr, & res->supported, & res->access); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_lookup(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_lookup_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_lookup(xdr); if (status != 0) { goto out; } else { } status = 
decode_getfh(xdr, res->fh); if (status != 0) { goto out; } else { } status = decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_lookup_root(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_lookup_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putrootfh(xdr); if (status != 0) { goto out; } else { } status = decode_getfh(xdr, res->fh); if (status == 0) { status = decode_getfattr(xdr, res->fattr, res->server); } else { } out: ; return (status); } } static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_removeres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_remove(xdr, & res->cinfo); out: ; return (status); } } static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_renameres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_savefh(xdr); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_rename(xdr, & res->old_cinfo, & res->new_cinfo); out: ; return (status); } } static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_link_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_savefh(xdr); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_link(xdr, & res->cinfo); if (status != 0) { goto out; } else { } status = decode_restorefh(xdr); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_create_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_create(xdr, & res->dir_cinfo); if (status != 0) { goto out; } else { } status = decode_getfh(xdr, res->fh); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_symlink(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_create_res *res ) { int tmp ; { tmp = nfs4_xdr_dec_create(rqstp, xdr, res); return (tmp); } } static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_getattr_res *res ) { struct compound_hdr hdr ; int status ; { status = 
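/*
 * Annotation, restating the shape shared by the nfs4_xdr_dec_*() COMPOUND
 * reply decoders in this region:
 *
 *   status = decode_compound_hdr(xdr, &hdr);
 *   status = decode_sequence(xdr, &res->seq_res, rqstp);   returns 0 at once
 *                                                          when no session slot is in use
 *   status = decode_putfh(xdr);
 *   status = <operation-specific decode>;
 *   decode_getfattr(...);   optional trailing GETATTR whose failure does
 *                           not change the overall status
 *
 * Any non-zero status short-circuits the remaining steps via "goto out".
 */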
decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static void nfs4_xdr_enc_setacl(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_setaclargs *args ) { struct compound_hdr hdr ; u32 tmp ; { tmp = nfs4_xdr_minorversion((struct nfs4_sequence_args const *)(& args->seq_args)); hdr.status = 0; hdr.nops = 0U; hdr.nops_p = 0; hdr.taglen = 0U; hdr.tag = 0; hdr.replen = 0U; hdr.minorversion = tmp; encode_compound_hdr(xdr, req, & hdr); encode_sequence(xdr, (struct nfs4_sequence_args const *)(& args->seq_args), & hdr); encode_putfh(xdr, (struct nfs_fh const *)args->fh, & hdr); encode_setacl(xdr, args, & hdr); encode_nops(& hdr); return; } } static int nfs4_xdr_dec_setacl(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_setaclres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_setattr(xdr); out: ; return (status); } } static int nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_getaclres *res ) { struct compound_hdr hdr ; int status ; void *p ; void *tmp ; { if ((unsigned long )res->acl_scratch != (unsigned long )((struct page *)0)) { tmp = lowmem_page_address((struct page const *)res->acl_scratch); p = tmp; xdr_set_scratch_buffer(xdr, p, 4096UL); } else { } status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_getacl(xdr, rqstp, res); out: ; return (status); } } static int nfs4_xdr_dec_close(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_closeres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_close(xdr, res); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_openres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_open(xdr, res); if (status != 0) { goto out; } else { } status = decode_getfh(xdr, & res->fh); if (status != 0) { goto out; } else { } if (res->access_request != 0U) { decode_access(xdr, & res->access_supported, & res->access_result); } else { } decode_getfattr(xdr, res->f_attr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_open_confirm(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_open_confirmres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 
0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_open_confirm(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_open_noattr(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_openres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_open(xdr, res); if (status != 0) { goto out; } else { } if (res->access_request != 0U) { decode_access(xdr, & res->access_supported, & res->access_result); } else { } decode_getfattr(xdr, res->f_attr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_setattr(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_setattrres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_setattr(xdr); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_lock(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_lock_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_lock(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_lockt(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_lockt_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_lockt(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_locku(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_locku_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_locku(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_release_lockowner(struct rpc_rqst *rqstp , struct xdr_stream *xdr , void *dummy ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_release_lockowner(xdr); } else { } return (status); } } static int nfs4_xdr_dec_readlink(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_readlink_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_readlink(xdr, rqstp); out: ; return (status); } } static int nfs4_xdr_dec_readdir(struct rpc_rqst *rqstp , struct xdr_stream 
*xdr , struct nfs4_readdir_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_readdir(xdr, rqstp, res); out: ; return (status); } } static int nfs4_xdr_dec_read(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_readres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_read(xdr, rqstp, res); if (status == 0) { status = (int )res->count; } else { } out: ; return (status); } } static int nfs4_xdr_dec_write(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_writeres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_write(xdr, res); if (status != 0) { goto out; } else { } if ((unsigned long )res->fattr != (unsigned long )((struct nfs_fattr *)0)) { decode_getfattr(xdr, res->fattr, res->server); } else { } if (status == 0) { status = (int )res->count; } else { } out: ; return (status); } } static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs_commitres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_commit(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_fsinfo_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_sequence(xdr, & res->seq_res, req); } else { } if (status == 0) { status = decode_putfh(xdr); } else { } if (status == 0) { status = decode_fsinfo(xdr, res->fsinfo); } else { } return (status); } } static int nfs4_xdr_dec_pathconf(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_pathconf_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_sequence(xdr, & res->seq_res, req); } else { } if (status == 0) { status = decode_putfh(xdr); } else { } if (status == 0) { status = decode_pathconf(xdr, res->pathconf); } else { } return (status); } } static int nfs4_xdr_dec_statfs(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_statfs_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_sequence(xdr, & res->seq_res, req); } else { } if (status == 0) { status = decode_putfh(xdr); } else { } if (status == 0) { status = decode_statfs(xdr, res->fsstat); } else { } return (status); } } static int nfs4_xdr_dec_server_caps(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_server_caps_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if 
(status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, req); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_server_caps(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_renew(struct rpc_rqst *rqstp , struct xdr_stream *xdr , void *__unused ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_renew(xdr); } else { } return (status); } } static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_setclientid_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_setclientid(xdr, res); } else { } return (status); } } static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs_fsinfo *fsinfo ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_setclientid_confirm(xdr); } else { } if (status == 0) { status = decode_putrootfh(xdr); } else { } if (status == 0) { status = decode_fsinfo(xdr, fsinfo); } else { } return (status); } } static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_delegreturnres *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_getfattr(xdr, res->fattr, res->server); if (status != 0) { goto out; } else { } status = decode_delegreturn(xdr); out: ; return (status); } } static int nfs4_xdr_dec_fs_locations(struct rpc_rqst *req , struct xdr_stream *xdr , struct nfs4_fs_locations_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, req); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_lookup(xdr); if (status != 0) { goto out; } else { } xdr_enter_page(xdr, 4096U); status = decode_getfattr_generic(xdr, & (res->fs_locations)->fattr, 0, res->fs_locations, (res->fs_locations)->server); out: ; return (status); } } static int nfs4_xdr_dec_secinfo(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_secinfo_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_secinfo(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp , struct xdr_stream *xdr , void *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_bind_conn_to_session(xdr, (struct nfs41_bind_conn_to_session_res *)res); } else { } return (status); } } static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp , struct xdr_stream *xdr , void *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_exchange_id(xdr, (struct nfs41_exchange_id_res *)res); } else { } return 
(status); } } static int nfs4_xdr_dec_create_session(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs41_create_session_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_create_session(xdr, res); } else { } return (status); } } static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp , struct xdr_stream *xdr , void *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_destroy_session(xdr, res); } else { } return (status); } } static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp , struct xdr_stream *xdr , void *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_destroy_clientid(xdr, res); } else { } return (status); } } static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_sequence_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_sequence(xdr, res, rqstp); } else { } return (status); } } static int nfs4_xdr_dec_get_lease_time(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_get_lease_time_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_sequence(xdr, & res->lr_seq_res, rqstp); } else { } if (status == 0) { status = decode_putrootfh(xdr); } else { } if (status == 0) { status = decode_fsinfo(xdr, res->lr_fsinfo); } else { } return (status); } } static int nfs4_xdr_dec_reclaim_complete(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs41_reclaim_complete_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status == 0) { status = decode_sequence(xdr, & res->seq_res, rqstp); } else { } if (status == 0) { status = decode_reclaim_complete(xdr, 0); } else { } return (status); } } static int nfs4_xdr_dec_getdevicelist(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_getdevicelist_res *res ) { struct compound_hdr hdr ; int status ; long tmp ; { tmp = ldv__builtin_expect((nfs_debug & 32U) != 0U, 0L); if (tmp != 0L) { printk("\001dencoding getdevicelist!\n"); } else { } status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_getdevicelist(xdr, res->devlist); out: ; return (status); } } static int nfs4_xdr_dec_getdeviceinfo(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_getdeviceinfo_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_getdeviceinfo(xdr, res->pdev); out: ; return (status); } } static int nfs4_xdr_dec_layoutget(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_layoutget_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_layoutget(xdr, rqstp, res); out: ; return (status); } } 
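/*
 * Descriptive note (added): the remaining nfs4_xdr_dec_* routines below follow
 * the same COMPOUND decode pattern used above -- decode the compound header,
 * then the SEQUENCE reply, any PUTFH/PUTROOTFH reply, and finally the
 * operation-specific reply, bailing out to "out" (or returning) on the first
 * non-zero status.
 */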
static int nfs4_xdr_dec_layoutreturn(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_layoutreturn_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_layoutreturn(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_layoutcommit(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_layoutcommit_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putfh(xdr); if (status != 0) { goto out; } else { } status = decode_layoutcommit(xdr, rqstp, res); if (status != 0) { goto out; } else { } decode_getfattr(xdr, res->fattr, res->server); out: ; return (status); } } static int nfs4_xdr_dec_secinfo_no_name(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs4_secinfo_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_putrootfh(xdr); if (status != 0) { goto out; } else { } status = decode_secinfo_no_name(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_test_stateid(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs41_test_stateid_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_test_stateid(xdr, res); out: ; return (status); } } static int nfs4_xdr_dec_free_stateid(struct rpc_rqst *rqstp , struct xdr_stream *xdr , struct nfs41_free_stateid_res *res ) { struct compound_hdr hdr ; int status ; { status = decode_compound_hdr(xdr, & hdr); if (status != 0) { goto out; } else { } status = decode_sequence(xdr, & res->seq_res, rqstp); if (status != 0) { goto out; } else { } status = decode_free_stateid(xdr, res); out: ; return (status); } } int nfs4_decode_dirent(struct xdr_stream *xdr , struct nfs_entry *entry , int plus ) { unsigned int savep ; uint32_t bitmap[3U] ; unsigned int tmp ; uint32_t len ; __be32 *p ; __be32 *tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { bitmap[0] = 0U; tmp = 1U; while (1) { if (tmp >= 3U) { break; } else { } bitmap[tmp] = 0U; tmp = tmp + 1U; } tmp___0 = xdr_inline_decode(xdr, 4UL); p = tmp___0; tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { goto out_overflow; } else { } if (*p == 0U) { p = xdr_inline_decode(xdr, 4UL); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out_overflow; } else { } if (*p == 0U) { return (-11); } else { } entry->eof = 1; return (-523); } else { } p = xdr_inline_decode(xdr, 12UL); tmp___3 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___3 != 0L) { goto out_overflow; } else { } entry->prev_cookie = entry->cookie; p = xdr_decode_hyper(p, & entry->cookie); entry->len = __be32_to_cpup((__be32 const *)p); p = xdr_inline_decode(xdr, (size_t 
)entry->len); tmp___4 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___4 != 0L) { goto out_overflow; } else { } entry->name = (char const *)p; entry->ino = 1ULL; (entry->fattr)->valid = 0U; tmp___5 = decode_attr_bitmap(xdr, (uint32_t *)(& bitmap)); if (tmp___5 < 0) { goto out_overflow; } else { } tmp___6 = decode_attr_length(xdr, & len, & savep); if (tmp___6 < 0) { goto out_overflow; } else { } tmp___7 = decode_getfattr_attrs(xdr, (uint32_t *)(& bitmap), entry->fattr, entry->fh, 0, (struct nfs_server const *)entry->server); if (tmp___7 < 0) { goto out_overflow; } else { } if (((entry->fattr)->valid & 4194304U) != 0U) { entry->ino = (entry->fattr)->mounted_on_fileid; } else if (((entry->fattr)->valid & 2048U) != 0U) { entry->ino = (entry->fattr)->fileid; } else { } entry->d_type = 0U; if ((int )(entry->fattr)->valid & 1) { entry->d_type = nfs_umode_to_dtype((int )(entry->fattr)->mode); } else { } return (0); out_overflow: print_overflow_msg("nfs4_decode_dirent", (struct xdr_stream const *)xdr); return (-11); } } static struct __anonstruct_nfs_errtbl_265 nfs_errtbl[30U] = { {0, 0}, {1, -1}, {2, -2}, {5, -5}, {6, -6}, {13, -13}, {17, -17}, {18, -18}, {20, -20}, {21, -21}, {22, -22}, {27, -27}, {28, -28}, {30, -30}, {31, -31}, {63, -36}, {66, -39}, {69, -122}, {70, -116}, {10001, -521}, {10003, -523}, {10004, -524}, {10005, -525}, {10006, -121}, {10007, -527}, {10012, -11}, {10029, -40}, {10044, -95}, {10045, -35}, {-1, -5}}; static int nfs4_stat_to_errno(int stat ) { int i ; { i = 0; goto ldv_51679; ldv_51678: ; if (nfs_errtbl[i].stat == stat) { return (nfs_errtbl[i].errno); } else { } i = i + 1; ldv_51679: ; if (nfs_errtbl[i].stat != -1) { goto ldv_51678; } else { } if (stat <= 10000 || stat > 10100) { return (-121); } else { } return (- stat); } } struct rpc_procinfo nfs4_procedures[53U] = { {0U, 0, 0, 0U, 0U, 0U, 0U, 0U, 0}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_read), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_read), 54U, 20U, 0U, 0U, 1U, "READ"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_write), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_write), 60U, 136U, 0U, 0U, 2U, "WRITE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_commit), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_commit), 50U, 20U, 0U, 0U, 3U, "COMMIT"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_open), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_open), 220U, 227U, 0U, 0U, 4U, "OPEN"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_open_confirm), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_open_confirm), 43U, 11U, 0U, 0U, 5U, "OPEN_CONFIRM"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_open_noattr), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_open_noattr), 219U, 192U, 0U, 0U, 6U, "OPEN_NOATTR"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_open_downgrade), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_open_downgrade), 59U, 136U, 0U, 0U, 7U, "OPEN_DOWNGRADE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_close), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_close), 57U, 136U, 0U, 0U, 8U, "CLOSE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_setattr), (int (*)(void * , struct 
xdr_stream * , void * ))(& nfs4_xdr_dec_setattr), 138U, 136U, 0U, 0U, 9U, "SETATTR"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_fsinfo), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_fsinfo), 51U, 39U, 0U, 0U, 10U, "FSINFO"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_renew), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_renew), 7U, 5U, 0U, 0U, 11U, "RENEW"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_setclientid), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_setclientid), 56U, 1031U, 0U, 0U, 12U, "SETCLIENTID"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_setclientid_confirm), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_setclientid_confirm), 15U, 30U, 0U, 0U, 13U, "SETCLIENTID_CONFIRM"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_lock), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_lock), 67U, 59U, 0U, 0U, 14U, "LOCK"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_lockt), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_lockt), 59U, 59U, 0U, 0U, 15U, "LOCKT"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_locku), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_locku), 58U, 22U, 0U, 0U, 16U, "LOCKU"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_access), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_access), 53U, 134U, 0U, 0U, 17U, "ACCESS"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_getattr), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_getattr), 51U, 130U, 0U, 0U, 18U, "GETATTR"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_lookup), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_lookup), 118U, 167U, 0U, 0U, 19U, "LOOKUP"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_lookup_root), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_lookup_root), 19U, 165U, 0U, 0U, 20U, "LOOKUP_ROOT"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_remove), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_remove), 112U, 23U, 0U, 0U, 21U, "REMOVE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_rename), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_rename), 212U, 32U, 0U, 0U, 22U, "RENAME"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_link), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_link), 153U, 143U, 0U, 0U, 23U, "LINK"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_symlink), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_symlink), 232U, 175U, 0U, 0U, 24U, "SYMLINK"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_create), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_create), 203U, 176U, 0U, 0U, 25U, "CREATE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_pathconf), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_pathconf), 51U, 130U, 0U, 0U, 26U, "PATHCONF"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_statfs), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_statfs), 51U, 130U, 0U, 0U, 27U, "STATFS"}, {1U, 
(void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_readlink), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_readlink), 47U, 19U, 0U, 0U, 28U, "READLINK"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_readdir), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_readdir), 56U, 20U, 0U, 0U, 29U, "READDIR"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_server_caps), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_server_caps), 51U, 130U, 0U, 0U, 30U, "SERVER_CAPS"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_delegreturn), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_delegreturn), 56U, 130U, 0U, 0U, 31U, "DELEGRETURN"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_getacl), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_getacl), 51U, 23U, 0U, 0U, 32U, "GETACL"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_setacl), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_setacl), 54U, 22U, 0U, 0U, 33U, "SETACL"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_fs_locations), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_fs_locations), 117U, 18U, 0U, 0U, 34U, "FS_LOCATIONS"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_release_lockowner), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_release_lockowner), 10U, 36U, 0U, 0U, 35U, "RELEASE_LOCKOWNER"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_secinfo), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_secinfo), 112U, 163U, 0U, 0U, 36U, "SECINFO"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_exchange_id), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_exchange_id), 350U, 1044U, 0U, 0U, 37U, "EXCHANGE_ID"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_create_session), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_create_session), 46U, 27U, 0U, 0U, 38U, "CREATE_SESSION"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_destroy_session), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_destroy_session), 8U, 5U, 0U, 0U, 39U, "DESTROY_SESSION"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_sequence), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_sequence), 12U, 14U, 0U, 0U, 40U, "SEQUENCE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_get_lease_time), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_get_lease_time), 18U, 39U, 0U, 0U, 41U, "GET_LEASE_TIME"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_reclaim_complete), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_reclaim_complete), 17U, 20U, 0U, 0U, 42U, "RECLAIM_COMPLETE"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_layoutget), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_layoutget), 61U, 1054U, 0U, 0U, 43U, "LAYOUTGET"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_getdeviceinfo), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_getdeviceinfo), 21U, 20U, 0U, 0U, 44U, "GETDEVICEINFO"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_layoutcommit), (int (*)(void * , 
struct xdr_stream * , void * ))(& nfs4_xdr_dec_layoutcommit), 67U, 135U, 0U, 0U, 45U, "LAYOUTCOMMIT"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_layoutreturn), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_layoutreturn), 60U, 23U, 0U, 0U, 46U, "LAYOUTRETURN"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_secinfo_no_name), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_secinfo_no_name), 15U, 163U, 0U, 0U, 47U, "SECINFO_NO_NAME"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_test_stateid), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_test_stateid), 19U, 19U, 0U, 0U, 48U, "TEST_STATEID"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_free_stateid), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_free_stateid), 18U, 17U, 0U, 0U, 49U, "FREE_STATEID"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_getdevicelist), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_getdevicelist), 53U, 88U, 0U, 0U, 50U, "GETDEVICELIST"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_bind_conn_to_session), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_bind_conn_to_session), 10U, 11U, 0U, 0U, 51U, "BIND_CONN_TO_SESSION"}, {1U, (void (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_enc_destroy_clientid), (int (*)(void * , struct xdr_stream * , void * ))(& nfs4_xdr_dec_destroy_clientid), 6U, 5U, 0U, 0U, 52U, "DESTROY_CLIENTID"}}; struct rpc_version const nfs_version4 = {4U, 53U, (struct rpc_procinfo *)(& nfs4_procedures)}; void ldv_main1_sequence_infinite_withcheck_stateful(void) { struct nfs4_sequence_args const *var_nfs4_xdr_minorversion_77_p0 ; int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_51745; ldv_51744: tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 1: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 2: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 3: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 4: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 5: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 6: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 7: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 8: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 9: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 10: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 11: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 12: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 13: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 14: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 15: ldv_handler_precall(); 
nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 16: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 17: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 18: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 19: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 20: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 21: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 22: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 23: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 24: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 25: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 26: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 27: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 28: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 29: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 30: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 31: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 32: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 33: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 34: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 35: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 36: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 37: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 38: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; case 39: ldv_handler_precall(); nfs4_xdr_minorversion(var_nfs4_xdr_minorversion_77_p0); goto ldv_51703; default: ; goto ldv_51703; } ldv_51703: ; ldv_51745: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_51744; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_15(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_16(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_17(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_18(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_19(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_20(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; int tmp___0 ; 
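/*
 * Descriptive note (added): LDV lock-model wrapper. The real mutex_trylock()
 * is still invoked, but the value handed back to the caller is the verdict of
 * the ldv_mutex_trylock_mutex() model; the trailing return of ldv_func_res is
 * left unreachable by the instrumentation.
 */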
{ tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_21(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static int test_and_set_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return (oldbit); } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add(struct list_head *new , struct list_head *head ) { { __list_add(new, head, head->next); return; } } __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void list_del(struct list_head * ) ; __inline static void list_move(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add(list, head); return; } } __inline static void list_move_tail(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add_tail(list, head); return; } } extern size_t strlen(char const * ) ; __inline static void rep_nop(void) { { __asm__ volatile ("rep; nop": : : "memory"); return; } } __inline static void cpu_relax(void) { { rep_nop(); return; } } int ldv_mutex_trylock_32(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_33(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_35(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_37(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_29(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_31(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_34(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_36(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_nfs_clid_init_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_nfs_clid_init_mutex(struct mutex *lock ) ; __inline static struct thread_info *current_thread_info___1(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6229; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6229; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6229; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6229; default: __bad_percpu_size(); } ldv_6229: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->ldv_5961.rlock); } } extern int _atomic_dec_and_lock(atomic_t * , spinlock_t * ) ; extern int out_of_line_wait_on_bit(void * , int , int (*)(void * ) , unsigned int ) ; __inline static int wait_on_bit(void *word , int bit , int (*action)(void * ) , unsigned int mode ) { int tmp ; int tmp___0 ; { tmp = variable_test_bit(bit, (unsigned long const volatile *)word); if (tmp == 0) { return (0); } else { } tmp___0 = out_of_line_wait_on_bit(word, bit, action, mode); return (tmp___0); } } __inline static unsigned int read_seqbegin(seqlock_t const *sl ) { unsigned int ret ; long tmp ; { repeat: ret = *((unsigned int 
const volatile *)(& sl->sequence)); tmp = ldv__builtin_expect((long )((int )ret) & 1L, 0L); if (tmp != 0L) { cpu_relax(); goto repeat; } else { } __asm__ volatile ("": : : "memory"); return (ret); } } __inline static int read_seqretry(seqlock_t const *sl , unsigned int start ) { long tmp ; { __asm__ volatile ("": : : "memory"); tmp = ldv__builtin_expect((unsigned int )sl->sequence != start, 0L); return ((int )tmp); } } extern void down_write(struct rw_semaphore * ) ; extern void up_write(struct rw_semaphore * ) ; extern int wait_for_completion_interruptible(struct completion * ) ; extern void complete(struct completion * ) ; __inline static void __rcu_read_lock___1(void) { struct thread_info *tmp ; { tmp = current_thread_info___1(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___1(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___1(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___1(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___1(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___1(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___1(); return; } } extern void rb_insert_color(struct rb_node * , struct rb_root * ) ; extern void rb_erase(struct rb_node * , struct rb_root * ) ; extern struct rb_node *rb_next(struct rb_node const * ) ; extern struct rb_node *rb_first(struct rb_root const * ) ; __inline static void rb_link_node(struct rb_node *node , struct rb_node *parent , struct rb_node **rb_link ) { struct rb_node *tmp ; { node->__rb_parent_color = (unsigned long )parent; tmp = 0; node->rb_right = tmp; node->rb_left = tmp; *rb_link = node; return; } } extern void lock_flocks(void) ; extern void unlock_flocks(void) ; extern void ihold(struct inode * ) ; extern void rpc_init_wait_queue(struct rpc_wait_queue * , char const * ) ; extern void rpc_destroy_wait_queue(struct rpc_wait_queue * ) ; extern void rpc_wake_up(struct rpc_wait_queue * ) ; __inline static struct rpc_cred *get_rpccred(struct rpc_cred *cred ) { { atomic_inc(& cred->cr_count); return (cred); } } extern ktime_t ktime_get(void) ; extern int wake_up_process(struct task_struct * ) ; extern int allow_signal(int ) ; extern int ida_pre_get(struct ida * , gfp_t ) ; extern int ida_get_new(struct ida * , int * ) ; extern void ida_remove(struct ida * , int ) ; extern int ida_simple_get(struct ida * , unsigned int , unsigned int , gfp_t ) ; extern void ida_simple_remove(struct ida * , unsigned int ) ; extern struct module __this_module ; extern struct rpc_clnt *rpc_clone_client_set_auth(struct rpc_clnt * , rpc_authflavor_t ) ; extern struct task_struct *kthread_create_on_node(int (*)(void * ) , void * , int , char const * , ...) 
; extern void __module_put_and_exit(struct module * , long ) ; extern void __module_get(struct module * ) ; extern void module_put(struct module * ) ; int nfs4_discover_server_trunking(struct nfs_client *clp , struct nfs_client **result ) ; void nfs41_server_notify_target_slotid_update(struct nfs_client *clp ) ; void nfs4_purge_state_owners(struct nfs_server *server ) ; void nfs4_schedule_state_manager(struct nfs_client *clp ) ; nfs4_stateid const zero_stateid ; void nfs_expire_all_delegations(struct nfs_client *clp ) ; int nfs_client_return_marked_delegations(struct nfs_client *clp ) ; void nfs_delegation_mark_reclaim(struct nfs_client *clp ) ; void nfs_delegation_reap_unclaimed(struct nfs_client *clp ) ; extern void nfs_mark_client_ready(struct nfs_client * , int ) ; extern int nfs_wait_bit_killable(void * ) ; int nfs40_walk_client_list(struct nfs_client *new , struct nfs_client **result , struct rpc_cred *cred ) ; int nfs41_walk_client_list(struct nfs_client *new , struct nfs_client **result , struct rpc_cred *cred ) ; void nfs4_session_drain_complete(struct nfs4_session *session , struct nfs4_slot_table *tbl ) ; __inline static bool nfs4_session_draining(struct nfs4_session *session ) { int tmp ; { tmp = constant_test_bit(1U, (unsigned long const volatile *)(& session->session_state)); return (tmp != 0); } } void nfs41_wake_slot_table(struct nfs4_slot_table *tbl ) ; void pnfs_destroy_all_layouts(struct nfs_client *clp ) ; __inline static void *net_generic___1(struct net const *net , int id ) { struct net_generic *ng ; void *ptr ; struct net_generic *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { rcu_read_lock___1(); _________p1 = *((struct net_generic * const volatile *)(& net->gen)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("include/net/netns/generic.h", 40, "suspicious rcu_dereference_check() usage"); } else { } } else { } ng = _________p1; tmp___1 = ldv__builtin_expect(id == 0, 0L); if (tmp___1 != 0L) { goto _L; } else { tmp___2 = ldv__builtin_expect((unsigned int )id > ng->len, 0L); if (tmp___2 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (41), "i" (12UL)); ldv_50170: ; goto ldv_50170; } else { } } ptr = ng->ptr[id + -1]; rcu_read_unlock___1(); tmp___3 = ldv__builtin_expect((unsigned long )ptr == (unsigned long )((void *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (45), "i" (12UL)); ldv_50171: ; goto ldv_50171; } else { } return (ptr); } } static struct mutex nfs_clid_init_mutex = {{1}, {{{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "nfs_clid_init_mutex.wait_lock", 0, 0UL}}}}, {& nfs_clid_init_mutex.wait_list, & nfs_clid_init_mutex.wait_list}, 0, 0, (void *)(& nfs_clid_init_mutex), {0, {0, 0}, "nfs_clid_init_mutex", 0, 0UL}}; int nfs4_init_clientid(struct nfs_client *clp , struct rpc_cred *cred ) { struct nfs4_setclientid_res clid ; unsigned short port ; int status ; struct nfs_net *nn ; void *tmp ; int tmp___0 ; { clid.clientid = clp->cl_clientid; clid.confirm = clp->cl_confirm; tmp = net_generic___1((struct net const *)clp->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; tmp___0 = constant_test_bit(7U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___0 != 0) { goto do_confirm; } else { } port = nn->nfs_callback_tcpport; if ((unsigned int )clp->cl_addr.ss_family == 10U) { port = nn->nfs_callback_tcpport6; } else { } status = nfs4_proc_setclientid(clp, 1073741824U, (int )port, cred, & clid); if (status != 0) { goto out; } else { } clp->cl_clientid = clid.clientid; clp->cl_confirm = clid.confirm; set_bit(7U, (unsigned long volatile *)(& clp->cl_state)); do_confirm: status = nfs4_proc_setclientid_confirm(clp, & clid, cred); if (status != 0) { goto out; } else { } clear_bit(7, (unsigned long volatile *)(& clp->cl_state)); nfs4_schedule_state_renewal(clp); out: ; return (status); } } int nfs40_discover_server_trunking(struct nfs_client *clp , struct nfs_client **result , struct rpc_cred *cred ) { struct nfs4_setclientid_res clid ; struct nfs_net *nn ; void *tmp ; unsigned short port ; int status ; { clid.clientid = clp->cl_clientid; clid.confirm = clp->cl_confirm; tmp = net_generic___1((struct net const *)clp->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; port = nn->nfs_callback_tcpport; if ((unsigned int )clp->cl_addr.ss_family == 10U) { port = nn->nfs_callback_tcpport6; } else { } status = nfs4_proc_setclientid(clp, 1073741824U, (int )port, cred, & clid); if (status != 0) { goto out; } else { } clp->cl_clientid = clid.clientid; clp->cl_confirm = clid.confirm; status = nfs40_walk_client_list(clp, result, cred); switch (status) { case -10022: set_bit(7U, (unsigned long volatile *)(& clp->cl_state)); case 0: nfs4_schedule_state_renewal(*result); goto ldv_50216; } ldv_50216: ; out: ; return (status); } } struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp ) { struct rpc_cred *cred ; { cred = 0; if ((unsigned long 
)clp->cl_machine_cred != (unsigned long )((struct rpc_cred *)0)) { cred = get_rpccred(clp->cl_machine_cred); } else { } return (cred); } } static void nfs4_clear_machine_cred(struct nfs_client *clp ) { struct rpc_cred *cred ; { spin_lock(& clp->cl_lock); cred = clp->cl_machine_cred; clp->cl_machine_cred = 0; spin_unlock(& clp->cl_lock); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { put_rpccred(cred); } else { } return; } } static struct rpc_cred *nfs4_get_renew_cred_server_locked(struct nfs_server *server ) { struct rpc_cred *cred ; struct nfs4_state_owner *sp ; struct rb_node *pos ; struct rb_node const *__mptr ; int tmp ; { cred = 0; pos = rb_first((struct rb_root const *)(& server->state_owners)); goto ldv_50236; ldv_50235: __mptr = (struct rb_node const *)pos; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffffe0UL; tmp = list_empty((struct list_head const *)(& sp->so_states)); if (tmp != 0) { goto ldv_50233; } else { } cred = get_rpccred(sp->so_cred); goto ldv_50234; ldv_50233: pos = rb_next((struct rb_node const *)pos); ldv_50236: ; if ((unsigned long )pos != (unsigned long )((struct rb_node *)0)) { goto ldv_50235; } else { } ldv_50234: ; return (cred); } } struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp ) { struct rpc_cred *cred ; struct nfs_server *server ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { cred = 0; cred = nfs4_get_machine_cred_locked(clp); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { goto out; } else { } rcu_read_lock___1(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_50259; ldv_50258: cred = nfs4_get_renew_cred_server_locked(server); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { goto ldv_50257; } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_50259: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_50258; } else { } ldv_50257: rcu_read_unlock___1(); out: ; return (cred); } } static int nfs41_setup_state_renewal(struct nfs_client *clp ) { int status ; struct nfs_fsinfo fsinfo ; int tmp ; { tmp = constant_test_bit(5U, (unsigned long const volatile *)(& clp->cl_res_state)); if (tmp == 0) { nfs4_schedule_state_renewal(clp); return (0); } else { } status = nfs4_proc_get_lease_time(clp, & fsinfo); if (status == 0) { spin_lock(& clp->cl_lock); clp->cl_lease_time = (unsigned long )(fsinfo.lease_time * 250U); clp->cl_last_renewal = jiffies; spin_unlock(& clp->cl_lock); nfs4_schedule_state_renewal(clp); } else { } return (status); } } static void nfs4_end_drain_session(struct nfs_client *clp ) { struct nfs4_session *ses ; struct nfs4_slot_table *tbl ; int tmp ; { ses = clp->cl_session; if ((unsigned long )ses == (unsigned long )((struct nfs4_session *)0)) { return; } else { } tbl = & ses->fc_slot_table; tmp = test_and_clear_bit(1, (unsigned long volatile *)(& ses->session_state)); if (tmp != 0) { spin_lock(& tbl->slot_tbl_lock); nfs41_wake_slot_table(tbl); spin_unlock(& tbl->slot_tbl_lock); } else { } return; } } void nfs4_session_drain_complete(struct nfs4_session *session , struct nfs4_slot_table *tbl ) { bool tmp ; { tmp = nfs4_session_draining(session); if ((int )tmp) { complete(& tbl->complete); } else { } return; } } static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl ) { int tmp ; { spin_lock(& tbl->slot_tbl_lock); if (tbl->highest_used_slotid != 4294967295U) { tbl->complete.done = 0U; spin_unlock(& tbl->slot_tbl_lock); tmp = wait_for_completion_interruptible(& tbl->complete); return (tmp); } else { } spin_unlock(& tbl->slot_tbl_lock); return (0); } } static int nfs4_begin_drain_session(struct nfs_client *clp ) { struct nfs4_session *ses ; int ret ; int tmp ; { ses = clp->cl_session; ret = 0; set_bit(1U, (unsigned long volatile *)(& ses->session_state)); ret = nfs4_wait_on_slot_tbl(& ses->bc_slot_table); if (ret != 0) { return (ret); } else { } tmp = nfs4_wait_on_slot_tbl(& ses->fc_slot_table); return (tmp); } } static void nfs41_finish_session_reset(struct nfs_client *clp ) { { clear_bit(7, (unsigned long volatile *)(& clp->cl_state)); clear_bit(6, (unsigned long volatile *)(& clp->cl_state)); clear_bit(10, (unsigned long volatile *)(& clp->cl_state)); nfs41_setup_state_renewal(clp); return; } } int nfs41_init_clientid(struct nfs_client *clp , struct rpc_cred *cred ) { int status ; int tmp ; { tmp = constant_test_bit(7U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp != 0) { goto do_confirm; } else { } nfs4_begin_drain_session(clp); status = nfs4_proc_exchange_id(clp, cred); if (status != 0) { goto out; } else { } set_bit(7U, (unsigned long volatile *)(& clp->cl_state)); do_confirm: status = nfs4_proc_create_session(clp, cred); if (status != 0) { goto out; } else { } nfs41_finish_session_reset(clp); nfs_mark_client_ready(clp, 0); out: ; return (status); } } int nfs41_discover_server_trunking(struct nfs_client *clp , struct nfs_client **result , struct rpc_cred *cred ) { int status ; int tmp ; { status = nfs4_proc_exchange_id(clp, cred); if (status != 0) { return (status); } else { } set_bit(7U, (unsigned long volatile *)(& clp->cl_state)); tmp = nfs41_walk_client_list(clp, result, cred); 
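/* added comment: forward the trunking-detection verdict obtained from nfs41_walk_client_list() */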
return (tmp); } } struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp ) { struct rpc_cred *cred ; { spin_lock(& clp->cl_lock); cred = nfs4_get_machine_cred_locked(clp); spin_unlock(& clp->cl_lock); return (cred); } } static struct rpc_cred *nfs4_get_setclientid_cred_server(struct nfs_server *server ) { struct nfs_client *clp ; struct rpc_cred *cred ; struct nfs4_state_owner *sp ; struct rb_node *pos ; struct rb_node const *__mptr ; { clp = server->nfs_client; cred = 0; spin_lock(& clp->cl_lock); pos = rb_first((struct rb_root const *)(& server->state_owners)); if ((unsigned long )pos != (unsigned long )((struct rb_node *)0)) { __mptr = (struct rb_node const *)pos; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffffe0UL; cred = get_rpccred(sp->so_cred); } else { } spin_unlock(& clp->cl_lock); return (cred); } } struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp ) { struct nfs_server *server ; struct rpc_cred *cred ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { spin_lock(& clp->cl_lock); cred = nfs4_get_machine_cred_locked(clp); spin_unlock(& clp->cl_lock); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { goto out; } else { } rcu_read_lock___1(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_50333; ldv_50332: cred = nfs4_get_setclientid_cred_server(server); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { goto ldv_50331; } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_50333: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_50332; } else { } ldv_50331: rcu_read_unlock___1(); out: ; return (cred); } } static struct nfs4_state_owner *nfs4_find_state_owner_locked(struct nfs_server *server , struct rpc_cred *cred ) { struct rb_node **p ; struct rb_node *parent ; struct nfs4_state_owner *sp ; struct rb_node const *__mptr ; int tmp ; { p = & server->state_owners.rb_node; parent = 0; goto ldv_50344; ldv_50343: parent = *p; __mptr = (struct rb_node const *)parent; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffffe0UL; if ((unsigned long )sp->so_cred > (unsigned long )cred) { p = & parent->rb_left; } else if ((unsigned long )sp->so_cred < (unsigned long )cred) { p = & parent->rb_right; } else { tmp = list_empty((struct list_head const *)(& sp->so_lru)); if (tmp == 0) { list_del_init(& sp->so_lru); } else { } atomic_inc(& sp->so_count); return (sp); } ldv_50344: ; if ((unsigned long )*p != (unsigned long )((struct rb_node *)0)) { goto ldv_50343; } else { } return (0); } } static struct nfs4_state_owner *nfs4_insert_state_owner_locked(struct nfs4_state_owner *new ) { struct nfs_server *server ; struct rb_node **p ; struct rb_node *parent ; struct nfs4_state_owner *sp ; int err ; struct rb_node const *__mptr ; int tmp ; void *tmp___0 ; { server = new->so_server; p = & server->state_owners.rb_node; parent = 0; goto ldv_50357; ldv_50356: parent = *p; __mptr = (struct rb_node const *)parent; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffffe0UL; if ((unsigned long )new->so_cred < (unsigned long )sp->so_cred) { p = & parent->rb_left; } else if ((unsigned long )new->so_cred > (unsigned long )sp->so_cred) { p = & parent->rb_right; } else { tmp = list_empty((struct list_head const *)(& sp->so_lru)); if (tmp == 0) { list_del_init(& sp->so_lru); } else { } atomic_inc(& sp->so_count); return (sp); } ldv_50357: ; if ((unsigned long )*p != (unsigned long )((struct rb_node *)0)) { goto ldv_50356; } else { } err = ida_get_new(& server->openowner_id, & new->so_seqid.owner_id); if (err != 0) { tmp___0 = ERR_PTR((long )err); return ((struct nfs4_state_owner *)tmp___0); } else { } rb_link_node(& new->so_server_node, parent, p); rb_insert_color(& new->so_server_node, & server->state_owners); return (new); } } static void nfs4_remove_state_owner_locked(struct nfs4_state_owner *sp ) { struct nfs_server *server ; { server = sp->so_server; if (sp->so_server_node.__rb_parent_color != (unsigned long )(& sp->so_server_node)) { rb_erase(& sp->so_server_node, & server->state_owners); } else { } ida_remove(& server->openowner_id, sp->so_seqid.owner_id); return; } } static void nfs4_init_seqid_counter(struct nfs_seqid_counter *sc ) { struct lock_class_key __key ; { sc->create_time = ktime_get(); sc->flags = 0; sc->counter = 0U; spinlock_check(& sc->lock); __raw_spin_lock_init(& sc->lock.ldv_5961.rlock, "&(&sc->lock)->rlock", & __key); INIT_LIST_HEAD(& sc->list); rpc_init_wait_queue(& sc->wait, "Seqid_waitqueue"); return; } } static void nfs4_destroy_seqid_counter(struct nfs_seqid_counter *sc ) { { rpc_destroy_wait_queue(& sc->wait); return; } } static struct nfs4_state_owner *nfs4_alloc_state_owner(struct nfs_server *server , struct rpc_cred *cred , gfp_t gfp_flags ) { struct nfs4_state_owner *sp ; void *tmp ; struct lock_class_key __key ; { tmp = 
kzalloc(592UL, gfp_flags); sp = (struct nfs4_state_owner *)tmp; if ((unsigned long )sp == (unsigned long )((struct nfs4_state_owner *)0)) { return (0); } else { } sp->so_server = server; sp->so_cred = get_rpccred(cred); spinlock_check(& sp->so_lock); __raw_spin_lock_init(& sp->so_lock.ldv_5961.rlock, "&(&sp->so_lock)->rlock", & __key); INIT_LIST_HEAD(& sp->so_states); nfs4_init_seqid_counter(& sp->so_seqid); atomic_set(& sp->so_count, 1); INIT_LIST_HEAD(& sp->so_lru); return (sp); } } static void nfs4_drop_state_owner(struct nfs4_state_owner *sp ) { struct rb_node *rb_node ; struct nfs_server *server ; struct nfs_client *clp ; { rb_node = & sp->so_server_node; if (rb_node->__rb_parent_color != (unsigned long )rb_node) { server = sp->so_server; clp = server->nfs_client; spin_lock(& clp->cl_lock); if (rb_node->__rb_parent_color != (unsigned long )rb_node) { rb_erase(rb_node, & server->state_owners); rb_node->__rb_parent_color = (unsigned long )rb_node; } else { } spin_unlock(& clp->cl_lock); } else { } return; } } static void nfs4_free_state_owner(struct nfs4_state_owner *sp ) { { nfs4_destroy_seqid_counter(& sp->so_seqid); put_rpccred(sp->so_cred); kfree((void const *)sp); return; } } static void nfs4_gc_state_owners(struct nfs_server *server ) { struct nfs_client *clp ; struct nfs4_state_owner *sp ; struct nfs4_state_owner *tmp ; unsigned long time_min ; unsigned long time_max ; struct list_head doomed ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { clp = server->nfs_client; doomed.next = & doomed; doomed.prev = & doomed; spin_lock(& clp->cl_lock); time_max = jiffies; time_min = (unsigned long )((long )time_max - (long )clp->cl_lease_time); __mptr = (struct list_head const *)server->state_owners_lru.next; sp = (struct nfs4_state_owner *)__mptr + 0xfffffffffffffff8UL; __mptr___0 = (struct list_head const *)sp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_50415; ldv_50414: ; if ((long )sp->so_expires - (long )time_min >= 0L && (long )time_max - (long )sp->so_expires >= 0L) { goto ldv_50413; } else { } list_move(& sp->so_lru, & doomed); nfs4_remove_state_owner_locked(sp); sp = tmp; __mptr___1 = (struct list_head const *)tmp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___1 + 0xfffffffffffffff8UL; ldv_50415: ; if ((unsigned long )(& sp->so_lru) != (unsigned long )(& server->state_owners_lru)) { goto ldv_50414; } else { } ldv_50413: spin_unlock(& clp->cl_lock); __mptr___2 = (struct list_head const *)doomed.next; sp = (struct nfs4_state_owner *)__mptr___2 + 0xfffffffffffffff8UL; __mptr___3 = (struct list_head const *)sp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___3 + 0xfffffffffffffff8UL; goto ldv_50423; ldv_50422: list_del(& sp->so_lru); nfs4_free_state_owner(sp); sp = tmp; __mptr___4 = (struct list_head const *)tmp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___4 + 0xfffffffffffffff8UL; ldv_50423: ; if ((unsigned long )(& sp->so_lru) != (unsigned long )(& doomed)) { goto ldv_50422; } else { } return; } } struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server , struct rpc_cred *cred , gfp_t gfp_flags ) { struct nfs_client *clp ; struct nfs4_state_owner *sp ; struct nfs4_state_owner *new ; int tmp ; void *tmp___0 ; { clp = server->nfs_client; spin_lock(& clp->cl_lock); sp = nfs4_find_state_owner_locked(server, cred); spin_unlock(& 
clp->cl_lock); if ((unsigned long )sp != (unsigned long )((struct nfs4_state_owner *)0)) { goto out; } else { } new = nfs4_alloc_state_owner(server, cred, gfp_flags); if ((unsigned long )new == (unsigned long )((struct nfs4_state_owner *)0)) { goto out; } else { } ldv_50435: tmp = ida_pre_get(& server->openowner_id, gfp_flags); if (tmp == 0) { goto ldv_50434; } else { } spin_lock(& clp->cl_lock); sp = nfs4_insert_state_owner_locked(new); spin_unlock(& clp->cl_lock); tmp___0 = ERR_PTR(-11L); if ((unsigned long )tmp___0 == (unsigned long )((void *)sp)) { goto ldv_50435; } else { } ldv_50434: ; if ((unsigned long )sp != (unsigned long )new) { nfs4_free_state_owner(new); } else { } out: nfs4_gc_state_owners(server); return (sp); } } void nfs4_put_state_owner(struct nfs4_state_owner *sp ) { struct nfs_server *server ; struct nfs_client *clp ; int tmp ; { server = sp->so_server; clp = server->nfs_client; tmp = _atomic_dec_and_lock(& sp->so_count, & clp->cl_lock); if (tmp == 0) { return; } else { } sp->so_expires = jiffies; list_add_tail(& sp->so_lru, & server->state_owners_lru); spin_unlock(& clp->cl_lock); return; } } void nfs4_purge_state_owners(struct nfs_server *server ) { struct nfs_client *clp ; struct nfs4_state_owner *sp ; struct nfs4_state_owner *tmp ; struct list_head doomed ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; { clp = server->nfs_client; doomed.next = & doomed; doomed.prev = & doomed; spin_lock(& clp->cl_lock); __mptr = (struct list_head const *)server->state_owners_lru.next; sp = (struct nfs4_state_owner *)__mptr + 0xfffffffffffffff8UL; __mptr___0 = (struct list_head const *)sp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_50455; ldv_50454: list_move(& sp->so_lru, & doomed); nfs4_remove_state_owner_locked(sp); sp = tmp; __mptr___1 = (struct list_head const *)tmp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___1 + 0xfffffffffffffff8UL; ldv_50455: ; if ((unsigned long )(& sp->so_lru) != (unsigned long )(& server->state_owners_lru)) { goto ldv_50454; } else { } spin_unlock(& clp->cl_lock); __mptr___2 = (struct list_head const *)doomed.next; sp = (struct nfs4_state_owner *)__mptr___2 + 0xfffffffffffffff8UL; __mptr___3 = (struct list_head const *)sp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___3 + 0xfffffffffffffff8UL; goto ldv_50464; ldv_50463: list_del(& sp->so_lru); nfs4_free_state_owner(sp); sp = tmp; __mptr___4 = (struct list_head const *)tmp->so_lru.next; tmp = (struct nfs4_state_owner *)__mptr___4 + 0xfffffffffffffff8UL; ldv_50464: ; if ((unsigned long )(& sp->so_lru) != (unsigned long )(& doomed)) { goto ldv_50463; } else { } return; } } static struct nfs4_state *nfs4_alloc_open_state(void) { struct nfs4_state *state ; void *tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { tmp = kzalloc(280UL, 80U); state = (struct nfs4_state *)tmp; if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { return (0); } else { } atomic_set(& state->count, 1); INIT_LIST_HEAD(& state->lock_states); spinlock_check(& state->state_lock); __raw_spin_lock_init(& state->state_lock.ldv_5961.rlock, "&(&state->state_lock)->rlock", & __key); state->seqlock.sequence = 0U; spinlock_check(& state->seqlock.lock); __raw_spin_lock_init(& state->seqlock.lock.ldv_5961.rlock, "&(&(&state->seqlock)->lock)->rlock", & __key___0); return 
(state); } } void nfs4_state_set_mode_locked(struct nfs4_state *state , fmode_t fmode ) { { if (state->state == fmode) { return; } else { } if (((state->state ^ fmode) & 2U) != 0U) { if ((fmode & 2U) != 0U) { list_move(& state->open_states, & (state->owner)->so_states); } else { list_move_tail(& state->open_states, & (state->owner)->so_states); } } else { } state->state = fmode; return; } } static struct nfs4_state *__nfs4_find_state_byowner(struct inode *inode , struct nfs4_state_owner *owner ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs4_state *state ; struct list_head const *__mptr ; int tmp___0 ; struct list_head const *__mptr___0 ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; __mptr = (struct list_head const *)nfsi->open_states.next; state = (struct nfs4_state *)__mptr + 0xfffffffffffffff0UL; goto ldv_50488; ldv_50487: ; if ((unsigned long )state->owner != (unsigned long )owner) { goto ldv_50486; } else { } tmp___0 = atomic_add_unless(& state->count, 1, 0); if (tmp___0 != 0) { return (state); } else { } ldv_50486: __mptr___0 = (struct list_head const *)state->inode_states.next; state = (struct nfs4_state *)__mptr___0 + 0xfffffffffffffff0UL; ldv_50488: ; if ((unsigned long )(& state->inode_states) != (unsigned long )(& nfsi->open_states)) { goto ldv_50487; } else { } return (0); } } static void nfs4_free_open_state(struct nfs4_state *state ) { { kfree((void const *)state); return; } } struct nfs4_state *nfs4_get_open_state(struct inode *inode , struct nfs4_state_owner *owner ) { struct nfs4_state *state ; struct nfs4_state *new ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; spin_lock(& inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); spin_unlock(& inode->i_lock); if ((unsigned long )state != (unsigned long )((struct nfs4_state *)0)) { goto out; } else { } new = nfs4_alloc_open_state(); spin_lock(& owner->so_lock); spin_lock(& inode->i_lock); state = __nfs4_find_state_byowner(inode, owner); if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0) && (unsigned long )new != (unsigned long )((struct nfs4_state *)0)) { state = new; state->owner = owner; atomic_inc(& owner->so_count); list_add(& state->inode_states, & nfsi->open_states); ihold(inode); state->inode = inode; spin_unlock(& inode->i_lock); list_add_tail(& state->open_states, & owner->so_states); spin_unlock(& owner->so_lock); } else { spin_unlock(& inode->i_lock); spin_unlock(& owner->so_lock); if ((unsigned long )new != (unsigned long )((struct nfs4_state *)0)) { nfs4_free_open_state(new); } else { } } out: ; return (state); } } void nfs4_put_open_state(struct nfs4_state *state ) { struct inode *inode ; struct nfs4_state_owner *owner ; int tmp ; { inode = state->inode; owner = state->owner; tmp = _atomic_dec_and_lock(& state->count, & owner->so_lock); if (tmp == 0) { return; } else { } spin_lock(& inode->i_lock); list_del(& state->inode_states); list_del(& state->open_states); spin_unlock(& inode->i_lock); spin_unlock(& owner->so_lock); iput(inode); nfs4_free_open_state(state); nfs4_put_state_owner(owner); return; } } static void __nfs4_close(struct nfs4_state *state , fmode_t fmode , gfp_t gfp_mask , int wait ) { struct nfs4_state_owner *owner ; int call_close ; fmode_t newstate ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { owner = state->owner; call_close = 0; atomic_inc(& owner->so_count); spin_lock(& owner->so_lock); switch (fmode & 3U) { case 1U: state->n_rdonly = state->n_rdonly - 1U; goto 
ldv_50516; case 2U: state->n_wronly = state->n_wronly - 1U; goto ldv_50516; case 3U: state->n_rdwr = state->n_rdwr - 1U; } ldv_50516: newstate = 3U; if (state->n_rdwr == 0U) { if (state->n_rdonly == 0U) { newstate = newstate & 4294967294U; tmp = constant_test_bit(2U, (unsigned long const volatile *)(& state->flags)); call_close = tmp | call_close; tmp___0 = constant_test_bit(4U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___0 | call_close; } else { } if (state->n_wronly == 0U) { newstate = newstate & 4294967293U; tmp___1 = constant_test_bit(3U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___1 | call_close; tmp___2 = constant_test_bit(4U, (unsigned long const volatile *)(& state->flags)); call_close = tmp___2 | call_close; } else { } if (newstate == 0U) { clear_bit(1, (unsigned long volatile *)(& state->flags)); } else { } } else { } nfs4_state_set_mode_locked(state, newstate); spin_unlock(& owner->so_lock); if (call_close == 0) { nfs4_put_open_state(state); nfs4_put_state_owner(owner); } else { nfs4_do_close(state, gfp_mask, wait); } return; } } void nfs4_close_state(struct nfs4_state *state , fmode_t fmode ) { { __nfs4_close(state, fmode, 80U, 0); return; } } void nfs4_close_sync(struct nfs4_state *state , fmode_t fmode ) { { __nfs4_close(state, fmode, 208U, 1); return; } } static struct nfs4_lock_state *__nfs4_find_lock_state(struct nfs4_state *state , fl_owner_t fl_owner , pid_t fl_pid , unsigned int type ) { struct nfs4_lock_state *pos ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)state->lock_states.next; pos = (struct nfs4_lock_state *)__mptr; goto ldv_50543; ldv_50542: ; if (type != 0U && pos->ls_owner.lo_type != type) { goto ldv_50538; } else { } switch (pos->ls_owner.lo_type) { case 2U: ; if ((unsigned long )pos->ls_owner.lo_u.posix_owner != (unsigned long )fl_owner) { goto ldv_50538; } else { } goto ldv_50540; case 1U: ; if (pos->ls_owner.lo_u.flock_owner != fl_pid) { goto ldv_50538; } else { } } ldv_50540: atomic_inc(& pos->ls_count); return (pos); ldv_50538: __mptr___0 = (struct list_head const *)pos->ls_locks.next; pos = (struct nfs4_lock_state *)__mptr___0; ldv_50543: ; if ((unsigned long )(& pos->ls_locks) != (unsigned long )(& state->lock_states)) { goto ldv_50542; } else { } return (0); } } static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state , fl_owner_t fl_owner , pid_t fl_pid , unsigned int type ) { struct nfs4_lock_state *lsp ; struct nfs_server *server ; void *tmp ; { server = (state->owner)->so_server; tmp = kzalloc(496UL, 80U); lsp = (struct nfs4_lock_state *)tmp; if ((unsigned long )lsp == (unsigned long )((struct nfs4_lock_state *)0)) { return (0); } else { } nfs4_init_seqid_counter(& lsp->ls_seqid); atomic_set(& lsp->ls_count, 1); lsp->ls_state = state; lsp->ls_owner.lo_type = type; switch (lsp->ls_owner.lo_type) { case 1U: lsp->ls_owner.lo_u.flock_owner = fl_pid; goto ldv_50554; case 2U: lsp->ls_owner.lo_u.posix_owner = fl_owner; goto ldv_50554; default: ; goto out_free; } ldv_50554: lsp->ls_seqid.owner_id = ida_simple_get(& server->lockowner_id, 0U, 0U, 80U); if (lsp->ls_seqid.owner_id < 0) { goto out_free; } else { } INIT_LIST_HEAD(& lsp->ls_locks); return (lsp); out_free: kfree((void const *)lsp); return (0); } } void nfs4_free_lock_state(struct nfs_server *server , struct nfs4_lock_state *lsp ) { { ida_simple_remove(& server->lockowner_id, (unsigned int )lsp->ls_seqid.owner_id); nfs4_destroy_seqid_counter(& lsp->ls_seqid); 
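/* nfs4_free_lock_state(): the ida-allocated lockowner id and the per-lock seqid
 * counter have already been released above; the kfree() that follows frees the
 * nfs4_lock_state structure itself. Descriptive comment only. */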
kfree((void const *)lsp); return; } } static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state , fl_owner_t owner , pid_t pid , unsigned int type ) { struct nfs4_lock_state *lsp ; struct nfs4_lock_state *new ; { new = 0; ldv_50571: spin_lock(& state->state_lock); lsp = __nfs4_find_lock_state(state, owner, pid, type); if ((unsigned long )lsp != (unsigned long )((struct nfs4_lock_state *)0)) { goto ldv_50570; } else { } if ((unsigned long )new != (unsigned long )((struct nfs4_lock_state *)0)) { list_add(& new->ls_locks, & state->lock_states); set_bit(0U, (unsigned long volatile *)(& state->flags)); lsp = new; new = 0; goto ldv_50570; } else { } spin_unlock(& state->state_lock); new = nfs4_alloc_lock_state(state, owner, pid, type); if ((unsigned long )new == (unsigned long )((struct nfs4_lock_state *)0)) { return (0); } else { } goto ldv_50571; ldv_50570: spin_unlock(& state->state_lock); if ((unsigned long )new != (unsigned long )((struct nfs4_lock_state *)0)) { nfs4_free_lock_state((state->owner)->so_server, new); } else { } return (lsp); } } void nfs4_put_lock_state(struct nfs4_lock_state *lsp ) { struct nfs4_state *state ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { if ((unsigned long )lsp == (unsigned long )((struct nfs4_lock_state *)0)) { return; } else { } state = lsp->ls_state; tmp = _atomic_dec_and_lock(& lsp->ls_count, & state->state_lock); if (tmp == 0) { return; } else { } list_del(& lsp->ls_locks); tmp___0 = list_empty((struct list_head const *)(& state->lock_states)); if (tmp___0 != 0) { clear_bit(0, (unsigned long volatile *)(& state->flags)); } else { } spin_unlock(& state->state_lock); tmp___2 = constant_test_bit(0U, (unsigned long const volatile *)(& lsp->ls_flags)); if (tmp___2 != 0) { tmp___1 = nfs4_release_lockowner(lsp); if (tmp___1 == 0) { return; } else { } } else { } nfs4_free_lock_state(((lsp->ls_state)->owner)->so_server, lsp); return; } } static void nfs4_fl_copy_lock(struct file_lock *dst , struct file_lock *src ) { struct nfs4_lock_state *lsp ; { lsp = src->fl_u.nfs4_fl.owner; dst->fl_u.nfs4_fl.owner = lsp; atomic_inc(& lsp->ls_count); return; } } static void nfs4_fl_release_lock(struct file_lock *fl ) { { nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner); return; } } static struct file_lock_operations const nfs4_fl_lock_ops = {& nfs4_fl_copy_lock, & nfs4_fl_release_lock}; int nfs4_set_lock_state(struct nfs4_state *state , struct file_lock *fl ) { struct nfs4_lock_state *lsp ; { if ((unsigned long )fl->fl_ops != (unsigned long )((struct file_lock_operations const *)0)) { return (0); } else { } if ((int )fl->fl_flags & 1) { lsp = nfs4_get_lock_state(state, fl->fl_owner, 0, 2U); } else if ((fl->fl_flags & 2U) != 0U) { lsp = nfs4_get_lock_state(state, 0, (pid_t )fl->fl_pid, 1U); } else { return (-22); } if ((unsigned long )lsp == (unsigned long )((struct nfs4_lock_state *)0)) { return (-12); } else { } fl->fl_u.nfs4_fl.owner = lsp; fl->fl_ops = & nfs4_fl_lock_ops; return (0); } } static bool nfs4_copy_lock_stateid(nfs4_stateid *dst , struct nfs4_state *state , struct nfs_lockowner const *lockowner ) { struct nfs4_lock_state *lsp ; fl_owner_t fl_owner ; pid_t fl_pid ; bool ret ; int tmp ; int tmp___0 ; { ret = 0; if ((unsigned long )lockowner == (unsigned long )((struct nfs_lockowner const *)0)) { goto out; } else { } tmp = constant_test_bit(0U, (unsigned long const volatile *)(& state->flags)); if (tmp == 0) { goto out; } else { } fl_owner = lockowner->l_owner; fl_pid = lockowner->l_pid; spin_lock(& state->state_lock); lsp = 
__nfs4_find_lock_state(state, fl_owner, fl_pid, 0U); if ((unsigned long )lsp != (unsigned long )((struct nfs4_lock_state *)0)) { tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& lsp->ls_flags)); if (tmp___0 != 0) { nfs4_stateid_copy(dst, (nfs4_stateid const *)(& lsp->ls_stateid)); ret = 1; } else { } } else { } spin_unlock(& state->state_lock); nfs4_put_lock_state(lsp); out: ; return (ret); } } static void nfs4_copy_open_stateid(nfs4_stateid *dst , struct nfs4_state *state ) { int seq ; unsigned int tmp ; int tmp___0 ; { ldv_50605: tmp = read_seqbegin((seqlock_t const *)(& state->seqlock)); seq = (int )tmp; nfs4_stateid_copy(dst, (nfs4_stateid const *)(& state->stateid)); tmp___0 = read_seqretry((seqlock_t const *)(& state->seqlock), (unsigned int )seq); if (tmp___0 != 0) { goto ldv_50605; } else { } return; } } void nfs4_select_rw_stateid(nfs4_stateid *dst , struct nfs4_state *state , fmode_t fmode , struct nfs_lockowner const *lockowner ) { bool tmp ; bool tmp___0 ; { tmp = nfs4_copy_delegation_stateid(dst, state->inode, fmode); if ((int )tmp) { return; } else { } tmp___0 = nfs4_copy_lock_stateid(dst, state, lockowner); if ((int )tmp___0) { return; } else { } nfs4_copy_open_stateid(dst, state); return; } } struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter , gfp_t gfp_mask ) { struct nfs_seqid *new ; void *tmp ; { tmp = kmalloc(32UL, gfp_mask); new = (struct nfs_seqid *)tmp; if ((unsigned long )new != (unsigned long )((struct nfs_seqid *)0)) { new->sequence = counter; INIT_LIST_HEAD(& new->list); new->task = 0; } else { } return (new); } } void nfs_release_seqid(struct nfs_seqid *seqid ) { struct nfs_seqid_counter *sequence ; int tmp ; struct nfs_seqid *next ; struct list_head const *__mptr ; int tmp___0 ; { tmp = list_empty((struct list_head const *)(& seqid->list)); if (tmp != 0) { return; } else { } sequence = seqid->sequence; spin_lock(& sequence->lock); list_del_init(& seqid->list); tmp___0 = list_empty((struct list_head const *)(& sequence->list)); if (tmp___0 == 0) { __mptr = (struct list_head const *)sequence->list.next; next = (struct nfs_seqid *)__mptr + 0xfffffffffffffff8UL; rpc_wake_up_queued_task(& sequence->wait, next->task); } else { } spin_unlock(& sequence->lock); return; } } void nfs_free_seqid(struct nfs_seqid *seqid ) { { nfs_release_seqid(seqid); kfree((void const *)seqid); return; } } static void nfs_increment_seqid(int status , struct nfs_seqid *seqid ) { struct ratelimit_state _rs ; int tmp ; { switch (status) { case 0: ; goto ldv_50633; case -10026: ; if ((seqid->sequence)->flags & 1) { return; } else { } _rs.lock.raw_lock.ldv_2024.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = 0xffffffffffffffffUL; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp = ___ratelimit(& _rs, "nfs_increment_seqid"); if (tmp != 0) { printk("\fNFS: v4 server returned a bad sequence-id error on an unconfirmed sequence %p!\n", seqid->sequence); } else { } case -10022: ; case -10023: ; case -10025: ; case -10036: ; case -10018: ; case -10020: ; return; } ldv_50633: (seqid->sequence)->counter = (seqid->sequence)->counter + 1U; return; } } void nfs_increment_open_seqid(int status , struct nfs_seqid *seqid ) { struct nfs4_state_owner *sp ; struct nfs_seqid_counter const *__mptr ; 
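/* nfs_increment_open_seqid(): __mptr recovers the enclosing nfs4_state_owner from
 * its embedded nfs_seqid_counter (a CIL-expanded container_of()). A status of
 * -10026 (NFS4ERR_BAD_SEQID, assuming the usual NFSv4 error numbering) drops the
 * state owner, and the open seqid is only bumped when the client has no NFSv4.1
 * session, i.e. when nfs4_has_session() returns 0. */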
struct nfs_server *server ; int tmp ; { __mptr = (struct nfs_seqid_counter const *)seqid->sequence; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffff58UL; server = sp->so_server; if (status == -10026) { nfs4_drop_state_owner(sp); } else { } tmp = nfs4_has_session((struct nfs_client const *)server->nfs_client); if (tmp == 0) { nfs_increment_seqid(status, seqid); } else { } return; } } void nfs_increment_lock_seqid(int status , struct nfs_seqid *seqid ) { { nfs_increment_seqid(status, seqid); return; } } int nfs_wait_on_sequence(struct nfs_seqid *seqid , struct rpc_task *task ) { struct nfs_seqid_counter *sequence ; int status ; int tmp ; struct list_head const *__mptr ; { sequence = seqid->sequence; status = 0; spin_lock(& sequence->lock); seqid->task = task; tmp = list_empty((struct list_head const *)(& seqid->list)); if (tmp != 0) { list_add_tail(& seqid->list, & sequence->list); } else { } __mptr = (struct list_head const *)sequence->list.next; if ((unsigned long )((struct nfs_seqid *)__mptr + 0xfffffffffffffff8UL) == (unsigned long )seqid) { goto unlock; } else { } rpc_sleep_on(& sequence->wait, task, 0); status = -11; unlock: spin_unlock(& sequence->lock); return (status); } } static int nfs4_run_state_manager(void *ptr ) ; static void nfs4_clear_state_manager_bit(struct nfs_client *clp ) { { __asm__ volatile ("": : : "memory"); clear_bit(0, (unsigned long volatile *)(& clp->cl_state)); __asm__ volatile ("": : : "memory"); wake_up_bit((void *)(& clp->cl_state), 0); rpc_wake_up(& clp->cl_rpcwaitq); return; } } void nfs4_schedule_state_manager(struct nfs_client *clp ) { struct task_struct *task ; char buf[58U] ; int tmp ; char const *tmp___0 ; struct task_struct *__k ; struct task_struct *tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = test_and_set_bit(0, (unsigned long volatile *)(& clp->cl_state)); if (tmp != 0) { return; } else { } __module_get(& __this_module); atomic_inc(& clp->cl_count); rcu_read_lock___1(); tmp___0 = rpc_peeraddr2str(clp->cl_rpcclient, 0); snprintf((char *)(& buf), 58UL, "%s-manager", tmp___0); rcu_read_unlock___1(); tmp___1 = kthread_create_on_node(& nfs4_run_state_manager, (void *)clp, -1, (char const *)(& buf)); __k = tmp___1; tmp___2 = IS_ERR((void const *)__k); if (tmp___2 == 0L) { wake_up_process(__k); } else { } task = __k; tmp___4 = IS_ERR((void const *)task); if (tmp___4 != 0L) { tmp___3 = PTR_ERR((void const *)task); printk("\v%s: kthread_run: %ld\n", "nfs4_schedule_state_manager", tmp___3); nfs4_clear_state_manager_bit(clp); nfs_put_client(clp); module_put(& __this_module); } else { } return; } } void nfs4_schedule_lease_recovery(struct nfs_client *clp ) { int tmp ; long tmp___0 ; { if ((unsigned long )clp == (unsigned long )((struct nfs_client *)0)) { return; } else { } tmp = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp == 0) { set_bit(1U, (unsigned long volatile *)(& clp->cl_state)); } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: scheduling lease recovery for server %s\n", "nfs4_schedule_lease_recovery", clp->cl_hostname); } else { } nfs4_schedule_state_manager(clp); return; } } int nfs4_wait_clnt_recover(struct nfs_client *clp ) { int res ; { __might_sleep("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4state.c.prepared", 1281, 0); 
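/* nfs4_wait_clnt_recover(): sleep killably until bit 0 of cl_state clears (in the
 * upstream kernel source this appears to be the NFS4CLNT_MANAGER_RUNNING flag),
 * then propagate any negative cl_cons_state as a permanent client setup error. */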
res = wait_on_bit((void *)(& clp->cl_state), 0, & nfs_wait_bit_killable, 130U); if (res != 0) { return (res); } else { } if (clp->cl_cons_state < 0) { return (clp->cl_cons_state); } else { } return (0); } } int nfs4_client_recover_expired_lease(struct nfs_client *clp ) { unsigned int loop ; int ret ; int tmp ; int tmp___0 ; { loop = 10U; goto ldv_50699; ldv_50698: ret = nfs4_wait_clnt_recover(clp); if (ret != 0) { goto ldv_50697; } else { } tmp = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp == 0) { tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___0 == 0) { goto ldv_50697; } else { } } else { } nfs4_schedule_state_manager(clp); ret = -5; loop = loop - 1U; ldv_50699: ; if (loop != 0U) { goto ldv_50698; } else { } ldv_50697: ; return (ret); } } static void nfs40_handle_cb_pathdown(struct nfs_client *clp ) { long tmp ; { set_bit(2U, (unsigned long volatile *)(& clp->cl_state)); nfs_expire_all_delegations(clp); tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: handling CB_PATHDOWN recovery for server %s\n", "nfs40_handle_cb_pathdown", clp->cl_hostname); } else { } return; } } void nfs4_schedule_path_down_recovery(struct nfs_client *clp ) { { nfs40_handle_cb_pathdown(clp); nfs4_schedule_state_manager(clp); return; } } static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp , struct nfs4_state *state ) { int tmp ; { set_bit(5U, (unsigned long volatile *)(& state->flags)); tmp = constant_test_bit(6U, (unsigned long const volatile *)(& state->flags)); if (tmp != 0) { clear_bit(5, (unsigned long volatile *)(& state->flags)); return (0); } else { } set_bit(0U, (unsigned long volatile *)(& (state->owner)->so_flags)); set_bit(3U, (unsigned long volatile *)(& clp->cl_state)); return (1); } } static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp , struct nfs4_state *state ) { { set_bit(6U, (unsigned long volatile *)(& state->flags)); clear_bit(5, (unsigned long volatile *)(& state->flags)); set_bit(1U, (unsigned long volatile *)(& (state->owner)->so_flags)); set_bit(4U, (unsigned long volatile *)(& clp->cl_state)); return (1); } } void nfs4_schedule_stateid_recovery(struct nfs_server const *server , struct nfs4_state *state ) { struct nfs_client *clp ; long tmp ; { clp = server->nfs_client; nfs4_state_mark_reclaim_nograce(clp, state); tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: scheduling stateid recovery for server %s\n", "nfs4_schedule_stateid_recovery", clp->cl_hostname); } else { } nfs4_schedule_state_manager(clp); return; } } void nfs_inode_find_state_and_recover(struct inode *inode , nfs4_stateid const *stateid ) { struct nfs_client *clp ; struct nfs_server *tmp ; struct nfs_inode *nfsi ; struct nfs_inode *tmp___0 ; struct nfs_open_context *ctx ; struct nfs4_state *state ; bool found ; struct list_head const *__mptr ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; struct list_head const *__mptr___0 ; { tmp = NFS_SERVER((struct inode const *)inode); clp = tmp->nfs_client; tmp___0 = NFS_I((struct inode const *)inode); nfsi = tmp___0; found = 0; spin_lock(& inode->i_lock); __mptr = (struct list_head const *)nfsi->open_files.next; ctx = (struct nfs_open_context *)__mptr + 0xffffffffffffffa0UL; goto ldv_50743; ldv_50742: state = ctx->state; if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto ldv_50741; } else { } tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& 
state->flags)); if (tmp___1 == 0) { goto ldv_50741; } else { } tmp___2 = nfs4_stateid_match((nfs4_stateid const *)(& state->stateid), stateid); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { goto ldv_50741; } else { } nfs4_state_mark_reclaim_nograce(clp, state); found = 1; ldv_50741: __mptr___0 = (struct list_head const *)ctx->list.next; ctx = (struct nfs_open_context *)__mptr___0 + 0xffffffffffffffa0UL; ldv_50743: ; if ((unsigned long )(& ctx->list) != (unsigned long )(& nfsi->open_files)) { goto ldv_50742; } else { } spin_unlock(& inode->i_lock); if ((int )found) { nfs4_schedule_state_manager(clp); } else { } return; } } static int nfs4_reclaim_locks(struct nfs4_state *state , struct nfs4_state_recovery_ops const *ops ) { struct inode *inode ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct file_lock *fl ; int status ; struct nfs_open_context *tmp___0 ; { inode = state->inode; tmp = NFS_I((struct inode const *)inode); nfsi = tmp; status = 0; if ((unsigned long )inode->i_flock == (unsigned long )((struct file_lock *)0)) { return (0); } else { } down_write(& nfsi->rwsem); lock_flocks(); fl = inode->i_flock; goto ldv_50775; ldv_50774: ; if ((fl->fl_flags & 3U) == 0U) { goto ldv_50753; } else { } tmp___0 = nfs_file_open_context(fl->fl_file); if ((unsigned long )tmp___0->state != (unsigned long )state) { goto ldv_50753; } else { } unlock_flocks(); status = (*(ops->recover_lock))(state, fl); switch (status) { case 0: ; goto ldv_50755; case -116: ; case -10047: ; case -10023: ; case -10025: ; case -10011: ; case -10033: ; case -10022: ; case -10052: ; case -10053: ; case -10077: ; case -10055: ; goto out; default: printk("\vNFS: %s: unhandled error %d. Zeroing state\n", "nfs4_reclaim_locks", status); case -12: ; case -10010: ; case -10034: ; case -10035: status = 0; } ldv_50755: lock_flocks(); ldv_50753: fl = fl->fl_next; ldv_50775: ; if ((unsigned long )fl != (unsigned long )((struct file_lock *)0)) { goto ldv_50774; } else { } unlock_flocks(); out: up_write(& nfsi->rwsem); return (status); } } static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp , struct nfs4_state_recovery_ops const *ops ) { struct nfs4_state *state ; struct nfs4_lock_state *lock ; int status ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; struct ratelimit_state _rs ; int tmp___0 ; int tmp___1 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; { status = 0; restart: spin_lock(& sp->so_lock); __mptr = (struct list_head const *)sp->so_states.next; state = (struct nfs4_state *)__mptr; goto ldv_50819; ldv_50818: tmp = test_and_clear_bit(ops->state_flag_bit, (unsigned long volatile *)(& state->flags)); if (tmp == 0) { goto ldv_50789; } else { } if (state->state == 0U) { goto ldv_50789; } else { } atomic_inc(& state->count); spin_unlock(& sp->so_lock); status = (*(ops->recover_open))(sp, state); if (status >= 0) { status = nfs4_reclaim_locks(state, ops); if (status >= 0) { spin_lock(& state->state_lock); __mptr___0 = (struct list_head const *)state->lock_states.next; lock = (struct nfs4_lock_state *)__mptr___0; goto ldv_50798; ldv_50797: tmp___1 = constant_test_bit(0U, (unsigned long const volatile *)(& lock->ls_flags)); if (tmp___1 == 0) { _rs.lock.raw_lock.ldv_2024.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = 0xffffffffffffffffUL; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; 
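/* The surrounding _rs field assignments are a CIL expansion of what appears to be a
 * ratelimit-state initializer (pr_warn_ratelimited-style) in the original source:
 * it throttles the "Lock reclaim failed!" warning printed when a lock state still
 * has ls_flags bit 0 clear after the enclosing open state was recovered. */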
_rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___0 = ___ratelimit(& _rs, "nfs4_reclaim_open_state"); if (tmp___0 != 0) { printk("\fNFS: %s: Lock reclaim failed!\n", "nfs4_reclaim_open_state"); } else { } } else { } __mptr___1 = (struct list_head const *)lock->ls_locks.next; lock = (struct nfs4_lock_state *)__mptr___1; ldv_50798: ; if ((unsigned long )(& lock->ls_locks) != (unsigned long )(& state->lock_states)) { goto ldv_50797; } else { } spin_unlock(& state->state_lock); nfs4_put_open_state(state); goto restart; } else { } } else { } switch (status) { default: printk("\vNFS: %s: unhandled error %d. Zeroing state\n", "nfs4_reclaim_open_state", status); case -2: ; case -12: ; case -116: memset((void *)(& state->stateid), 0, 16UL); state->state = 0U; goto ldv_50804; case -10047: ; case -10023: ; case -10025: ; case -10034: ; case -10035: nfs4_state_mark_reclaim_nograce((sp->so_server)->nfs_client, state); goto ldv_50804; case -10011: ; case -10033: nfs4_state_mark_reclaim_nograce((sp->so_server)->nfs_client, state); case -10022: ; case -10052: ; case -10053: ; case -10077: ; case -10055: ; goto out_err; } ldv_50804: nfs4_put_open_state(state); goto restart; ldv_50789: __mptr___2 = (struct list_head const *)state->open_states.next; state = (struct nfs4_state *)__mptr___2; ldv_50819: ; if ((unsigned long )(& state->open_states) != (unsigned long )(& sp->so_states)) { goto ldv_50818; } else { } spin_unlock(& sp->so_lock); return (0); out_err: nfs4_put_open_state(state); return (status); } } static void nfs4_clear_open_state(struct nfs4_state *state ) { struct nfs4_lock_state *lock ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { clear_bit(1, (unsigned long volatile *)(& state->flags)); clear_bit(2, (unsigned long volatile *)(& state->flags)); clear_bit(3, (unsigned long volatile *)(& state->flags)); clear_bit(4, (unsigned long volatile *)(& state->flags)); spin_lock(& state->state_lock); __mptr = (struct list_head const *)state->lock_states.next; lock = (struct nfs4_lock_state *)__mptr; goto ldv_50830; ldv_50829: lock->ls_seqid.flags = 0; clear_bit(0, (unsigned long volatile *)(& lock->ls_flags)); __mptr___0 = (struct list_head const *)lock->ls_locks.next; lock = (struct nfs4_lock_state *)__mptr___0; ldv_50830: ; if ((unsigned long )(& lock->ls_locks) != (unsigned long )(& state->lock_states)) { goto ldv_50829; } else { } spin_unlock(& state->state_lock); return; } } static void nfs4_reset_seqids(struct nfs_server *server , int (*mark_reclaim)(struct nfs_client * , struct nfs4_state * ) ) { struct nfs_client *clp ; struct nfs4_state_owner *sp ; struct rb_node *pos ; struct nfs4_state *state ; struct rb_node const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; struct list_head const *__mptr___1 ; { clp = server->nfs_client; spin_lock(& clp->cl_lock); pos = rb_first((struct rb_root const *)(& server->state_owners)); goto ldv_50852; ldv_50851: __mptr = (struct rb_node const *)pos; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffffe0UL; sp->so_seqid.flags = 0; spin_lock(& sp->so_lock); __mptr___0 = (struct list_head const *)sp->so_states.next; state = (struct nfs4_state *)__mptr___0; goto ldv_50849; ldv_50848: tmp = (*mark_reclaim)(clp, state); if (tmp != 0) { nfs4_clear_open_state(state); } else { } __mptr___1 = (struct list_head const *)state->open_states.next; state = (struct nfs4_state *)__mptr___1; ldv_50849: ; if ((unsigned long )(& state->open_states) != 
(unsigned long )(& sp->so_states)) { goto ldv_50848; } else { } spin_unlock(& sp->so_lock); pos = rb_next((struct rb_node const *)pos); ldv_50852: ; if ((unsigned long )pos != (unsigned long )((struct rb_node *)0)) { goto ldv_50851; } else { } spin_unlock(& clp->cl_lock); return; } } static void nfs4_state_mark_reclaim_helper(struct nfs_client *clp , int (*mark_reclaim)(struct nfs_client * , struct nfs4_state * ) ) { struct nfs_server *server ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { rcu_read_lock___1(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_50876; ldv_50875: nfs4_reset_seqids(server, mark_reclaim); __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_50876: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_50875; } else { } rcu_read_unlock___1(); return; } } static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp ) { { nfs_delegation_mark_reclaim(clp); nfs4_state_mark_reclaim_helper(clp, & nfs4_state_mark_reclaim_reboot); return; } } static void nfs4_reclaim_complete(struct nfs_client *clp , struct nfs4_state_recovery_ops const *ops ) { { if ((unsigned long )ops->reclaim_complete != (unsigned long )((int (*/* const */)(struct nfs_client * ))0)) { (*(ops->reclaim_complete))(clp); } else { } return; } } static void nfs4_clear_reclaim_server(struct nfs_server *server ) { struct nfs_client *clp ; struct nfs4_state_owner *sp ; struct rb_node *pos ; struct nfs4_state *state ; struct rb_node const *__mptr ; struct list_head const *__mptr___0 ; int tmp ; struct list_head const *__mptr___1 ; { clp = server->nfs_client; spin_lock(& clp->cl_lock); pos = rb_first((struct rb_root const *)(& server->state_owners)); goto ldv_50903; ldv_50902: __mptr = (struct rb_node const *)pos; sp = (struct nfs4_state_owner *)__mptr + 0xffffffffffffffe0UL; spin_lock(& sp->so_lock); __mptr___0 = (struct list_head const *)sp->so_states.next; state = (struct nfs4_state *)__mptr___0; goto ldv_50900; ldv_50899: tmp = test_and_clear_bit(5, (unsigned long volatile *)(& state->flags)); if (tmp == 0) { goto ldv_50898; } else { } nfs4_state_mark_reclaim_nograce(clp, state); ldv_50898: __mptr___1 = (struct list_head const *)state->open_states.next; state = (struct nfs4_state *)__mptr___1; ldv_50900: ; if ((unsigned long )(& state->open_states) != (unsigned long )(& sp->so_states)) { goto ldv_50899; } else { } spin_unlock(& sp->so_lock); pos = rb_next((struct rb_node const *)pos); ldv_50903: ; if ((unsigned long )pos != (unsigned long )((struct rb_node *)0)) { goto ldv_50902; } else { } spin_unlock(& clp->cl_lock); return; } } static int nfs4_state_clear_reclaim_reboot(struct nfs_client *clp ) { struct nfs_server *server ; int tmp ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head 
*_________p1 ; bool __warned ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; { tmp = test_and_clear_bit(3, (unsigned long volatile *)(& clp->cl_state)); if (tmp == 0) { return (0); } else { } rcu_read_lock___1(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_50924; ldv_50923: nfs4_clear_reclaim_server(server); __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_50924: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_50923; } else { } rcu_read_unlock___1(); nfs_delegation_reap_unclaimed(clp); return (1); } } static void nfs4_state_end_reclaim_reboot(struct nfs_client *clp ) { int tmp ; { tmp = nfs4_state_clear_reclaim_reboot(clp); if (tmp == 0) { return; } else { } nfs4_reclaim_complete(clp, (clp->cl_mvops)->reboot_recovery_ops); return; } } static void nfs_delegation_clear_all(struct nfs_client *clp ) { { nfs_delegation_mark_reclaim(clp); nfs_delegation_reap_unclaimed(clp); return; } } static void nfs4_state_start_reclaim_nograce(struct nfs_client *clp ) { { nfs_delegation_clear_all(clp); nfs4_state_mark_reclaim_helper(clp, & nfs4_state_mark_reclaim_nograce); return; } } static int nfs4_recovery_handle_error(struct nfs_client *clp , int error ) { long tmp ; long tmp___0 ; { switch (error) { case 0: ; goto ldv_50940; case -10048: nfs40_handle_cb_pathdown(clp); goto ldv_50940; case -10033: nfs4_state_end_reclaim_reboot(clp); goto ldv_50940; case -10022: ; case -10031: set_bit(2U, (unsigned long volatile *)(& clp->cl_state)); nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); goto ldv_50940; case -10011: set_bit(2U, (unsigned long volatile *)(& clp->cl_state)); nfs4_state_start_reclaim_nograce(clp); goto ldv_50940; case -10052: ; case -10053: ; case -10077: ; case -10078: ; case -10076: ; case -10063: set_bit(6U, (unsigned long volatile *)(& clp->cl_state)); goto ldv_50940; case -10055: set_bit(10U, (unsigned long volatile *)(& clp->cl_state)); goto ldv_50940; default: tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: failed to handle error %d for server %s\n", "nfs4_recovery_handle_error", error, clp->cl_hostname); } else { } return (error); } ldv_50940: tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: handled error %d for server %s\n", "nfs4_recovery_handle_error", error, clp->cl_hostname); } else { } return (0); } } static int nfs4_do_reclaim(struct nfs_client *clp , struct nfs4_state_recovery_ops const *ops ) { struct nfs4_state_owner *sp ; struct nfs_server *server ; struct rb_node *pos ; int status ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct rb_node const *__mptr___0 ; int tmp___0 ; int tmp___1 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___1 ; struct list_head 
*_________p1___0 ; bool __warned___0 ; int tmp___2 ; { status = 0; restart: rcu_read_lock___1(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_50985; ldv_50984: nfs4_purge_state_owners(server); spin_lock(& clp->cl_lock); pos = rb_first((struct rb_root const *)(& server->state_owners)); goto ldv_50982; ldv_50981: __mptr___0 = (struct rb_node const *)pos; sp = (struct nfs4_state_owner *)__mptr___0 + 0xffffffffffffffe0UL; tmp___0 = test_and_clear_bit(ops->owner_flag_bit, (unsigned long volatile *)(& sp->so_flags)); if (tmp___0 == 0) { goto ldv_50980; } else { } atomic_inc(& sp->so_count); spin_unlock(& clp->cl_lock); rcu_read_unlock___1(); status = nfs4_reclaim_open_state(sp, ops); if (status < 0) { set_bit((unsigned int )ops->owner_flag_bit, (unsigned long volatile *)(& sp->so_flags)); nfs4_put_state_owner(sp); tmp___1 = nfs4_recovery_handle_error(clp, status); return (tmp___1); } else { } nfs4_put_state_owner(sp); goto restart; ldv_50980: pos = rb_next((struct rb_node const *)pos); ldv_50982: ; if ((unsigned long )pos != (unsigned long )((struct rb_node *)0)) { goto ldv_50981; } else { } spin_unlock(& clp->cl_lock); __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___1 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___1 + 0xfffffffffffffff8UL; ldv_50985: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_50984; } else { } rcu_read_unlock___1(); return (status); } } static int nfs4_check_lease(struct nfs_client *clp ) { struct rpc_cred *cred ; struct nfs4_state_maintenance_ops const *ops ; int status ; int tmp ; int tmp___0 ; { ops = (clp->cl_mvops)->state_renewal_ops; tmp = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp != 0) { return (0); } else { } spin_lock(& clp->cl_lock); cred = (*(ops->get_state_renewal_cred_locked))(clp); spin_unlock(& clp->cl_lock); if ((unsigned long )cred == (unsigned long )((struct rpc_cred *)0)) { cred = nfs4_get_setclientid_cred(clp); status = -126; if ((unsigned long )cred == (unsigned long )((struct rpc_cred *)0)) { goto out; } else { } } else { } status = (*(ops->renew_lease))(clp, cred); put_rpccred(cred); out: tmp___0 = nfs4_recovery_handle_error(clp, status); return (tmp___0); } } static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp , int status ) { int tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { switch (status) { case -10063: tmp = test_and_set_bit(9, (unsigned long volatile *)(& clp->cl_state)); if (tmp != 0) { return (-526); } else { } ssleep(1U); clear_bit(7, (unsigned long volatile *)(& clp->cl_state)); goto ldv_50999; case -10022: clear_bit(7, (unsigned long volatile *)(& clp->cl_state)); nfs4_state_clear_reclaim_reboot(clp); nfs4_state_start_reclaim_reboot(clp); goto ldv_50999; case -10017: printk("\vNFS: Server %s reports our clientid is in use\n", clp->cl_hostname); nfs_mark_client_ready(clp, -1); clear_bit(7, (unsigned long volatile *)(& clp->cl_state)); return (-1); case -13: ; if ((unsigned long )clp->cl_machine_cred == (unsigned long )((struct rpc_cred *)0)) { return (-13); } 
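/* -13 is -EACCES in nfs4_handle_reclaim_lease_error(): without a machine credential
 * the lease error is fatal; otherwise nfs4_clear_machine_cred() below discards the
 * credential and control falls through to the transient cases (-10008, -110, -11),
 * which sleep for a second, mark the lease expired and let the state manager retry. */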
else { } nfs4_clear_machine_cred(clp); case -10008: ; case -110: ; case -11: ssleep(1U); goto ldv_50999; case -10021: ; if (clp->cl_cons_state == 2) { nfs_mark_client_ready(clp, -93); } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: exit with error %d for server %s\n", "nfs4_handle_reclaim_lease_error", -93, clp->cl_hostname); } else { } return (-93); case -10027: ; default: tmp___1 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: exit with error %d for server %s\n", "nfs4_handle_reclaim_lease_error", status, clp->cl_hostname); } else { } return (status); } ldv_50999: set_bit(2U, (unsigned long volatile *)(& clp->cl_state)); tmp___2 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: handled error %d for server %s\n", "nfs4_handle_reclaim_lease_error", status, clp->cl_hostname); } else { } return (0); } } static int nfs4_establish_lease(struct nfs_client *clp ) { struct rpc_cred *cred ; struct nfs4_state_recovery_ops const *ops ; int status ; { ops = (clp->cl_mvops)->reboot_recovery_ops; cred = (*(ops->get_clid_cred))(clp); if ((unsigned long )cred == (unsigned long )((struct rpc_cred *)0)) { return (-2); } else { } status = (*(ops->establish_clid))(clp, cred); put_rpccred(cred); if (status != 0) { return (status); } else { } pnfs_destroy_all_layouts(clp); return (0); } } static int nfs4_reclaim_lease(struct nfs_client *clp ) { int status ; int tmp ; int tmp___0 ; int tmp___1 ; { status = nfs4_establish_lease(clp); if (status < 0) { tmp = nfs4_handle_reclaim_lease_error(clp, status); return (tmp); } else { } tmp___0 = test_and_clear_bit(8, (unsigned long volatile *)(& clp->cl_state)); if (tmp___0 != 0) { nfs4_state_start_reclaim_nograce(clp); } else { } tmp___1 = constant_test_bit(4U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___1 == 0) { set_bit(3U, (unsigned long volatile *)(& clp->cl_state)); } else { } clear_bit(1, (unsigned long volatile *)(& clp->cl_state)); clear_bit(2, (unsigned long volatile *)(& clp->cl_state)); return (0); } } static int nfs4_purge_lease(struct nfs_client *clp ) { int status ; int tmp ; { status = nfs4_establish_lease(clp); if (status < 0) { tmp = nfs4_handle_reclaim_lease_error(clp, status); return (tmp); } else { } clear_bit(9, (unsigned long volatile *)(& clp->cl_state)); set_bit(2U, (unsigned long volatile *)(& clp->cl_state)); nfs4_state_start_reclaim_nograce(clp); return (0); } } int nfs4_discover_server_trunking(struct nfs_client *clp , struct nfs_client **result ) { struct nfs4_state_recovery_ops const *ops ; rpc_authflavor_t *flavors ; rpc_authflavor_t flav ; rpc_authflavor_t save ; struct rpc_clnt *clnt ; struct rpc_cred *cred ; int i ; int len ; int status ; long tmp ; void *tmp___0 ; long tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; { ops = (clp->cl_mvops)->reboot_recovery_ops; tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS: %s: testing \'%s\'\n", "nfs4_discover_server_trunking", clp->cl_hostname); } else { } len = 12; tmp___0 = kcalloc((size_t )len, 4UL, 208U); flavors = (rpc_authflavor_t *)tmp___0; if ((unsigned long )flavors == (unsigned long )((rpc_authflavor_t *)0)) { status = -12; goto out; } else { } len = rpcauth_list_flavors(flavors, len); if (len < 0) { status = len; goto out_free; } else { } clnt = clp->cl_rpcclient; save = (clnt->cl_auth)->au_flavor; i = 0; ldv_mutex_lock_36(& 
nfs_clid_init_mutex); status = -2; again: cred = (*(ops->get_clid_cred))(clp); if ((unsigned long )cred == (unsigned long )((struct rpc_cred *)0)) { goto out_unlock; } else { } status = (*(ops->detect_trunking))(clp, result, cred); put_rpccred(cred); switch (status) { case 0: ; goto ldv_51043; case -13: ; if ((unsigned long )clp->cl_machine_cred == (unsigned long )((struct rpc_cred *)0)) { goto ldv_51043; } else { } nfs4_clear_machine_cred(clp); case -10008: ; case -110: ; case -11: ssleep(1U); tmp___1 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: %s after status %d, retrying\n", "nfs4_discover_server_trunking", status); } else { } goto again; case -10017: ; case -10016: status = -1; if (i >= len) { goto ldv_51043; } else { } tmp___2 = i; i = i + 1; flav = *(flavors + (unsigned long )tmp___2); if (flav == save) { tmp___3 = i; i = i + 1; flav = *(flavors + (unsigned long )tmp___3); } else { } clnt = rpc_clone_client_set_auth(clnt, flav); tmp___5 = IS_ERR((void const *)clnt); if (tmp___5 != 0L) { tmp___4 = PTR_ERR((void const *)clnt); status = (int )tmp___4; goto ldv_51043; } else { } clp->cl_rpcclient = clnt; goto again; case -10021: status = -93; goto ldv_51043; case -127: ; case -10027: status = -127; } ldv_51043: ; out_unlock: ldv_mutex_unlock_37(& nfs_clid_init_mutex); out_free: kfree((void const *)flavors); out: tmp___6 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001dNFS: %s: status = %d\n", "nfs4_discover_server_trunking", status); } else { } return (status); } } void nfs4_schedule_session_recovery(struct nfs4_session *session , int err ) { struct nfs_client *clp ; { clp = session->clp; switch (err) { default: set_bit(6U, (unsigned long volatile *)(& clp->cl_state)); goto ldv_51059; case -10055: set_bit(10U, (unsigned long volatile *)(& clp->cl_state)); } ldv_51059: nfs4_schedule_lease_recovery(clp); return; } } static void nfs41_ping_server(struct nfs_client *clp ) { { set_bit(1U, (unsigned long volatile *)(& clp->cl_state)); nfs4_schedule_state_manager(clp); return; } } void nfs41_server_notify_target_slotid_update(struct nfs_client *clp ) { { nfs41_ping_server(clp); return; } } void nfs41_server_notify_highest_slotid_update(struct nfs_client *clp ) { { nfs41_ping_server(clp); return; } } static void nfs4_reset_all_state(struct nfs_client *clp ) { long tmp ; int tmp___0 ; { tmp___0 = test_and_set_bit(2, (unsigned long volatile *)(& clp->cl_state)); if (tmp___0 == 0) { set_bit(9U, (unsigned long volatile *)(& clp->cl_state)); clear_bit(7, (unsigned long volatile *)(& clp->cl_state)); nfs4_state_start_reclaim_nograce(clp); tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: scheduling reset of all state for server %s!\n", "nfs4_reset_all_state", clp->cl_hostname); } else { } nfs4_schedule_state_manager(clp); } else { } return; } } static void nfs41_handle_server_reboot(struct nfs_client *clp ) { long tmp ; int tmp___0 ; { tmp___0 = test_and_set_bit(2, (unsigned long volatile *)(& clp->cl_state)); if (tmp___0 == 0) { nfs4_state_start_reclaim_reboot(clp); tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: server %s rebooted!\n", "nfs41_handle_server_reboot", clp->cl_hostname); } else { } nfs4_schedule_state_manager(clp); } else { } return; } } static void nfs41_handle_state_revoked(struct nfs_client *clp ) { long tmp ; { nfs4_reset_all_state(clp); tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 
0L) { printk("\001d%s: state revoked on server %s\n", "nfs41_handle_state_revoked", clp->cl_hostname); } else { } return; } } static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp ) { long tmp ; { nfs_expire_all_delegations(clp); tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: Recallable state revoked on server %s!\n", "nfs41_handle_recallable_state_revoked", clp->cl_hostname); } else { } return; } } static void nfs41_handle_backchannel_fault(struct nfs_client *clp ) { int tmp ; long tmp___0 ; { nfs_expire_all_delegations(clp); tmp = test_and_set_bit(6, (unsigned long volatile *)(& clp->cl_state)); if (tmp == 0) { nfs4_schedule_state_manager(clp); } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: server %s declared a backchannel fault\n", "nfs41_handle_backchannel_fault", clp->cl_hostname); } else { } return; } } static void nfs41_handle_cb_path_down(struct nfs_client *clp ) { int tmp ; { tmp = test_and_set_bit(10, (unsigned long volatile *)(& clp->cl_state)); if (tmp == 0) { nfs4_schedule_state_manager(clp); } else { } return; } } void nfs41_handle_sequence_flag_errors(struct nfs_client *clp , u32 flags ) { long tmp ; { if (flags == 0U) { return; } else { } tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: \"%s\" (client ID %llx) flags=0x%08x\n", "nfs41_handle_sequence_flag_errors", clp->cl_hostname, clp->cl_clientid, flags); } else { } if ((flags & 256U) != 0U) { nfs41_handle_server_reboot(clp); } else { } if ((flags & 184U) != 0U) { nfs41_handle_state_revoked(clp); } else { } if ((flags & 64U) != 0U) { nfs41_handle_recallable_state_revoked(clp); } else { } if ((flags & 1024U) != 0U) { nfs41_handle_backchannel_fault(clp); } else if ((flags & 513U) != 0U) { nfs41_handle_cb_path_down(clp); } else { } return; } } static int nfs4_reset_session(struct nfs_client *clp ) { struct rpc_cred *cred ; int status ; int tmp ; long tmp___0 ; long tmp___1 ; { tmp = nfs4_has_session((struct nfs_client const *)clp); if (tmp == 0) { return (0); } else { } nfs4_begin_drain_session(clp); cred = nfs4_get_exchange_id_cred(clp); status = nfs4_proc_destroy_session(clp->cl_session, cred); if ((status != 0 && status != -10052) && status != -10078) { status = nfs4_recovery_handle_error(clp, status); goto out; } else { } memset((void *)(& (clp->cl_session)->sess_id.data), 0, 16UL); status = nfs4_proc_create_session(clp, cred); if (status != 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: session reset failed with status %d for server %s!\n", "nfs4_reset_session", status, clp->cl_hostname); } else { } status = nfs4_handle_reclaim_lease_error(clp, status); goto out; } else { } nfs41_finish_session_reset(clp); tmp___1 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: session reset was successful for server %s!\n", "nfs4_reset_session", clp->cl_hostname); } else { } out: ; if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { put_rpccred(cred); } else { } return (status); } } static int nfs4_bind_conn_to_session(struct nfs_client *clp ) { struct rpc_cred *cred ; int ret ; int tmp ; long tmp___0 ; int tmp___1 ; { tmp = nfs4_has_session((struct nfs_client const *)clp); if (tmp == 0) { return (0); } else { } nfs4_begin_drain_session(clp); cred = nfs4_get_exchange_id_cred(clp); ret = nfs4_proc_bind_conn_to_session(clp, cred); if ((unsigned 
long )cred != (unsigned long )((struct rpc_cred *)0)) { put_rpccred(cred); } else { } clear_bit(10, (unsigned long volatile *)(& clp->cl_state)); switch (ret) { case 0: tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: bind_conn_to_session was successful for server %s!\n", "nfs4_bind_conn_to_session", clp->cl_hostname); } else { } goto ldv_51119; case -10008: ssleep(1U); set_bit(10U, (unsigned long volatile *)(& clp->cl_state)); goto ldv_51119; default: tmp___1 = nfs4_recovery_handle_error(clp, ret); return (tmp___1); } ldv_51119: ; return (0); } } static void nfs4_state_manager(struct nfs_client *clp ) { int status ; char const *section ; char const *section_sep ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; size_t tmp___16 ; struct ratelimit_state _rs ; int tmp___17 ; { status = 0; section = ""; section_sep = ""; ldv_51131: tmp = constant_test_bit(9U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp != 0) { section = "purge state"; status = nfs4_purge_lease(clp); if (status < 0) { goto out_error; } else { } goto ldv_51129; } else { } tmp___0 = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___0 != 0) { section = "lease expired"; status = nfs4_reclaim_lease(clp); if (status < 0) { goto out_error; } else { } goto ldv_51129; } else { } tmp___2 = test_and_clear_bit(6, (unsigned long volatile *)(& clp->cl_state)); if (tmp___2 != 0) { section = "reset session"; status = nfs4_reset_session(clp); tmp___1 = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___1 != 0) { goto ldv_51129; } else { } if (status < 0) { goto out_error; } else { } } else { } tmp___3 = test_and_clear_bit(10, (unsigned long volatile *)(& clp->cl_state)); if (tmp___3 != 0) { section = "bind conn to session"; status = nfs4_bind_conn_to_session(clp); if (status < 0) { goto out_error; } else { } goto ldv_51129; } else { } tmp___4 = test_and_clear_bit(1, (unsigned long volatile *)(& clp->cl_state)); if (tmp___4 != 0) { section = "check lease"; status = nfs4_check_lease(clp); if (status < 0) { goto out_error; } else { } goto ldv_51129; } else { } tmp___8 = constant_test_bit(3U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___8 != 0) { section = "reclaim reboot"; status = nfs4_do_reclaim(clp, (clp->cl_mvops)->reboot_recovery_ops); tmp___5 = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___5 != 0) { goto ldv_51129; } else { tmp___6 = constant_test_bit(6U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___6 != 0) { goto ldv_51129; } else { } } nfs4_state_end_reclaim_reboot(clp); tmp___7 = constant_test_bit(4U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___7 != 0) { goto ldv_51129; } else { } if (status < 0) { goto out_error; } else { } } else { } tmp___12 = test_and_clear_bit(4, (unsigned long volatile *)(& clp->cl_state)); if (tmp___12 != 0) { section = "reclaim nograce"; status = nfs4_do_reclaim(clp, (clp->cl_mvops)->nograce_recovery_ops); tmp___9 = constant_test_bit(2U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___9 != 0) { goto ldv_51129; } else { tmp___10 = constant_test_bit(6U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___10 != 0) { goto ldv_51129; } else { tmp___11 = constant_test_bit(3U, (unsigned 
long const volatile *)(& clp->cl_state)); if (tmp___11 != 0) { goto ldv_51129; } else { } } } if (status < 0) { goto out_error; } else { } } else { } nfs4_end_drain_session(clp); tmp___13 = test_and_clear_bit(5, (unsigned long volatile *)(& clp->cl_state)); if (tmp___13 != 0) { nfs_client_return_marked_delegations(clp); goto ldv_51129; } else { } nfs4_clear_state_manager_bit(clp); if (clp->cl_state == 0UL) { goto ldv_51130; } else { } tmp___14 = test_and_set_bit(0, (unsigned long volatile *)(& clp->cl_state)); if (tmp___14 != 0) { goto ldv_51130; } else { } ldv_51129: tmp___15 = atomic_read((atomic_t const *)(& clp->cl_count)); if (tmp___15 > 1) { goto ldv_51131; } else { } ldv_51130: ; return; out_error: tmp___16 = strlen(section); if (tmp___16 != 0UL) { section_sep = ": "; } else { } _rs.lock.raw_lock.ldv_2024.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = 0xffffffffffffffffUL; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___17 = ___ratelimit(& _rs, "nfs4_state_manager"); if (tmp___17 != 0) { printk("\fNFS: state manager%s%s failed on NFSv4 server %s with error %d\n", section_sep, section, clp->cl_hostname, - status); } else { } ssleep(1U); nfs4_end_drain_session(clp); nfs4_clear_state_manager_bit(clp); return; } } static int nfs4_run_state_manager(void *ptr ) { struct nfs_client *clp ; { clp = (struct nfs_client *)ptr; allow_signal(9); nfs4_state_manager(clp); nfs_put_client(clp); __module_put_and_exit(& __this_module, 0L); return (0); } } void ldv_main2_sequence_infinite_withcheck_stateful(void) { struct file_lock *var_group1 ; int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_51161; ldv_51160: tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_handler_precall(); nfs4_fl_copy_lock(var_group1, var_group1); goto ldv_51157; case 1: ldv_handler_precall(); nfs4_fl_release_lock(var_group1); goto ldv_51157; default: ; goto ldv_51157; } ldv_51157: ; ldv_51161: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_51160; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_29(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_31(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_32(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_33(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_34(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_35(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_36(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_nfs_clid_init_mutex(ldv_func_arg1); 
mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_37(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_nfs_clid_init_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_50(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_48(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_51(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_53(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_47(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_49(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_52(struct mutex *ldv_func_arg1 ) ; extern struct workqueue_struct *system_wq ; extern bool mod_delayed_work(struct workqueue_struct * , struct delayed_work * , unsigned long ) ; extern bool cancel_delayed_work_sync(struct delayed_work * ) ; void nfs4_kill_renewd(struct nfs_client *clp ) ; void nfs4_renew_state(struct work_struct *work ) ; void nfs_expire_unreferenced_delegations(struct nfs_client *clp ) ; int nfs_delegations_present(struct nfs_client *clp ) ; void nfs4_renew_state(struct work_struct *work ) { struct nfs4_state_maintenance_ops const *ops ; struct nfs_client *clp ; struct work_struct const *__mptr ; struct rpc_cred *cred ; long lease ; unsigned long last ; unsigned long now ; unsigned int renew_flags ; long tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; { __mptr = (struct work_struct const *)work; clp = (struct nfs_client *)__mptr + 0xfffffffffffffe90UL; renew_flags = 0U; ops = (clp->cl_mvops)->state_renewal_ops; tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: start\n", "nfs4_renew_state"); } else { } tmp___0 = constant_test_bit(4U, (unsigned long const volatile *)(& clp->cl_res_state)); if (tmp___0 != 0) { goto out; } else { } spin_lock(& clp->cl_lock); lease = (long )clp->cl_lease_time; last = clp->cl_last_renewal; now = jiffies; if ((long )((unsigned long )(lease / 3L) + last) - (long )now < 0L) { renew_flags = renew_flags | 1U; } else { } tmp___1 = nfs_delegations_present(clp); if (tmp___1 != 0) { renew_flags = renew_flags | 2U; } else { } if (renew_flags != 0U) { cred = (*(ops->get_state_renewal_cred_locked))(clp); spin_unlock(& clp->cl_lock); if ((unsigned long )cred == (unsigned long )((struct rpc_cred *)0)) { if ((renew_flags & 2U) == 0U) { set_bit(2U, (unsigned long volatile *)(& clp->cl_state)); goto out; } else { } nfs_expire_all_delegations(clp); } else { (*(ops->sched_state_renewal))(clp, cred, renew_flags); put_rpccred(cred); goto out_exp; } } else { tmp___2 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: failed to call renewd. Reason: lease not expired \n", "nfs4_renew_state"); } else { } spin_unlock(& clp->cl_lock); } nfs4_schedule_state_renewal(clp); out_exp: nfs_expire_unreferenced_delegations(clp); out: tmp___3 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: done\n", "nfs4_renew_state"); } else { } return; } } void nfs4_schedule_state_renewal(struct nfs_client *clp ) { long timeout ; long tmp ; { spin_lock(& clp->cl_lock); timeout = (long )(((clp->cl_lease_time * 2UL) / 3UL + clp->cl_last_renewal) - (unsigned long )jiffies); if (timeout <= 1249L) { timeout = 1250L; } else { } tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: requeueing work. 
Lease period = %ld\n", "nfs4_schedule_state_renewal", (timeout + 249L) / 250L); } else { } mod_delayed_work(system_wq, & clp->cl_renewd, (unsigned long )timeout); set_bit(3U, (unsigned long volatile *)(& clp->cl_res_state)); spin_unlock(& clp->cl_lock); return; } } void nfs4_kill_renewd(struct nfs_client *clp ) { { cancel_delayed_work_sync(& clp->cl_renewd); return; } } void ldv_mutex_lock_47(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_48(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_49(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_50(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_51(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_52(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_53(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } extern char *strchr(char const * , int ) ; int ldv_mutex_trylock_64(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_62(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_65(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_67(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_61(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_63(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_66(struct mutex *ldv_func_arg1 ) ; int nfs_idmap_init(void) ; void nfs_idmap_quit(void) ; extern void truncate_inode_pages(struct address_space * , loff_t ) ; extern struct dentry *mount_subtree(struct vfsmount * , char const * ) ; extern void clear_inode(struct inode * ) ; extern struct nfs_fh *nfs_alloc_fhandle(void) ; __inline static void nfs_free_fhandle(struct nfs_fh const *fh ) { { kfree((void const *)fh); return; } } void nfs_inode_return_delegation_noreclaim(struct inode *inode ) ; extern struct file_system_type nfs4_fs_type ; struct nfs_subversion nfs_v4 ; int nfs4_register_sysctl(void) ; void nfs4_unregister_sysctl(void) ; extern void mntput(struct vfsmount * ) ; extern struct vfsmount *vfs_kern_mount(struct file_system_type * , int , char const * , void * ) ; struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data , struct nfs_fh *mntfh ) ; extern struct inode *nfs_alloc_inode(struct super_block * ) ; extern void nfs_destroy_inode(struct inode * ) ; extern int nfs_write_inode(struct inode * , struct writeback_control * ) ; extern int nfs_drop_inode(struct inode * ) ; extern void nfs_clear_inode(struct inode * ) ; struct file_system_type nfs4_referral_fs_type ; extern int nfs_set_sb_security(struct super_block * , struct dentry * , struct nfs_mount_info * ) ; extern int nfs_clone_sb_security(struct super_block * , struct dentry * , struct nfs_mount_info * ) ; extern struct dentry *nfs_fs_mount_common(struct nfs_server * , int , char const * , struct nfs_mount_info * , struct nfs_subversion * ) ; extern void nfs_kill_super(struct super_block * ) ; extern void nfs_fill_super(struct super_block * , struct 
nfs_mount_info * ) ; extern void nfs_umount_begin(struct super_block * ) ; extern int nfs_statfs(struct dentry * , struct kstatfs * ) ; extern int nfs_show_options(struct seq_file * , struct dentry * ) ; extern int nfs_show_devname(struct seq_file * , struct dentry * ) ; extern int nfs_show_path(struct seq_file * , struct dentry * ) ; extern int nfs_show_stats(struct seq_file * , struct dentry * ) ; extern void nfs_put_super(struct super_block * ) ; extern int nfs_remount(struct super_block * , int * , char * ) ; void pnfs_destroy_layout(struct nfs_inode *nfsi ) ; int pnfs_layoutcommit_inode(struct inode *inode , bool sync ) ; extern void register_nfs_version(struct nfs_subversion * ) ; extern void unregister_nfs_version(struct nfs_subversion * ) ; static int nfs4_write_inode(struct inode *inode , struct writeback_control *wbc ) ; static void nfs4_evict_inode(struct inode *inode ) ; static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type , int flags , char const *dev_name___0 , void *info ) ; static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type , int flags , char const *dev_name___0 , void *raw_data ) ; static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type , int flags , char const *dev_name___0 , void *raw_data ) ; static struct file_system_type nfs4_remote_fs_type = {"nfs4", 49154, & nfs4_remote_mount, & nfs_kill_super, & __this_module, 0, {0}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}; static struct file_system_type nfs4_remote_referral_fs_type = {"nfs4", 49154, & nfs4_remote_referral_mount, & nfs_kill_super, & __this_module, 0, {0}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}; struct file_system_type nfs4_referral_fs_type = {"nfs4", 49154, & nfs4_referral_mount, & nfs_kill_super, & __this_module, 0, {0}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, 
{(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}; static struct super_operations const nfs4_sops = {& nfs_alloc_inode, & nfs_destroy_inode, 0, & nfs4_write_inode, & nfs_drop_inode, & nfs4_evict_inode, & nfs_put_super, 0, 0, 0, & nfs_statfs, & nfs_remount, & nfs_umount_begin, & nfs_show_options, & nfs_show_devname, & nfs_show_path, & nfs_show_stats, 0, 0, 0, 0, 0}; struct nfs_subversion nfs_v4 = {& __this_module, & nfs4_fs_type, & nfs_version4, & nfs_v4_clientops, & nfs4_sops, (struct xattr_handler const **)(& nfs4_xattr_handlers), {0, 0}}; static int nfs4_write_inode(struct inode *inode , struct writeback_control *wbc ) { int ret ; int tmp ; int status ; bool sync ; struct nfs_inode *tmp___0 ; int tmp___1 ; { tmp = nfs_write_inode(inode, wbc); ret = tmp; if (ret >= 0) { tmp___0 = NFS_I((struct inode const *)inode); tmp___1 = constant_test_bit(9U, (unsigned long const volatile *)(& tmp___0->flags)); if (tmp___1 != 0) { sync = 1; if ((unsigned int )wbc->sync_mode == 0U) { sync = 0; } else { } status = pnfs_layoutcommit_inode(inode, (int )sync); if (status < 0) { return (status); } else { } } else { } } else { } return (ret); } } static void nfs4_evict_inode(struct inode *inode ) { struct nfs_inode *tmp ; { truncate_inode_pages(& inode->i_data, 0LL); clear_inode(inode); pnfs_return_layout(inode); tmp = NFS_I((struct inode const *)inode); pnfs_destroy_layout(tmp); nfs_inode_return_delegation_noreclaim(inode); nfs_clear_inode(inode); return; } } static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type , int flags , char const *dev_name___0 , void *info ) { struct nfs_mount_info *mount_info ; struct nfs_server *server ; struct dentry *mntroot ; void *tmp ; void *tmp___0 ; long tmp___1 ; { mount_info = (struct nfs_mount_info *)info; tmp = ERR_PTR(-12L); mntroot = (struct dentry *)tmp; mount_info->set_security = & nfs_set_sb_security; server = nfs4_create_server(mount_info, & nfs_v4); tmp___1 = IS_ERR((void const *)server); if (tmp___1 != 0L) { tmp___0 = ERR_CAST((void const *)server); mntroot = (struct dentry *)tmp___0; goto out; } else { } mntroot = nfs_fs_mount_common(server, flags, dev_name___0, mount_info, & nfs_v4); out: ; return (mntroot); } } static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type , int flags , void *data , char const *hostname ) { struct vfsmount *root_mnt ; char *root_devname ; size_t len ; size_t tmp ; void *tmp___0 ; void *tmp___1 ; char *tmp___2 ; { tmp = strlen(hostname); len = tmp + 5UL; tmp___0 = kmalloc(len, 208U); root_devname = (char *)tmp___0; if ((unsigned long )root_devname == (unsigned long )((char *)0)) { tmp___1 = ERR_PTR(-12L); return ((struct vfsmount *)tmp___1); } else { } tmp___2 = strchr(hostname, 58); if ((unsigned long )tmp___2 != (unsigned long )((char *)0)) { snprintf(root_devname, len, "[%s]:/", hostname); } else { snprintf(root_devname, len, "%s:/", hostname); } root_mnt = vfs_kern_mount(fs_type, flags, (char 
const *)root_devname, data); kfree((void const *)root_devname); return (root_mnt); } } static struct list_head nfs_referral_count_list = {& nfs_referral_count_list, & nfs_referral_count_list}; static spinlock_t nfs_referral_count_list_lock = {{{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "nfs_referral_count_list_lock", 0, 0UL}}}}; static struct nfs_referral_count *nfs_find_referral_count(void) { struct nfs_referral_count *p ; struct list_head const *__mptr ; struct task_struct *tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)nfs_referral_count_list.next; p = (struct nfs_referral_count *)__mptr; goto ldv_49443; ldv_49442: tmp = get_current(); if ((unsigned long )p->task == (unsigned long )((struct task_struct const *)tmp)) { return (p); } else { } __mptr___0 = (struct list_head const *)p->list.next; p = (struct nfs_referral_count *)__mptr___0; ldv_49443: ; if ((unsigned long )(& p->list) != (unsigned long )(& nfs_referral_count_list)) { goto ldv_49442; } else { } return (0); } } static int nfs_referral_loop_protect(void) { struct nfs_referral_count *p ; struct nfs_referral_count *new ; int ret ; void *tmp ; struct task_struct *tmp___0 ; { ret = -12; tmp = kmalloc(32UL, 208U); new = (struct nfs_referral_count *)tmp; if ((unsigned long )new == (unsigned long )((struct nfs_referral_count *)0)) { goto out; } else { } tmp___0 = get_current(); new->task = (struct task_struct const *)tmp___0; new->referral_count = 1U; ret = 0; spin_lock(& nfs_referral_count_list_lock); p = nfs_find_referral_count(); if ((unsigned long )p != (unsigned long )((struct nfs_referral_count *)0)) { if (p->referral_count > 1U) { ret = -40; } else { p->referral_count = p->referral_count + 1U; } } else { list_add(& new->list, & nfs_referral_count_list); new = 0; } spin_unlock(& nfs_referral_count_list_lock); kfree((void const *)new); out: ; return (ret); } } static void nfs_referral_loop_unprotect(void) { struct nfs_referral_count *p ; { spin_lock(& nfs_referral_count_list_lock); p = nfs_find_referral_count(); p->referral_count = p->referral_count - 1U; if (p->referral_count == 0U) { list_del(& p->list); } else { p = 0; } spin_unlock(& nfs_referral_count_list_lock); kfree((void const *)p); return; } } static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt , char const *export_path ) { struct dentry *dentry ; int err ; void *tmp ; long tmp___0 ; void *tmp___1 ; { tmp___0 = IS_ERR((void const *)root_mnt); if (tmp___0 != 0L) { tmp = ERR_CAST((void const *)root_mnt); return ((struct dentry *)tmp); } else { } err = nfs_referral_loop_protect(); if (err != 0) { mntput(root_mnt); tmp___1 = ERR_PTR((long )err); return ((struct dentry *)tmp___1); } else { } dentry = mount_subtree(root_mnt, export_path); nfs_referral_loop_unprotect(); return (dentry); } } struct dentry *nfs4_try_mount(int flags , char const *dev_name___0 , struct nfs_mount_info *mount_info , struct nfs_subversion *nfs_mod ) { char *export_path ; struct vfsmount *root_mnt ; struct dentry *res ; struct nfs_parsed_mount_data *data ; long tmp ; long tmp___0 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { data = mount_info->parsed; tmp = ldv__builtin_expect((nfs_debug & 1024U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_try_mount()\n"); } else { } export_path = data->nfs_server.export_path; data->nfs_server.export_path = (char *)"/"; root_mnt = nfs_do_root_mount(& nfs4_remote_fs_type, flags, (void *)mount_info, (char const *)data->nfs_server.hostname); 
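/*
 * Descriptive comment (not part of the generated harness output): in the
 * surrounding nfs4_try_mount() the parsed export path is temporarily swapped
 * for "/" so that nfs_do_root_mount() above mounts the server's pseudo-root;
 * the saved export_path is restored just below and then walked with
 * nfs_follow_remote_path() to obtain the final mount dentry.
 */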
data->nfs_server.export_path = export_path; res = nfs_follow_remote_path(root_mnt, (char const *)export_path); tmp___5 = ldv__builtin_expect((nfs_debug & 1024U) != 0U, 0L); if (tmp___5 != 0L) { tmp___0 = IS_ERR((void const *)res); tmp___4 = IS_ERR((void const *)res); if (tmp___4 != 0L) { tmp___2 = PTR_ERR((void const *)res); tmp___3 = tmp___2; } else { tmp___3 = 0L; } printk("\001d<-- nfs4_try_mount() = %ld%s\n", tmp___3, tmp___0 != 0L ? (char *)" [error]" : (char *)""); } else { } return (res); } } static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type , int flags , char const *dev_name___0 , void *raw_data ) { struct nfs_mount_info mount_info ; struct nfs_server *server ; struct dentry *mntroot ; void *tmp ; long tmp___0 ; void *tmp___1 ; long tmp___2 ; { mount_info.fill_super = & nfs_fill_super; mount_info.set_security = & nfs_clone_sb_security; mount_info.parsed = 0; mount_info.cloned = (struct nfs_clone_mount *)raw_data; mount_info.mntfh = 0; tmp = ERR_PTR(-12L); mntroot = (struct dentry *)tmp; tmp___0 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___0 != 0L) { printk("\001d--> nfs4_referral_get_sb()\n"); } else { } mount_info.mntfh = nfs_alloc_fhandle(); if ((unsigned long )mount_info.cloned == (unsigned long )((struct nfs_clone_mount *)0) || (unsigned long )mount_info.mntfh == (unsigned long )((struct nfs_fh *)0)) { goto out; } else { } server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh); tmp___2 = IS_ERR((void const *)server); if (tmp___2 != 0L) { tmp___1 = ERR_CAST((void const *)server); mntroot = (struct dentry *)tmp___1; goto out; } else { } mntroot = nfs_fs_mount_common(server, flags, dev_name___0, & mount_info, & nfs_v4); out: nfs_free_fhandle((struct nfs_fh const *)mount_info.mntfh); return (mntroot); } } static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type , int flags , char const *dev_name___0 , void *raw_data ) { struct nfs_clone_mount *data ; char *export_path ; struct vfsmount *root_mnt ; struct dentry *res ; long tmp ; long tmp___0 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { data = (struct nfs_clone_mount *)raw_data; tmp = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp != 0L) { printk("\001d--> nfs4_referral_mount()\n"); } else { } export_path = data->mnt_path; data->mnt_path = (char *)"/"; root_mnt = nfs_do_root_mount(& nfs4_remote_referral_fs_type, flags, (void *)data, (char const *)data->hostname); data->mnt_path = export_path; res = nfs_follow_remote_path(root_mnt, (char const *)export_path); tmp___5 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___5 != 0L) { tmp___0 = IS_ERR((void const *)res); tmp___4 = IS_ERR((void const *)res); if (tmp___4 != 0L) { tmp___2 = PTR_ERR((void const *)res); tmp___3 = tmp___2; } else { tmp___3 = 0L; } printk("\001d<-- nfs4_referral_mount() = %ld%s\n", tmp___3, tmp___0 != 0L ? 
(char *)" [error]" : (char *)""); } else { } return (res); } } static int init_nfs_v4(void) { int err ; { err = nfs_idmap_init(); if (err != 0) { goto out; } else { } err = nfs4_register_sysctl(); if (err != 0) { goto out1; } else { } register_nfs_version(& nfs_v4); return (0); out1: nfs_idmap_quit(); out: ; return (err); } } static void exit_nfs_v4(void) { { unregister_nfs_version(& nfs_v4); nfs4_unregister_sysctl(); nfs_idmap_quit(); return; } } int main(void) { struct file_system_type *var_group1 ; int var_nfs4_remote_mount_2_p1 ; char const *var_nfs4_remote_mount_2_p2 ; void *var_nfs4_remote_mount_2_p3 ; int var_nfs4_remote_referral_mount_9_p1 ; char const *var_nfs4_remote_referral_mount_9_p2 ; void *var_nfs4_remote_referral_mount_9_p3 ; int var_nfs4_referral_mount_10_p1 ; char const *var_nfs4_referral_mount_10_p2 ; void *var_nfs4_referral_mount_10_p3 ; struct inode *var_group2 ; struct writeback_control *var_group3 ; int tmp ; int tmp___0 ; int tmp___1 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); ldv_handler_precall(); tmp = init_nfs_v4(); if (tmp != 0) { goto ldv_final; } else { } goto ldv_49549; ldv_49548: tmp___0 = __VERIFIER_nondet_int(); switch (tmp___0) { case 0: ldv_handler_precall(); nfs4_remote_mount(var_group1, var_nfs4_remote_mount_2_p1, var_nfs4_remote_mount_2_p2, var_nfs4_remote_mount_2_p3); goto ldv_49542; case 1: ldv_handler_precall(); nfs4_remote_referral_mount(var_group1, var_nfs4_remote_referral_mount_9_p1, var_nfs4_remote_referral_mount_9_p2, var_nfs4_remote_referral_mount_9_p3); goto ldv_49542; case 2: ldv_handler_precall(); nfs4_referral_mount(var_group1, var_nfs4_referral_mount_10_p1, var_nfs4_referral_mount_10_p2, var_nfs4_referral_mount_10_p3); goto ldv_49542; case 3: ldv_handler_precall(); nfs4_write_inode(var_group2, var_group3); goto ldv_49542; case 4: ldv_handler_precall(); nfs4_evict_inode(var_group2); goto ldv_49542; default: ; goto ldv_49542; } ldv_49542: ; ldv_49549: tmp___1 = __VERIFIER_nondet_int(); if (tmp___1 != 0) { goto ldv_49548; } else { } ldv_handler_precall(); exit_nfs_v4(); ldv_final: ldv_check_final_state(); return 0; } } void ldv_mutex_lock_61(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_62(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_63(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_64(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_65(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_66(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_67(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_78(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_76(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_79(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_81(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_83(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_75(struct mutex *ldv_func_arg1 ) ; 
void ldv_mutex_lock_77(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_80(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_82(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_i_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_i_mutex(struct mutex *lock ) ; extern int filemap_write_and_wait_range(struct address_space * , loff_t , loff_t ) ; extern ssize_t do_sync_read(struct file * , char * , size_t , loff_t * ) ; extern ssize_t do_sync_write(struct file * , char const * , size_t , loff_t * ) ; __inline static struct nfs_rpc_ops const *NFS_PROTO(struct inode const *inode ) { struct nfs_server *tmp ; { tmp = NFS_SERVER(inode); return ((tmp->nfs_client)->rpc_ops); } } extern void nfs_file_set_open_context(struct file * , struct nfs_open_context * ) ; extern int nfs_wb_all(struct inode * ) ; extern int nfs_file_fsync_commit(struct file * , loff_t , loff_t , int ) ; extern loff_t nfs_file_llseek(struct file * , loff_t , int ) ; extern int nfs_file_flush(struct file * , fl_owner_t ) ; extern ssize_t nfs_file_read(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; extern ssize_t nfs_file_splice_read(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; extern int nfs_file_mmap(struct file * , struct vm_area_struct * ) ; extern ssize_t nfs_file_write(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; extern int nfs_file_release(struct inode * , struct file * ) ; extern int nfs_lock(struct file * , int , struct file_lock * ) ; extern int nfs_flock(struct file * , int , struct file_lock * ) ; extern ssize_t nfs_file_splice_write(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; extern int nfs_check_flags(int ) ; extern int nfs_setlease(struct file * , long , struct file_lock ** ) ; extern void nfs_fscache_set_inode_cookie(struct inode * , struct file * ) ; static int nfs4_file_open(struct inode *inode , struct file *filp ) { struct nfs_open_context *ctx ; struct dentry *dentry ; struct dentry *parent ; struct inode *dir ; unsigned int openflags ; struct iattr attr ; int err ; long tmp ; long tmp___0 ; long tmp___1 ; struct nfs_rpc_ops const *tmp___2 ; long tmp___3 ; long tmp___4 ; unsigned long tmp___5 ; { dentry = filp->f_path.dentry; parent = 0; openflags = filp->f_flags; tmp = ldv__builtin_expect((nfs_debug & 64U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS: open file(%s/%s)\n", (dentry->d_parent)->d_name.name, dentry->d_name.name); } else { } if ((openflags & 3U) == 3U) { openflags = openflags - 1U; } else { } openflags = openflags & 4294967103U; parent = dget_parent(dentry); dir = parent->d_inode; ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode); tmp___0 = PTR_ERR((void const *)ctx); err = (int )tmp___0; tmp___1 = IS_ERR((void const *)ctx); if (tmp___1 != 0L) { goto out; } else { } attr.ia_valid = 32768U; if ((openflags & 512U) != 0U) { attr.ia_valid = attr.ia_valid | 8U; attr.ia_size = 0LL; nfs_wb_all(inode); } else { } tmp___2 = NFS_PROTO((struct inode const *)dir); inode = (*(tmp___2->open_context))(dir, ctx, (int )openflags, & attr); tmp___4 = IS_ERR((void const *)inode); if (tmp___4 != 0L) { tmp___3 = PTR_ERR((void const *)inode); err = (int )tmp___3; switch (err) { case -1: ; case -13: ; case -122: ; case -28: ; case -30: ; goto out_put_ctx; default: ; goto out_drop; } } else { } iput(inode); if ((unsigned long )dentry->d_inode != (unsigned long )inode) { goto out_drop; } else { } tmp___5 = nfs_save_change_attribute(dir); nfs_set_verifier(dentry, tmp___5); 
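/*
 * Descriptive comment for the success path of nfs4_file_open(): the open
 * context is attached to the file and the fscache inode cookie is set up
 * below before err is cleared to 0; the error paths further down either drop
 * the dentry (out_drop, err = -518) or only release the context (out_put_ctx).
 */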
nfs_file_set_open_context(filp, ctx); nfs_fscache_set_inode_cookie(inode, filp); err = 0; out_put_ctx: put_nfs_open_context(ctx); out: dput(parent); return (err); out_drop: d_drop(dentry); err = -518; goto out_put_ctx; } } static int nfs4_file_fsync(struct file *file , loff_t start , loff_t end , int datasync ) { int ret ; struct inode *inode ; { inode = (file->f_path.dentry)->d_inode; ldv_48872: ret = filemap_write_and_wait_range(inode->i_mapping, start, end); if (ret != 0) { goto ldv_48871; } else { } ldv_mutex_lock_82(& inode->i_mutex); ret = nfs_file_fsync_commit(file, start, end, datasync); if (ret == 0 && datasync == 0) { ret = pnfs_layoutcommit_inode(inode, 1); } else { } ldv_mutex_unlock_83(& inode->i_mutex); start = 0LL; end = 9223372036854775807LL; if (ret == -11) { goto ldv_48872; } else { } ldv_48871: ; return (ret); } } struct file_operations const nfs4_file_operations = {0, & nfs_file_llseek, & do_sync_read, & do_sync_write, & nfs_file_read, & nfs_file_write, 0, 0, 0, 0, & nfs_file_mmap, & nfs4_file_open, & nfs_file_flush, & nfs_file_release, & nfs4_file_fsync, 0, 0, & nfs_lock, 0, 0, & nfs_check_flags, & nfs_flock, & nfs_file_splice_write, & nfs_file_splice_read, & nfs_setlease, 0, 0}; extern void ldv_check_return_value(int ) ; void ldv_main5_sequence_infinite_withcheck_stateful(void) { struct inode *var_group1 ; struct file *var_group2 ; int res_nfs4_file_open_0 ; loff_t var_nfs4_file_fsync_1_p1 ; loff_t var_nfs4_file_fsync_1_p2 ; int var_nfs4_file_fsync_1_p3 ; int ldv_s_nfs4_file_operations_file_operations ; int tmp ; int tmp___0 ; { ldv_s_nfs4_file_operations_file_operations = 0; LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_48903; ldv_48902: tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ; if (ldv_s_nfs4_file_operations_file_operations == 0) { ldv_handler_precall(); res_nfs4_file_open_0 = nfs4_file_open(var_group1, var_group2); ldv_check_return_value(res_nfs4_file_open_0); if (res_nfs4_file_open_0 != 0) { goto ldv_module_exit; } else { } ldv_s_nfs4_file_operations_file_operations = 0; } else { } goto ldv_48899; case 1: ldv_handler_precall(); nfs4_file_fsync(var_group2, var_nfs4_file_fsync_1_p1, var_nfs4_file_fsync_1_p2, var_nfs4_file_fsync_1_p3); goto ldv_48899; default: ; goto ldv_48899; } ldv_48899: ; ldv_48903: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0 || ldv_s_nfs4_file_operations_file_operations != 0) { goto ldv_48902; } else { } ldv_module_exit: ; ldv_check_final_state(); return; } } void ldv_mutex_lock_75(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_76(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_77(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_78(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_79(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_80(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_81(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); 
mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_82(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_i_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_83(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_i_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_98(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_94(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_96(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_99(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_93(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_95(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_97(struct mutex *ldv_func_arg1 ) ; extern void kfree_call_rcu(struct callback_head * , void (*)(struct callback_head * ) ) ; extern void __list_add_rcu(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_rcu(struct list_head *new , struct list_head *head ) { { __list_add_rcu(new, head, head->next); return; } } __inline static void list_del_rcu(struct list_head *entry ) { { __list_del_entry(entry); entry->prev = 0xdead000000200200UL; return; } } extern int filemap_flush(struct address_space * ) ; __inline static int nfs_compare_fh(struct nfs_fh const *a , struct nfs_fh const *b ) { int tmp ; int tmp___0 ; { if ((int )((unsigned short )a->size) != (int )((unsigned short )b->size)) { tmp___0 = 1; } else { tmp = memcmp((void const *)(& a->data), (void const *)(& b->data), (size_t )a->size); if (tmp != 0) { tmp___0 = 1; } else { tmp___0 = 0; } } return (tmp___0); } } int nfs_async_inode_return_delegation(struct inode *inode , nfs4_stateid const *stateid ) ; struct inode *nfs_delegation_find_inode(struct nfs_client *clp , struct nfs_fh const *fhandle ) ; void nfs_server_return_all_delegations(struct nfs_server *server ) ; void nfs_expire_all_delegation_types(struct nfs_client *clp , fmode_t flags ) ; static void nfs_free_delegation(struct nfs_delegation *delegation ) { { if ((unsigned long )delegation->cred != (unsigned long )((struct rpc_cred *)0)) { put_rpccred(delegation->cred); delegation->cred = 0; } else { } kfree_call_rcu(& delegation->rcu, 152); return; } } void nfs_mark_delegation_referenced(struct nfs_delegation *delegation ) { { set_bit(2U, (unsigned long volatile *)(& delegation->flags)); return; } } int nfs4_have_delegation(struct inode *inode , fmode_t flags ) { struct nfs_delegation *delegation ; int ret ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp ; bool __warned ; int tmp___0 ; int tmp___1 ; { ret = 0; flags = flags & 3U; rcu_read_lock(); tmp = NFS_I((struct inode const *)inode); _________p1 = *((struct nfs_delegation * volatile *)(& tmp->delegation)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/delegation.c.prepared", 120, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0) && (delegation->type & flags) == flags) { nfs_mark_delegation_referenced(delegation); ret = 1; } else { } rcu_read_unlock(); return (ret); } } static int nfs_delegation_claim_locks(struct nfs_open_context *ctx , struct nfs4_state *state ) { struct inode *inode ; struct file_lock *fl ; int status ; struct nfs_open_context *tmp ; { inode = state->inode; status = 0; if ((unsigned long )inode->i_flock == (unsigned long )((struct file_lock *)0)) { goto out; } else { } lock_flocks(); fl = inode->i_flock; goto ldv_48976; ldv_48975: ; if ((fl->fl_flags & 3U) == 0U) { goto ldv_48974; } else { } tmp = nfs_file_open_context(fl->fl_file); if ((unsigned long )tmp != (unsigned long )ctx) { goto ldv_48974; } else { } unlock_flocks(); status = nfs4_lock_delegation_recall(state, fl); if (status < 0) { goto out; } else { } lock_flocks(); ldv_48974: fl = fl->fl_next; ldv_48976: ; if ((unsigned long )fl != (unsigned long )((struct file_lock *)0)) { goto ldv_48975; } else { } unlock_flocks(); out: ; return (status); } } static int nfs_delegation_claim_opens(struct inode *inode , nfs4_stateid const *stateid ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_open_context *ctx ; struct nfs4_state *state ; int err ; struct list_head const *__mptr ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; struct list_head const *__mptr___0 ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; again: spin_lock(& inode->i_lock); __mptr = (struct list_head const *)nfsi->open_files.next; ctx = (struct nfs_open_context *)__mptr + 0xffffffffffffffa0UL; goto ldv_48993; ldv_48992: state = ctx->state; if ((unsigned long )state == (unsigned long )((struct nfs4_state *)0)) { goto ldv_48991; } else { } tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& state->flags)); if (tmp___0 == 0) { goto ldv_48991; } else { } tmp___1 = nfs4_stateid_match((nfs4_stateid const *)(& state->stateid), stateid); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto ldv_48991; } else { } get_nfs_open_context(ctx); spin_unlock(& inode->i_lock); err = nfs4_open_delegation_recall(ctx, state, stateid); if (err >= 0) { err = nfs_delegation_claim_locks(ctx, state); } else { } put_nfs_open_context(ctx); if (err != 0) { return (err); } else { } goto again; ldv_48991: __mptr___0 = (struct list_head const *)ctx->list.next; ctx = (struct nfs_open_context *)__mptr___0 + 0xffffffffffffffa0UL; ldv_48993: ; if ((unsigned long )(& ctx->list) != (unsigned long )(& nfsi->open_files)) { goto ldv_48992; } else { } spin_unlock(& inode->i_lock); return (0); } } void nfs_inode_reclaim_delegation(struct inode *inode , struct rpc_cred *cred , struct nfs_openres *res ) { struct nfs_delegation *delegation ; struct rpc_cred *oldcred ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp ; bool __warned ; int tmp___0 ; int tmp___1 ; struct nfs_inode *tmp___2 ; { oldcred = 0; rcu_read_lock(); tmp = NFS_I((struct inode const *)inode); _________p1 = *((struct nfs_delegation * volatile *)(& 
tmp->delegation)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/delegation.c.prepared", 201, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { spin_lock(& delegation->lock); if ((unsigned long )delegation->inode != (unsigned long )((struct inode *)0)) { nfs4_stateid_copy(& delegation->stateid, (nfs4_stateid const *)(& res->delegation)); delegation->type = res->delegation_type; delegation->maxsize = (loff_t )res->maxsize; oldcred = delegation->cred; delegation->cred = get_rpccred(cred); clear_bit(0, (unsigned long volatile *)(& delegation->flags)); tmp___2 = NFS_I((struct inode const *)inode); tmp___2->delegation_state = delegation->type; spin_unlock(& delegation->lock); put_rpccred(oldcred); rcu_read_unlock(); } else { spin_unlock(& delegation->lock); rcu_read_unlock(); nfs_inode_set_delegation(inode, cred, res); } } else { rcu_read_unlock(); } return; } } static int nfs_do_return_delegation(struct inode *inode , struct nfs_delegation *delegation , int issync ) { int res ; { res = 0; res = nfs4_proc_delegreturn(inode, delegation->cred, (nfs4_stateid const *)(& delegation->stateid), issync); nfs_free_delegation(delegation); return (res); } } static struct inode *nfs_delegation_grab_inode(struct nfs_delegation *delegation ) { struct inode *inode ; { inode = 0; spin_lock(& delegation->lock); if ((unsigned long )delegation->inode != (unsigned long )((struct inode *)0)) { inode = igrab(delegation->inode); } else { } spin_unlock(& delegation->lock); return (inode); } } static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi , struct nfs_server *server ) { struct nfs_delegation *delegation ; bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = lock_is_held(& (server->nfs_client)->cl_lock.ldv_5961.ldv_5960.dep_map); if (tmp___0 == 0) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/delegation.c.prepared", 253, "suspicious rcu_dereference_protected() usage"); } else { } } else { } delegation = nfsi->delegation; if ((unsigned long )delegation == (unsigned long )((struct nfs_delegation *)0)) { goto nomatch; } else { } spin_lock(& delegation->lock); list_del_rcu(& delegation->super_list); delegation->inode = 0; nfsi->delegation_state = 0U; __asm__ volatile ("": : : "memory"); nfsi->delegation = 0; spin_unlock(& delegation->lock); return (delegation); nomatch: ; return (0); } } static struct nfs_delegation *nfs_detach_delegation(struct nfs_inode *nfsi , struct nfs_server *server ) { struct nfs_client *clp ; struct nfs_delegation *delegation ; { clp = server->nfs_client; spin_lock(& clp->cl_lock); delegation = nfs_detach_delegation_locked(nfsi, server); spin_unlock(& clp->cl_lock); return (delegation); } } int nfs_inode_set_delegation(struct inode *inode , struct rpc_cred *cred , struct nfs_openres *res ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_client *clp ; struct nfs_inode *nfsi ; struct nfs_inode *tmp___0 ; struct nfs_delegation *delegation ; struct nfs_delegation *old_delegation ; struct nfs_delegation *freeme ; int status ; void *tmp___1 ; struct lock_class_key __key ; bool __warned ; int tmp___2 ; int tmp___3 ; bool tmp___4 ; long tmp___5 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; clp = server->nfs_client; tmp___0 = NFS_I((struct inode const *)inode); nfsi = tmp___0; freeme = 0; status = 0; tmp___1 = kmalloc(168UL, 80U); delegation = (struct nfs_delegation *)tmp___1; if ((unsigned long )delegation == (unsigned long )((struct nfs_delegation *)0)) { return (-12); } else { } nfs4_stateid_copy(& delegation->stateid, (nfs4_stateid const *)(& res->delegation)); delegation->type = res->delegation_type; delegation->maxsize = (loff_t )res->maxsize; delegation->change_attr = inode->i_version; delegation->cred = get_rpccred(cred); delegation->inode = inode; delegation->flags = 4UL; spinlock_check(& delegation->lock); __raw_spin_lock_init(& delegation->lock.ldv_5961.rlock, "&(&delegation->lock)->rlock", & __key); spin_lock(& clp->cl_lock); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned) { tmp___3 = lock_is_held(& clp->cl_lock.ldv_5961.ldv_5960.dep_map); if (tmp___3 == 0) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/delegation.c.prepared", 312, "suspicious rcu_dereference_protected() usage"); } else { } } else { } old_delegation = nfsi->delegation; if ((unsigned long )old_delegation != (unsigned long )((struct nfs_delegation *)0)) { tmp___4 = nfs4_stateid_match((nfs4_stateid const *)(& delegation->stateid), (nfs4_stateid const *)(& old_delegation->stateid)); if ((int )tmp___4 && delegation->type == old_delegation->type) { goto out; } else { } tmp___5 = ldv__builtin_expect((nfs_debug & 64U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d%s: server %s handed out a duplicate delegation!\n", "nfs_inode_set_delegation", clp->cl_hostname); } else { } if (delegation->type == old_delegation->type || (delegation->type & 2U) == 0U) { freeme = delegation; delegation = 0; goto out; } else { } freeme = nfs_detach_delegation_locked(nfsi, server); } else { } list_add_rcu(& delegation->super_list, & server->delegations); nfsi->delegation_state = delegation->type; __asm__ volatile ("": : : "memory"); nfsi->delegation = delegation; delegation = 0; spin_lock(& inode->i_lock); nfsi->cache_validity = nfsi->cache_validity | 64UL; spin_unlock(& inode->i_lock); out: spin_unlock(& clp->cl_lock); if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { nfs_free_delegation(delegation); } else { } if ((unsigned long )freeme != (unsigned long )((struct nfs_delegation *)0)) { nfs_do_return_delegation(inode, freeme, 0); } else { } return (status); } } static int __nfs_inode_return_delegation(struct inode *inode , struct nfs_delegation *delegation , int issync ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; int err ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; down_write(& nfsi->rwsem); err = nfs_delegation_claim_opens(inode, (nfs4_stateid const *)(& delegation->stateid)); up_write(& nfsi->rwsem); if (err != 0) { goto out; } else { } err = nfs_do_return_delegation(inode, delegation, issync); out: ; return (err); } } int nfs_client_return_marked_delegations(struct nfs_client *clp ) { struct nfs_delegation *delegation ; struct nfs_server *server ; struct inode *inode ; int err ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; int tmp___1 ; struct nfs_inode *tmp___2 ; struct list_head *__ptr___1 ; struct list_head const *__mptr___1 ; struct list_head *_________p1___1 ; bool __warned___1 ; int tmp___3 ; struct list_head *__ptr___2 ; struct list_head const *__mptr___2 ; struct list_head *_________p1___2 ; bool __warned___2 ; int tmp___4 ; { err = 0; restart: rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49095; ldv_49094: __ptr___0 = server->delegations.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; goto ldv_49092; ldv_49091: tmp___1 = test_and_clear_bit(1, (unsigned long volatile *)(& delegation->flags)); if (tmp___1 == 0) { goto ldv_49090; } else { } inode = nfs_delegation_grab_inode(delegation); if ((unsigned long )inode == (unsigned long )((struct inode *)0)) { goto ldv_49090; } else { } tmp___2 = NFS_I((struct inode const *)inode); delegation = nfs_detach_delegation(tmp___2, server); rcu_read_unlock(); if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { err = __nfs_inode_return_delegation(inode, delegation, 0); } else { } iput(inode); if (err == 0) { goto restart; } else { } set_bit(5U, (unsigned long volatile *)(& clp->cl_state)); return (err); ldv_49090: __ptr___1 = delegation->super_list.next; _________p1___1 = *((struct list_head * volatile *)(& __ptr___1)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned___1) { rcu_read_lock_held(); } else { } __mptr___1 = (struct list_head const *)_________p1___1; delegation = (struct nfs_delegation *)__mptr___1; ldv_49092: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49091; } else { } __ptr___2 = server->client_link.next; _________p1___2 = *((struct list_head * volatile *)(& __ptr___2)); tmp___4 = debug_lockdep_rcu_enabled(); if (tmp___4 != 0 && ! 
__warned___2) { rcu_read_lock_held(); } else { } __mptr___2 = (struct list_head const *)_________p1___2; server = (struct nfs_server *)__mptr___2 + 0xfffffffffffffff8UL; ldv_49095: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49094; } else { } rcu_read_unlock(); return (0); } } void nfs_inode_return_delegation_noreclaim(struct inode *inode ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_inode *nfsi ; struct nfs_inode *tmp___0 ; struct nfs_delegation *delegation ; struct nfs_delegation *_________p1 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = NFS_I((struct inode const *)inode); nfsi = tmp___0; _________p1 = *((struct nfs_delegation * volatile *)(& nfsi->delegation)); if ((unsigned long )_________p1 != (unsigned long )((struct nfs_delegation *)0)) { delegation = nfs_detach_delegation(nfsi, server); if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { nfs_do_return_delegation(inode, delegation, 0); } else { } } else { } return; } } int nfs4_inode_return_delegation(struct inode *inode ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_inode *nfsi ; struct nfs_inode *tmp___0 ; struct nfs_delegation *delegation ; int err ; struct nfs_delegation *_________p1 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; tmp___0 = NFS_I((struct inode const *)inode); nfsi = tmp___0; err = 0; nfs_wb_all(inode); _________p1 = *((struct nfs_delegation * volatile *)(& nfsi->delegation)); if ((unsigned long )_________p1 != (unsigned long )((struct nfs_delegation *)0)) { delegation = nfs_detach_delegation(nfsi, server); if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { err = __nfs_inode_return_delegation(inode, delegation, 1); } else { } } else { } return (err); } } static void nfs_mark_return_delegation(struct nfs_server *server , struct nfs_delegation *delegation ) { { set_bit(1U, (unsigned long volatile *)(& delegation->flags)); set_bit(5U, (unsigned long volatile *)(& (server->nfs_client)->cl_state)); return; } } void nfs_server_return_all_delegations(struct nfs_server *server ) { struct nfs_client *clp ; struct nfs_delegation *delegation ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; int tmp___1 ; { clp = server->nfs_client; if ((unsigned long )clp == (unsigned long )((struct nfs_client *)0)) { return; } else { } rcu_read_lock(); __ptr = server->delegations.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; delegation = (struct nfs_delegation *)__mptr; goto ldv_49138; ldv_49137: spin_lock(& delegation->lock); set_bit(1U, (unsigned long volatile *)(& delegation->flags)); spin_unlock(& delegation->lock); __ptr___0 = delegation->super_list.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; ldv_49138: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49137; } else { } rcu_read_unlock(); tmp___1 = nfs_client_return_marked_delegations(clp); if (tmp___1 != 0) { nfs4_schedule_state_manager(clp); } else { } return; } } static void nfs_mark_return_all_delegation_types(struct nfs_server *server , fmode_t flags ) { struct nfs_delegation *delegation ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { __ptr = server->delegations.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; delegation = (struct nfs_delegation *)__mptr; goto ldv_49161; ldv_49160: ; if (delegation->type == 3U && (flags & 2U) == 0U) { goto ldv_49159; } else { } if ((delegation->type & flags) != 0U) { nfs_mark_return_delegation(server, delegation); } else { } ldv_49159: __ptr___0 = delegation->super_list.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; ldv_49161: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49160; } else { } return; } } static void nfs_client_mark_return_all_delegation_types(struct nfs_client *clp , fmode_t flags ) { struct nfs_server *server ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49183; ldv_49182: nfs_mark_return_all_delegation_types(server, flags); __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49183: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49182; } else { } rcu_read_unlock(); return; } } static void nfs_delegation_run_state_manager(struct nfs_client *clp ) { int tmp ; { tmp = constant_test_bit(5U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp != 0) { nfs4_schedule_state_manager(clp); } else { } return; } } void nfs_remove_bad_delegation(struct inode *inode ) { struct nfs_delegation *delegation ; struct nfs_server *tmp ; struct nfs_inode *tmp___0 ; { tmp = NFS_SERVER((struct inode const *)inode); tmp___0 = NFS_I((struct inode const *)inode); delegation = nfs_detach_delegation(tmp___0, tmp); if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { nfs_inode_find_state_and_recover(inode, (nfs4_stateid const *)(& delegation->stateid)); nfs_free_delegation(delegation); } else { } return; } } void nfs_expire_all_delegation_types(struct nfs_client *clp , fmode_t flags ) { { nfs_client_mark_return_all_delegation_types(clp, flags); nfs_delegation_run_state_manager(clp); return; } } void nfs_expire_all_delegations(struct nfs_client *clp ) { { nfs_expire_all_delegation_types(clp, 3U); return; } } static void nfs_mark_return_unreferenced_delegations(struct nfs_server *server ) { struct nfs_delegation *delegation ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; { __ptr = server->delegations.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; delegation = (struct nfs_delegation *)__mptr; goto ldv_49225; ldv_49224: tmp___0 = test_and_clear_bit(2, (unsigned long volatile *)(& delegation->flags)); if (tmp___0 != 0) { goto ldv_49223; } else { } nfs_mark_return_delegation(server, delegation); ldv_49223: __ptr___0 = delegation->super_list.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; ldv_49225: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49224; } else { } return; } } void nfs_expire_unreferenced_delegations(struct nfs_client *clp ) { struct nfs_server *server ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49246; ldv_49245: nfs_mark_return_unreferenced_delegations(server); __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49246: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49245; } else { } rcu_read_unlock(); nfs_delegation_run_state_manager(clp); return; } } int nfs_async_inode_return_delegation(struct inode *inode , nfs4_stateid const *stateid ) { struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_client *clp ; struct nfs_delegation *delegation ; struct nfs_delegation *_________p1 ; struct nfs_inode *tmp___0 ; bool __warned ; int tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; clp = server->nfs_client; filemap_flush(inode->i_mapping); rcu_read_lock(); tmp___0 = NFS_I((struct inode const *)inode); _________p1 = *((struct nfs_delegation * volatile *)(& tmp___0->delegation)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/delegation.c.prepared", 612, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; tmp___3 = (*((clp->cl_mvops)->match_stateid))((nfs4_stateid const *)(& delegation->stateid), stateid); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { rcu_read_unlock(); return (-2); } else { } nfs_mark_return_delegation(server, delegation); rcu_read_unlock(); nfs_delegation_run_state_manager(clp); return (0); } } static struct inode *nfs_delegation_find_inode_server(struct nfs_server *server , struct nfs_fh const *fhandle ) { struct nfs_delegation *delegation ; struct inode *res ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct nfs_inode *tmp___0 ; int tmp___1 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___2 ; { res = 0; __ptr = server->delegations.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; delegation = (struct nfs_delegation *)__mptr; goto ldv_49280; ldv_49279: spin_lock(& delegation->lock); if ((unsigned long )delegation->inode != (unsigned long )((struct inode *)0)) { tmp___0 = NFS_I((struct inode const *)delegation->inode); tmp___1 = nfs_compare_fh(fhandle, (struct nfs_fh const *)(& tmp___0->fh)); if (tmp___1 == 0) { res = igrab(delegation->inode); } else { } } else { } spin_unlock(& delegation->lock); if ((unsigned long )res != (unsigned long )((struct inode *)0)) { goto ldv_49278; } else { } __ptr___0 = delegation->super_list.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; ldv_49280: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49279; } else { } ldv_49278: ; return (res); } } struct inode *nfs_delegation_find_inode(struct nfs_client *clp , struct nfs_fh const *fhandle ) { struct nfs_server *server ; struct inode *res ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { res = 0; rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49303; ldv_49302: res = nfs_delegation_find_inode_server(server, fhandle); if ((unsigned long )res != (unsigned long )((struct inode *)0)) { goto ldv_49301; } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49303: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49302; } else { } ldv_49301: rcu_read_unlock(); return (res); } } static void nfs_delegation_mark_reclaim_server(struct nfs_server *server ) { struct nfs_delegation *delegation ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { __ptr = server->delegations.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; delegation = (struct nfs_delegation *)__mptr; goto ldv_49323; ldv_49322: set_bit(0U, (unsigned long volatile *)(& delegation->flags)); __ptr___0 = delegation->super_list.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; ldv_49323: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49322; } else { } return; } } void nfs_delegation_mark_reclaim(struct nfs_client *clp ) { struct nfs_server *server ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; { rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49344; ldv_49343: nfs_delegation_mark_reclaim_server(server); __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49344: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49343; } else { } rcu_read_unlock(); return; } } void nfs_delegation_reap_unclaimed(struct nfs_client *clp ) { struct nfs_delegation *delegation ; struct nfs_server *server ; struct inode *inode ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___0 ; int tmp___1 ; struct nfs_inode *tmp___2 ; struct list_head *__ptr___1 ; struct list_head const *__mptr___1 ; struct list_head *_________p1___1 ; bool __warned___1 ; int tmp___3 ; struct list_head *__ptr___2 ; struct list_head const *__mptr___2 ; struct list_head *_________p1___2 ; bool __warned___2 ; int tmp___4 ; { restart: rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49386; ldv_49385: __ptr___0 = server->delegations.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; delegation = (struct nfs_delegation *)__mptr___0; goto ldv_49383; ldv_49382: tmp___1 = constant_test_bit(0U, (unsigned long const volatile *)(& delegation->flags)); if (tmp___1 == 0) { goto ldv_49381; } else { } inode = nfs_delegation_grab_inode(delegation); if ((unsigned long )inode == (unsigned long )((struct inode *)0)) { goto ldv_49381; } else { } tmp___2 = NFS_I((struct inode const *)inode); delegation = nfs_detach_delegation(tmp___2, server); rcu_read_unlock(); if ((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0)) { nfs_free_delegation(delegation); } else { } iput(inode); goto restart; ldv_49381: __ptr___1 = delegation->super_list.next; _________p1___1 = *((struct list_head * volatile *)(& __ptr___1)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned___1) { rcu_read_lock_held(); } else { } __mptr___1 = (struct list_head const *)_________p1___1; delegation = (struct nfs_delegation *)__mptr___1; ldv_49383: ; if ((unsigned long )(& delegation->super_list) != (unsigned long )(& server->delegations)) { goto ldv_49382; } else { } __ptr___2 = server->client_link.next; _________p1___2 = *((struct list_head * volatile *)(& __ptr___2)); tmp___4 = debug_lockdep_rcu_enabled(); if (tmp___4 != 0 && ! __warned___2) { rcu_read_lock_held(); } else { } __mptr___2 = (struct list_head const *)_________p1___2; server = (struct nfs_server *)__mptr___2 + 0xfffffffffffffff8UL; ldv_49386: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49385; } else { } rcu_read_unlock(); return; } } int nfs_delegations_present(struct nfs_client *clp ) { struct nfs_server *server ; int ret ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; { ret = 0; rcu_read_lock(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49409; ldv_49408: tmp___0 = list_empty((struct list_head const *)(& server->delegations)); if (tmp___0 == 0) { ret = 1; goto ldv_49407; } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49409: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49408; } else { } ldv_49407: rcu_read_unlock(); return (ret); } } bool nfs4_copy_delegation_stateid(nfs4_stateid *dst , struct inode *inode , fmode_t flags ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct nfs_delegation *delegation ; bool ret ; struct nfs_delegation *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; flags = flags & 3U; rcu_read_lock(); _________p1 = *((struct nfs_delegation * volatile *)(& nfsi->delegation)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/delegation.c.prepared", 767, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; ret = (bool )((unsigned long )delegation != (unsigned long )((struct nfs_delegation *)0) && (delegation->type & flags) == flags); if ((int )ret) { nfs4_stateid_copy(dst, (nfs4_stateid const *)(& delegation->stateid)); nfs_mark_delegation_referenced(delegation); } else { } rcu_read_unlock(); return (ret); } } void ldv_mutex_lock_93(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_94(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_95(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_96(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_97(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_98(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_99(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } extern void might_fault(void) ; extern int kstrtoull(char const * , unsigned int , unsigned long long * ) ; extern int kstrtoll(char const * , unsigned int , long long * ) ; __inline static int kstrtoul(char const *s , unsigned int base , unsigned long *res ) { int tmp ; { tmp = kstrtoull(s, base, (unsigned long long *)res); return (tmp); } } __inline static int kstrtol(char const *s , unsigned int base , long *res ) { int tmp ; { tmp = kstrtoll(s, base, (long long *)res); return (tmp); } } extern int sprintf(char * , char const * , ...) ; extern __kernel_size_t strnlen(char const * , __kernel_size_t ) ; extern void *memchr(void const * , int , __kernel_size_t ) ; extern void warn_slowpath_fmt(char const * , int const , char const * , ...) 
; __inline static long IS_ERR_OR_NULL(void const *ptr ) { long tmp ; int tmp___0 ; { if ((unsigned long )ptr == (unsigned long )((void const *)0)) { tmp___0 = 1; } else { tmp = ldv__builtin_expect((unsigned long )ptr > 0xfffffffffffff000UL, 0L); if (tmp != 0L) { tmp___0 = 1; } else { tmp___0 = 0; } } return ((long )tmp___0); } } __inline static int atomic_dec_and_test(atomic_t *v ) { unsigned char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((unsigned int )c != 0U); } } extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; int ldv_mutex_trylock_110(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_108(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_111(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_113(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_115(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_107(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_109(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_112(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_114(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_idmap_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_idmap_mutex(struct mutex *lock ) ; extern int match_token(char * , struct match_token const * , substring_t * ) ; extern int match_int(substring_t * , int * ) ; extern size_t match_strlcpy(char * , substring_t const * , size_t ) ; __inline static struct thread_info *current_thread_info___3(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6258; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6258; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6258; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6258; default: __bad_percpu_size(); } ldv_6258: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void __rcu_read_lock___3(void) { struct thread_info *tmp ; { tmp = current_thread_info___3(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___3(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___3(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___3(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___3(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___3(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___3(); return; } } int nfs_idmap_new(struct nfs_client *clp ) ; void nfs_idmap_delete(struct nfs_client *clp ) ; extern unsigned int nfs_idmap_cache_timeout ; extern unsigned long _copy_from_user(void * , void const * , unsigned int ) ; __inline static unsigned long copy_from_user(void *to , void const *from , unsigned long n ) { int sz ; unsigned long tmp ; int __ret_warn_on ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = __builtin_object_size((void const *)to, 0); sz = (int )tmp; might_fault(); tmp___1 = ldv__builtin_expect(sz == -1, 1L); if (tmp___1 != 0L) { n = _copy_from_user(to, from, (unsigned int )n); } else { tmp___2 = ldv__builtin_expect((unsigned long )sz >= n, 1L); if (tmp___2 != 0L) { n = _copy_from_user(to, from, (unsigned int )n); } else { __ret_warn_on = 1; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_fmt("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/inst/current/envs/linux-3.8-rc1/linux-3.8-rc1/arch/x86/include/asm/uaccess_64.h", 66, "Buffer overflow detected!\n"); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } } return (n); } } __inline static struct rpc_inode *RPC_I(struct inode *inode ) { struct inode const *__mptr ; { __mptr = (struct inode const *)inode; return ((struct rpc_inode *)__mptr); } } extern int rpc_pipefs_notifier_register(struct notifier_block * ) ; extern void rpc_pipefs_notifier_unregister(struct notifier_block * ) ; extern struct super_block *rpc_get_sb_net(struct net const * ) ; extern void rpc_put_sb_net(struct net const * ) ; extern ssize_t rpc_pipe_generic_upcall(struct file * , struct rpc_pipe_msg * , char * , size_t ) ; extern int rpc_queue_upcall(struct rpc_pipe * , struct rpc_pipe_msg * ) ; extern int rpc_rmdir(struct dentry * ) ; extern struct rpc_pipe *rpc_mkpipe_data(struct rpc_pipe_ops const * , int ) ; extern void rpc_destroy_pipe_data(struct rpc_pipe * ) ; extern struct dentry *rpc_mkpipe_dentry(struct dentry * , char const * , void * , struct rpc_pipe * ) ; extern int rpc_unlink(struct dentry * ) ; extern void key_revoke(struct key * ) ; extern void key_put(struct key * ) ; extern struct key *request_key(struct key_type * , char const * , char const * ) ; extern struct key *request_key_with_auxdata(struct key_type * , char const * , void const * , size_t , void * ) ; extern int key_validate(struct key const * ) ; extern struct key *keyring_alloc(char const * , kuid_t , kgid_t , struct cred const * , key_perm_t , unsigned long , struct key * ) ; extern void key_set_timeout(struct key * , unsigned int ) ; extern void __put_cred(struct cred * ) ; extern struct cred const *override_creds(struct cred const * ) ; extern void revert_creds(struct cred const * ) ; extern struct cred *prepare_kernel_cred(struct task_struct * ) ; extern void __invalid_creds(struct cred const * , char const * , unsigned int ) ; extern bool creds_are_invalid(struct cred const * ) ; __inline static void __validate_creds(struct cred const *cred , char const *file , unsigned int line ) { bool tmp ; long tmp___0 ; { tmp = creds_are_invalid(cred); tmp___0 = ldv__builtin_expect((long )tmp, 0L); if (tmp___0 != 0L) { __invalid_creds(cred, file, line); } else { } return; } } __inline static void put_cred(struct cred const 
*_cred ) { struct cred *cred ; int tmp ; { cred = (struct cred *)_cred; __validate_creds((struct cred const *)cred, "include/linux/cred.h", 248U); tmp = atomic_dec_and_test(& cred->usage); if (tmp != 0) { __put_cred(cred); } else { } return; } } extern int register_key_type(struct key_type * ) ; extern void unregister_key_type(struct key_type * ) ; extern int key_instantiate_and_link(struct key * , void const * , size_t , struct key * , struct key * ) ; extern void complete_request_key(struct key_construction * , int ) ; extern int user_instantiate(struct key * , struct key_preparsed_payload * ) ; extern int user_match(struct key const * , void const * ) ; extern void user_revoke(struct key * ) ; extern void user_destroy(struct key * ) ; extern void user_describe(struct key const * , struct seq_file * ) ; extern long user_read(struct key const * , char * , size_t ) ; extern bool try_module_get(struct module * ) ; extern int nfs_wait_client_init_complete(struct nfs_client const * ) ; __inline static void *net_generic___2(struct net const *net , int id ) { struct net_generic *ng ; void *ptr ; struct net_generic *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { rcu_read_lock___3(); _________p1 = *((struct net_generic * const volatile *)(& net->gen)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("include/net/netns/generic.h", 40, "suspicious rcu_dereference_check() usage"); } else { } } else { } ng = _________p1; tmp___1 = ldv__builtin_expect(id == 0, 0L); if (tmp___1 != 0L) { goto _L; } else { tmp___2 = ldv__builtin_expect((unsigned int )id > ng->len, 0L); if (tmp___2 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (41), "i" (12UL)); ldv_49109: ; goto ldv_49109; } else { } } ptr = ng->ptr[id + -1]; rcu_read_unlock___3(); tmp___3 = ldv__builtin_expect((unsigned long )ptr == (unsigned long )((void *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (45), "i" (12UL)); ldv_49110: ; goto ldv_49110; } else { } return (ptr); } } static struct cred const *id_resolver_cache ; static struct key_type key_type_id_resolver_legacy ; void nfs_fattr_init_names(struct nfs_fattr *fattr , struct nfs4_string *owner_name , struct nfs4_string *group_name ) { { fattr->owner_name = owner_name; fattr->group_name = group_name; return; } } static void nfs_fattr_free_owner_name(struct nfs_fattr *fattr ) { { fattr->valid = fattr->valid & 4286578687U; kfree((void const *)(fattr->owner_name)->data); return; } } static void nfs_fattr_free_group_name(struct nfs_fattr *fattr ) { { fattr->valid = fattr->valid & 4278190079U; kfree((void const *)(fattr->group_name)->data); return; } } static bool nfs_fattr_map_owner_name(struct nfs_server *server , struct nfs_fattr *fattr ) { struct nfs4_string *owner ; __u32 uid ; int tmp ; { owner = fattr->owner_name; if ((fattr->valid & 8388608U) == 0U) { return (0); } else { } tmp = nfs_map_name_to_uid((struct nfs_server const *)server, (char const *)owner->data, (size_t )owner->len, & uid); if (tmp == 0) { fattr->uid = uid; fattr->valid = fattr->valid | 8U; } else { } return (1); } } static bool 
nfs_fattr_map_group_name(struct nfs_server *server , struct nfs_fattr *fattr ) { struct nfs4_string *group ; __u32 gid ; int tmp ; { group = fattr->group_name; if ((fattr->valid & 16777216U) == 0U) { return (0); } else { } tmp = nfs_map_group_to_gid((struct nfs_server const *)server, (char const *)group->data, (size_t )group->len, & gid); if (tmp == 0) { fattr->gid = gid; fattr->valid = fattr->valid | 16U; } else { } return (1); } } void nfs_fattr_free_names(struct nfs_fattr *fattr ) { { if ((fattr->valid & 8388608U) != 0U) { nfs_fattr_free_owner_name(fattr); } else { } if ((fattr->valid & 16777216U) != 0U) { nfs_fattr_free_group_name(fattr); } else { } return; } } void nfs_fattr_map_and_free_names(struct nfs_server *server , struct nfs_fattr *fattr ) { bool tmp ; bool tmp___0 ; { tmp = nfs_fattr_map_owner_name(server, fattr); if ((int )tmp) { nfs_fattr_free_owner_name(fattr); } else { } tmp___0 = nfs_fattr_map_group_name(server, fattr); if ((int )tmp___0) { nfs_fattr_free_group_name(fattr); } else { } return; } } static int nfs_map_string_to_numeric(char const *name , size_t namelen , __u32 *res ) { unsigned long val ; char buf[16U] ; void *tmp ; size_t __len ; void *__ret ; int tmp___0 ; { tmp = memchr((void const *)name, 64, namelen); if ((unsigned long )tmp != (unsigned long )((void *)0) || namelen > 15UL) { return (0); } else { } __len = namelen; __ret = memcpy((void *)(& buf), (void const *)name, __len); buf[namelen] = 0; tmp___0 = kstrtoul((char const *)(& buf), 0U, & val); if (tmp___0 != 0) { return (0); } else { } *res = (__u32 )val; return (1); } } static int nfs_map_numeric_to_string(__u32 id , char *buf , size_t buflen ) { int tmp ; { tmp = snprintf(buf, buflen, "%u", id); return (tmp); } } static struct key_type key_type_id_resolver = {"id_resolver", 0UL, 0, 0, 0, & user_instantiate, 0, & user_match, & user_revoke, & user_destroy, & user_describe, & user_read, 0, {0, 0}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}; static int nfs_idmap_init_keyring(void) { struct cred *cred ; struct key *keyring ; int ret ; long tmp ; long tmp___0 ; { ret = 0; printk("\rNFS: Registering the %s key type\n", key_type_id_resolver.name); cred = prepare_kernel_cred(0); if ((unsigned long )cred == (unsigned long )((struct cred *)0)) { return (-12); } else { } keyring = keyring_alloc(".id_resolver", 0U, 0U, (struct cred const *)cred, 520290304U, 2UL, 0); tmp___0 = IS_ERR((void const *)keyring); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)keyring); ret = (int )tmp; goto failed_put_cred; } else { } ret = register_key_type(& key_type_id_resolver); if (ret < 0) { goto failed_put_key; } else { } ret = register_key_type(& key_type_id_resolver_legacy); if (ret < 0) { goto failed_reg_legacy; } else { } set_bit(6U, (unsigned long volatile *)(& keyring->flags)); cred->thread_keyring = keyring; cred->jit_keyring = 1U; id_resolver_cache = (struct cred const *)cred; return (0); failed_reg_legacy: unregister_key_type(& key_type_id_resolver); failed_put_key: key_put(keyring); failed_put_cred: put_cred((struct cred const *)cred); return (ret); } } static void nfs_idmap_quit_keyring(void) { { key_revoke(id_resolver_cache->thread_keyring); unregister_key_type(& key_type_id_resolver); unregister_key_type(& key_type_id_resolver_legacy); put_cred(id_resolver_cache); return; } } static ssize_t nfs_idmap_get_desc(char const *name , size_t namelen , char const *type , size_t typelen , char **desc ) { char *cp ; size_t desclen ; void *tmp ; size_t __len ; void *__ret ; 
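/*
 * Note (annotation added here, not part of the CIL output): nfs_idmap_get_desc()
 * below builds the request_key() description string of the form "<type>:<name>"
 * (e.g. "uid:alice" as a purely illustrative value) in a freshly kmalloc()'d
 * buffer of typelen + namelen + 2 bytes -- one byte for the ':' separator and
 * one for the trailing NUL -- and returns that length. The __len/__ret and
 * __len___0/__ret___0 temporaries declared around this point are CIL's
 * expansion of the two inlined memcpy() calls that copy the type and name
 * parts into the buffer.
 */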
char *tmp___0 ; size_t __len___0 ; void *__ret___0 ; { desclen = (typelen + namelen) + 2UL; tmp = kmalloc(desclen, 208U); *desc = (char *)tmp; if ((unsigned long )*desc == (unsigned long )((char *)0)) { return (-12L); } else { } cp = *desc; __len = typelen; __ret = memcpy((void *)cp, (void const *)type, __len); cp = cp + typelen; tmp___0 = cp; cp = cp + 1; *tmp___0 = 58; __len___0 = namelen; __ret___0 = memcpy((void *)cp, (void const *)name, __len___0); cp = cp + namelen; *cp = 0; return ((ssize_t )desclen); } } static ssize_t nfs_idmap_request_key(struct key_type *key_type , char const *name , size_t namelen , char const *type , void *data , size_t data_size , struct idmap *idmap ) { struct cred const *saved_cred ; struct key *rkey ; char *desc ; struct user_key_payload *payload ; ssize_t ret ; size_t tmp ; long tmp___0 ; int tmp___1 ; void *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; long tmp___4 ; size_t __len ; void *__ret ; { tmp = strlen(type); ret = nfs_idmap_get_desc(name, namelen, type, tmp, & desc); if (ret <= 0L) { goto out; } else { } saved_cred = override_creds(id_resolver_cache); if ((unsigned long )idmap != (unsigned long )((struct idmap *)0)) { rkey = request_key_with_auxdata(key_type, (char const *)desc, (void const *)"", 0UL, (void *)idmap); } else { rkey = request_key(& key_type_id_resolver, (char const *)desc, ""); } revert_creds(saved_cred); kfree((void const *)desc); tmp___0 = IS_ERR((void const *)rkey); if (tmp___0 != 0L) { ret = PTR_ERR((void const *)rkey); goto out; } else { } rcu_read_lock___3(); rkey->perm = rkey->perm | 65536U; tmp___1 = key_validate((struct key const *)rkey); ret = (ssize_t )tmp___1; if (ret < 0L) { goto out_up; } else { } _________p1 = *((void * volatile *)(& rkey->payload.data)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/idmap.c.prepared", 363, "suspicious rcu_dereference_check() usage"); } else { } } else { } payload = (struct user_key_payload *)_________p1; tmp___4 = IS_ERR_OR_NULL((void const *)payload); if (tmp___4 != 0L) { ret = PTR_ERR((void const *)payload); goto out_up; } else { } ret = (ssize_t )payload->datalen; if (ret > 0L && (unsigned long )ret <= data_size) { __len = (size_t )ret; __ret = memcpy(data, (void const *)(& payload->data), __len); } else { ret = -22L; } out_up: rcu_read_unlock___3(); key_put(rkey); out: ; return (ret); } } static ssize_t nfs_idmap_get_key(char const *name , size_t namelen , char const *type , void *data , size_t data_size , struct idmap *idmap ) { ssize_t ret ; ssize_t tmp ; { tmp = nfs_idmap_request_key(& key_type_id_resolver, name, namelen, type, data, data_size, 0); ret = tmp; if (ret < 0L) { ldv_mutex_lock_114(& idmap->idmap_mutex); ret = nfs_idmap_request_key(& key_type_id_resolver_legacy, name, namelen, type, data, data_size, idmap); ldv_mutex_unlock_115(& idmap->idmap_mutex); } else { } return (ret); } } static ssize_t nfs_idmap_lookup_name(__u32 id , char const *type , char *buf , size_t buflen , struct idmap *idmap ) { char id_str[11U] ; int id_len ; ssize_t ret ; { id_len = snprintf((char *)(& id_str), 11UL, "%u", id); ret = nfs_idmap_get_key((char const *)(& id_str), (size_t )id_len, type, (void *)buf, buflen, idmap); if (ret < 0L) { return (-22L); } else { } return (ret); } } static int nfs_idmap_lookup_id(char const *name , size_t namelen , char const *type , __u32 *id , struct idmap *idmap ) { char id_str[11U] ; long id_long ; ssize_t data_size ; int ret ; { ret = 0; data_size = nfs_idmap_get_key(name, namelen, type, (void *)(& id_str), 11UL, idmap); if (data_size <= 0L) { ret = -22; } else { ret = kstrtol((char const *)(& id_str), 10U, & id_long); *id = (unsigned int )id_long; } return (ret); } } static struct match_token const nfs_idmap_tokens[5U] = { {0, "uid:%s"}, {1, "gid:%s"}, {2, "user:%s"}, {3, "group:%s"}, {4, 0}}; static int nfs_idmap_legacy_upcall(struct key_construction *cons , char const *op , void *aux ) ; static ssize_t idmap_pipe_downcall(struct file *filp , char const *src , size_t mlen ) ; static void idmap_release_pipe(struct inode *inode ) ; static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg ) ; static struct rpc_pipe_ops const idmap_upcall_ops = {& rpc_pipe_generic_upcall, & idmap_pipe_downcall, & idmap_release_pipe, 0, & idmap_pipe_destroy_msg}; static struct key_type key_type_id_resolver_legacy = {"id_legacy", 0UL, 0, 0, 0, & user_instantiate, 0, & user_match, & user_revoke, & user_destroy, & user_describe, & user_read, & nfs_idmap_legacy_upcall, {0, 0}, {{{(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}, {(char)0}}}}; static void __nfs_idmap_unregister(struct rpc_pipe *pipe ) { { if ((unsigned long )pipe->dentry != (unsigned long )((struct dentry *)0)) { rpc_unlink(pipe->dentry); } else { } return; } } static int __nfs_idmap_register(struct dentry *dir , struct idmap *idmap , struct rpc_pipe *pipe ) { struct dentry *dentry ; long tmp ; long tmp___0 ; { dentry = rpc_mkpipe_dentry(dir, "idmap", (void *)idmap, pipe); tmp___0 = IS_ERR((void const 
*)dentry); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)dentry); return ((int )tmp); } else { } pipe->dentry = dentry; return (0); } } static void nfs_idmap_unregister(struct nfs_client *clp , struct rpc_pipe *pipe ) { struct net *net ; struct super_block *pipefs_sb ; { net = clp->cl_net; pipefs_sb = rpc_get_sb_net((struct net const *)net); if ((unsigned long )pipefs_sb != (unsigned long )((struct super_block *)0)) { __nfs_idmap_unregister(pipe); rpc_put_sb_net((struct net const *)net); } else { } return; } } static int nfs_idmap_register(struct nfs_client *clp , struct idmap *idmap , struct rpc_pipe *pipe ) { struct net *net ; struct super_block *pipefs_sb ; int err ; { net = clp->cl_net; err = 0; pipefs_sb = rpc_get_sb_net((struct net const *)net); if ((unsigned long )pipefs_sb != (unsigned long )((struct super_block *)0)) { if ((unsigned long )(clp->cl_rpcclient)->cl_dentry != (unsigned long )((struct dentry *)0)) { err = __nfs_idmap_register((clp->cl_rpcclient)->cl_dentry, idmap, pipe); } else { } rpc_put_sb_net((struct net const *)net); } else { } return (err); } } int nfs_idmap_new(struct nfs_client *clp ) { struct idmap *idmap ; struct rpc_pipe *pipe ; int error ; void *tmp ; long tmp___0 ; long tmp___1 ; struct lock_class_key __key ; { tmp = kzalloc(184UL, 208U); idmap = (struct idmap *)tmp; if ((unsigned long )idmap == (unsigned long )((struct idmap *)0)) { return (-12); } else { } pipe = rpc_mkpipe_data(& idmap_upcall_ops, 0); tmp___1 = IS_ERR((void const *)pipe); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)pipe); error = (int )tmp___0; kfree((void const *)idmap); return (error); } else { } error = nfs_idmap_register(clp, idmap, pipe); if (error != 0) { rpc_destroy_pipe_data(pipe); kfree((void const *)idmap); return (error); } else { } idmap->idmap_pipe = pipe; __mutex_init(& idmap->idmap_mutex, "&idmap->idmap_mutex", & __key); clp->cl_idmap = idmap; return (0); } } void nfs_idmap_delete(struct nfs_client *clp ) { struct idmap *idmap ; { idmap = clp->cl_idmap; if ((unsigned long )idmap == (unsigned long )((struct idmap *)0)) { return; } else { } nfs_idmap_unregister(clp, idmap->idmap_pipe); rpc_destroy_pipe_data(idmap->idmap_pipe); clp->cl_idmap = 0; kfree((void const *)idmap); return; } } static int __rpc_pipefs_event(struct nfs_client *clp , unsigned long event , struct super_block *sb ) { int err ; struct dentry *parent ; int tmp ; { err = 0; switch (event) { case 0UL: err = __nfs_idmap_register((clp->cl_rpcclient)->cl_dentry, clp->cl_idmap, (clp->cl_idmap)->idmap_pipe); goto ldv_49326; case 1UL: ; if ((unsigned long )(clp->cl_idmap)->idmap_pipe != (unsigned long )((struct rpc_pipe *)0)) { parent = (((clp->cl_idmap)->idmap_pipe)->dentry)->d_parent; __nfs_idmap_unregister((clp->cl_idmap)->idmap_pipe); tmp = rpc_rmdir(parent); if (tmp != 0) { printk("\vNFS: %s: failed to remove clnt dir!\n", "__rpc_pipefs_event"); } else { } } else { } goto ldv_49326; default: printk("\vNFS: %s: unknown event: %ld\n", "__rpc_pipefs_event", event); return (-524); } ldv_49326: ; return (err); } } static struct nfs_client *nfs_get_client_for_event(struct net *net , int event ) { struct nfs_net *nn ; void *tmp ; struct dentry *cl_dentry ; struct nfs_client *clp ; int err ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; { tmp = net_generic___2((struct net const *)net, nfs_net_id); nn = (struct nfs_net *)tmp; restart: spin_lock(& nn->nfs_client_lock); __mptr = (struct list_head const *)nn->nfs_client_list.next; clp = (struct nfs_client *)__mptr + 
0xffffffffffffff50UL; goto ldv_49346; ldv_49345: ; if (clp->cl_cons_state == 1) { atomic_inc(& clp->cl_count); spin_unlock(& nn->nfs_client_lock); err = nfs_wait_client_init_complete((struct nfs_client const *)clp); nfs_put_client(clp); if (err != 0) { return (0); } else { } goto restart; } else { } if (clp->cl_cons_state < 0) { goto ldv_49344; } else { } __asm__ volatile ("": : : "memory"); if ((unsigned long )clp->rpc_ops != (unsigned long )(& nfs_v4_clientops)) { goto ldv_49344; } else { } cl_dentry = ((clp->cl_idmap)->idmap_pipe)->dentry; if ((event == 0 && (unsigned long )cl_dentry != (unsigned long )((struct dentry *)0)) || (event == 1 && (unsigned long )cl_dentry == (unsigned long )((struct dentry *)0))) { goto ldv_49344; } else { } atomic_inc(& clp->cl_count); spin_unlock(& nn->nfs_client_lock); return (clp); ldv_49344: __mptr___0 = (struct list_head const *)clp->cl_share_link.next; clp = (struct nfs_client *)__mptr___0 + 0xffffffffffffff50UL; ldv_49346: ; if ((unsigned long )(& clp->cl_share_link) != (unsigned long )(& nn->nfs_client_list)) { goto ldv_49345; } else { } spin_unlock(& nn->nfs_client_lock); return (0); } } static int rpc_pipefs_event(struct notifier_block *nb , unsigned long event , void *ptr ) { struct super_block *sb ; struct nfs_client *clp ; int error ; bool tmp ; int tmp___0 ; { sb = (struct super_block *)ptr; error = 0; tmp = try_module_get(& __this_module); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } goto ldv_49358; ldv_49357: error = __rpc_pipefs_event(clp, event, sb); nfs_put_client(clp); if (error != 0) { goto ldv_49356; } else { } ldv_49358: clp = nfs_get_client_for_event((struct net *)sb->s_fs_info, (int )event); if ((unsigned long )clp != (unsigned long )((struct nfs_client *)0)) { goto ldv_49357; } else { } ldv_49356: module_put(& __this_module); return (error); } } static struct notifier_block nfs_idmap_block = {& rpc_pipefs_event, 0, 0}; int nfs_idmap_init(void) { int ret ; { ret = nfs_idmap_init_keyring(); if (ret != 0) { goto out; } else { } ret = rpc_pipefs_notifier_register(& nfs_idmap_block); if (ret != 0) { nfs_idmap_quit_keyring(); } else { } out: ; return (ret); } } void nfs_idmap_quit(void) { { rpc_pipefs_notifier_unregister(& nfs_idmap_block); nfs_idmap_quit_keyring(); return; } } static int nfs_idmap_prepare_message(char *desc , struct idmap *idmap , struct idmap_msg *im , struct rpc_pipe_msg *msg ) { substring_t substr ; int token ; int ret ; size_t tmp ; { im->im_type = 1U; token = match_token(desc, (struct match_token const *)(& nfs_idmap_tokens), & substr); switch (token) { case 0: im->im_type = 0U; case 1: im->im_conv = 1U; tmp = match_strlcpy((char *)(& im->im_name), (substring_t const *)(& substr), 128UL); ret = (int )tmp; goto ldv_49379; case 2: im->im_type = 0U; case 3: im->im_conv = 0U; ret = match_int(& substr, (int *)(& im->im_id)); goto ldv_49379; default: ret = -22; goto out; } ldv_49379: msg->data = (void *)im; msg->len = 140UL; out: ; return (ret); } } static bool nfs_idmap_prepare_pipe_upcall(struct idmap *idmap , struct idmap_legacy_upcalldata *data ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { if ((unsigned long )idmap->idmap_upcall_data != (unsigned long )((struct idmap_legacy_upcalldata *)0)) { __ret_warn_once = 1; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/idmap.c.prepared", 727); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (0); } else { } idmap->idmap_upcall_data = data; return (1); } } static void nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap , int ret ) { struct key_construction *cons ; { cons = (idmap->idmap_upcall_data)->key_cons; kfree((void const *)idmap->idmap_upcall_data); idmap->idmap_upcall_data = 0; complete_request_key(cons, ret); return; } } static void nfs_idmap_abort_pipe_upcall(struct idmap *idmap , int ret ) { { if ((unsigned long )idmap->idmap_upcall_data != (unsigned long )((struct idmap_legacy_upcalldata *)0)) { nfs_idmap_complete_pipe_upcall_locked(idmap, ret); } else { } return; } } static int nfs_idmap_legacy_upcall(struct key_construction *cons , char const *op , void *aux ) { struct idmap_legacy_upcalldata *data ; struct rpc_pipe_msg *msg ; struct idmap_msg *im ; struct idmap *idmap ; struct key *key ; int ret ; void *tmp ; bool tmp___0 ; int tmp___1 ; { idmap = (struct idmap *)aux; key = cons->key; ret = -12; tmp = kzalloc(208UL, 208U); data = (struct idmap_legacy_upcalldata *)tmp; if ((unsigned long )data == (unsigned long )((struct idmap_legacy_upcalldata *)0)) { goto out1; } else { } msg = & data->pipe_msg; im = & data->idmap_msg; data->idmap = idmap; data->key_cons = cons; ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); if (ret < 0) { goto out2; } else { } ret = -11; tmp___0 = nfs_idmap_prepare_pipe_upcall(idmap, data); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto out2; } else { } ret = rpc_queue_upcall(idmap->idmap_pipe, msg); if (ret < 0) { nfs_idmap_abort_pipe_upcall(idmap, ret); } else { } return (ret); out2: kfree((void const *)data); out1: complete_request_key(cons, ret); return (ret); } } static int nfs_idmap_instantiate(struct key *key , struct key *authkey , char *data ) { size_t tmp ; int tmp___0 ; { tmp = strlen((char const *)data); tmp___0 = key_instantiate_and_link(key, (void const *)data, tmp + 1UL, id_resolver_cache->thread_keyring, authkey); return (tmp___0); } } static int nfs_idmap_read_and_verify_message(struct idmap_msg *im , struct idmap_msg *upcall , struct key *key , struct key *authkey ) { char id_str[11U] ; int ret ; int tmp ; { ret = -126; if ((int )upcall->im_type != (int )im->im_type || (int )upcall->im_conv != (int )im->im_conv) { goto out; } else { } switch ((int )im->im_conv) { case 1: tmp = strcmp((char const *)(& upcall->im_name), (char const *)(& im->im_name)); if (tmp != 0) { goto ldv_49430; } else { } sprintf((char *)(& id_str), "%d", im->im_id); ret = nfs_idmap_instantiate(key, authkey, (char *)(& id_str)); goto ldv_49430; case 0: ; if (upcall->im_id != im->im_id) { goto ldv_49430; } else { } ret = nfs_idmap_instantiate(key, authkey, (char *)(& im->im_name)); goto ldv_49430; default: ret = -22; } ldv_49430: ; out: ; return (ret); } } static ssize_t idmap_pipe_downcall(struct file *filp , char const *src , size_t mlen ) { struct rpc_inode *rpci ; struct rpc_inode *tmp ; struct idmap *idmap ; struct key_construction *cons ; struct idmap_msg im ; size_t 
namelen_in ; int ret ; unsigned long tmp___0 ; { tmp = RPC_I((filp->f_path.dentry)->d_inode); rpci = tmp; idmap = (struct idmap *)rpci->private; ret = -126; if ((unsigned long )idmap->idmap_upcall_data == (unsigned long )((struct idmap_legacy_upcalldata *)0)) { goto out_noupcall; } else { } cons = (idmap->idmap_upcall_data)->key_cons; if (mlen != 140UL) { ret = -28; goto out; } else { } tmp___0 = copy_from_user((void *)(& im), (void const *)src, mlen); if (tmp___0 != 0UL) { ret = -14; goto out; } else { } if (((int )im.im_status & 8) == 0) { ret = -126; goto out; } else { } namelen_in = strnlen((char const *)(& im.im_name), 128UL); if (namelen_in == 0UL || namelen_in == 128UL) { ret = -22; goto out; } else { } ret = nfs_idmap_read_and_verify_message(& im, & (idmap->idmap_upcall_data)->idmap_msg, cons->key, cons->authkey); if (ret >= 0) { key_set_timeout(cons->key, nfs_idmap_cache_timeout); ret = (int )mlen; } else { } out: nfs_idmap_complete_pipe_upcall_locked(idmap, ret); out_noupcall: ; return ((ssize_t )ret); } } static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg ) { struct idmap_legacy_upcalldata *data ; struct rpc_pipe_msg const *__mptr ; struct idmap *idmap ; { __mptr = (struct rpc_pipe_msg const *)msg; data = (struct idmap_legacy_upcalldata *)__mptr; idmap = data->idmap; if (msg->errno != 0) { nfs_idmap_abort_pipe_upcall(idmap, msg->errno); } else { } return; } } static void idmap_release_pipe(struct inode *inode ) { struct rpc_inode *rpci ; struct rpc_inode *tmp ; struct idmap *idmap ; { tmp = RPC_I(inode); rpci = tmp; idmap = (struct idmap *)rpci->private; nfs_idmap_abort_pipe_upcall(idmap, -32); return; } } int nfs_map_name_to_uid(struct nfs_server const *server , char const *name , size_t namelen , __u32 *uid ) { struct idmap *idmap ; int tmp ; int tmp___0 ; { idmap = (server->nfs_client)->cl_idmap; tmp = nfs_map_string_to_numeric(name, namelen, uid); if (tmp != 0) { return (0); } else { } tmp___0 = nfs_idmap_lookup_id(name, namelen, "uid", uid, idmap); return (tmp___0); } } int nfs_map_group_to_gid(struct nfs_server const *server , char const *name , size_t namelen , __u32 *gid ) { struct idmap *idmap ; int tmp ; int tmp___0 ; { idmap = (server->nfs_client)->cl_idmap; tmp = nfs_map_string_to_numeric(name, namelen, gid); if (tmp != 0) { return (0); } else { } tmp___0 = nfs_idmap_lookup_id(name, namelen, "gid", gid, idmap); return (tmp___0); } } int nfs_map_uid_to_name(struct nfs_server const *server , __u32 uid , char *buf , size_t buflen ) { struct idmap *idmap ; int ret ; ssize_t tmp ; { idmap = (server->nfs_client)->cl_idmap; ret = -22; if (((unsigned int )server->caps & 32768U) == 0U) { tmp = nfs_idmap_lookup_name(uid, "user", buf, buflen, idmap); ret = (int )tmp; } else { } if (ret < 0) { ret = nfs_map_numeric_to_string(uid, buf, buflen); } else { } return (ret); } } int nfs_map_gid_to_group(struct nfs_server const *server , __u32 gid , char *buf , size_t buflen ) { struct idmap *idmap ; int ret ; ssize_t tmp ; { idmap = (server->nfs_client)->cl_idmap; ret = -22; if (((unsigned int )server->caps & 32768U) == 0U) { tmp = nfs_idmap_lookup_name(gid, "group", buf, buflen, idmap); ret = (int )tmp; } else { } if (ret < 0) { ret = nfs_map_numeric_to_string(gid, buf, buflen); } else { } return (ret); } } void ldv_main7_sequence_infinite_withcheck_stateful(void) { struct file *var_group1 ; char const *var_idmap_pipe_downcall_34_p1 ; size_t var_idmap_pipe_downcall_34_p2 ; struct inode *var_group2 ; struct rpc_pipe_msg *var_group3 ; struct key_construction 
*var_nfs_idmap_legacy_upcall_31_p0 ; char const *var_nfs_idmap_legacy_upcall_31_p1 ; void *var_nfs_idmap_legacy_upcall_31_p2 ; struct notifier_block *var_group4 ; unsigned long var_rpc_pipefs_event_24_p1 ; void *var_rpc_pipefs_event_24_p2 ; int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_49523; ldv_49522: tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_handler_precall(); idmap_pipe_downcall(var_group1, var_idmap_pipe_downcall_34_p1, var_idmap_pipe_downcall_34_p2); goto ldv_49516; case 1: ldv_handler_precall(); idmap_release_pipe(var_group2); goto ldv_49516; case 2: ldv_handler_precall(); idmap_pipe_destroy_msg(var_group3); goto ldv_49516; case 3: ldv_handler_precall(); nfs_idmap_legacy_upcall(var_nfs_idmap_legacy_upcall_31_p0, var_nfs_idmap_legacy_upcall_31_p1, var_nfs_idmap_legacy_upcall_31_p2); goto ldv_49516; case 4: ldv_handler_precall(); rpc_pipefs_event(var_group4, var_rpc_pipefs_event_24_p1, var_rpc_pipefs_event_24_p2); goto ldv_49516; default: ; goto ldv_49516; } ldv_49516: ; ldv_49523: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_49522; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_107(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_108(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_109(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_110(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_111(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_112(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_113(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_114(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_idmap_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_115(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_idmap_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_128(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_126(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_129(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_131(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_133(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_135(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_125(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_127(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_130(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_132(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_134(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_nfs_callback_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_nfs_callback_mutex(struct mutex *lock ) ; __inline static struct thread_info *current_thread_info___4(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6237; 
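/*
 * Note (annotation added here, not part of the CIL output): this switch is
 * CIL's expansion of the x86-64 per-cpu read of kernel_stack inside
 * current_thread_info(); the controlling expression is the constant 8UL
 * (the operand size), so only the "case 8UL" movq arm is ever taken. The
 * later "pfo_ret__ - 8152UL" computation locates struct thread_info at the
 * bottom of the task's stack; 8152 is assumed to correspond to
 * THREAD_SIZE - KERNEL_STACK_OFFSET (8192 - 40) for this kernel
 * configuration.
 */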
case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6237; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6237; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6237; default: __bad_percpu_size(); } ldv_6237: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern void _raw_spin_lock_bh(raw_spinlock_t * ) ; extern void _raw_spin_unlock_bh(raw_spinlock_t * ) ; __inline static void spin_lock_bh(spinlock_t *lock ) { { _raw_spin_lock_bh(& lock->ldv_5961.rlock); return; } } __inline static void spin_unlock_bh(spinlock_t *lock ) { { _raw_spin_unlock_bh(& lock->ldv_5961.rlock); return; } } extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; extern void prepare_to_wait(wait_queue_head_t * , wait_queue_t * , int ) ; extern void finish_wait(wait_queue_head_t * , wait_queue_t * ) ; extern int autoremove_wake_function(wait_queue_t * , unsigned int , int , void * ) ; __inline static void __rcu_read_lock___4(void) { struct thread_info *tmp ; { tmp = current_thread_info___4(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___4(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___4(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___4(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___4(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___4(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___4(); return; } } __inline static void svc_get(struct svc_serv *serv ) { { serv->sv_nrthreads = serv->sv_nrthreads + 1U; return; } } extern void svc_rpcb_cleanup(struct svc_serv * , struct net * ) ; extern int svc_bind(struct svc_serv * , struct net * ) ; extern struct svc_serv *svc_create(struct svc_program * , unsigned int , void (*)(struct svc_serv * , struct net * ) ) ; extern struct svc_rqst *svc_prepare_thread(struct svc_serv * , struct svc_pool * , int ) ; extern void svc_exit_thread(struct svc_rqst * ) ; extern void svc_destroy(struct svc_serv * ) ; extern void svc_shutdown_net(struct svc_serv * , struct net * ) ; extern int svc_process(struct svc_rqst * ) ; extern int bc_svc_process(struct svc_serv * , struct rpc_rqst * , struct svc_rqst * ) ; extern int svc_create_xprt(struct svc_serv * , char const * , struct net * , int const , unsigned short const , int ) ; extern void svc_xprt_put(struct svc_xprt * ) ; extern int svc_recv(struct svc_rqst * , long ) ; extern void svc_sock_update_bufs(struct svc_serv * ) ; extern void schedule(void) ; extern bool set_freezable(void) ; extern int kthread_stop(struct task_struct * ) ; extern bool kthread_should_stop(void) ; __inline static int svc_is_backchannel(struct svc_rqst const *rqstp ) { { if ((unsigned long )(rqstp->rq_server)->sv_bc_xprt != (unsigned long )((struct svc_xprt *)0)) { return (1); } else { } return (0); } } struct svc_version nfs4_callback_version1 ; struct svc_version nfs4_callback_version4 ; int check_gss_callback_principal(struct nfs_client *clp , struct svc_rqst *rqstp ) ; int nfs_callback_up(u32 minorversion , struct rpc_xprt *xprt ) ; void nfs_callback_down(int minorversion , struct net *net ) ; extern unsigned int nfs_callback_set_tcpport ; __inline static void *net_generic___3(struct net const *net , int id ) { struct net_generic *ng ; void *ptr ; struct net_generic *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { rcu_read_lock___4(); _________p1 = *((struct net_generic * const volatile *)(& net->gen)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("include/net/netns/generic.h", 40, "suspicious rcu_dereference_check() usage"); } else { } } else { } ng = _________p1; tmp___1 = ldv__builtin_expect(id == 0, 0L); if (tmp___1 != 0L) { goto _L; } else { tmp___2 = ldv__builtin_expect((unsigned int )id > ng->len, 0L); if (tmp___2 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (41), "i" (12UL)); ldv_49930: ; goto ldv_49930; } else { } } ptr = ng->ptr[id + -1]; rcu_read_unlock___4(); tmp___3 = ldv__builtin_expect((unsigned long )ptr == (unsigned long )((void *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (45), "i" (12UL)); ldv_49931: ; goto ldv_49931; } else { } return (ptr); } } static struct nfs_callback_data nfs_callback_info[2U] ; static struct mutex nfs_callback_mutex = {{1}, {{{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "nfs_callback_mutex.wait_lock", 0, 0UL}}}}, {& nfs_callback_mutex.wait_list, & nfs_callback_mutex.wait_list}, 0, 0, (void *)(& nfs_callback_mutex), {0, {0, 0}, "nfs_callback_mutex", 0, 0UL}}; static struct svc_program nfs4_callback_program ; static int nfs4_callback_up_net(struct svc_serv *serv , struct net *net ) { int ret ; struct nfs_net *nn ; void *tmp ; long tmp___0 ; long tmp___1 ; { tmp = net_generic___3((struct net const *)net, nfs_net_id); nn = (struct nfs_net *)tmp; ret = svc_create_xprt(serv, "tcp", net, 2, (int )((unsigned short const )nfs_callback_set_tcpport), 1); if (ret <= 0) { goto out_err; } else { } nn->nfs_callback_tcpport = (unsigned short )ret; tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS: Callback listener port = %u (af %u, net %p)\n", (int )nn->nfs_callback_tcpport, 2, net); } else { } ret = svc_create_xprt(serv, "tcp", net, 10, (int )((unsigned short const )nfs_callback_set_tcpport), 1); if (ret > 0) { nn->nfs_callback_tcpport6 = (unsigned short )ret; tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: Callback listener port = %u (af %u, net %p)\n", (int )nn->nfs_callback_tcpport6, 10, net); } else { } } else if (ret != -97) { goto out_err; } else { } return (0); out_err: ; return (ret != 0 ? 
ret : -12); } } static int nfs4_callback_svc(void *vrqstp ) { int err ; struct svc_rqst *rqstp ; bool tmp ; int tmp___0 ; { rqstp = (struct svc_rqst *)vrqstp; set_freezable(); goto ldv_49972; ldv_49973: err = svc_recv(rqstp, 9223372036854775807L); if (err == -11 || err == -4) { goto ldv_49972; } else { } svc_process(rqstp); ldv_49972: tmp = kthread_should_stop(); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_49973; } else { } return (0); } } static struct svc_rqst *nfs4_callback_up(struct svc_serv *serv ) { struct svc_rqst *tmp ; { tmp = svc_prepare_thread(serv, serv->sv_pools, -1); return (tmp); } } static int nfs41_callback_up_net(struct svc_serv *serv , struct net *net ) { int tmp ; { tmp = svc_create_xprt(serv, "tcp-bc", net, 2, 0, 1); return (tmp); } } static int nfs41_callback_svc(void *vrqstp ) { struct svc_rqst *rqstp ; struct svc_serv *serv ; struct rpc_rqst *req ; int error ; wait_queue_t wq ; struct task_struct *tmp ; struct list_head const *__mptr ; long tmp___0 ; long tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; { rqstp = (struct svc_rqst *)vrqstp; serv = rqstp->rq_server; tmp = get_current(); wq.flags = 0U; wq.private = (void *)tmp; wq.func = & autoremove_wake_function; wq.task_list.next = & wq.task_list; wq.task_list.prev = & wq.task_list; set_freezable(); goto ldv_49993; ldv_49992: prepare_to_wait(& serv->sv_cb_waitq, & wq, 1); spin_lock_bh(& serv->sv_cb_lock); tmp___2 = list_empty((struct list_head const *)(& serv->sv_cb_list)); if (tmp___2 == 0) { __mptr = (struct list_head const *)serv->sv_cb_list.next; req = (struct rpc_rqst *)__mptr + 0xfffffffffffffe98UL; list_del(& req->rq_bc_list); spin_unlock_bh(& serv->sv_cb_lock); tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dInvoking bc_svc_process()\n"); } else { } error = bc_svc_process(serv, req, rqstp); tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dbc_svc_process() returned w/ error code= %d\n", error); } else { } } else { spin_unlock_bh(& serv->sv_cb_lock); schedule(); } finish_wait(& serv->sv_cb_waitq, & wq); ldv_49993: tmp___3 = kthread_should_stop(); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { goto ldv_49992; } else { } return (0); } } static struct svc_rqst *nfs41_callback_up(struct svc_serv *serv ) { struct svc_rqst *rqstp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; long tmp ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { INIT_LIST_HEAD(& serv->sv_cb_list); spinlock_check(& serv->sv_cb_lock); __raw_spin_lock_init(& serv->sv_cb_lock.ldv_5961.rlock, "&(&serv->sv_cb_lock)->rlock", & __key); __init_waitqueue_head(& serv->sv_cb_waitq, "&serv->sv_cb_waitq", & __key___0); rqstp = svc_prepare_thread(serv, serv->sv_pools, -1); tmp = IS_ERR((void const *)rqstp); if (tmp != 0L) { svc_xprt_put(serv->sv_bc_xprt); serv->sv_bc_xprt = 0; } else { } tmp___4 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = IS_ERR((void const *)rqstp); if (tmp___3 != 0L) { tmp___1 = PTR_ERR((void const *)rqstp); tmp___2 = tmp___1; } else { tmp___2 = 0L; } printk("\001d--> %s return %ld\n", "nfs41_callback_up", tmp___2); } else { } return (rqstp); } } static void nfs_minorversion_callback_svc_setup(struct svc_serv *serv , struct svc_rqst **rqstpp , int (**callback_svc)(void * ) ) { { *rqstpp = nfs41_callback_up(serv); *callback_svc = & nfs41_callback_svc; return; } } __inline static void nfs_callback_bc_serv(u32 minorversion , 
struct rpc_xprt *xprt , struct svc_serv *serv ) { { if (minorversion != 0U) { xprt->bc_serv = serv; } else { } return; } } static int nfs_callback_start_svc(int minorversion , struct rpc_xprt *xprt , struct svc_serv *serv ) { struct svc_rqst *rqstp ; int (*callback_svc)(void * ) ; struct nfs_callback_data *cb_info ; char svc_name[12U] ; int ret ; long tmp ; long tmp___0 ; struct task_struct *__k ; struct task_struct *tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; { cb_info = (struct nfs_callback_data *)(& nfs_callback_info) + (unsigned long )minorversion; nfs_callback_bc_serv((u32 )minorversion, xprt, serv); if ((unsigned long )cb_info->task != (unsigned long )((struct task_struct *)0)) { return (0); } else { } switch (minorversion) { case 0: rqstp = nfs4_callback_up(serv); callback_svc = & nfs4_callback_svc; goto ldv_50025; default: nfs_minorversion_callback_svc_setup(serv, & rqstp, & callback_svc); } ldv_50025: tmp___0 = IS_ERR((void const *)rqstp); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)rqstp); return ((int )tmp); } else { } svc_sock_update_bufs(serv); sprintf((char *)(& svc_name), "nfsv4.%u-svc", minorversion); cb_info->serv = serv; cb_info->rqst = rqstp; tmp___1 = kthread_create_on_node(callback_svc, (void *)cb_info->rqst, -1, (char const *)(& svc_name)); __k = tmp___1; tmp___2 = IS_ERR((void const *)__k); if (tmp___2 == 0L) { wake_up_process(__k); } else { } cb_info->task = __k; tmp___4 = IS_ERR((void const *)cb_info->task); if (tmp___4 != 0L) { tmp___3 = PTR_ERR((void const *)cb_info->task); ret = (int )tmp___3; svc_exit_thread(cb_info->rqst); cb_info->rqst = 0; cb_info->task = 0; return (ret); } else { } tmp___5 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001dnfs_callback_up: service started\n"); } else { } return (0); } } static void nfs_callback_down_net(u32 minorversion , struct svc_serv *serv , struct net *net ) { struct nfs_net *nn ; void *tmp ; long tmp___0 ; { tmp = net_generic___3((struct net const *)net, nfs_net_id); nn = (struct nfs_net *)tmp; nn->cb_users[minorversion] = nn->cb_users[minorversion] - 1; if (nn->cb_users[minorversion] != 0) { return; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS: destroy per-net callback data; net=%p\n", net); } else { } svc_shutdown_net(serv, net); return; } } static int nfs_callback_up_net(int minorversion , struct svc_serv *serv , struct net *net ) { struct nfs_net *nn ; void *tmp ; int ret ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = net_generic___3((struct net const *)net, nfs_net_id); nn = (struct nfs_net *)tmp; tmp___0 = nn->cb_users[minorversion]; nn->cb_users[minorversion] = nn->cb_users[minorversion] + 1; if (tmp___0 != 0) { return (0); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: create per-net callback data; net=%p\n", net); } else { } ret = svc_bind(serv, net); if (ret < 0) { printk("\fNFS: bind callback service failed\n"); goto err_bind; } else { } switch (minorversion) { case 0: ret = nfs4_callback_up_net(serv, net); goto ldv_50044; case 1: ret = nfs41_callback_up_net(serv, net); goto ldv_50044; default: printk("\vNFS: unknown callback version: %d\n", minorversion); ret = -22; goto ldv_50044; } ldv_50044: ; if (ret < 0) { printk("\vNFS: callback service start failed\n"); goto err_socks; } else { } return (0); err_socks: svc_rpcb_cleanup(serv, net); err_bind: tmp___2 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); 
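/*
 * Overview of the callback service bring-up implemented around this point
 * (descriptive note; behavior is read off this file only, one detail marked
 * as an assumption below):
 *
 * - nfs_callback_start_svc() caches the svc_rqst and the kthread in
 *   nfs_callback_info[minorversion]; if cb_info->task is already set it
 *   returns 0 without spawning a second thread.  Minorversion 0 uses
 *   nfs4_callback_up()/nfs4_callback_svc(), any other minorversion goes
 *   through nfs_minorversion_callback_svc_setup(), which installs
 *   nfs41_callback_up()/nfs41_callback_svc().
 * - nfs_callback_up_net() reference-counts per-net users through
 *   nn->cb_users[minorversion]; only the first user calls svc_bind() and
 *   creates the listeners.  On listener failure err_socks additionally
 *   calls svc_rpcb_cleanup() (presumably undoing the rpcbind registration),
 *   while err_bind only emits the debug message that immediately follows
 *   this comment.
 */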
if (tmp___2 != 0L) { printk("\001dNFS: Couldn\'t create callback socket: err = %d; net = %p\n", ret, net); } else { } return (ret); } } static struct svc_serv *nfs_callback_create_svc(int minorversion ) { struct nfs_callback_data *cb_info ; struct svc_serv *serv ; void *tmp ; long tmp___0 ; { cb_info = (struct nfs_callback_data *)(& nfs_callback_info) + (unsigned long )minorversion; if ((unsigned long )cb_info->task != (unsigned long )((struct task_struct *)0)) { svc_get(cb_info->serv); return (cb_info->serv); } else { } if (cb_info->users != 0U) { printk("\fnfs_callback_create_svc: no kthread, %d users??\n", cb_info->users); } else { } serv = svc_create(& nfs4_callback_program, 3072U, 0); if ((unsigned long )serv == (unsigned long )((struct svc_serv *)0)) { printk("\vnfs_callback_create_svc: create service failed\n"); tmp = ERR_PTR(-12L); return ((struct svc_serv *)tmp); } else { } serv->sv_maxconn = 1024U; tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs_callback_create_svc: service created\n"); } else { } return (serv); } } int nfs_callback_up(u32 minorversion , struct rpc_xprt *xprt ) { struct svc_serv *serv ; struct nfs_callback_data *cb_info ; int ret ; struct net *net ; long tmp ; long tmp___0 ; long tmp___1 ; { cb_info = (struct nfs_callback_data *)(& nfs_callback_info) + (unsigned long )minorversion; net = xprt->xprt_net; ldv_mutex_lock_132(& nfs_callback_mutex); serv = nfs_callback_create_svc((int )minorversion); tmp___0 = IS_ERR((void const *)serv); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)serv); ret = (int )tmp; goto err_create; } else { } ret = nfs_callback_up_net((int )minorversion, serv, net); if (ret < 0) { goto err_net; } else { } ret = nfs_callback_start_svc((int )minorversion, xprt, serv); if (ret < 0) { goto err_start; } else { } cb_info->users = cb_info->users + 1U; err_net: svc_destroy(serv); err_create: ldv_mutex_unlock_133(& nfs_callback_mutex); return (ret); err_start: nfs_callback_down_net(minorversion, serv, net); tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: Couldn\'t create server thread; err = %d\n", ret); } else { } goto err_net; } } void nfs_callback_down(int minorversion , struct net *net ) { struct nfs_callback_data *cb_info ; long tmp ; long tmp___0 ; { cb_info = (struct nfs_callback_data *)(& nfs_callback_info) + (unsigned long )minorversion; ldv_mutex_lock_134(& nfs_callback_mutex); nfs_callback_down_net((u32 )minorversion, cb_info->serv, net); cb_info->users = cb_info->users - 1U; if (cb_info->users == 0U && (unsigned long )cb_info->task != (unsigned long )((struct task_struct *)0)) { kthread_stop(cb_info->task); tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001dnfs_callback_down: service stopped\n"); } else { } svc_exit_thread(cb_info->rqst); tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs_callback_down: service destroyed\n"); } else { } cb_info->serv = 0; cb_info->rqst = 0; cb_info->task = 0; } else { } ldv_mutex_unlock_135(& nfs_callback_mutex); return; } } int check_gss_callback_principal(struct nfs_client *clp , struct svc_rqst *rqstp ) { char *p ; int tmp ; int tmp___0 ; { p = rqstp->rq_cred.cr_principal; if ((rqstp->rq_authop)->flavour != 6) { return (1); } else { } if (clp->cl_minorversion != 0U) { return (0); } else { } if ((unsigned long )p == (unsigned long )((char *)0)) { return (0); } else { } tmp = memcmp((void const *)p, (void const 
*)"nfs@", 4UL); if (tmp != 0) { return (0); } else { } p = p + 4UL; tmp___0 = strcmp((char const *)p, (char const *)clp->cl_hostname); if (tmp___0 != 0) { return (0); } else { } return (1); } } static int nfs_callback_authenticate(struct svc_rqst *rqstp ) { int tmp ; { switch ((rqstp->rq_authop)->flavour) { case 0: ; if (rqstp->rq_proc != 0U) { return (6); } else { } goto ldv_50078; case 6: tmp = svc_is_backchannel((struct svc_rqst const *)rqstp); if (tmp != 0) { return (6); } else { } } ldv_50078: ; return (5); } } static struct svc_version *nfs4_callback_version[5U] = { 0, & nfs4_callback_version1, 0, 0, & nfs4_callback_version4}; static struct svc_stat nfs4_callback_stats ; static struct svc_program nfs4_callback_program = {0, 1073741824U, 0U, 0U, 5U, (struct svc_version **)(& nfs4_callback_version), (char *)"NFSv4 callback", (char *)"nfs", & nfs4_callback_stats, & nfs_callback_authenticate}; void ldv_main8_sequence_infinite_withcheck_stateful(void) { struct svc_rqst *var_group1 ; int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_50106; ldv_50105: tmp = __VERIFIER_nondet_int(); switch (tmp) { case 0: ldv_handler_precall(); nfs_callback_authenticate(var_group1); goto ldv_50103; default: ; goto ldv_50103; } ldv_50103: ; ldv_50106: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_50105; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_125(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_126(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_127(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_128(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_129(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_130(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_131(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_132(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_nfs_callback_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_133(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_nfs_callback_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_134(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_nfs_callback_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_135(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_nfs_callback_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_150(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_148(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_151(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_153(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_147(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_149(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_152(struct mutex *ldv_func_arg1 ) ; extern void xdr_init_encode(struct 
xdr_stream * , struct xdr_buf * , __be32 * ) ; extern void xdr_init_decode(struct xdr_stream * , struct xdr_buf * , __be32 * ) ; __inline static struct sockaddr *svc_addr(struct svc_rqst const *rqst ) { { return ((struct sockaddr *)(& rqst->rq_addr)); } } __inline static int xdr_argsize_check(struct svc_rqst *rqstp , __be32 *p ) { char *cp ; struct kvec *vec ; { cp = (char *)p; vec = (struct kvec *)(& rqstp->rq_arg.head); return ((unsigned long )((char *)vec->iov_base) <= (unsigned long )cp && (unsigned long )((char *)vec->iov_base + vec->iov_len) >= (unsigned long )cp); } } __inline static int xdr_ressize_check(struct svc_rqst *rqstp , __be32 *p ) { struct kvec *vec ; char *cp ; { vec = (struct kvec *)(& rqstp->rq_res.head); cp = (char *)p; vec->iov_len = (size_t )((long )cp - (long )vec->iov_base); return (vec->iov_len <= 4096UL); } } __be32 nfs4_callback_sequence(struct cb_sequenceargs *args , struct cb_sequenceres *res , struct cb_process_state *cps ) ; __be32 nfs4_callback_recallany(struct cb_recallanyargs *args , void *dummy , struct cb_process_state *cps ) ; __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args , void *dummy , struct cb_process_state *cps ) ; __be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args , void *dummy , struct cb_process_state *cps ) ; __be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args , void *dummy , struct cb_process_state *cps ) ; __be32 nfs4_callback_getattr(struct cb_getattrargs *args , struct cb_getattrres *res , struct cb_process_state *cps ) ; __be32 nfs4_callback_recall(struct cb_recallargs *args , void *dummy , struct cb_process_state *cps ) ; struct nfs_client *nfs4_find_client_ident(struct net *net , int cb_ident ) ; static struct callback_op callback_ops[15U] ; static __be32 nfs4_callback_null(struct svc_rqst *rqstp , void *argp , void *resp ) { { return (0U); } } static int nfs4_decode_void(struct svc_rqst *rqstp , __be32 *p , void *dummy ) { int tmp ; { tmp = xdr_argsize_check(rqstp, p); return (tmp); } } static int nfs4_encode_void(struct svc_rqst *rqstp , __be32 *p , void *dummy ) { int tmp ; { tmp = xdr_ressize_check(rqstp, p); return (tmp); } } static __be32 *read_buf(struct xdr_stream *xdr , int nbytes ) { __be32 *p ; long tmp ; { p = xdr_inline_decode(xdr, (size_t )nbytes); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { printk("\fNFS: NFSv4 callback reply buffer overflowed!\n"); } else { } return (p); } } static __be32 decode_string(struct xdr_stream *xdr , unsigned int *len , char const **str ) { __be32 *p ; long tmp ; __u32 tmp___0 ; long tmp___1 ; { p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } tmp___0 = __fswab32(*p); *len = tmp___0; if (*len != 0U) { p = read_buf(xdr, (int )*len); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { return (572981248U); } else { } *str = (char const *)p; } else { *str = 0; } return (0U); } } static __be32 decode_fh(struct xdr_stream *xdr , struct nfs_fh *fh ) { __be32 *p ; long tmp ; __u32 tmp___0 ; long tmp___1 ; size_t __len ; void *__ret ; { p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } tmp___0 = __fswab32(*p); fh->size = (unsigned short )tmp___0; if ((unsigned int )fh->size > 128U) { return (287768576U); } else { } p = read_buf(xdr, (int 
)fh->size); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { return (572981248U); } else { } __len = (size_t )fh->size; __ret = memcpy((void *)(& fh->data), (void const *)p, __len); memset((void *)(& fh->data) + (unsigned long )fh->size, 0, 128UL - (unsigned long )fh->size); return (0U); } } static __be32 decode_bitmap(struct xdr_stream *xdr , uint32_t *bitmap ) { __be32 *p ; unsigned int attrlen ; long tmp ; __u32 tmp___0 ; long tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; long tmp___4 ; __u32 tmp___5 ; { p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } tmp___0 = __fswab32(*p); attrlen = tmp___0; p = read_buf(xdr, (int )(attrlen << 2)); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { return (572981248U); } else { } tmp___4 = ldv__builtin_expect(attrlen != 0U, 1L); if (tmp___4 != 0L) { tmp___2 = p; p = p + 1; tmp___3 = __fswab32(*tmp___2); *bitmap = tmp___3; } else { } if (attrlen > 1U) { tmp___5 = __fswab32(*p); *(bitmap + 1UL) = tmp___5; } else { } return (0U); } } static __be32 decode_stateid___0(struct xdr_stream *xdr , nfs4_stateid *stateid ) { __be32 *p ; long tmp ; size_t __len ; void *__ret ; { p = read_buf(xdr, 16); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } __len = 16UL; if (__len > 63UL) { __ret = memcpy((void *)stateid, (void const *)p, __len); } else { __ret = memcpy((void *)stateid, (void const *)p, __len); } return (0U); } } static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr , struct cb_compound_hdr_arg *hdr ) { __be32 *p ; __be32 status ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; __be32 *tmp___3 ; __u32 tmp___4 ; struct ratelimit_state _rs ; int tmp___5 ; __u32 tmp___6 ; long tmp___7 ; { status = decode_string(xdr, & hdr->taglen, & hdr->tag); tmp = ldv__builtin_expect(status != 0U, 0L); if (tmp != 0L) { return (status); } else { } if (hdr->taglen > 500U) { printk("NFS: NFSv4 CALLBACK %s: client sent tag of length %u\n", "decode_compound_hdr_arg", hdr->taglen); return (572981248U); } else { } p = read_buf(xdr, 12); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { return (572981248U); } else { } tmp___1 = p; p = p + 1; tmp___2 = __fswab32(*tmp___1); hdr->minorversion = tmp___2; if (hdr->minorversion <= 1U) { tmp___3 = p; p = p + 1; tmp___4 = __fswab32(*tmp___3); hdr->cb_ident = tmp___4; } else { _rs.lock.raw_lock.ldv_2024.head_tail = 0U; _rs.lock.magic = 3735899821U; _rs.lock.owner_cpu = 4294967295U; _rs.lock.owner = 0xffffffffffffffffUL; _rs.lock.dep_map.key = 0; _rs.lock.dep_map.class_cache[0] = 0; _rs.lock.dep_map.class_cache[1] = 0; _rs.lock.dep_map.name = "_rs.lock"; _rs.lock.dep_map.cpu = 0; _rs.lock.dep_map.ip = 0UL; _rs.interval = 1250; _rs.burst = 10; _rs.printed = 0; _rs.missed = 0; _rs.begin = 0UL; tmp___5 = ___ratelimit(& _rs, "decode_compound_hdr_arg"); if (tmp___5 != 0) { printk("\fNFS: %s: NFSv4 server callback with illegal minor version %u!\n", "decode_compound_hdr_arg", hdr->minorversion); } else { } return (623312896U); } tmp___6 = __fswab32(*p); hdr->nops = tmp___6; tmp___7 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___7 != 0L) { printk("\001d%s: minorversion %d nops %d\n", "decode_compound_hdr_arg", hdr->minorversion, hdr->nops); } else { } return 
(0U); } } static __be32 decode_op_hdr___0(struct xdr_stream *xdr , unsigned int *op ) { __be32 *p ; long tmp ; __u32 tmp___0 ; { p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (707461120U); } else { } tmp___0 = __fswab32(*p); *op = tmp___0; return (0U); } } static __be32 decode_getattr_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_getattrargs *args ) { __be32 status ; long tmp ; __u32 tmp___0 ; long tmp___1 ; { status = decode_fh(xdr, & args->fh); tmp = ldv__builtin_expect(status != 0U, 0L); if (tmp != 0L) { goto out; } else { } args->addr = svc_addr((struct svc_rqst const *)rqstp); status = decode_bitmap(xdr, (uint32_t *)(& args->bitmap)); out: tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { tmp___0 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "decode_getattr_args", tmp___0); } else { } return (status); } } static __be32 decode_recall_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_recallargs *args ) { __be32 *p ; __be32 status ; long tmp ; long tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; long tmp___3 ; { args->addr = svc_addr((struct svc_rqst const *)rqstp); status = decode_stateid___0(xdr, & args->stateid); tmp = ldv__builtin_expect(status != 0U, 0L); if (tmp != 0L) { goto out; } else { } p = read_buf(xdr, 4); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { status = 572981248U; goto out; } else { } tmp___1 = __fswab32(*p); args->truncate = tmp___1; status = decode_fh(xdr, & args->fh); out: tmp___3 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___3 != 0L) { tmp___2 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "decode_recall_args", tmp___2); } else { } return (status); } } static __be32 decode_layoutrecall_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_layoutrecallargs *args ) { __be32 *p ; __be32 status ; uint32_t iomode ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; __be32 *tmp___4 ; __u32 tmp___5 ; __be32 *tmp___6 ; __u32 tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; long tmp___12 ; __u32 tmp___13 ; long tmp___14 ; { status = 0U; args->cbl_addr = svc_addr((struct svc_rqst const *)rqstp); p = read_buf(xdr, 16); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { status = 874971136U; goto out; } else { } tmp___0 = p; p = p + 1; tmp___1 = __fswab32(*tmp___0); args->cbl_layout_type = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32(*tmp___2); iomode = tmp___3; tmp___4 = p; p = p + 1; tmp___5 = __fswab32(*tmp___4); args->cbl_layoutchanged = tmp___5; tmp___6 = p; p = p + 1; tmp___7 = __fswab32(*tmp___6); args->cbl_recall_type = tmp___7; if (args->cbl_recall_type == 1U) { args->ldv_48220.ldv_48218.cbl_range.iomode = iomode; status = decode_fh(xdr, & args->ldv_48220.ldv_48218.cbl_fh); tmp___8 = ldv__builtin_expect(status != 0U, 0L); if (tmp___8 != 0L) { goto out; } else { } p = read_buf(xdr, 16); tmp___9 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___9 != 0L) { status = 874971136U; goto out; } else { } p = xdr_decode_hyper(p, & args->ldv_48220.ldv_48218.cbl_range.offset); p = xdr_decode_hyper(p, & args->ldv_48220.ldv_48218.cbl_range.length); status = decode_stateid___0(xdr, & args->ldv_48220.ldv_48218.cbl_stateid); tmp___10 = ldv__builtin_expect(status != 0U, 0L); if 
(tmp___10 != 0L) { goto out; } else { } } else if (args->cbl_recall_type == 2U) { p = read_buf(xdr, 16); tmp___11 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___11 != 0L) { status = 874971136U; goto out; } else { } p = xdr_decode_hyper(p, & args->ldv_48220.cbl_fsid.major); p = xdr_decode_hyper(p, & args->ldv_48220.cbl_fsid.minor); } else if (args->cbl_recall_type != 3U) { status = 874971136U; goto out; } else { } tmp___12 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___12 != 0L) { printk("\001d%s: ltype 0x%x iomode %d changed %d recall_type %d\n", "decode_layoutrecall_args", args->cbl_layout_type, iomode, args->cbl_layoutchanged, args->cbl_recall_type); } else { } out: tmp___14 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___14 != 0L) { tmp___13 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "decode_layoutrecall_args", tmp___13); } else { } return (status); } } static __be32 decode_devicenotify_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_devicenotifyargs *args ) { __be32 *p ; __be32 status ; u32 tmp ; int n ; int i ; long tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; void *tmp___3 ; struct cb_devicenotifyitem *dev ; long tmp___4 ; __be32 *tmp___5 ; __u32 tmp___6 ; __be32 *tmp___7 ; __u32 tmp___8 ; __be32 *tmp___9 ; __u32 tmp___10 ; __be32 *tmp___11 ; __u32 tmp___12 ; size_t __len ; void *__ret ; long tmp___13 ; __be32 *tmp___14 ; __u32 tmp___15 ; long tmp___16 ; __u32 tmp___17 ; long tmp___18 ; { status = 0U; args->ndevs = 0; p = read_buf(xdr, 4); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { status = 874971136U; goto out; } else { } tmp___1 = p; p = p + 1; tmp___2 = __fswab32(*tmp___1); n = (int )tmp___2; if (n <= 0) { goto out; } else { } if (n < 0) { status = 874971136U; goto out; } else { } tmp___3 = kmalloc((unsigned long )n * 28UL, 208U); args->devs = (struct cb_devicenotifyitem *)tmp___3; if ((unsigned long )args->devs == (unsigned long )((struct cb_devicenotifyitem *)0)) { status = 405209088U; goto out; } else { } i = 0; goto ldv_49120; ldv_49119: dev = args->devs + (unsigned long )i; p = read_buf(xdr, 32); tmp___4 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___4 != 0L) { status = 874971136U; goto err; } else { } tmp___5 = p; p = p + 1; tmp___6 = __fswab32(*tmp___5); tmp = tmp___6; if (tmp != 1U) { status = 369098752U; goto err; } else { } tmp___7 = p; p = p + 1; tmp___8 = __fswab32(*tmp___7); dev->cbd_notify_type = tmp___8; if (dev->cbd_notify_type != 2U && dev->cbd_notify_type != 4U) { status = 369098752U; goto err; } else { } tmp___9 = p; p = p + 1; tmp___10 = __fswab32(*tmp___9); tmp = tmp___10; if ((dev->cbd_notify_type == 2U && tmp != 24U) || (dev->cbd_notify_type == 4U && tmp != 20U)) { status = 369098752U; goto err; } else { } tmp___11 = p; p = p + 1; tmp___12 = __fswab32(*tmp___11); dev->cbd_layout_type = tmp___12; __len = 16UL; if (__len > 63UL) { __ret = memcpy((void *)(& dev->cbd_dev_id.data), (void const *)p, __len); } else { __ret = memcpy((void *)(& dev->cbd_dev_id.data), (void const *)p, __len); } p = p + 4UL; if (dev->cbd_layout_type == 2U) { p = read_buf(xdr, 4); tmp___13 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___13 != 0L) { status = 874971136U; goto err; } else { } tmp___14 = p; p = p + 1; tmp___15 = __fswab32(*tmp___14); dev->cbd_immediate = tmp___15; } else { dev->cbd_immediate = 0U; } args->ndevs = 
args->ndevs + 1; tmp___16 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___16 != 0L) { printk("\001d%s: type %d layout 0x%x immediate %d\n", "decode_devicenotify_args", dev->cbd_notify_type, dev->cbd_layout_type, dev->cbd_immediate); } else { } i = i + 1; ldv_49120: ; if (i < n) { goto ldv_49119; } else { } out: tmp___18 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___18 != 0L) { tmp___17 = __fswab32(status); printk("\001d%s: status %d ndevs %d\n", "decode_devicenotify_args", tmp___17, args->ndevs); } else { } return (status); err: kfree((void const *)args->devs); goto out; } } static __be32 decode_sessionid___0(struct xdr_stream *xdr , struct nfs4_sessionid *sid ) { __be32 *p ; int len ; long tmp ; size_t __len ; void *__ret ; { len = 16; p = read_buf(xdr, len); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } __len = (size_t )len; __ret = memcpy((void *)(& sid->data), (void const *)p, __len); return (0U); } } static __be32 decode_rc_list(struct xdr_stream *xdr , struct referring_call_list *rc_list ) { __be32 *p ; int i ; __be32 status ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; long tmp___2 ; void *tmp___3 ; long tmp___4 ; __be32 *tmp___5 ; __u32 tmp___6 ; __be32 *tmp___7 ; __u32 tmp___8 ; { status = decode_sessionid___0(xdr, & rc_list->rcl_sessionid); if (status != 0U) { goto out; } else { } status = 572981248U; p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out; } else { } tmp___0 = p; p = p + 1; tmp___1 = __fswab32(*tmp___0); rc_list->rcl_nrefcalls = tmp___1; if (rc_list->rcl_nrefcalls != 0U) { p = read_buf(xdr, (int )(rc_list->rcl_nrefcalls * 8U)); tmp___2 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___2 != 0L) { goto out; } else { } tmp___3 = kmalloc((unsigned long )rc_list->rcl_nrefcalls * 8UL, 208U); rc_list->rcl_refcalls = (struct referring_call *)tmp___3; tmp___4 = ldv__builtin_expect((unsigned long )rc_list->rcl_refcalls == (unsigned long )((struct referring_call *)0), 0L); if (tmp___4 != 0L) { goto out; } else { } i = 0; goto ldv_49140; ldv_49139: tmp___5 = p; p = p + 1; tmp___6 = __fswab32(*tmp___5); (rc_list->rcl_refcalls + (unsigned long )i)->rc_sequenceid = tmp___6; tmp___7 = p; p = p + 1; tmp___8 = __fswab32(*tmp___7); (rc_list->rcl_refcalls + (unsigned long )i)->rc_slotid = tmp___8; i = i + 1; ldv_49140: ; if ((uint32_t )i < rc_list->rcl_nrefcalls) { goto ldv_49139; } else { } } else { } status = 0U; out: ; return (status); } } static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_sequenceargs *args ) { __be32 *p ; int i ; __be32 status ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; __be32 *tmp___2 ; __u32 tmp___3 ; __be32 *tmp___4 ; __u32 tmp___5 ; __be32 *tmp___6 ; __u32 tmp___7 ; __be32 *tmp___8 ; __u32 tmp___9 ; void *tmp___10 ; long tmp___11 ; long tmp___12 ; __u32 tmp___13 ; long tmp___14 ; { status = decode_sessionid___0(xdr, & args->csa_sessionid); if (status != 0U) { goto out; } else { } status = 572981248U; p = read_buf(xdr, 20); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { goto out; } else { } args->csa_addr = svc_addr((struct svc_rqst const *)rqstp); tmp___0 = p; p = p + 1; tmp___1 = __fswab32(*tmp___0); args->csa_sequenceid = tmp___1; tmp___2 = p; p = p + 1; tmp___3 = __fswab32(*tmp___2); args->csa_slotid = tmp___3; tmp___4 = p; 
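/*
 * CB_SEQUENCE argument layout as decoded by decode_cb_sequence_args()
 * (descriptive note; sizes and field names are taken from the decode calls
 * in this file):
 *
 *   16 bytes  csa_sessionid      via decode_sessionid___0()
 *    4 bytes  csa_sequenceid     \
 *    4 bytes  csa_slotid          |  one read_buf(xdr, 20), each word
 *    4 bytes  csa_highestslotid   |  byte-swapped with __fswab32()
 *    4 bytes  csa_cachethis       |
 *    4 bytes  csa_nrclists       /
 *   then csa_nrclists referring-call lists, each handled by
 *   decode_rc_list(): its own 16-byte sessionid, a 4-byte rcl_nrefcalls
 *   count, and that many (rc_sequenceid, rc_slotid) 4-byte pairs.
 *
 * If decoding any referring-call list fails, out_free releases each
 * rcl_refcalls array and then the csa_rclists array itself before
 * returning the error status.
 */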
p = p + 1; tmp___5 = __fswab32(*tmp___4); args->csa_highestslotid = tmp___5; tmp___6 = p; p = p + 1; tmp___7 = __fswab32(*tmp___6); args->csa_cachethis = tmp___7; tmp___8 = p; p = p + 1; tmp___9 = __fswab32(*tmp___8); args->csa_nrclists = tmp___9; args->csa_rclists = 0; if (args->csa_nrclists != 0U) { tmp___10 = kmalloc_array((size_t )args->csa_nrclists, 32UL, 208U); args->csa_rclists = (struct referring_call_list *)tmp___10; tmp___11 = ldv__builtin_expect((unsigned long )args->csa_rclists == (unsigned long )((struct referring_call_list *)0), 0L); if (tmp___11 != 0L) { goto out; } else { } i = 0; goto ldv_49153; ldv_49152: status = decode_rc_list(xdr, args->csa_rclists + (unsigned long )i); if (status != 0U) { goto out_free; } else { } i = i + 1; ldv_49153: ; if ((uint32_t )i < args->csa_nrclists) { goto ldv_49152; } else { } } else { } status = 0U; tmp___12 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___12 != 0L) { printk("\001d%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u highestslotid %u cachethis %d nrclists %u\n", "decode_cb_sequence_args", *((u32 *)(& args->csa_sessionid)), *((u32 *)(& args->csa_sessionid) + 1UL), *((u32 *)(& args->csa_sessionid) + 2UL), *((u32 *)(& args->csa_sessionid) + 3UL), args->csa_sequenceid, args->csa_slotid, args->csa_highestslotid, args->csa_cachethis, args->csa_nrclists); } else { } out: tmp___14 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___14 != 0L) { tmp___13 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "decode_cb_sequence_args", tmp___13); } else { } return (status); out_free: i = 0; goto ldv_49157; ldv_49156: kfree((void const *)(args->csa_rclists + (unsigned long )i)->rcl_refcalls); i = i + 1; ldv_49157: ; if ((uint32_t )i < args->csa_nrclists) { goto ldv_49156; } else { } kfree((void const *)args->csa_rclists); goto out; } } static __be32 decode_recallany_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_recallanyargs *args ) { uint32_t bitmap[2U] ; __be32 *p ; __be32 status ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; long tmp___2 ; { args->craa_addr = svc_addr((struct svc_rqst const *)rqstp); p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (874971136U); } else { } tmp___0 = p; p = p + 1; tmp___1 = __fswab32(*tmp___0); args->craa_objs_to_keep = tmp___1; status = decode_bitmap(xdr, (uint32_t *)(& bitmap)); tmp___2 = ldv__builtin_expect(status != 0U, 0L); if (tmp___2 != 0L) { return (status); } else { } args->craa_type_mask = bitmap[0]; return (0U); } } static __be32 decode_recallslot_args(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_recallslotargs *args ) { __be32 *p ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; { args->crsa_addr = svc_addr((struct svc_rqst const *)rqstp); p = read_buf(xdr, 4); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (874971136U); } else { } tmp___0 = p; p = p + 1; tmp___1 = __fswab32(*tmp___0); args->crsa_target_highest_slotid = tmp___1; return (0U); } } static __be32 encode_string___0(struct xdr_stream *xdr , unsigned int len , char const *str ) { __be32 *p ; long tmp ; { p = xdr_reserve_space(xdr, (size_t )(len + 4U)); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } xdr_encode_opaque(p, (void const *)str, len); return (0U); } } static __be32 encode_attr_bitmap(struct xdr_stream *xdr , uint32_t const *bitmap , 
__be32 **savep ) { __be32 bm[2U] ; __be32 *p ; __u32 tmp ; __u32 tmp___0 ; long tmp___1 ; __be32 *tmp___2 ; __be32 *tmp___3 ; __be32 *tmp___4 ; long tmp___5 ; __be32 *tmp___6 ; __be32 *tmp___7 ; long tmp___8 ; __be32 *tmp___9 ; { tmp = __fswab32((__u32 )*bitmap & 24U); bm[0] = tmp; tmp___0 = __fswab32((__u32 )*(bitmap + 1UL) & 3145728U); bm[1] = tmp___0; if (bm[1] != 0U) { p = xdr_reserve_space(xdr, 16UL); tmp___1 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { return (572981248U); } else { } tmp___2 = p; p = p + 1; *tmp___2 = 33554432U; tmp___3 = p; p = p + 1; *tmp___3 = bm[0]; tmp___4 = p; p = p + 1; *tmp___4 = bm[1]; } else if (bm[0] != 0U) { p = xdr_reserve_space(xdr, 12UL); tmp___5 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___5 != 0L) { return (572981248U); } else { } tmp___6 = p; p = p + 1; *tmp___6 = 16777216U; tmp___7 = p; p = p + 1; *tmp___7 = bm[0]; } else { p = xdr_reserve_space(xdr, 8UL); tmp___8 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___8 != 0L) { return (572981248U); } else { } tmp___9 = p; p = p + 1; *tmp___9 = 0U; } *savep = p; return (0U); } } static __be32 encode_attr_change(struct xdr_stream *xdr , uint32_t const *bitmap , uint64_t change ) { __be32 *p ; long tmp ; { if (((unsigned long )*bitmap & 8UL) == 0UL) { return (0U); } else { } p = xdr_reserve_space(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } p = xdr_encode_hyper(p, change); return (0U); } } static __be32 encode_attr_size(struct xdr_stream *xdr , uint32_t const *bitmap , uint64_t size ) { __be32 *p ; long tmp ; { if (((unsigned long )*bitmap & 16UL) == 0UL) { return (0U); } else { } p = xdr_reserve_space(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } p = xdr_encode_hyper(p, size); return (0U); } } static __be32 encode_attr_time(struct xdr_stream *xdr , struct timespec const *time ) { __be32 *p ; long tmp ; __u32 tmp___0 ; { p = xdr_reserve_space(xdr, 12UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } p = xdr_encode_hyper(p, (__u64 )time->tv_sec); tmp___0 = __fswab32((__u32 )time->tv_nsec); *p = tmp___0; return (0U); } } static __be32 encode_attr_ctime(struct xdr_stream *xdr , uint32_t const *bitmap , struct timespec const *time ) { __be32 tmp ; { if (((unsigned long )*(bitmap + 1UL) & 1048576UL) == 0UL) { return (0U); } else { } tmp = encode_attr_time(xdr, time); return (tmp); } } static __be32 encode_attr_mtime(struct xdr_stream *xdr , uint32_t const *bitmap , struct timespec const *time ) { __be32 tmp ; { if (((unsigned long )*(bitmap + 1UL) & 2097152UL) == 0UL) { return (0U); } else { } tmp = encode_attr_time(xdr, time); return (tmp); } } static __be32 encode_compound_hdr_res(struct xdr_stream *xdr , struct cb_compound_hdr_res *hdr ) { __be32 status ; long tmp ; long tmp___0 ; long tmp___1 ; { hdr->status = xdr_reserve_space(xdr, 4UL); tmp = ldv__builtin_expect((unsigned long )hdr->status == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } status = encode_string___0(xdr, hdr->taglen, hdr->tag); tmp___0 = ldv__builtin_expect(status != 0U, 0L); if (tmp___0 != 0L) { return (status); } else { } hdr->nops = xdr_reserve_space(xdr, 4UL); tmp___1 = 
ldv__builtin_expect((unsigned long )hdr->nops == (unsigned long )((__be32 *)0), 0L); if (tmp___1 != 0L) { return (572981248U); } else { } return (0U); } } static __be32 encode_op_hdr___0(struct xdr_stream *xdr , uint32_t op , __be32 res ) { __be32 *p ; long tmp ; __be32 *tmp___0 ; __u32 tmp___1 ; { p = xdr_reserve_space(xdr, 8UL); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (707461120U); } else { } tmp___0 = p; p = p + 1; tmp___1 = __fswab32(op); *tmp___0 = tmp___1; *p = res; return (0U); } } static __be32 encode_getattr_res(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_getattrres const *res ) { __be32 *savep ; __be32 status ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; long tmp___6 ; { savep = 0; status = res->status; tmp = ldv__builtin_expect(status != 0U, 0L); if (tmp != 0L) { goto out; } else { } status = encode_attr_bitmap(xdr, (uint32_t const *)(& res->bitmap), & savep); tmp___0 = ldv__builtin_expect(status != 0U, 0L); if (tmp___0 != 0L) { goto out; } else { } status = encode_attr_change(xdr, (uint32_t const *)(& res->bitmap), res->change_attr); tmp___1 = ldv__builtin_expect(status != 0U, 0L); if (tmp___1 != 0L) { goto out; } else { } status = encode_attr_size(xdr, (uint32_t const *)(& res->bitmap), res->size); tmp___2 = ldv__builtin_expect(status != 0U, 0L); if (tmp___2 != 0L) { goto out; } else { } status = encode_attr_ctime(xdr, (uint32_t const *)(& res->bitmap), & res->ctime); tmp___3 = ldv__builtin_expect(status != 0U, 0L); if (tmp___3 != 0L) { goto out; } else { } status = encode_attr_mtime(xdr, (uint32_t const *)(& res->bitmap), & res->mtime); tmp___4 = __fswab32((unsigned int )((long )xdr->p) - (unsigned int )((long )((char *)savep + 1U))); *savep = tmp___4; out: tmp___6 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___6 != 0L) { tmp___5 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "encode_getattr_res", tmp___5); } else { } return (status); } } static __be32 encode_sessionid(struct xdr_stream *xdr , struct nfs4_sessionid const *sid ) { __be32 *p ; int len ; long tmp ; size_t __len ; void *__ret ; { len = 16; p = xdr_reserve_space(xdr, (size_t )len); tmp = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp != 0L) { return (572981248U); } else { } __len = (size_t )len; __ret = memcpy((void *)p, (void const *)sid, __len); return (0U); } } static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp , struct xdr_stream *xdr , struct cb_sequenceres const *res ) { __be32 *p ; __be32 status ; long tmp ; long tmp___0 ; __be32 *tmp___1 ; __u32 tmp___2 ; __be32 *tmp___3 ; __u32 tmp___4 ; __be32 *tmp___5 ; __u32 tmp___6 ; __be32 *tmp___7 ; __u32 tmp___8 ; __u32 tmp___9 ; long tmp___10 ; { status = res->csr_status; tmp = ldv__builtin_expect(status != 0U, 0L); if (tmp != 0L) { goto out; } else { } encode_sessionid(xdr, & res->csr_sessionid); p = xdr_reserve_space(xdr, 16UL); tmp___0 = ldv__builtin_expect((unsigned long )p == (unsigned long )((__be32 *)0), 0L); if (tmp___0 != 0L) { return (572981248U); } else { } tmp___1 = p; p = p + 1; tmp___2 = __fswab32(res->csr_sequenceid); *tmp___1 = tmp___2; tmp___3 = p; p = p + 1; tmp___4 = __fswab32(res->csr_slotid); *tmp___3 = tmp___4; tmp___5 = p; p = p + 1; tmp___6 = __fswab32(res->csr_highestslotid); *tmp___5 = tmp___6; tmp___7 = p; p = p + 1; tmp___8 = __fswab32(res->csr_target_highestslotid); *tmp___7 = tmp___8; out: tmp___10 = 
ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___10 != 0L) { tmp___9 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "encode_cb_sequence_res", tmp___9); } else { } return (status); } } static __be32 preprocess_nfs41_op(int nop , unsigned int op_nr , struct callback_op **op ) { { if (op_nr == 11U) { if (nop != 0) { return (1344733184U); } else if (nop == 0) { return (1462173696U); } else { } } else { } switch (op_nr) { case 3U: ; case 4U: ; case 11U: ; case 8U: ; case 10U: ; case 5U: ; case 14U: *op = (struct callback_op *)(& callback_ops) + (unsigned long )op_nr; goto ldv_49263; case 6U: ; case 7U: ; case 9U: ; case 12U: ; case 13U: ; return (338100224U); default: ; return (1009188864U); } ldv_49263: ; return (0U); } } static void nfs4_callback_free_slot(struct nfs4_session *session ) { struct nfs4_slot_table *tbl ; { tbl = & session->bc_slot_table; spin_lock(& tbl->slot_tbl_lock); tbl->highest_used_slotid = 4294967295U; nfs4_session_drain_complete(session, tbl); spin_unlock(& tbl->slot_tbl_lock); return; } } static void nfs4_cb_free_slot(struct cb_process_state *cps ) { { if (cps->slotid != 4294967295U) { nfs4_callback_free_slot((cps->clp)->cl_session); } else { } return; } } static __be32 preprocess_nfs4_op(unsigned int op_nr , struct callback_op **op ) { { switch (op_nr) { case 3U: ; case 4U: *op = (struct callback_op *)(& callback_ops) + (unsigned long )op_nr; goto ldv_49283; default: ; return (1009188864U); } ldv_49283: ; return (0U); } } static __be32 process_op(uint32_t minorversion , int nop , struct svc_rqst *rqstp , struct xdr_stream *xdr_in , void *argp , struct xdr_stream *xdr_out , void *resp , struct cb_process_state *cps ) { struct callback_op *op ; unsigned int op_nr ; __be32 status ; long maxlen ; __be32 res ; long tmp ; long tmp___0 ; long tmp___1 ; __be32 tmp___2 ; __be32 tmp___3 ; long tmp___4 ; long tmp___5 ; __u32 tmp___6 ; long tmp___7 ; { op = (struct callback_op *)(& callback_ops); tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: start\n", "process_op"); } else { } status = decode_op_hdr___0(xdr_in, & op_nr); tmp___0 = ldv__builtin_expect(status != 0U, 0L); if (tmp___0 != 0L) { return (status); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: minorversion=%d nop=%d op_nr=%u\n", "process_op", minorversion, nop, op_nr); } else { } if (minorversion != 0U) { tmp___2 = preprocess_nfs41_op(nop, op_nr, & op); status = tmp___2; } else { tmp___3 = preprocess_nfs4_op(op_nr, & op); status = tmp___3; } if (status == 1009188864U) { op_nr = 10044U; } else { } if (status != 0U) { goto encode_hdr; } else { } if (cps->drc_status != 0U) { status = cps->drc_status; goto encode_hdr; } else { } maxlen = ((long )xdr_out->end - (long )xdr_out->p) / 4L; if (maxlen > 0L && (unsigned long )maxlen <= 4095UL) { status = (*(op->decode_args))(rqstp, xdr_in, argp); tmp___4 = ldv__builtin_expect(status == 0U, 1L); if (tmp___4 != 0L) { status = (*(op->process_op))(argp, resp, cps); } else { } } else { status = 572981248U; } encode_hdr: res = encode_op_hdr___0(xdr_out, op_nr, status); tmp___5 = ldv__builtin_expect(res != 0U, 0L); if (tmp___5 != 0L) { return (res); } else { } if ((unsigned long )op->encode_res != (unsigned long )((__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))0) && status == 0U) { status = (*(op->encode_res))(rqstp, xdr_out, resp); } else { } tmp___7 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___7 != 0L) { tmp___6 = 
__fswab32(status); printk("\001d%s: done, status = %d\n", "process_op", tmp___6); } else { } return (status); } } static __be32 nfs4_callback_compound(struct svc_rqst *rqstp , void *argp , void *resp ) { struct cb_compound_hdr_arg hdr_arg ; struct cb_compound_hdr_res hdr_res ; struct xdr_stream xdr_in ; struct xdr_stream xdr_out ; __be32 *p ; __be32 status ; struct cb_process_state cps ; unsigned int nops ; long tmp ; int tmp___0 ; __be32 tmp___1 ; long tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; long tmp___5 ; { hdr_arg.taglen = 0U; hdr_arg.tag = 0; hdr_arg.minorversion = 0U; hdr_arg.cb_ident = 0U; hdr_arg.nops = 0U; hdr_res.status = 0; hdr_res.taglen = 0U; hdr_res.tag = 0; hdr_res.nops = 0; cps.drc_status = 0U; cps.clp = 0; cps.slotid = 4294967295U; cps.net = (rqstp->rq_xprt)->xpt_net; nops = 0U; tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: start\n", "nfs4_callback_compound"); } else { } xdr_init_decode(& xdr_in, & rqstp->rq_arg, (__be32 *)rqstp->rq_arg.head[0].iov_base); p = (__be32 *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len; xdr_init_encode(& xdr_out, & rqstp->rq_res, p); status = decode_compound_hdr_arg(& xdr_in, & hdr_arg); if (status == 572981248U) { return (67108864U); } else { } if (hdr_arg.minorversion == 0U) { cps.clp = nfs4_find_client_ident((rqstp->rq_xprt)->xpt_net, (int )hdr_arg.cb_ident); if ((unsigned long )cps.clp == (unsigned long )((struct nfs_client *)0)) { return (1625948160U); } else { tmp___0 = check_gss_callback_principal(cps.clp, rqstp); if (tmp___0 == 0) { return (1625948160U); } else { } } } else { } hdr_res.taglen = hdr_arg.taglen; hdr_res.tag = hdr_arg.tag; tmp___1 = encode_compound_hdr_res(& xdr_out, & hdr_res); if (tmp___1 != 0U) { return (83886080U); } else { } goto ldv_49317; ldv_49316: status = process_op(hdr_arg.minorversion, (int )nops, rqstp, & xdr_in, argp, & xdr_out, resp, & cps); nops = nops + 1U; ldv_49317: ; if (status == 0U && hdr_arg.nops != nops) { goto ldv_49316; } else { } tmp___2 = ldv__builtin_expect(status == 707461120U, 0L); if (tmp___2 != 0L) { status = 572981248U; nops = nops - 1U; } else { } *(hdr_res.status) = status; tmp___3 = __fswab32(nops); *(hdr_res.nops) = tmp___3; nfs4_cb_free_slot(& cps); nfs_put_client(cps.clp); tmp___5 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___5 != 0L) { tmp___4 = __fswab32(status); printk("\001d%s: done, status = %u\n", "nfs4_callback_compound", tmp___4); } else { } return (0U); } } static struct callback_op callback_ops[15U] = { {0, 0, 0, 514L}, {0, 0, 0, 0L}, {0, 0, 0, 0L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& nfs4_callback_getattr), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_getattr_args), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& encode_getattr_res), 528L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& nfs4_callback_recall), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_recall_args), 0, 514L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& nfs4_callback_layoutrecall), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_layoutrecall_args), 0, 514L}, {0, 0, 0, 0L}, {0, 0, 0, 0L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& nfs4_callback_recallany), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_recallany_args), 0, 514L}, {0, 0, 0, 0L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& 
nfs4_callback_recallslot), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_recallslot_args), 0, 514L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& nfs4_callback_sequence), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_cb_sequence_args), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& encode_cb_sequence_res), 522L}, {0, 0, 0, 0L}, {0, 0, 0, 0L}, {(__be32 (*)(void * , void * , struct cb_process_state * ))(& nfs4_callback_devicenotify), (__be32 (*)(struct svc_rqst * , struct xdr_stream * , void * ))(& decode_devicenotify_args), 0, 514L}}; static struct svc_procedure nfs4_callback_procedures1[2U] = { {& nfs4_callback_null, (int (*)(void * , __be32 * , void * ))(& nfs4_decode_void), (int (*)(void * , __be32 * , void * ))(& nfs4_encode_void), 0, 0U, 0U, 0U, 0U, 1U}, {& nfs4_callback_compound, 0, (int (*)(void * , __be32 * , void * ))(& nfs4_encode_void), 0, 256U, 256U, 0U, 0U, 3072U}}; struct svc_version nfs4_callback_version1 = {1U, 2U, (struct svc_procedure *)(& nfs4_callback_procedures1), 2048U, 1U, 0}; struct svc_version nfs4_callback_version4 = {4U, 2U, (struct svc_procedure *)(& nfs4_callback_procedures1), 2048U, 1U, 0}; void ldv_main9_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_49346; ldv_49345: tmp = __VERIFIER_nondet_int(); switch (tmp) { default: ; goto ldv_49344; } ldv_49344: ; ldv_49346: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_49345; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_147(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_148(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_149(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_150(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_151(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_152(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_153(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_164(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_162(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_165(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_167(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_161(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_163(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_166(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___5(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6775; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6775; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6775; case 8UL: __asm__ 
("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6775; default: __bad_percpu_size(); } ldv_6775: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void __rcu_read_lock___5(void) { struct thread_info *tmp ; { tmp = current_thread_info___5(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___5(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___5(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___5(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___5(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___5(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___5(); return; } } __inline static loff_t i_size_read(struct inode const *inode ) { { return ((loff_t )inode->i_size); } } struct nfs_client *nfs4_find_client_sessionid(struct net *net , struct sockaddr const *addr , struct nfs4_sessionid *sid ) ; void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo ) ; void nfs4_delete_deviceid(struct pnfs_layoutdriver_type const *ld , struct nfs_client const *clp , struct nfs4_deviceid const *id ) ; void nfs41_set_target_slotid(struct nfs4_slot_table *tbl , u32 target_highest_slotid ) ; __be32 nfs4_callback_getattr(struct cb_getattrargs *args , struct cb_getattrres *res , struct cb_process_state *cps ) { struct nfs_delegation *delegation ; struct nfs_inode *nfsi ; struct inode *inode ; uint32_t tmp ; char const *tmp___0 ; long tmp___1 ; struct nfs_delegation *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; loff_t tmp___4 ; __u32 tmp___5 ; long tmp___6 ; { res->status = 1462173696U; if ((unsigned long )cps->clp == (unsigned long )((struct nfs_client *)0)) { goto out; } else { } tmp = 0U; res->bitmap[1] = tmp; res->bitmap[0] = tmp; res->status = 287768576U; tmp___1 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___1 != 0L) { rcu_read_lock___5(); tmp___0 = rpc_peeraddr2str((cps->clp)->cl_rpcclient, 0); printk("\001dNFS: GETATTR callback request from %s\n", tmp___0); rcu_read_unlock___5(); } else { } inode = nfs_delegation_find_inode(cps->clp, (struct nfs_fh const *)(& args->fh)); if ((unsigned long )inode == (unsigned long )((struct inode *)0)) { goto out; } else { } nfsi = NFS_I((struct inode const *)inode); rcu_read_lock___5(); _________p1 = *((struct nfs_delegation * volatile *)(& nfsi->delegation)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
__warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/callback_proc.c.prepared", 110, "suspicious rcu_dereference_check() usage"); } else { } } else { } delegation = _________p1; if ((unsigned long )delegation == (unsigned long )((struct nfs_delegation *)0) || (delegation->type & 2U) == 0U) { goto out_iput; } else { } tmp___4 = i_size_read((struct inode const *)inode); res->size = (uint64_t )tmp___4; res->change_attr = delegation->change_attr; if (nfsi->npages != 0UL) { res->change_attr = res->change_attr + 1ULL; } else { } res->ctime = inode->i_ctime; res->mtime = inode->i_mtime; res->bitmap[0] = args->bitmap[0] & 24U; res->bitmap[1] = args->bitmap[1] & 3145728U; res->status = 0U; out_iput: rcu_read_unlock___5(); iput(inode); out: tmp___6 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___6 != 0L) { tmp___5 = __fswab32(res->status); printk("\001d%s: exit with status = %d\n", "nfs4_callback_getattr", tmp___5); } else { } return (res->status); } } __be32 nfs4_callback_recall(struct cb_recallargs *args , void *dummy , struct cb_process_state *cps ) { struct inode *inode ; __be32 res ; char const *tmp ; long tmp___0 ; int tmp___1 ; __u32 tmp___2 ; long tmp___3 ; { res = 1462173696U; if ((unsigned long )cps->clp == (unsigned long )((struct nfs_client *)0)) { goto out; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { rcu_read_lock___5(); tmp = rpc_peeraddr2str((cps->clp)->cl_rpcclient, 0); printk("\001dNFS: RECALL callback request from %s\n", tmp); rcu_read_unlock___5(); } else { } res = 287768576U; inode = nfs_delegation_find_inode(cps->clp, (struct nfs_fh const *)(& args->fh)); if ((unsigned long )inode == (unsigned long )((struct inode *)0)) { goto out; } else { } tmp___1 = nfs_async_inode_return_delegation(inode, (nfs4_stateid const *)(& args->stateid)); switch (tmp___1) { case 0: res = 0U; goto ldv_49283; case -2: res = 690421760U; goto ldv_49283; default: res = 572981248U; } ldv_49283: iput(inode); out: tmp___3 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___3 != 0L) { tmp___2 = __fswab32(res); printk("\001d%s: exit with status = %d\n", "nfs4_callback_recall", tmp___2); } else { } return (res); } } static struct pnfs_layout_hdr *get_layout_by_fh_locked(struct nfs_client *clp , struct nfs_fh *fh ) { struct nfs_server *server ; struct inode *ino ; struct pnfs_layout_hdr *lo ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; struct list_head const *__mptr___0 ; struct nfs_inode *tmp___0 ; int tmp___1 ; struct nfs_inode *tmp___2 ; struct list_head const *__mptr___1 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___2 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___3 ; { __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49317; ldv_49316: __mptr___0 = (struct list_head const *)server->layouts.next; lo = (struct pnfs_layout_hdr *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_49314; ldv_49313: tmp___0 = NFS_I((struct inode const *)lo->plh_inode); tmp___1 = nfs_compare_fh((struct nfs_fh const *)fh, (struct nfs_fh const *)(& tmp___0->fh)); if (tmp___1 != 0) { goto ldv_49312; } else { } ino = igrab(lo->plh_inode); if ((unsigned long )ino == (unsigned long )((struct inode *)0)) { goto ldv_49312; } else { } spin_lock(& ino->i_lock); tmp___2 = NFS_I((struct inode const *)ino); if ((unsigned long )tmp___2->layout != (unsigned long )lo) { spin_unlock(& ino->i_lock); iput(ino); goto ldv_49312; } else { } pnfs_get_layout_hdr(lo); spin_unlock(& ino->i_lock); return (lo); ldv_49312: __mptr___1 = (struct list_head const *)lo->plh_layouts.next; lo = (struct pnfs_layout_hdr *)__mptr___1 + 0xfffffffffffffff8UL; ldv_49314: ; if ((unsigned long )(& lo->plh_layouts) != (unsigned long )(& server->layouts)) { goto ldv_49313; } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___2 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___2 + 0xfffffffffffffff8UL; ldv_49317: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49316; } else { } return (0); } } static struct pnfs_layout_hdr *get_layout_by_fh(struct nfs_client *clp , struct nfs_fh *fh ) { struct pnfs_layout_hdr *lo ; { spin_lock(& clp->cl_lock); rcu_read_lock___5(); lo = get_layout_by_fh_locked(clp, fh); rcu_read_unlock___5(); spin_unlock(& clp->cl_lock); return (lo); } } static u32 initiate_file_draining(struct nfs_client *clp , struct cb_layoutrecallargs *args ) { struct inode *ino ; struct pnfs_layout_hdr *lo ; u32 rv ; struct list_head free_me_list ; int tmp ; int tmp___0 ; { rv = 10060U; free_me_list.next = & free_me_list; free_me_list.prev = & free_me_list; lo = get_layout_by_fh(clp, & args->ldv_48220.ldv_48218.cbl_fh); if ((unsigned long )lo == (unsigned long )((struct pnfs_layout_hdr *)0)) { return (10060U); } else { } ino = lo->plh_inode; spin_lock(& ino->i_lock); tmp = constant_test_bit(2U, (unsigned long const volatile *)(& lo->plh_flags)); if (tmp != 0) { rv = 10008U; } else { tmp___0 = pnfs_mark_matching_lsegs_invalid(lo, & free_me_list, & args->ldv_48220.ldv_48218.cbl_range); if (tmp___0 != 0) { rv = 10008U; } else { rv = 10060U; } } pnfs_set_layout_stateid(lo, (nfs4_stateid const *)(& args->ldv_48220.ldv_48218.cbl_stateid), 1); spin_unlock(& ino->i_lock); pnfs_free_lseg_list(& free_me_list); pnfs_put_layout_hdr(lo); iput(ino); return (rv); } } static u32 initiate_bulk_draining(struct nfs_client *clp , struct cb_layoutrecallargs *args ) { struct nfs_server *server ; struct pnfs_layout_hdr *lo ; struct inode *ino ; u32 rv ; struct pnfs_layout_hdr *tmp ; struct list_head recall_list ; struct list_head free_me_list ; struct pnfs_layout_range range ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; struct nfs_inode *tmp___2 ; struct list_head const *__mptr___1 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___2 ; 
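/* initiate_bulk_draining(): under clp->cl_lock and the RCU read lock, walk every
   nfs_server on clp->cl_superblocks (skipping servers whose fsid differs from
   cbl_fsid when cbl_recall_type == 2U, i.e. an FSID recall), igrab() each inode
   that still owns its pnfs_layout_hdr, take a reference on the header and collect
   it on recall_list via plh_bulk_recall. The locks are then dropped and each
   collected layout has its matching segments invalidated over the full range;
   the function returns 10008U (NFS4ERR_DELAY) if anything was invalidated, else
   10060U (NFS4ERR_NOMATCHING_LAYOUT). */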
struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___3 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; int tmp___4 ; struct list_head const *__mptr___5 ; { rv = 10060U; recall_list.next = & recall_list; recall_list.prev = & recall_list; free_me_list.next = & free_me_list; free_me_list.prev = & free_me_list; range.iomode = 3U; range.offset = 0ULL; range.length = 0xffffffffffffffffULL; spin_lock(& clp->cl_lock); rcu_read_lock___5(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49368; ldv_49367: ; if (args->cbl_recall_type == 2U) { tmp___1 = memcmp((void const *)(& server->fsid), (void const *)(& args->ldv_48220.cbl_fsid), 16UL); if (tmp___1 != 0) { goto ldv_49358; } else { } } else { } __mptr___0 = (struct list_head const *)server->layouts.next; lo = (struct pnfs_layout_hdr *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_49365; ldv_49364: ino = igrab(lo->plh_inode); if ((unsigned long )ino != (unsigned long )((struct inode *)0)) { goto ldv_49363; } else { } spin_lock(& ino->i_lock); tmp___2 = NFS_I((struct inode const *)ino); if ((unsigned long )tmp___2->layout != (unsigned long )lo) { spin_unlock(& ino->i_lock); iput(ino); goto ldv_49363; } else { } pnfs_get_layout_hdr(lo); spin_unlock(& ino->i_lock); list_add(& lo->plh_bulk_recall, & recall_list); ldv_49363: __mptr___1 = (struct list_head const *)lo->plh_layouts.next; lo = (struct pnfs_layout_hdr *)__mptr___1 + 0xfffffffffffffff8UL; ldv_49365: ; if ((unsigned long )(& lo->plh_layouts) != (unsigned long )(& server->layouts)) { goto ldv_49364; } else { } ldv_49358: __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___2 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___2 + 0xfffffffffffffff8UL; ldv_49368: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49367; } else { } rcu_read_unlock___5(); spin_unlock(& clp->cl_lock); __mptr___3 = (struct list_head const *)recall_list.next; lo = (struct pnfs_layout_hdr *)__mptr___3 + 0xffffffffffffffe8UL; __mptr___4 = (struct list_head const *)lo->plh_bulk_recall.next; tmp = (struct pnfs_layout_hdr *)__mptr___4 + 0xffffffffffffffe8UL; goto ldv_49377; ldv_49376: ino = lo->plh_inode; spin_lock(& ino->i_lock); set_bit(2U, (unsigned long volatile *)(& lo->plh_flags)); tmp___4 = pnfs_mark_matching_lsegs_invalid(lo, & free_me_list, & range); if (tmp___4 != 0) { rv = 10008U; } else { } list_del_init(& lo->plh_bulk_recall); spin_unlock(& ino->i_lock); pnfs_free_lseg_list(& free_me_list); pnfs_put_layout_hdr(lo); iput(ino); lo = tmp; __mptr___5 = (struct list_head const *)tmp->plh_bulk_recall.next; tmp = (struct pnfs_layout_hdr *)__mptr___5 + 0xffffffffffffffe8UL; ldv_49377: ; if ((unsigned long )(& lo->plh_bulk_recall) != (unsigned long )(& recall_list)) { goto ldv_49376; } else { } return (rv); } } static u32 do_callback_layoutrecall(struct nfs_client *clp , struct cb_layoutrecallargs *args ) { u32 res ; long tmp ; long tmp___0 ; { tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s enter, type=%i\n", "do_callback_layoutrecall", args->cbl_recall_type); } else { } if (args->cbl_recall_type == 1U) { res = initiate_file_draining(clp, args); } else { res = initiate_bulk_draining(clp, args); } tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s returning %i\n", "do_callback_layoutrecall", res); } else { } return (res); } } __be32 nfs4_callback_layoutrecall(struct cb_layoutrecallargs *args , void *dummy , struct cb_process_state *cps ) { u32 res ; long tmp ; long tmp___0 ; __u32 tmp___1 ; { tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: -->\n", "nfs4_callback_layoutrecall"); } else { } if ((unsigned long )cps->clp != (unsigned long )((struct nfs_client *)0)) { res = do_callback_layoutrecall(cps->clp, args); } else { res = 10071U; } tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: exit with status = %d\n", "nfs4_callback_layoutrecall", res); } else { } tmp___1 = __fswab32(res); return (tmp___1); } } static void pnfs_recall_all_layouts(struct nfs_client *clp ) { struct cb_layoutrecallargs args ; { memset((void *)(& args), 0, 200UL); args.cbl_recall_type = 3U; do_callback_layoutrecall(clp, & args); return; } } __be32 nfs4_callback_devicenotify(struct cb_devicenotifyargs *args , void *dummy , struct cb_process_state *cps ) { int i ; __be32 res ; struct nfs_client *clp ; struct nfs_server *server ; long tmp ; struct cb_devicenotifyitem *dev ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; __u32 tmp___4 ; long tmp___5 ; { res = 0U; clp = cps->clp; server = 0; tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: -->\n", "nfs4_callback_devicenotify"); } else { } if ((unsigned long )clp == 
(unsigned long )((struct nfs_client *)0)) { res = 1462173696U; goto out; } else { } i = 0; goto ldv_49428; ldv_49427: dev = args->devs + (unsigned long )i; if ((unsigned long )server == (unsigned long )((struct nfs_server *)0) || (unsigned int )(server->pnfs_curr_ld)->id != dev->cbd_layout_type) { rcu_read_lock___5(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49424; ldv_49423: ; if ((unsigned long )server->pnfs_curr_ld != (unsigned long )((struct pnfs_layoutdriver_type *)0) && (unsigned int )(server->pnfs_curr_ld)->id == dev->cbd_layout_type) { rcu_read_unlock___5(); goto found; } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49424: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49423; } else { } rcu_read_unlock___5(); tmp___2 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: layout type %u not found\n", "nfs4_callback_devicenotify", dev->cbd_layout_type); } else { } goto ldv_49426; } else { } found: ; if (dev->cbd_notify_type == 2U) { tmp___3 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: NOTIFY_DEVICEID4_CHANGE not supported, deleting instead\n", "nfs4_callback_devicenotify"); } else { } } else { } nfs4_delete_deviceid((struct pnfs_layoutdriver_type const *)server->pnfs_curr_ld, (struct nfs_client const *)clp, (struct nfs4_deviceid const *)(& dev->cbd_dev_id)); ldv_49426: i = i + 1; ldv_49428: ; if (args->ndevs > i) { goto ldv_49427; } else { } out: kfree((void const *)args->devs); tmp___5 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___5 != 0L) { tmp___4 = __fswab32(res); printk("\001d%s: exit with status = %u\n", "nfs4_callback_devicenotify", tmp___4); } else { } return (res); } } static __be32 validate_seqid(struct nfs4_slot_table *tbl , struct cb_sequenceargs *args ) { struct nfs4_slot *slot ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; { tmp = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s enter. 
slotid %d seqid %d\n", "validate_seqid", args->csa_slotid, args->csa_sequenceid); } else { } if (args->csa_slotid != 0U) { return (1160183808U); } else { } slot = tbl->slots + (unsigned long )args->csa_slotid; tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s slot table seqid: %d\n", "validate_seqid", slot->seq_nr); } else { } tmp___1 = ldv__builtin_expect(args->csa_sequenceid == slot->seq_nr + 1U, 1L); if (tmp___1 != 0L) { slot->seq_nr = slot->seq_nr + 1U; goto out_ok; } else { } if (args->csa_sequenceid == slot->seq_nr) { tmp___2 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s seqid %d is a replay\n", "validate_seqid", args->csa_sequenceid); } else { } if (args->csa_cachethis == 0U) { return (1411842048U); } else if (args->csa_cachethis == 1U) { return (1395064832U); } else { } } else { } if (args->csa_sequenceid == 1U && slot->seq_nr == 4294967295U) { slot->seq_nr = 1U; goto out_ok; } else { } return (1327955968U); out_ok: tbl->highest_used_slotid = args->csa_slotid; return (0U); } } static bool referring_call_exists(struct nfs_client *clp , uint32_t nrclists , struct referring_call_list *rclists ) { bool status ; int i ; int j ; struct nfs4_session *session ; struct nfs4_slot_table *tbl ; struct referring_call_list *rclist ; struct referring_call *ref ; int tmp ; long tmp___0 ; int tmp___1 ; { status = 0; session = clp->cl_session; tbl = & session->fc_slot_table; i = 0; goto ldv_49456; ldv_49455: rclist = rclists + (unsigned long )i; tmp = memcmp((void const *)(& session->sess_id.data), (void const *)(& rclist->rcl_sessionid.data), 16UL); if (tmp != 0) { goto ldv_49449; } else { } j = 0; goto ldv_49453; ldv_49452: ref = rclist->rcl_refcalls + (unsigned long )j; tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: sessionid %x:%x:%x:%x sequenceid %u slotid %u\n", "referring_call_exists", *((u32 *)(& rclist->rcl_sessionid.data)), *((u32 *)(& rclist->rcl_sessionid.data) + 1UL), *((u32 *)(& rclist->rcl_sessionid.data) + 2UL), *((u32 *)(& rclist->rcl_sessionid.data) + 3UL), ref->rc_sequenceid, ref->rc_slotid); } else { } spin_lock(& tbl->slot_tbl_lock); tmp___1 = variable_test_bit((int )ref->rc_slotid, (unsigned long const volatile *)(& tbl->used_slots)); status = (bool )(tmp___1 != 0 && (tbl->slots + (unsigned long )ref->rc_slotid)->seq_nr == ref->rc_sequenceid); spin_unlock(& tbl->slot_tbl_lock); if ((int )status) { goto out; } else { } j = j + 1; ldv_49453: ; if ((uint32_t )j < rclist->rcl_nrefcalls) { goto ldv_49452; } else { } ldv_49449: i = i + 1; ldv_49456: ; if ((uint32_t )i < nrclists) { goto ldv_49455; } else { } out: ; return (status); } } __be32 nfs4_callback_sequence(struct cb_sequenceargs *args , struct cb_sequenceres *res , struct cb_process_state *cps ) { struct nfs4_slot_table *tbl ; struct nfs_client *clp ; int i ; __be32 status ; int tmp ; int tmp___0 ; bool tmp___1 ; size_t __len ; void *__ret ; __u32 tmp___2 ; __u32 tmp___3 ; long tmp___4 ; { status = 1143406592U; clp = nfs4_find_client_sessionid(cps->net, (struct sockaddr const *)args->csa_addr, & args->csa_sessionid); if ((unsigned long )clp == (unsigned long )((struct nfs_client *)0)) { goto out; } else { } tbl = & (clp->cl_session)->bc_slot_table; spin_lock(& tbl->slot_tbl_lock); tmp___0 = constant_test_bit(1U, (unsigned long const volatile *)(& (clp->cl_session)->session_state)); if (tmp___0 != 0) { spin_unlock(& tbl->slot_tbl_lock); status = 405209088U; tmp = constant_test_bit(6U, 
(unsigned long const volatile *)(& clp->cl_state)); if (tmp != 0) { status = 1143406592U; } else { } goto out; } else { } status = validate_seqid(& (clp->cl_session)->bc_slot_table, args); spin_unlock(& tbl->slot_tbl_lock); if (status != 0U) { goto out; } else { } cps->slotid = args->csa_slotid; tmp___1 = referring_call_exists(clp, args->csa_nrclists, args->csa_rclists); if ((int )tmp___1) { status = 405209088U; goto out; } else { } __len = 16UL; if (__len > 63UL) { __ret = memcpy((void *)(& res->csr_sessionid), (void const *)(& args->csa_sessionid), __len); } else { __ret = memcpy((void *)(& res->csr_sessionid), (void const *)(& args->csa_sessionid), __len); } res->csr_sequenceid = args->csa_sequenceid; res->csr_slotid = args->csa_slotid; res->csr_highestslotid = 0U; res->csr_target_highestslotid = 0U; out: cps->clp = clp; i = 0; goto ldv_49472; ldv_49471: kfree((void const *)(args->csa_rclists + (unsigned long )i)->rcl_refcalls); i = i + 1; ldv_49472: ; if ((uint32_t )i < args->csa_nrclists) { goto ldv_49471; } else { } kfree((void const *)args->csa_rclists); if (status == 1411842048U) { cps->drc_status = status; status = 0U; } else { res->csr_status = status; } tmp___4 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___4 != 0L) { tmp___2 = __fswab32(res->csr_status); tmp___3 = __fswab32(status); printk("\001d%s: exit with status = %d res->csr_status %d\n", "nfs4_callback_sequence", tmp___3, tmp___2); } else { } return (status); } } static bool validate_bitmap_values(unsigned long mask ) { { return ((mask & 0xffffffffffff0ce0UL) == 0UL); } } __be32 nfs4_callback_recallany(struct cb_recallanyargs *args , void *dummy , struct cb_process_state *cps ) { __be32 status ; fmode_t flags ; char const *tmp ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; __u32 tmp___6 ; long tmp___7 ; { flags = 0U; status = 1462173696U; if ((unsigned long )cps->clp == (unsigned long )((struct nfs_client *)0)) { goto out; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { rcu_read_lock___5(); tmp = rpc_peeraddr2str((cps->clp)->cl_rpcclient, 0); printk("\001dNFS: RECALL_ANY callback request from %s\n", tmp); rcu_read_unlock___5(); } else { } status = 369098752U; tmp___1 = validate_bitmap_values((unsigned long )args->craa_type_mask); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { goto out; } else { } status = 0U; tmp___3 = constant_test_bit(0U, (unsigned long const volatile *)(& args->craa_type_mask)); if (tmp___3 != 0) { flags = 1U; } else { } tmp___4 = constant_test_bit(1U, (unsigned long const volatile *)(& args->craa_type_mask)); if (tmp___4 != 0) { flags = flags | 2U; } else { } tmp___5 = constant_test_bit(3U, (unsigned long const volatile *)(& args->craa_type_mask)); if (tmp___5 != 0) { pnfs_recall_all_layouts(cps->clp); } else { } if (flags != 0U) { nfs_expire_all_delegation_types(cps->clp, flags); } else { } out: tmp___7 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___7 != 0L) { tmp___6 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "nfs4_callback_recallany", tmp___6); } else { } return (status); } } __be32 nfs4_callback_recallslot(struct cb_recallslotargs *args , void *dummy , struct cb_process_state *cps ) { struct nfs4_slot_table *fc_tbl ; __be32 status ; char const *tmp ; long tmp___0 ; __u32 tmp___1 ; long tmp___2 ; { status = 1462173696U; if ((unsigned long )cps->clp == (unsigned long )((struct nfs_client *)0)) { goto out; } else { } tmp___0 = 
ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___0 != 0L) { rcu_read_lock___5(); tmp = rpc_peeraddr2str((cps->clp)->cl_rpcclient, 0); printk("\001dNFS: CB_RECALL_SLOT request from %s target highest slotid %d\n", tmp, args->crsa_target_highest_slotid); rcu_read_unlock___5(); } else { } fc_tbl = & ((cps->clp)->cl_session)->fc_slot_table; status = 0U; nfs41_set_target_slotid(fc_tbl, args->crsa_target_highest_slotid); nfs41_server_notify_target_slotid_update(cps->clp); out: tmp___2 = ldv__builtin_expect((nfs_debug & 256U) != 0U, 0L); if (tmp___2 != 0L) { tmp___1 = __fswab32(status); printk("\001d%s: exit with status = %d\n", "nfs4_callback_recallslot", tmp___1); } else { } return (status); } } void ldv_main10_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_49515; ldv_49514: tmp = __VERIFIER_nondet_int(); switch (tmp) { default: ; goto ldv_49513; } ldv_49513: ; ldv_49515: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_49514; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_161(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_162(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_163(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_164(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_165(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_166(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_167(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } extern int strncmp(char const * , char const * , __kernel_size_t ) ; int ldv_mutex_trylock_178(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_176(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_179(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_181(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_175(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_177(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_180(struct mutex *ldv_func_arg1 ) ; extern unsigned long __get_free_pages(gfp_t , unsigned int ) ; extern void free_pages(unsigned long , unsigned int ) ; extern struct net *rpc_net_ns(struct rpc_clnt * ) ; extern size_t rpc_pton(struct net * , char const * , size_t const , struct sockaddr * , size_t const ) ; __inline static void rpc_set_port(struct sockaddr *sap , unsigned short const port ) { __u16 tmp ; __u16 tmp___0 ; { switch ((int )sap->sa_family) { case 2: tmp = __fswab16((int )port); ((struct sockaddr_in *)sap)->sin_port = tmp; goto ldv_44244; case 10: tmp___0 = __fswab16((int )port); ((struct sockaddr_in6 *)sap)->sin6_port = tmp___0; goto ldv_44244; } ldv_44244: ; return; } } extern u32 gss_svc_to_pseudoflavor(struct gss_api_mech * , u32 ) ; extern struct 
gss_api_mech *gss_mech_get_by_OID(struct xdr_netobj * ) ; extern void gss_mech_put(struct gss_api_mech * ) ; extern char *nfs_path(char ** , struct dentry * , char * , ssize_t , unsigned int ) ; extern struct vfsmount *nfs_do_submount(struct dentry * , struct nfs_fh * , struct nfs_fattr * , rpc_authflavor_t ) ; extern ssize_t nfs_dns_resolve_name(struct net * , char * , size_t , struct sockaddr * , size_t ) ; __inline static char *nfs4_pathname_string(struct nfs4_pathname const *pathname , char *buffer , ssize_t buflen ) { char *end ; int n ; struct nfs4_string const *component ; size_t __len ; void *__ret ; void *tmp ; { end = buffer + (unsigned long )buflen; end = end - 1; *end = 0; buflen = buflen - 1L; n = (int )pathname->ncomponents; goto ldv_48199; ldv_48198: component = (struct nfs4_string const *)(& pathname->components) + (unsigned long )n; buflen = buflen - (ssize_t )((unsigned int )component->len + 1U); if (buflen < 0L) { goto Elong; } else { } end = end + - ((unsigned long )component->len); __len = (size_t )component->len; __ret = memcpy((void *)end, (void const *)component->data, __len); end = end - 1; *end = 47; ldv_48199: n = n - 1; if (n >= 0) { goto ldv_48198; } else { } return (end); Elong: tmp = ERR_PTR(-36L); return ((char *)tmp); } } static char *nfs_path_component(char const *nfspath , char const *end ) { char *p ; { if ((int )((signed char )*nfspath) == 91) { p = strchr(nfspath, 93); if ((unsigned long )p != (unsigned long )((char *)0)) { p = p + 1; if ((unsigned long )((char const *)p) < (unsigned long )end) { if ((int )((signed char )*p) == 58) { return (p + 1UL); } else { } } else { } } else { } } else { p = strchr(nfspath, 58); if ((unsigned long )p != (unsigned long )((char *)0) && (unsigned long )((char const *)p) < (unsigned long )end) { return (p + 1UL); } else { } } return (0); } } static char *nfs4_path(struct dentry *dentry , char *buffer , ssize_t buflen ) { char *limit ; char *path ; char *tmp ; char *path_component ; char *tmp___0 ; long tmp___1 ; { tmp = nfs_path(& limit, dentry, buffer, buflen, 1U); path = tmp; tmp___1 = IS_ERR((void const *)path); if (tmp___1 == 0L) { tmp___0 = nfs_path_component((char const *)path, (char const *)limit); path_component = tmp___0; if ((unsigned long )path_component != (unsigned long )((char *)0)) { return (path_component); } else { } } else { } return (path); } } static int nfs4_validate_fspath(struct dentry *dentry , struct nfs4_fs_locations const *locations , char *page , char *page2 ) { char const *path ; char const *fs_path ; char *tmp ; long tmp___0 ; long tmp___1 ; char *tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; size_t tmp___6 ; int tmp___7 ; { tmp = nfs4_path(dentry, page, 4096L); path = (char const *)tmp; tmp___1 = IS_ERR((void const *)path); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)path); return ((int )tmp___0); } else { } tmp___2 = nfs4_pathname_string(& locations->fs_path, page2, 4096L); fs_path = (char const *)tmp___2; tmp___4 = IS_ERR((void const *)fs_path); if (tmp___4 != 0L) { tmp___3 = PTR_ERR((void const *)fs_path); return ((int )tmp___3); } else { } tmp___6 = strlen(fs_path); tmp___7 = strncmp(path, fs_path, tmp___6); if (tmp___7 != 0) { tmp___5 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___5 != 0L) { printk("\001d%s: path %s does not begin with fsroot %s\n", "nfs4_validate_fspath", path, fs_path); } else { } return (-2); } else { } return (0); } } static size_t nfs_parse_server_name(char *string , size_t len , struct sockaddr *sa , size_t salen , struct 
nfs_server *server ) { struct net *net ; struct net *tmp ; ssize_t ret ; size_t tmp___0 ; { tmp = rpc_net_ns(server->client); net = tmp; tmp___0 = rpc_pton(net, (char const *)string, len, sa, salen); ret = (ssize_t )tmp___0; if (ret == 0L) { ret = nfs_dns_resolve_name(net, string, len, sa, salen); if (ret < 0L) { ret = 0L; } else { } } else { } return ((size_t )ret); } } rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors ) { struct gss_api_mech *mech ; struct xdr_netobj oid ; int i ; rpc_authflavor_t pseudoflavor ; struct nfs4_secinfo_flavor *flavor ; { pseudoflavor = 1U; i = 0; goto ldv_48243; ldv_48242: flavor = (struct nfs4_secinfo_flavor *)(& flavors->flavors) + (unsigned long )i; if (flavor->flavor == 0U || flavor->flavor == 1U) { pseudoflavor = flavor->flavor; goto ldv_48240; } else if (flavor->flavor == 6U) { oid.len = flavor->gss.sec_oid4.len; oid.data = (u8 *)(& flavor->gss.sec_oid4.data); mech = gss_mech_get_by_OID(& oid); if ((unsigned long )mech == (unsigned long )((struct gss_api_mech *)0)) { goto ldv_48241; } else { } pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service); gss_mech_put(mech); goto ldv_48240; } else { } ldv_48241: i = i + 1; ldv_48243: ; if ((unsigned int )i < flavors->num_flavors) { goto ldv_48242; } else { } ldv_48240: ; return (pseudoflavor); } } static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode , struct qstr *name ) { struct page *page ; struct nfs4_secinfo_flavors *flavors ; rpc_authflavor_t flavor ; int err ; void *tmp ; { page = alloc_pages(208U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { return (4294967284U); } else { } tmp = lowmem_page_address((struct page const *)page); flavors = (struct nfs4_secinfo_flavors *)tmp; err = nfs4_proc_secinfo(inode, (struct qstr const *)name, flavors); if (err < 0) { flavor = (rpc_authflavor_t )err; goto out; } else { } flavor = nfs_find_best_sec(flavors); out: put_page(page); return (flavor); } } struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt , struct inode *inode , struct qstr *name ) { rpc_authflavor_t flavor ; void *tmp ; struct rpc_clnt *tmp___0 ; { flavor = nfs4_negotiate_security(inode, name); if ((int )flavor < 0) { tmp = ERR_PTR((long )((int )flavor)); return ((struct rpc_clnt *)tmp); } else { } tmp___0 = rpc_clone_client_set_auth(clnt, flavor); return (tmp___0); } } static struct vfsmount *try_location(struct nfs_clone_mount *mountdata , char *page , char *page2 , struct nfs4_fs_location const *location ) { size_t addr_bufsize ; struct vfsmount *mnt ; void *tmp ; char *mnt_path ; unsigned int maxbuflen ; unsigned int s ; void *tmp___0 ; long tmp___1 ; void *tmp___2 ; void *tmp___3 ; struct nfs4_string const *buf ; void *tmp___4 ; struct nfs_server *tmp___5 ; size_t __len ; void *__ret ; long tmp___6 ; { addr_bufsize = 128UL; tmp = ERR_PTR(-2L); mnt = (struct vfsmount *)tmp; mnt_path = nfs4_pathname_string(& location->rootpath, page2, 4096L); tmp___1 = IS_ERR((void const *)mnt_path); if (tmp___1 != 0L) { tmp___0 = ERR_CAST((void const *)mnt_path); return ((struct vfsmount *)tmp___0); } else { } mountdata->mnt_path = mnt_path; maxbuflen = (unsigned int )((long )(mnt_path + 0xffffffffffffffffUL)) - (unsigned int )((long )page2); tmp___2 = kmalloc(addr_bufsize, 208U); mountdata->addr = (struct sockaddr *)tmp___2; if ((unsigned long )mountdata->addr == (unsigned long )((struct sockaddr *)0)) { tmp___3 = ERR_PTR(-12L); return ((struct vfsmount *)tmp___3); } else { } s = 0U; goto ldv_48277; ldv_48276: buf = (struct 
nfs4_string const *)(& location->servers) + (unsigned long )s; if ((unsigned int )buf->len == 0U || (unsigned int )buf->len >= maxbuflen) { goto ldv_48271; } else { } tmp___4 = memchr((void const *)buf->data, 37, (__kernel_size_t )buf->len); if ((unsigned long )tmp___4 != (unsigned long )((void *)0)) { goto ldv_48271; } else { } tmp___5 = NFS_SB(mountdata->sb); mountdata->addrlen = nfs_parse_server_name(buf->data, (size_t )buf->len, mountdata->addr, addr_bufsize, tmp___5); if (mountdata->addrlen == 0UL) { goto ldv_48271; } else { } rpc_set_port(mountdata->addr, 2049); __len = (size_t )buf->len; __ret = memcpy((void *)page2, (void const *)buf->data, __len); *(page2 + (unsigned long )buf->len) = 0; mountdata->hostname = page2; snprintf(page, 4096UL, "%s:%s", mountdata->hostname, mountdata->mnt_path); mnt = vfs_kern_mount(& nfs4_referral_fs_type, 0, (char const *)page, (void *)mountdata); tmp___6 = IS_ERR((void const *)mnt); if (tmp___6 == 0L) { goto ldv_48275; } else { } ldv_48271: s = s + 1U; ldv_48277: ; if ((unsigned int )location->nservers > s) { goto ldv_48276; } else { } ldv_48275: kfree((void const *)mountdata->addr); return (mnt); } } static struct vfsmount *nfs_follow_referral(struct dentry *dentry , struct nfs4_fs_locations const *locations ) { struct vfsmount *mnt ; void *tmp ; struct nfs_clone_mount mountdata ; struct nfs_server *tmp___0 ; char *page ; char *page2 ; int loc ; int error ; long tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; void *tmp___4 ; struct nfs4_fs_location const *location ; long tmp___5 ; long tmp___6 ; { tmp = ERR_PTR(-2L); mnt = (struct vfsmount *)tmp; tmp___0 = NFS_SB((struct super_block const *)dentry->d_sb); mountdata.sb = (struct super_block const *)dentry->d_sb; mountdata.dentry = (struct dentry const *)dentry; mountdata.fh = 0; mountdata.fattr = 0; mountdata.hostname = 0; mountdata.mnt_path = 0; mountdata.addr = 0; mountdata.addrlen = 0UL; mountdata.authflavor = ((tmp___0->client)->cl_auth)->au_flavor; page = 0; page2 = 0; if ((unsigned long )locations == (unsigned long )((struct nfs4_fs_locations const *)0) || (int )locations->nlocations <= 0) { goto out; } else { } tmp___1 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___1 != 0L) { printk("\001d%s: referral at %s/%s\n", "nfs_follow_referral", (dentry->d_parent)->d_name.name, dentry->d_name.name); } else { } tmp___2 = __get_free_pages(131280U, 0U); page = (char *)tmp___2; if ((unsigned long )page == (unsigned long )((char *)0)) { goto out; } else { } tmp___3 = __get_free_pages(131280U, 0U); page2 = (char *)tmp___3; if ((unsigned long )page2 == (unsigned long )((char *)0)) { goto out; } else { } error = nfs4_validate_fspath(dentry, locations, page, page2); if (error < 0) { tmp___4 = ERR_PTR((long )error); mnt = (struct vfsmount *)tmp___4; goto out; } else { } loc = 0; goto ldv_48294; ldv_48293: location = (struct nfs4_fs_location const *)(& locations->locations) + (unsigned long )loc; if (((unsigned long )location == (unsigned long )((struct nfs4_fs_location const *)0) || (unsigned int )location->nservers == 0U) || (unsigned int )location->rootpath.ncomponents == 0U) { goto ldv_48291; } else { } mnt = try_location(& mountdata, page, page2, location); tmp___5 = IS_ERR((void const *)mnt); if (tmp___5 == 0L) { goto ldv_48292; } else { } ldv_48291: loc = loc + 1; ldv_48294: ; if ((int )locations->nlocations > loc) { goto ldv_48293; } else { } ldv_48292: ; out: free_pages((unsigned long )page, 0U); free_pages((unsigned long )page2, 0U); tmp___6 = ldv__builtin_expect((long 
)((int )nfs_debug) & 1L, 0L); if (tmp___6 != 0L) { printk("\001d%s: done\n", "nfs_follow_referral"); } else { } return (mnt); } } static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client , struct dentry *dentry ) { struct vfsmount *mnt ; void *tmp ; struct dentry *parent ; struct nfs4_fs_locations *fs_locations ; struct page *page ; int err ; long tmp___0 ; void *tmp___1 ; void *tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = ERR_PTR(-12L); mnt = (struct vfsmount *)tmp; fs_locations = 0; tmp___0 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___0 != 0L) { printk("\001d%s: enter\n", "nfs_do_refmount"); } else { } page = alloc_pages(208U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { goto out; } else { } tmp___1 = kmalloc(92112UL, 208U); fs_locations = (struct nfs4_fs_locations *)tmp___1; if ((unsigned long )fs_locations == (unsigned long )((struct nfs4_fs_locations *)0)) { goto out_free; } else { } tmp___2 = ERR_PTR(-2L); mnt = (struct vfsmount *)tmp___2; parent = dget_parent(dentry); tmp___3 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___3 != 0L) { printk("\001d%s: getting locations for %s/%s\n", "nfs_do_refmount", parent->d_name.name, dentry->d_name.name); } else { } err = nfs4_proc_fs_locations(client, parent->d_inode, (struct qstr const *)(& dentry->d_name), fs_locations, page); dput(parent); if ((err != 0 || fs_locations->nlocations <= 0) || fs_locations->fs_path.ncomponents == 0U) { goto out_free; } else { } mnt = nfs_follow_referral(dentry, (struct nfs4_fs_locations const *)fs_locations); out_free: __free_pages(page, 0U); kfree((void const *)fs_locations); out: tmp___4 = ldv__builtin_expect((long )((int )nfs_debug) & 1L, 0L); if (tmp___4 != 0L) { printk("\001d%s: done\n", "nfs_do_refmount"); } else { } return (mnt); } } struct vfsmount *nfs4_submount(struct nfs_server *server , struct dentry *dentry , struct nfs_fh *fh , struct nfs_fattr *fattr ) { struct dentry *parent ; struct dentry *tmp ; struct rpc_clnt *client ; struct vfsmount *mnt ; void *tmp___0 ; long tmp___1 ; { tmp = dget_parent(dentry); parent = tmp; client = nfs4_proc_lookup_mountpoint(parent->d_inode, & dentry->d_name, fh, fattr); dput(parent); tmp___1 = IS_ERR((void const *)client); if (tmp___1 != 0L) { tmp___0 = ERR_CAST((void const *)client); return ((struct vfsmount *)tmp___0); } else { } if ((fattr->valid & 1048576U) != 0U) { mnt = nfs_do_refmount(client, dentry); } else { mnt = nfs_do_submount(dentry, fh, fattr, (client->cl_auth)->au_flavor); } rpc_shutdown_client(client); return (mnt); } } void ldv_main11_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_48335; ldv_48334: tmp = __VERIFIER_nondet_int(); switch (tmp) { default: ; goto ldv_48333; } ldv_48333: ; ldv_48335: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_48334; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_175(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_176(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_177(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_178(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = 
ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_179(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_180(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_181(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_192(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_190(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_193(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_195(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_189(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_191(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_194(struct mutex *ldv_func_arg1 ) ; int nfs4_get_rootfh(struct nfs_server *server , struct nfs_fh *mntfh ) ; int nfs4_get_rootfh(struct nfs_server *server , struct nfs_fh *mntfh ) { struct nfs_fsinfo fsinfo ; int ret ; long tmp ; long tmp___0 ; size_t __len ; void *__ret ; long tmp___1 ; { ret = -12; tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_get_rootfh()\n"); } else { } fsinfo.fattr = nfs_alloc_fattr(); if ((unsigned long )fsinfo.fattr == (unsigned long )((struct nfs_fattr *)0)) { goto out; } else { } ret = nfs4_proc_get_rootfh(server, mntfh, & fsinfo); if (ret < 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dnfs4_get_rootfh: getroot error = %d\n", - ret); } else { } goto out; } else { } if (((fsinfo.fattr)->valid & 1U) == 0U || ((int )(fsinfo.fattr)->mode & 61440) != 16384) { printk("\vnfs4_get_rootfh: getroot encountered non-directory\n"); ret = -20; goto out; } else { } if (((fsinfo.fattr)->valid & 1048576U) != 0U) { printk("\vnfs4_get_rootfh: getroot obtained referral\n"); ret = -66; goto out; } else { } __len = 16UL; if (__len > 63UL) { __ret = memcpy((void *)(& server->fsid), (void const *)(& (fsinfo.fattr)->fsid), __len); } else { __ret = memcpy((void *)(& server->fsid), (void const *)(& (fsinfo.fattr)->fsid), __len); } out: nfs_free_fattr((struct nfs_fattr const *)fsinfo.fattr); tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- nfs4_get_rootfh() = %d\n", ret); } else { } return (ret); } } void ldv_mutex_lock_189(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_190(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_191(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_192(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_193(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_194(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_195(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); 
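/* The LDV lock-state model for cred_guard_mutex has been updated above; now
   forward to the real mutex_unlock(), mirroring the other ldv_mutex_* wrappers
   generated in this file. */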
mutex_unlock(ldv_func_arg1); return; } } __inline static void __set_bit(int nr , unsigned long volatile *addr ) { { __asm__ volatile ("bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static int __test_and_clear_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile ("btr %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr)); return (oldbit); } } extern size_t strlcpy(char * , char const * , size_t ) ; extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; int ldv_mutex_trylock_206(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_204(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_207(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_209(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_203(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_205(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_208(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___6(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6252; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6252; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6252; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6252; default: __bad_percpu_size(); } ldv_6252: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void __rcu_read_lock___6(void) { struct thread_info *tmp ; { tmp = current_thread_info___6(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___6(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___6(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___6(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___6(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___6(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___6(); return; } } extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; extern void delayed_work_timer_fn(unsigned long ) ; extern void __init_work(struct work_struct * , int ) ; extern void *idr_find(struct idr * , int ) ; extern int idr_pre_get(struct idr * , gfp_t ) ; extern int idr_get_new(struct idr * , void * , int * ) ; extern void *idr_replace(struct idr * , void * , int ) ; extern int rpc_protocol(struct rpc_clnt * ) ; extern int rpc_localaddr(struct rpc_clnt * , struct sockaddr * , size_t ) ; extern size_t rpc_ntop(struct sockaddr const * , char * , size_t const ) ; extern int xprt_setup_backchannel(struct rpc_xprt * , unsigned int ) ; __inline static bool is_ds_only_client(struct nfs_client *clp ) { { return ((clp->cl_exchange_flags & 458752U) == 262144U); } } extern bool nfs4_disable_idmapping ; extern struct nfs_client *nfs_alloc_client(struct nfs_client_initdata const * ) ; extern int nfs_create_rpc_client(struct nfs_client * , struct rpc_timeout const * , rpc_authflavor_t ) ; extern struct nfs_client *nfs_get_client(struct nfs_client_initdata const * , struct rpc_timeout const * , char const * , rpc_authflavor_t ) ; extern int nfs_probe_fsinfo(struct nfs_server * , struct nfs_fh * , struct nfs_fattr * ) ; extern void nfs_server_insert_lists(struct nfs_server * ) ; extern void nfs_init_timeout_values(struct rpc_timeout * , int , unsigned int , unsigned int ) ; extern int nfs_init_server_rpcclient(struct nfs_server * , struct rpc_timeout const * , rpc_authflavor_t ) ; extern struct nfs_server *nfs_alloc_server(void) ; extern void nfs_server_copy_userdata(struct nfs_server * , struct nfs_server * ) ; extern void nfs_free_client(struct nfs_client * ) ; extern void nfs_free_server(struct nfs_server * ) ; struct nfs_client *nfs4_set_ds_client(struct nfs_client *mds_clp , struct sockaddr const *ds_addr , int ds_addrlen , int ds_proto , unsigned int ds_timeo , unsigned int ds_retrans ) ; extern int nfs_sockaddr_match_ipaddr(struct sockaddr const * , struct sockaddr const * ) ; __inline static unsigned long nfs_block_bits(unsigned long bsize , unsigned char *nrbitsp ) { unsigned char nrbits ; { if (((bsize - 1UL) & bsize) != 0UL || (unsigned long )nrbitsp != (unsigned long )((unsigned char *)0)) { nrbits = 31U; goto ldv_49591; ldv_49590: nrbits = (unsigned char )((int )nrbits - 1); ldv_49591: ; if ((unsigned int )nrbits != 0U && ((unsigned long )(1 << (int )nrbits) & bsize) == 0UL) { goto ldv_49590; } else { } bsize = (unsigned long )(1 << (int )nrbits); if ((unsigned long )nrbitsp != (unsigned long )((unsigned char *)0)) { *nrbitsp = nrbits; } else { } } else { } return (bsize); } } __inline static unsigned long nfs_block_size(unsigned long bsize , unsigned char *nrbitsp ) { unsigned long tmp ; { if (bsize <= 1023UL) { bsize = 4096UL; } else if (bsize > 1048575UL) { bsize = 1048576UL; } else { } tmp = nfs_block_bits(bsize, nrbitsp); return (tmp); } } struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp ) ; void nfs4_destroy_session(struct nfs4_session *session ) ; int nfs4_init_session(struct nfs_server *server ) ; void unset_pnfs_layoutdriver(struct nfs_server *nfss ) ; __inline static void *net_generic___4(struct net const *net , int id ) { struct net_generic *ng ; void 
*ptr ; struct net_generic *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { rcu_read_lock___6(); _________p1 = *((struct net_generic * const volatile *)(& net->gen)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("include/net/netns/generic.h", 40, "suspicious rcu_dereference_check() usage"); } else { } } else { } ng = _________p1; tmp___1 = ldv__builtin_expect(id == 0, 0L); if (tmp___1 != 0L) { goto _L; } else { tmp___2 = ldv__builtin_expect((unsigned int )id > ng->len, 0L); if (tmp___2 != 0L) { _L: /* CIL Label */ __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (41), "i" (12UL)); ldv_50336: ; goto ldv_50336; } else { } } ptr = ng->ptr[id + -1]; rcu_read_unlock___6(); tmp___3 = ldv__builtin_expect((unsigned long )ptr == (unsigned long )((void *)0), 0L); if (tmp___3 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/net/netns/generic.h"), "i" (45), "i" (12UL)); ldv_50337: ; goto ldv_50337; } else { } return (ptr); } } static int nfs_get_cb_ident_idr(struct nfs_client *clp , int minorversion ) { int ret ; struct nfs_net *nn ; void *tmp ; int tmp___0 ; { ret = 0; tmp = net_generic___4((struct net const *)clp->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; if ((unsigned int )(clp->rpc_ops)->version != 4U || minorversion != 0) { return (ret); } else { } retry: tmp___0 = idr_pre_get(& nn->cb_ident_idr, 208U); if (tmp___0 == 0) { return (-12); } else { } spin_lock(& nn->nfs_client_lock); ret = idr_get_new(& nn->cb_ident_idr, (void *)clp, (int *)(& clp->cl_cb_ident)); spin_unlock(& nn->nfs_client_lock); if (ret == -11) { goto retry; } else { } return (ret); } } static void nfs4_shutdown_session(struct nfs_client *clp ) { int tmp ; { tmp = nfs4_has_session((struct nfs_client const *)clp); if (tmp != 0) { nfs4_destroy_session(clp->cl_session); nfs4_destroy_clientid(clp); } else { } return; } } struct nfs_client *nfs4_alloc_client(struct nfs_client_initdata const *cl_init ) { int err ; struct nfs_client *clp ; struct nfs_client *tmp ; long tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___1 ; void *tmp___1 ; { tmp = nfs_alloc_client(cl_init); clp = tmp; tmp___0 = IS_ERR((void const *)clp); if (tmp___0 != 0L) { return (clp); } else { } err = nfs_get_cb_ident_idr(clp, (int )cl_init->minorversion); if (err != 0) { goto error; } else { } spinlock_check(& clp->cl_lock); __raw_spin_lock_init(& clp->cl_lock.ldv_5961.rlock, "&(&clp->cl_lock)->rlock", & __key); __init_work(& clp->cl_renewd.work, 0); __constr_expr_0.counter = 4195328L; clp->cl_renewd.work.data = __constr_expr_0; lockdep_init_map(& clp->cl_renewd.work.lockdep_map, "(&(&clp->cl_renewd)->work)", & __key___0, 0); INIT_LIST_HEAD(& clp->cl_renewd.work.entry); clp->cl_renewd.work.func = & nfs4_renew_state; init_timer_key(& clp->cl_renewd.timer, 2U, "(&(&clp->cl_renewd)->timer)", & __key___1); clp->cl_renewd.timer.function = & delayed_work_timer_fn; clp->cl_renewd.timer.data = (unsigned long )(& clp->cl_renewd); rpc_init_wait_queue(& clp->cl_rpcwaitq, "NFS client"); clp->cl_state = 4UL; clp->cl_minorversion = cl_init->minorversion; clp->cl_mvops = 
nfs_v4_minor_ops[cl_init->minorversion]; return (clp); error: nfs_free_client(clp); tmp___1 = ERR_PTR((long )err); return ((struct nfs_client *)tmp___1); } } static void nfs4_destroy_callback(struct nfs_client *clp ) { int tmp ; { tmp = __test_and_clear_bit(1, (unsigned long volatile *)(& clp->cl_res_state)); if (tmp != 0) { nfs_callback_down((int )(clp->cl_mvops)->minor_version, clp->cl_net); } else { } return; } } static void nfs4_shutdown_client(struct nfs_client *clp ) { int tmp ; int tmp___0 ; { tmp = __test_and_clear_bit(3, (unsigned long volatile *)(& clp->cl_res_state)); if (tmp != 0) { nfs4_kill_renewd(clp); } else { } nfs4_shutdown_session(clp); nfs4_destroy_callback(clp); tmp___0 = __test_and_clear_bit(2, (unsigned long volatile *)(& clp->cl_res_state)); if (tmp___0 != 0) { nfs_idmap_delete(clp); } else { } rpc_destroy_wait_queue(& clp->cl_rpcwaitq); kfree((void const *)clp->cl_serverowner); kfree((void const *)clp->cl_serverscope); kfree((void const *)clp->cl_implid); return; } } void nfs4_free_client(struct nfs_client *clp ) { { nfs4_shutdown_client(clp); nfs_free_client(clp); return; } } static int nfs4_init_callback(struct nfs_client *clp ) { int error ; struct rpc_xprt *xprt ; struct rpc_xprt *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; { if ((unsigned int )(clp->rpc_ops)->version == 4U) { _________p1 = *((struct rpc_xprt * volatile *)(& (clp->cl_rpcclient)->cl_xprt)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } xprt = _________p1; tmp___0 = nfs4_has_session((struct nfs_client const *)clp); if (tmp___0 != 0) { error = xprt_setup_backchannel(xprt, 1U); if (error < 0) { return (error); } else { } } else { } error = nfs_callback_up((clp->cl_mvops)->minor_version, xprt); if (error < 0) { tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: failed to start callback. 
Error = %d\n", "nfs4_init_callback", error); } else { } return (error); } else { } __set_bit(1, (unsigned long volatile *)(& clp->cl_res_state)); } else { } return (0); } } static int nfs4_init_client_minor_version(struct nfs_client *clp ) { struct nfs4_session *session ; int tmp ; { if ((unsigned int )(clp->cl_mvops)->minor_version != 0U) { session = 0; session = nfs4_alloc_session(clp); if ((unsigned long )session == (unsigned long )((struct nfs4_session *)0)) { return (-12); } else { } clp->cl_session = session; nfs_mark_client_ready(clp, 2); } else { } tmp = nfs4_init_callback(clp); return (tmp); } } struct nfs_client *nfs4_init_client(struct nfs_client *clp , struct rpc_timeout const *timeparms , char const *ip_addr , rpc_authflavor_t authflavour ) { char buf[49U] ; struct nfs_client *old ; int error ; long tmp ; struct __kernel_sockaddr_storage cb_addr ; struct sockaddr *sap ; size_t tmp___0 ; long tmp___1 ; int tmp___2 ; long tmp___3 ; void *tmp___4 ; { if (clp->cl_cons_state == 0) { tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d<-- nfs4_init_client() = 0 [already %p]\n", clp); } else { } return (clp); } else { } clp->rpc_ops = & nfs_v4_clientops; __set_bit(1, (unsigned long volatile *)(& clp->cl_flags)); error = nfs_create_rpc_client(clp, timeparms, authflavour); if (error < 0) { goto error; } else { } if ((unsigned long )ip_addr == (unsigned long )((char const *)0)) { sap = (struct sockaddr *)(& cb_addr); error = rpc_localaddr(clp->cl_rpcclient, sap, 128UL); if (error < 0) { goto error; } else { } tmp___0 = rpc_ntop((struct sockaddr const *)sap, (char *)(& buf), 49UL); error = (int )tmp___0; if (error < 0) { goto error; } else { } ip_addr = (char const *)(& buf); } else { } strlcpy((char *)(& clp->cl_ipaddr), ip_addr, 48UL); error = nfs_idmap_new(clp); if (error < 0) { tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: failed to create idmapper. 
Error = %d\n", "nfs4_init_client", error); } else { } goto error; } else { } __set_bit(2, (unsigned long volatile *)(& clp->cl_res_state)); error = nfs4_init_client_minor_version(clp); if (error < 0) { goto error; } else { } tmp___2 = nfs4_has_session((struct nfs_client const *)clp); if (tmp___2 == 0) { nfs_mark_client_ready(clp, 0); } else { } error = nfs4_discover_server_trunking(clp, & old); if (error < 0) { goto error; } else { } if ((unsigned long )clp != (unsigned long )old) { clp->cl_preserve_clid = 1; nfs_put_client(clp); clp = old; atomic_inc(& clp->cl_count); } else { } return (clp); error: nfs_mark_client_ready(clp, error); nfs_put_client(clp); tmp___3 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d<-- nfs4_init_client() = xerror %d\n", error); } else { } tmp___4 = ERR_PTR((long )error); return ((struct nfs_client *)tmp___4); } } static void nfs4_swap_callback_idents(struct nfs_client *keep , struct nfs_client *drop ) { struct nfs_net *nn ; void *tmp ; unsigned int save ; long tmp___0 ; { tmp = net_generic___4((struct net const *)keep->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; save = keep->cl_cb_ident; if (keep->cl_cb_ident == drop->cl_cb_ident) { return; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: keeping callback ident %u and dropping ident %u\n", "nfs4_swap_callback_idents", keep->cl_cb_ident, drop->cl_cb_ident); } else { } spin_lock(& nn->nfs_client_lock); idr_replace(& nn->cb_ident_idr, (void *)keep, (int )drop->cl_cb_ident); keep->cl_cb_ident = drop->cl_cb_ident; idr_replace(& nn->cb_ident_idr, (void *)drop, (int )save); drop->cl_cb_ident = save; spin_unlock(& nn->nfs_client_lock); return; } } int nfs40_walk_client_list(struct nfs_client *new , struct nfs_client **result , struct rpc_cred *cred ) { struct nfs_net *nn ; void *tmp ; struct nfs_client *pos ; struct nfs_client *n ; struct nfs_client *prev ; struct nfs4_setclientid_res clid ; int status ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___0 ; long tmp___1 ; long tmp___2 ; struct list_head const *__mptr___1 ; { tmp = net_generic___4((struct net const *)new->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; prev = 0; clid.clientid = new->cl_clientid; clid.confirm = new->cl_confirm; spin_lock(& nn->nfs_client_lock); __mptr = (struct list_head const *)nn->nfs_client_list.next; pos = (struct nfs_client *)__mptr + 0xffffffffffffff50UL; __mptr___0 = (struct list_head const *)pos->cl_share_link.next; n = (struct nfs_client *)__mptr___0 + 0xffffffffffffff50UL; goto ldv_50439; ldv_50438: ; if (pos->cl_cons_state < 0) { goto ldv_50436; } else { } if ((unsigned long )pos->rpc_ops != (unsigned long )new->rpc_ops) { goto ldv_50436; } else { } if (pos->cl_proto != new->cl_proto) { goto ldv_50436; } else { } if (pos->cl_minorversion != new->cl_minorversion) { goto ldv_50436; } else { } if (pos->cl_clientid != new->cl_clientid) { goto ldv_50436; } else { } atomic_inc(& pos->cl_count); spin_unlock(& nn->nfs_client_lock); if ((unsigned long )prev != (unsigned long )((struct nfs_client *)0)) { nfs_put_client(prev); } else { } status = nfs4_proc_setclientid_confirm(pos, & clid, cred); if (status == 0) { nfs4_swap_callback_idents(pos, new); nfs_put_client(pos); *result = pos; tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { tmp___0 = atomic_read((atomic_t const *)(& pos->cl_count)); printk("\001dNFS: <-- %s using nfs_client = %p ({%d})\n", 
"nfs40_walk_client_list", pos, tmp___0); } else { } return (0); } else { } if (status != -10022) { nfs_put_client(pos); tmp___2 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001dNFS: <-- %s status = %d, no result\n", "nfs40_walk_client_list", status); } else { } return (status); } else { } spin_lock(& nn->nfs_client_lock); prev = pos; ldv_50436: pos = n; __mptr___1 = (struct list_head const *)n->cl_share_link.next; n = (struct nfs_client *)__mptr___1 + 0xffffffffffffff50UL; ldv_50439: ; if ((unsigned long )(& pos->cl_share_link) != (unsigned long )(& nn->nfs_client_list)) { goto ldv_50438; } else { } if ((unsigned long )prev != (unsigned long )((struct nfs_client *)0)) { nfs_put_client(prev); } else { } spin_unlock(& nn->nfs_client_lock); printk("\vNFS: %s Error: no matching nfs_client found\n", "nfs40_walk_client_list"); return (-10022); } } static bool nfs4_match_clientids(struct nfs_client *a , struct nfs_client *b ) { long tmp ; long tmp___0 ; { if (a->cl_clientid != b->cl_clientid) { tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS: --> %s client ID %llx does not match %llx\n", "nfs4_match_clientids", a->cl_clientid, b->cl_clientid); } else { } return (0); } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS: --> %s client ID %llx matches %llx\n", "nfs4_match_clientids", a->cl_clientid, b->cl_clientid); } else { } return (1); } } static bool nfs4_match_serverowners(struct nfs_client *a , struct nfs_client *b ) { struct nfs41_server_owner *o1 ; struct nfs41_server_owner *o2 ; long tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { o1 = a->cl_serverowner; o2 = b->cl_serverowner; if (o1->minor_id != o2->minor_id) { tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001dNFS: --> %s server owner minor IDs do not match\n", "nfs4_match_serverowners"); } else { } return (0); } else { } if (o1->major_id_sz != o2->major_id_sz) { goto out_major_mismatch; } else { } tmp___0 = memcmp((void const *)(& o1->major_id), (void const *)(& o2->major_id), (size_t )o1->major_id_sz); if (tmp___0 != 0) { goto out_major_mismatch; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: --> %s server owners match\n", "nfs4_match_serverowners"); } else { } return (1); out_major_mismatch: tmp___2 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001dNFS: --> %s server owner major IDs do not match\n", "nfs4_match_serverowners"); } else { } return (0); } } int nfs41_walk_client_list(struct nfs_client *new , struct nfs_client **result , struct rpc_cred *cred ) { struct nfs_net *nn ; void *tmp ; struct nfs_client *pos ; struct nfs_client *n ; struct nfs_client *prev ; int error ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; long tmp___5 ; struct list_head const *__mptr___1 ; { tmp = net_generic___4((struct net const *)new->cl_net, nfs_net_id); nn = (struct nfs_net *)tmp; prev = 0; spin_lock(& nn->nfs_client_lock); __mptr = (struct list_head const *)nn->nfs_client_list.next; pos = (struct nfs_client *)__mptr + 0xffffffffffffff50UL; __mptr___0 = (struct list_head const *)pos->cl_share_link.next; n = (struct nfs_client *)__mptr___0 + 0xffffffffffffff50UL; goto ldv_50473; ldv_50472: ; if (pos->cl_cons_state < 0) { atomic_inc(& pos->cl_count); spin_unlock(& 
nn->nfs_client_lock); if ((unsigned long )prev != (unsigned long )((struct nfs_client *)0)) { nfs_put_client(prev); } else { } prev = pos; error = nfs_wait_client_init_complete((struct nfs_client const *)pos); if (error < 0) { nfs_put_client(pos); spin_lock(& nn->nfs_client_lock); goto ldv_50470; } else { } spin_lock(& nn->nfs_client_lock); } else { } if ((unsigned long )pos->rpc_ops != (unsigned long )new->rpc_ops) { goto ldv_50470; } else { } if (pos->cl_proto != new->cl_proto) { goto ldv_50470; } else { } if (pos->cl_minorversion != new->cl_minorversion) { goto ldv_50470; } else { } tmp___0 = nfs4_match_clientids(pos, new); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto ldv_50470; } else { } tmp___2 = nfs4_match_serverowners(pos, new); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { goto ldv_50470; } else { } spin_unlock(& nn->nfs_client_lock); tmp___5 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___5 != 0L) { tmp___4 = atomic_read((atomic_t const *)(& pos->cl_count)); printk("\001dNFS: <-- %s using nfs_client = %p ({%d})\n", "nfs41_walk_client_list", pos, tmp___4); } else { } *result = pos; return (0); ldv_50470: pos = n; __mptr___1 = (struct list_head const *)n->cl_share_link.next; n = (struct nfs_client *)__mptr___1 + 0xffffffffffffff50UL; ldv_50473: ; if ((unsigned long )(& pos->cl_share_link) != (unsigned long )(& nn->nfs_client_list)) { goto ldv_50472; } else { } spin_unlock(& nn->nfs_client_lock); printk("\vNFS: %s Error: no matching nfs_client found\n", "nfs41_walk_client_list"); return (-10022); } } static void nfs4_destroy_server(struct nfs_server *server ) { { nfs_server_return_all_delegations(server); unset_pnfs_layoutdriver(server); nfs4_purge_state_owners(server); return; } } struct nfs_client *nfs4_find_client_ident(struct net *net , int cb_ident ) { struct nfs_client *clp ; struct nfs_net *nn ; void *tmp ; void *tmp___0 ; { tmp = net_generic___4((struct net const *)net, nfs_net_id); nn = (struct nfs_net *)tmp; spin_lock(& nn->nfs_client_lock); tmp___0 = idr_find(& nn->cb_ident_idr, cb_ident); clp = (struct nfs_client *)tmp___0; if ((unsigned long )clp != (unsigned long )((struct nfs_client *)0)) { atomic_inc(& clp->cl_count); } else { } spin_unlock(& nn->nfs_client_lock); return (clp); } } static bool nfs4_cb_match_client(struct sockaddr const *addr , struct nfs_client *clp , u32 minorversion ) { struct sockaddr *clap ; int tmp ; { clap = (struct sockaddr *)(& clp->cl_addr); if (clp->cl_cons_state != 0 && clp->cl_cons_state != 2) { return (0); } else { } __asm__ volatile ("": : : "memory"); if ((unsigned int )(clp->rpc_ops)->version != 4U || clp->cl_minorversion != minorversion) { return (0); } else { } tmp = nfs_sockaddr_match_ipaddr(addr, (struct sockaddr const *)clap); if (tmp == 0) { return (0); } else { } return (1); } } struct nfs_client *nfs4_find_client_sessionid(struct net *net , struct sockaddr const *addr , struct nfs4_sessionid *sid ) { struct nfs_client *clp ; struct nfs_net *nn ; void *tmp ; struct list_head const *__mptr ; bool tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct list_head const *__mptr___0 ; { tmp = net_generic___4((struct net const *)net, nfs_net_id); nn = (struct nfs_net *)tmp; spin_lock(& nn->nfs_client_lock); __mptr = (struct list_head const *)nn->nfs_client_list.next; clp = (struct nfs_client *)__mptr + 0xffffffffffffff50UL; goto ldv_50503; ldv_50502: tmp___0 = nfs4_cb_match_client(addr, clp, 1U); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { goto 
ldv_50501; } else { } tmp___2 = nfs4_has_session((struct nfs_client const *)clp); if (tmp___2 == 0) { goto ldv_50501; } else { } tmp___3 = memcmp((void const *)(& (clp->cl_session)->sess_id.data), (void const *)(& sid->data), 16UL); if (tmp___3 != 0) { goto ldv_50501; } else { } atomic_inc(& clp->cl_count); spin_unlock(& nn->nfs_client_lock); return (clp); ldv_50501: __mptr___0 = (struct list_head const *)clp->cl_share_link.next; clp = (struct nfs_client *)__mptr___0 + 0xffffffffffffff50UL; ldv_50503: ; if ((unsigned long )(& clp->cl_share_link) != (unsigned long )(& nn->nfs_client_list)) { goto ldv_50502; } else { } spin_unlock(& nn->nfs_client_lock); return (0); } } static int nfs4_set_client(struct nfs_server *server , char const *hostname , struct sockaddr const *addr , size_t const addrlen , char const *ip_addr , rpc_authflavor_t authflavour , int proto , struct rpc_timeout const *timeparms , u32 minorversion , struct net *net ) { struct nfs_client_initdata cl_init ; struct nfs_client *clp ; int error ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; { cl_init.init_flags = 0UL; cl_init.hostname = hostname; cl_init.addr = addr; cl_init.addrlen = addrlen; cl_init.nfs_mod = & nfs_v4; cl_init.proto = proto; cl_init.minorversion = minorversion; cl_init.net = net; tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_set_client()\n"); } else { } if ((server->flags & 262144) != 0) { set_bit(0U, (unsigned long volatile *)(& cl_init.init_flags)); } else { } clp = nfs_get_client((struct nfs_client_initdata const *)(& cl_init), timeparms, ip_addr, authflavour); tmp___1 = IS_ERR((void const *)clp); if (tmp___1 != 0L) { tmp___0 = PTR_ERR((void const *)clp); error = (int )tmp___0; goto error; } else { } set_bit(5U, (unsigned long volatile *)(& clp->cl_res_state)); server->nfs_client = clp; tmp___2 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- nfs4_set_client() = 0 [new %p]\n", clp); } else { } return (0); error: tmp___3 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d<-- nfs4_set_client() = xerror %d\n", error); } else { } return (error); } } struct nfs_client *nfs4_set_ds_client(struct nfs_client *mds_clp , struct sockaddr const *ds_addr , int ds_addrlen , int ds_proto , unsigned int ds_timeo , unsigned int ds_retrans ) { struct nfs_client_initdata cl_init ; struct rpc_timeout ds_timeout ; struct nfs_client *clp ; long tmp ; { cl_init.init_flags = 0UL; cl_init.hostname = 0; cl_init.addr = ds_addr; cl_init.addrlen = (unsigned long )ds_addrlen; cl_init.nfs_mod = & nfs_v4; cl_init.proto = ds_proto; cl_init.minorversion = mds_clp->cl_minorversion; cl_init.net = mds_clp->cl_net; nfs_init_timeout_values(& ds_timeout, ds_proto, ds_timeo, ds_retrans); clp = nfs_get_client((struct nfs_client_initdata const *)(& cl_init), (struct rpc_timeout const *)(& ds_timeout), (char const *)(& mds_clp->cl_ipaddr), ((mds_clp->cl_rpcclient)->cl_auth)->au_flavor); tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d<-- %s %p\n", "nfs4_set_ds_client", clp); } else { } return (clp); } } static void nfs4_session_set_rwsize(struct nfs_server *server ) { struct nfs4_session *sess ; u32 server_resp_sz ; u32 server_rqst_sz ; int tmp ; { tmp = nfs4_has_session((struct nfs_client const *)server->nfs_client); if (tmp == 0) { return; } else { } sess = (server->nfs_client)->cl_session; server_resp_sz = sess->fc_attrs.max_resp_sz - (u32 
)nfs41_maxread_overhead; server_rqst_sz = sess->fc_attrs.max_rqst_sz - (u32 )nfs41_maxwrite_overhead; if (server->rsize > server_resp_sz) { server->rsize = server_resp_sz; } else { } if (server->wsize > server_rqst_sz) { server->wsize = server_rqst_sz; } else { } return; } } static int nfs4_server_common_setup(struct nfs_server *server , struct nfs_fh *mntfh ) { struct nfs_fattr *fattr ; int error ; bool tmp ; long tmp___0 ; long tmp___1 ; { tmp = is_ds_only_client(server->nfs_client); if ((int )tmp) { return (-93); } else { } fattr = nfs_alloc_fattr(); if ((unsigned long )fattr == (unsigned long )((struct nfs_fattr *)0)) { return (-12); } else { } error = nfs4_init_session(server); if (error < 0) { goto out; } else { } error = nfs4_get_rootfh(server, mntfh); if (error < 0) { goto out; } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dServer FSID: %llx:%llx\n", server->fsid.major, server->fsid.minor); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dMount FH: %d\n", (int )mntfh->size); } else { } nfs4_session_set_rwsize(server); error = nfs_probe_fsinfo(server, mntfh, fattr); if (error < 0) { goto out; } else { } if (server->namelen == 0U || server->namelen > 255U) { server->namelen = 255U; } else { } nfs_server_insert_lists(server); server->mount_time = jiffies; server->destroy = & nfs4_destroy_server; out: nfs_free_fattr((struct nfs_fattr const *)fattr); return (error); } } static int nfs4_init_server(struct nfs_server *server , struct nfs_parsed_mount_data const *data ) { struct rpc_timeout timeparms ; int error ; long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; long tmp___2 ; { tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_init_server()\n"); } else { } nfs_init_timeout_values(& timeparms, (int )data->nfs_server.protocol, data->timeo, data->retrans); server->flags = data->flags; server->caps = server->caps | 16432U; if (((int )data->flags & 16384) == 0) { server->caps = server->caps | 1U; } else { } server->options = data->options; error = nfs4_set_client(server, (char const *)data->nfs_server.hostname, (struct sockaddr const *)(& data->nfs_server.address), data->nfs_server.addrlen, (char const *)data->client_address, data->auth_flavors[0], (int )data->nfs_server.protocol, (struct rpc_timeout const *)(& timeparms), data->minorversion, data->net); if (error < 0) { goto error; } else { } if ((int )nfs4_disable_idmapping && data->auth_flavors[0] == 1U) { server->caps = server->caps | 32768U; } else { } if ((unsigned int )data->rsize != 0U) { tmp___0 = nfs_block_size((unsigned long )data->rsize, 0); server->rsize = (unsigned int )tmp___0; } else { } if ((unsigned int )data->wsize != 0U) { tmp___1 = nfs_block_size((unsigned long )data->wsize, 0); server->wsize = (unsigned int )tmp___1; } else { } server->acregmin = (unsigned int )data->acregmin * 250U; server->acregmax = (unsigned int )data->acregmax * 250U; server->acdirmin = (unsigned int )data->acdirmin * 250U; server->acdirmax = (unsigned int )data->acdirmax * 250U; server->port = (unsigned short )data->nfs_server.port; error = nfs_init_server_rpcclient(server, (struct rpc_timeout const *)(& timeparms), data->auth_flavors[0]); error: tmp___2 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- nfs4_init_server() = %d\n", error); } else { } return (error); } } struct nfs_server *nfs4_create_server(struct nfs_mount_info *mount_info 
, struct nfs_subversion *nfs_mod ) { struct nfs_server *server ; int error ; long tmp ; void *tmp___0 ; long tmp___1 ; long tmp___2 ; void *tmp___3 ; { tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_create_server()\n"); } else { } server = nfs_alloc_server(); if ((unsigned long )server == (unsigned long )((struct nfs_server *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct nfs_server *)tmp___0); } else { } error = nfs4_init_server(server, (struct nfs_parsed_mount_data const *)mount_info->parsed); if (error < 0) { goto error; } else { } error = nfs4_server_common_setup(server, mount_info->mntfh); if (error < 0) { goto error; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- nfs4_create_server() = %p\n", server); } else { } return (server); error: nfs_free_server(server); tmp___2 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- nfs4_create_server() = error %d\n", error); } else { } tmp___3 = ERR_PTR((long )error); return ((struct nfs_server *)tmp___3); } } struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data , struct nfs_fh *mntfh ) { struct nfs_client *parent_client ; struct nfs_server *server ; struct nfs_server *parent_server ; int error ; long tmp ; void *tmp___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; void *tmp___4 ; { tmp = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> nfs4_create_referral_server()\n"); } else { } server = nfs_alloc_server(); if ((unsigned long )server == (unsigned long )((struct nfs_server *)0)) { tmp___0 = ERR_PTR(-12L); return ((struct nfs_server *)tmp___0); } else { } parent_server = NFS_SB(data->sb); parent_client = parent_server->nfs_client; nfs_server_copy_userdata(server, parent_server); server->caps = server->caps | 48U; tmp___1 = rpc_protocol(parent_server->client); error = nfs4_set_client(server, (char const *)data->hostname, (struct sockaddr const *)data->addr, data->addrlen, (char const *)(& parent_client->cl_ipaddr), data->authflavor, tmp___1, (parent_server->client)->cl_timeout, (parent_client->cl_mvops)->minor_version, parent_client->cl_net); if (error < 0) { goto error; } else { } error = nfs_init_server_rpcclient(server, (parent_server->client)->cl_timeout, data->authflavor); if (error < 0) { goto error; } else { } error = nfs4_server_common_setup(server, mntfh); if (error < 0) { goto error; } else { } tmp___2 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d<-- nfs_create_referral_server() = %p\n", server); } else { } return (server); error: nfs_free_server(server); tmp___3 = ldv__builtin_expect((nfs_debug & 512U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d<-- nfs4_create_referral_server() = error %d\n", error); } else { } tmp___4 = ERR_PTR((long )error); return ((struct nfs_server *)tmp___4); } } void ldv_main13_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_50599; ldv_50598: tmp = __VERIFIER_nondet_int(); switch (tmp) { default: ; goto ldv_50597; } ldv_50597: ; ldv_50599: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_50598; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_203(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_204(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); 
mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_205(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_206(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_207(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_208(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_209(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_220(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_218(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_221(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_223(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_217(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_219(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_222(struct mutex *ldv_func_arg1 ) ; extern int proc_dointvec_minmax(struct ctl_table * , int , void * , size_t * , loff_t * ) ; extern int proc_dointvec_jiffies(struct ctl_table * , int , void * , size_t * , loff_t * ) ; extern struct ctl_table_header *register_sysctl_table(struct ctl_table * ) ; extern void unregister_sysctl_table(struct ctl_table_header * ) ; static int const nfs_set_port_min = 0; static int const nfs_set_port_max = 65535; static struct ctl_table_header *nfs4_callback_sysctl_table ; static ctl_table nfs4_cb_sysctls[3U] = { {"nfs_callback_tcpport", (void *)(& nfs_callback_set_tcpport), 4, 420U, 0, & proc_dointvec_minmax, 0, (void *)(& nfs_set_port_min), (void *)(& nfs_set_port_max)}, {"idmap_cache_timeout", (void *)(& nfs_idmap_cache_timeout), 4, 420U, 0, & proc_dointvec_jiffies, 0, 0, 0}}; static ctl_table nfs4_cb_sysctl_dir[2U] = { {"nfs", 0, 0, 365U, (struct ctl_table *)(& nfs4_cb_sysctls), 0, 0, 0, 0}}; static ctl_table nfs4_cb_sysctl_root[2U] = { {"fs", 0, 0, 365U, (struct ctl_table *)(& nfs4_cb_sysctl_dir), 0, 0, 0, 0}}; int nfs4_register_sysctl(void) { { nfs4_callback_sysctl_table = register_sysctl_table((struct ctl_table *)(& nfs4_cb_sysctl_root)); if ((unsigned long )nfs4_callback_sysctl_table == (unsigned long )((struct ctl_table_header *)0)) { return (-12); } else { } return (0); } } void nfs4_unregister_sysctl(void) { { unregister_sysctl_table(nfs4_callback_sysctl_table); nfs4_callback_sysctl_table = 0; return; } } void ldv_mutex_lock_217(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_218(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_219(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_220(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_221(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void 
ldv_mutex_lock_222(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_223(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static void __clear_bit(int nr , unsigned long volatile *addr ) { { __asm__ volatile ("btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } extern unsigned long find_first_zero_bit(unsigned long const * , unsigned long ) ; extern unsigned long find_last_bit(unsigned long const * , unsigned long ) ; int ldv_mutex_trylock_234(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_232(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_235(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_237(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_231(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_233(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_236(struct mutex *ldv_func_arg1 ) ; __inline static void init_completion(struct completion *x ) { struct lock_class_key __key ; { x->done = 0U; __init_waitqueue_head(& x->wait, "&x->wait", & __key); return; } } extern void rpc_init_priority_wait_queue(struct rpc_wait_queue * , char const * ) ; extern struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue * , bool (*)(struct rpc_task * , void * ) , void * ) ; extern void xprt_destroy_backchannel(struct rpc_xprt * , unsigned int ) ; __inline static bool is_ds_client(struct nfs_client *clp ) { { return ((clp->cl_exchange_flags & 262144U) != 0U); } } int nfs4_init_ds_session(struct nfs_client *clp , unsigned long lease_time ) ; static void nfs4_shrink_slot_table(struct nfs4_slot_table *tbl , u32 newsize ) { struct nfs4_slot **p ; u32 tmp ; struct nfs4_slot *slot ; { if (tbl->max_slots <= newsize) { return; } else { } p = & tbl->slots; goto ldv_49786; ldv_49785: p = & (*p)->next; ldv_49786: tmp = newsize; newsize = newsize - 1U; if (tmp != 0U) { goto ldv_49785; } else { } goto ldv_49790; ldv_49789: slot = *p; *p = slot->next; kfree((void const *)slot); tbl->max_slots = tbl->max_slots - 1U; ldv_49790: ; if ((unsigned long )*p != (unsigned long )((struct nfs4_slot *)0)) { goto ldv_49789; } else { } return; } } void nfs4_free_slot(struct nfs4_slot_table *tbl , struct nfs4_slot *slot ) { u32 slotid ; u32 new_max ; unsigned long tmp ; long tmp___0 ; { slotid = slot->slot_nr; __clear_bit((int )slotid, (unsigned long volatile *)(& tbl->used_slots)); if (tbl->highest_used_slotid == slotid) { tmp = find_last_bit((unsigned long const *)(& tbl->used_slots), (unsigned long )slotid); new_max = (u32 )tmp; if (new_max < slotid) { tbl->highest_used_slotid = new_max; } else { tbl->highest_used_slotid = 4294967295U; nfs4_session_drain_complete(tbl->session, tbl); } } else { } tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: slotid %u highest_used_slotid %d\n", "nfs4_free_slot", slotid, tbl->highest_used_slotid); } else { } return; } } static struct nfs4_slot *nfs4_new_slot(struct nfs4_slot_table *tbl , u32 slotid , u32 seq_init , gfp_t gfp_mask ) { struct nfs4_slot *slot ; void *tmp ; { tmp = kzalloc(40UL, gfp_mask); slot = (struct nfs4_slot *)tmp; if ((unsigned long )slot != (unsigned long )((struct nfs4_slot *)0)) { slot->table = tbl; slot->slot_nr = slotid; slot->seq_nr = seq_init; } else { } return (slot); } } static struct nfs4_slot *nfs4_find_or_create_slot(struct nfs4_slot_table *tbl , u32 slotid , u32 seq_init , gfp_t gfp_mask ) { struct 
nfs4_slot **p ; struct nfs4_slot *slot ; void *tmp ; { p = & tbl->slots; ldv_49815: ; if ((unsigned long )*p == (unsigned long )((struct nfs4_slot *)0)) { *p = nfs4_new_slot(tbl, tbl->max_slots, seq_init, gfp_mask); if ((unsigned long )*p == (unsigned long )((struct nfs4_slot *)0)) { goto ldv_49814; } else { } tbl->max_slots = tbl->max_slots + 1U; } else { } slot = *p; if (slot->slot_nr == slotid) { return (slot); } else { } p = & slot->next; goto ldv_49815; ldv_49814: tmp = ERR_PTR(-12L); return ((struct nfs4_slot *)tmp); } } struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl ) { struct nfs4_slot *ret ; void *tmp ; u32 slotid ; long tmp___0 ; unsigned long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; { tmp = ERR_PTR(-16L); ret = (struct nfs4_slot *)tmp; tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d--> %s used_slots=%04lx highest_used=%u max_slots=%u\n", "nfs4_alloc_slot", tbl->used_slots[0], tbl->highest_used_slotid, tbl->max_slotid + 1U); } else { } tmp___1 = find_first_zero_bit((unsigned long const *)(& tbl->used_slots), (unsigned long )(tbl->max_slotid + 1U)); slotid = (u32 )tmp___1; if (tbl->max_slotid < slotid) { goto out; } else { } ret = nfs4_find_or_create_slot(tbl, slotid, 1U, 0U); tmp___2 = IS_ERR((void const *)ret); if (tmp___2 != 0L) { goto out; } else { } __set_bit((int )slotid, (unsigned long volatile *)(& tbl->used_slots)); if (tbl->highest_used_slotid < slotid || tbl->highest_used_slotid == 4294967295U) { tbl->highest_used_slotid = slotid; } else { } ret->generation = tbl->generation; out: tmp___4 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___4 != 0L) { tmp___3 = IS_ERR((void const *)ret); printk("\001d<-- %s used_slots=%04lx highest_used=%d slotid=%d \n", "nfs4_alloc_slot", tbl->used_slots[0], tbl->highest_used_slotid, tmp___3 == 0L ? 
ret->slot_nr : 4294967295U); } else { } return (ret); } } static int nfs4_grow_slot_table(struct nfs4_slot_table *tbl , u32 max_reqs , u32 ivalue ) { struct nfs4_slot *tmp ; long tmp___0 ; { if (tbl->max_slots >= max_reqs) { return (0); } else { } tmp = nfs4_find_or_create_slot(tbl, max_reqs - 1U, ivalue, 80U); tmp___0 = IS_ERR((void const *)tmp); if (tmp___0 == 0L) { return (0); } else { } return (-12); } } static void nfs4_reset_slot_table(struct nfs4_slot_table *tbl , u32 server_highest_slotid , u32 ivalue ) { struct nfs4_slot **p ; { nfs4_shrink_slot_table(tbl, server_highest_slotid + 1U); p = & tbl->slots; goto ldv_49835; ldv_49834: (*p)->seq_nr = ivalue; (*p)->interrupted = 0U; p = & (*p)->next; ldv_49835: ; if ((unsigned long )*p != (unsigned long )((struct nfs4_slot *)0)) { goto ldv_49834; } else { } tbl->highest_used_slotid = 4294967295U; tbl->target_highest_slotid = server_highest_slotid; tbl->server_highest_slotid = server_highest_slotid; tbl->d_target_highest_slotid = 0; tbl->d2_target_highest_slotid = 0; tbl->max_slotid = server_highest_slotid; return; } } static int nfs4_realloc_slot_table(struct nfs4_slot_table *tbl , u32 max_reqs , u32 ivalue ) { int ret ; long tmp ; long tmp___0 ; long tmp___1 ; { tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s: max_reqs=%u, tbl->max_slots %d\n", "nfs4_realloc_slot_table", max_reqs, tbl->max_slots); } else { } if (max_reqs > 1024U) { max_reqs = 1024U; } else { } ret = nfs4_grow_slot_table(tbl, max_reqs, ivalue); if (ret != 0) { goto out; } else { } spin_lock(& tbl->slot_tbl_lock); nfs4_reset_slot_table(tbl, max_reqs - 1U, ivalue); spin_unlock(& tbl->slot_tbl_lock); tmp___0 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: tbl=%p slots=%p max_slots=%d\n", "nfs4_realloc_slot_table", tbl, tbl->slots, tbl->max_slots); } else { } out: tmp___1 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d<-- %s: return %d\n", "nfs4_realloc_slot_table", ret); } else { } return (ret); } } static void nfs4_destroy_slot_tables(struct nfs4_session *session ) { { nfs4_shrink_slot_table(& session->fc_slot_table, 0U); nfs4_shrink_slot_table(& session->bc_slot_table, 0U); return; } } static bool nfs41_assign_slot(struct rpc_task *task , void *pslot ) { struct nfs4_sequence_args *args ; struct nfs4_sequence_res *res ; struct nfs4_slot *slot ; struct nfs4_slot_table *tbl ; bool tmp ; { args = (struct nfs4_sequence_args *)task->tk_msg.rpc_argp; res = (struct nfs4_sequence_res *)task->tk_msg.rpc_resp; slot = (struct nfs4_slot *)pslot; tbl = slot->table; tmp = nfs4_session_draining(tbl->session); if ((int )tmp && (unsigned int )*((unsigned char *)args + 8UL) == 0U) { return (0); } else { } slot->generation = tbl->generation; args->sa_slot = slot; res->sr_timestamp = jiffies; res->sr_slot = slot; res->sr_status_flags = 0U; res->sr_status = 1; return (1); } } static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl , struct nfs4_slot *slot ) { struct rpc_task *tmp ; { tmp = rpc_wake_up_first(& tbl->slot_tbl_waitq, & nfs41_assign_slot, (void *)slot); if ((unsigned long )tmp != (unsigned long )((struct rpc_task *)0)) { return (1); } else { } return (0); } } bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl , struct nfs4_slot *slot ) { bool tmp ; { if (slot->slot_nr > tbl->max_slotid) { return (0); } else { } tmp = __nfs41_wake_and_assign_slot(tbl, slot); return (tmp); } } static bool 
nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl ) { struct nfs4_slot *slot ; struct nfs4_slot *tmp ; bool ret ; bool tmp___0 ; long tmp___1 ; { tmp = nfs4_alloc_slot(tbl); slot = tmp; tmp___1 = IS_ERR((void const *)slot); if (tmp___1 == 0L) { tmp___0 = __nfs41_wake_and_assign_slot(tbl, slot); ret = tmp___0; if ((int )ret) { return (ret); } else { } nfs4_free_slot(tbl, slot); } else { } return (0); } } void nfs41_wake_slot_table(struct nfs4_slot_table *tbl ) { bool tmp ; int tmp___0 ; { ldv_49873: tmp = nfs41_try_wake_next_slot_table_entry(tbl); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto ldv_49872; } else { } goto ldv_49873; ldv_49872: ; return; } } static void nfs41_set_max_slotid_locked(struct nfs4_slot_table *tbl , u32 target_highest_slotid ) { u32 max_slotid ; unsigned int _min1 ; u32 _min2 ; { _min1 = 1023U; _min2 = target_highest_slotid; max_slotid = _min1 < _min2 ? _min1 : _min2; if (tbl->server_highest_slotid < max_slotid) { max_slotid = tbl->server_highest_slotid; } else { } if (tbl->target_highest_slotid < max_slotid) { max_slotid = tbl->target_highest_slotid; } else { } tbl->max_slotid = max_slotid; nfs41_wake_slot_table(tbl); return; } } static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl , u32 target_highest_slotid ) { { if (tbl->target_highest_slotid == target_highest_slotid) { return; } else { } tbl->target_highest_slotid = target_highest_slotid; tbl->generation = tbl->generation + 1UL; return; } } void nfs41_set_target_slotid(struct nfs4_slot_table *tbl , u32 target_highest_slotid ) { { spin_lock(& tbl->slot_tbl_lock); nfs41_set_target_slotid_locked(tbl, target_highest_slotid); tbl->d_target_highest_slotid = 0; tbl->d2_target_highest_slotid = 0; nfs41_set_max_slotid_locked(tbl, target_highest_slotid); spin_unlock(& tbl->slot_tbl_lock); return; } } static void nfs41_set_server_slotid_locked(struct nfs4_slot_table *tbl , u32 highest_slotid ) { { if (tbl->server_highest_slotid == highest_slotid) { return; } else { } if (tbl->highest_used_slotid > highest_slotid) { return; } else { } nfs4_shrink_slot_table(tbl, highest_slotid + 1U); tbl->server_highest_slotid = highest_slotid; return; } } static s32 nfs41_derivative_target_slotid(s32 s1 , s32 s2 ) { { s1 = s1 - s2; if (s1 == 0) { return (0); } else { } if (s1 < 0) { return ((s1 + -1) >> 1); } else { } return ((s1 + 1) >> 1); } } static int nfs41_sign_s32(s32 s1 ) { { if (s1 > 0) { return (1); } else { } if (s1 < 0) { return (-1); } else { } return (0); } } static bool nfs41_same_sign_or_zero_s32(s32 s1 , s32 s2 ) { int tmp ; int tmp___0 ; { if (s1 == 0 || s2 == 0) { return (1); } else { } tmp = nfs41_sign_s32(s1); tmp___0 = nfs41_sign_s32(s2); return (tmp == tmp___0); } } static bool nfs41_is_outlier_target_slotid(struct nfs4_slot_table *tbl , u32 new_target ) { s32 d_target ; s32 d2_target ; bool ret ; bool tmp ; bool tmp___0 ; { ret = 1; d_target = nfs41_derivative_target_slotid((s32 )new_target, (s32 )tbl->target_highest_slotid); d2_target = nfs41_derivative_target_slotid(d_target, tbl->d_target_highest_slotid); tmp = nfs41_same_sign_or_zero_s32(d_target, tbl->d_target_highest_slotid); if ((int )tmp) { ret = 0; } else { } tmp___0 = nfs41_same_sign_or_zero_s32(d2_target, tbl->d2_target_highest_slotid); if ((int )tmp___0) { ret = 0; } else { } tbl->d_target_highest_slotid = d_target; tbl->d2_target_highest_slotid = d2_target; return (ret); } } void nfs41_update_target_slotid(struct nfs4_slot_table *tbl , struct nfs4_slot *slot , struct nfs4_sequence_res *res ) { bool 
tmp ; int tmp___0 ; { spin_lock(& tbl->slot_tbl_lock); tmp = nfs41_is_outlier_target_slotid(tbl, res->sr_target_highest_slotid); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { nfs41_set_target_slotid_locked(tbl, res->sr_target_highest_slotid); } else { } if (tbl->generation == slot->generation) { nfs41_set_server_slotid_locked(tbl, res->sr_highest_slotid); } else { } nfs41_set_max_slotid_locked(tbl, res->sr_target_highest_slotid); spin_unlock(& tbl->slot_tbl_lock); return; } } int nfs4_setup_session_slot_tables(struct nfs4_session *ses ) { struct nfs4_slot_table *tbl ; int status ; long tmp ; { tmp = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "nfs4_setup_session_slot_tables"); } else { } tbl = & ses->fc_slot_table; tbl->session = ses; status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1U); if (status != 0) { return (status); } else { } tbl = & ses->bc_slot_table; tbl->session = ses; status = nfs4_realloc_slot_table(tbl, ses->bc_attrs.max_reqs, 0U); if (status != 0 && (unsigned long )tbl->slots == (unsigned long )((struct nfs4_slot *)0)) { nfs4_destroy_slot_tables(ses); } else { } return (status); } } struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp ) { struct nfs4_session *session ; struct nfs4_slot_table *tbl ; void *tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; { tmp = kzalloc(1432UL, 80U); session = (struct nfs4_session *)tmp; if ((unsigned long )session == (unsigned long )((struct nfs4_session *)0)) { return (0); } else { } tbl = & session->fc_slot_table; tbl->highest_used_slotid = 4294967295U; spinlock_check(& tbl->slot_tbl_lock); __raw_spin_lock_init(& tbl->slot_tbl_lock.ldv_5961.rlock, "&(&tbl->slot_tbl_lock)->rlock", & __key); rpc_init_priority_wait_queue(& tbl->slot_tbl_waitq, "ForeChannel Slot table"); init_completion(& tbl->complete); tbl = & session->bc_slot_table; tbl->highest_used_slotid = 4294967295U; spinlock_check(& tbl->slot_tbl_lock); __raw_spin_lock_init(& tbl->slot_tbl_lock.ldv_5961.rlock, "&(&tbl->slot_tbl_lock)->rlock", & __key___0); rpc_init_wait_queue(& tbl->slot_tbl_waitq, "BackChannel Slot table"); init_completion(& tbl->complete); session->session_state = 1UL; session->clp = clp; return (session); } } void nfs4_destroy_session(struct nfs4_session *session ) { struct rpc_xprt *xprt ; struct rpc_cred *cred ; struct rpc_xprt *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long tmp___1 ; { cred = nfs4_get_exchange_id_cred(session->clp); nfs4_proc_destroy_session(session, cred); if ((unsigned long )cred != (unsigned long )((struct rpc_cred *)0)) { put_rpccred(cred); } else { } rcu_read_lock(); _________p1 = *((struct rpc_xprt * volatile *)(& ((session->clp)->cl_rpcclient)->cl_xprt)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/nfs4session.c.prepared", 513, "suspicious rcu_dereference_check() usage"); } else { } } else { } xprt = _________p1; rcu_read_unlock(); tmp___1 = ldv__builtin_expect((nfs_debug & 16384U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s Destroy backchannel for xprt %p\n", "nfs4_destroy_session", xprt); } else { } xprt_destroy_backchannel(xprt, 1U); nfs4_destroy_slot_tables(session); kfree((void const *)session); return; } } static int nfs41_check_session_ready(struct nfs_client *clp ) { int ret ; { if (clp->cl_cons_state == 2) { ret = nfs4_client_recover_expired_lease(clp); if (ret != 0) { return (ret); } else { } } else { } if (clp->cl_cons_state < 0) { return (-93); } else { } __asm__ volatile ("": : : "memory"); return (0); } } int nfs4_init_session(struct nfs_server *server ) { struct nfs_client *clp ; struct nfs4_session *session ; unsigned int target_max_rqst_sz ; unsigned int target_max_resp_sz ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { clp = server->nfs_client; target_max_rqst_sz = 1048576U; target_max_resp_sz = 1048576U; tmp = nfs4_has_session((struct nfs_client const *)clp); if (tmp == 0) { return (0); } else { } if (server->rsize != 0U) { target_max_resp_sz = server->rsize; } else { } target_max_resp_sz = target_max_resp_sz + (unsigned int )nfs41_maxread_overhead; if (server->wsize != 0U) { target_max_rqst_sz = server->wsize; } else { } target_max_rqst_sz = target_max_rqst_sz + (unsigned int )nfs41_maxwrite_overhead; session = clp->cl_session; spin_lock(& clp->cl_lock); tmp___0 = test_and_clear_bit(0, (unsigned long volatile *)(& session->session_state)); if (tmp___0 != 0) { session->fc_target_max_rqst_sz = target_max_rqst_sz; session->fc_attrs.max_rqst_sz = target_max_rqst_sz; session->fc_target_max_resp_sz = target_max_resp_sz; session->fc_attrs.max_resp_sz = target_max_resp_sz; } else { if (session->fc_target_max_rqst_sz < target_max_rqst_sz) { session->fc_target_max_rqst_sz = target_max_rqst_sz; set_bit(6U, (unsigned long volatile *)(& clp->cl_state)); } else { } if (session->fc_target_max_resp_sz < target_max_resp_sz) { session->fc_target_max_resp_sz = target_max_resp_sz; set_bit(6U, (unsigned long volatile *)(& clp->cl_state)); } else { } } spin_unlock(& clp->cl_lock); tmp___1 = constant_test_bit(6U, (unsigned long const volatile *)(& clp->cl_state)); if (tmp___1 != 0) { nfs4_schedule_lease_recovery(clp); } else { } tmp___2 = nfs41_check_session_ready(clp); return (tmp___2); } } int nfs4_init_ds_session(struct nfs_client *clp , unsigned long lease_time ) { struct nfs4_session *session ; int ret ; int tmp ; bool tmp___0 ; int tmp___1 ; { session = clp->cl_session; spin_lock(& clp->cl_lock); tmp = test_and_clear_bit(0, (unsigned long volatile *)(& session->session_state)); if (tmp != 0) { clp->cl_lease_time = lease_time; clp->cl_last_renewal = jiffies; } else { } spin_unlock(& clp->cl_lock); ret = nfs41_check_session_ready(clp); if (ret != 0) { return (ret); } else { } tmp___0 = is_ds_client(clp); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (-19); } else { } return (0); } } void ldv_mutex_lock_231(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); 
mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_232(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_233(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_234(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_235(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_236(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_237(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head, head->next); INIT_LIST_HEAD(list); } else { } return; } } __inline static void list_splice_tail_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head->prev, head); INIT_LIST_HEAD(list); } else { } return; } } __inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } } int ldv_mutex_trylock_248(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_246(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_249(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_251(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_245(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_247(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_250(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___8(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6448; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6448; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6448; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6448; default: __bad_percpu_size(); } ldv_6448: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern int out_of_line_wait_on_bit_lock(void * , int , int (*)(void * ) , unsigned int ) ; __inline static int wait_on_bit_lock(void *word , int bit , int (*action)(void * ) , unsigned int mode ) { int tmp ; int tmp___0 ; { tmp = test_and_set_bit(bit, (unsigned long volatile *)word); if (tmp == 0) { return (0); } else { } tmp___0 = out_of_line_wait_on_bit_lock(word, bit, action, mode); return (tmp___0); } } __inline static void 
__rcu_read_lock___8(void) { struct thread_info *tmp ; { tmp = current_thread_info___8(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___8(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___8(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___8(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___8(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___8(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___8(); return; } } extern void __mark_inode_dirty(struct inode * , int ) ; __inline static void mark_inode_dirty_sync(struct inode *inode ) { { __mark_inode_dirty(inode, 1); return; } } extern void nfs_pageio_init(struct nfs_pageio_descriptor * , struct inode * , struct nfs_pageio_ops const * , struct nfs_pgio_completion_ops const * , size_t , int ) ; extern int nfs_pageio_add_request(struct nfs_pageio_descriptor * , struct nfs_page * ) ; extern void nfs_pageio_complete(struct nfs_pageio_descriptor * ) ; extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor * , struct nfs_page * , struct nfs_page * ) ; __inline static void nfs_list_add_request(struct nfs_page *req , struct list_head *head ) { { list_add_tail(& req->wb_list, head); return; } } __inline static void nfs_list_remove_request(struct nfs_page *req ) { int tmp ; { tmp = list_empty((struct list_head const *)(& req->wb_list)); if (tmp != 0) { return; } else { } list_del_init(& req->wb_list); return; } } __inline static struct nfs_page *nfs_list_entry(struct list_head *head ) { struct list_head const *__mptr ; { __mptr = (struct list_head const *)head; return ((struct nfs_page *)__mptr); } } __inline static loff_t req_offset(struct nfs_page *req ) { { return (((long long )req->wb_index << 12) + (long long )req->wb_offset); } } extern int __request_module(bool , char const * , ...) 
; extern void nfs_pgheader_init(struct nfs_pageio_descriptor * , struct nfs_pgio_header * , void (*)(struct nfs_pgio_header * ) ) ; extern struct nfs_read_header *nfs_readhdr_alloc(void) ; extern void nfs_readhdr_free(struct nfs_pgio_header * ) ; extern void nfs_pageio_init_read(struct nfs_pageio_descriptor * , struct inode * , struct nfs_pgio_completion_ops const * ) ; extern int nfs_generic_pagein(struct nfs_pageio_descriptor * , struct nfs_pgio_header * ) ; extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor * ) ; extern void nfs_readdata_release(struct nfs_read_data * ) ; extern void nfs_pageio_init_write(struct nfs_pageio_descriptor * , struct inode * , int , struct nfs_pgio_completion_ops const * ) ; extern struct nfs_write_header *nfs_writehdr_alloc(void) ; extern void nfs_writehdr_free(struct nfs_pgio_header * ) ; extern int nfs_generic_flush(struct nfs_pageio_descriptor * , struct nfs_pgio_header * ) ; extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor * ) ; extern void nfs_writedata_release(struct nfs_write_data * ) ; extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req * ) ; int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type ) ; void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type ) ; void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio , struct nfs_page *req ) ; int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc ) ; void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio , struct nfs_page *req , u64 wb_size ) ; int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc ) ; bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio , struct nfs_page *prev , struct nfs_page *req ) ; void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg ) ; void pnfs_set_layoutcommit(struct nfs_write_data *wdata ) ; void pnfs_ld_write_done(struct nfs_write_data *data ) ; void pnfs_ld_read_done(struct nfs_read_data *data ) ; struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino , struct nfs_open_context *ctx , loff_t pos , u64 count , enum pnfs_iomode iomode , gfp_t gfp_flags ) ; void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp ) ; int pnfs_read_done_resend_to_mds(struct inode *inode , struct list_head *head , struct nfs_pgio_completion_ops const *compl_ops ) ; int pnfs_write_done_resend_to_mds(struct inode *inode , struct list_head *head , struct nfs_pgio_completion_ops const *compl_ops ) ; void nfs4_deviceid_purge_client(struct nfs_client const *clp ) ; __inline static struct pnfs_layout_segment *pnfs_get_lseg(struct pnfs_layout_segment *lseg ) { { if ((unsigned long )lseg != (unsigned long )((struct pnfs_layout_segment *)0)) { atomic_inc(& lseg->pls_refcount); __asm__ volatile ("": : : "memory"); } else { } return (lseg); } } __inline static void nfs_inc_server_stats___0(struct nfs_server const *server , enum nfs_stat_eventcounters stat ) { void const *__vpp_verify ; int pao_ID__ ; int pao_ID_____0 ; int pao_ID_____1 ; int pao_ID_____2 ; { __vpp_verify = 0; switch (8UL) { case 1UL: pao_ID__ = 1; switch (8UL) { case 1UL: ; if (pao_ID__ == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49278; case 2UL: ; if (pao_ID__ == 1) { __asm__ ("incw %%gs:%P0": "+m" 
((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49278; case 4UL: ; if (pao_ID__ == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49278; case 8UL: ; if (pao_ID__ == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID__ == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49278; default: __bad_percpu_size(); } ldv_49278: ; goto ldv_49283; case 2UL: pao_ID_____0 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____0 == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49289; case 2UL: ; if (pao_ID_____0 == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49289; case 4UL: ; if (pao_ID_____0 == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49289; case 8UL: ; if (pao_ID_____0 == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____0 == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49289; default: __bad_percpu_size(); } ldv_49289: ; goto ldv_49283; case 4UL: pao_ID_____1 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____1 == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49299; case 2UL: ; if (pao_ID_____1 == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49299; case 4UL: ; if (pao_ID_____1 == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decl 
%%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49299; case 8UL: ; if (pao_ID_____1 == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____1 == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49299; default: __bad_percpu_size(); } ldv_49299: ; goto ldv_49283; case 8UL: pao_ID_____2 = 1; switch (8UL) { case 1UL: ; if (pao_ID_____2 == 1) { __asm__ ("incb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decb %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addb %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "qi" (1UL)); } goto ldv_49309; case 2UL: ; if (pao_ID_____2 == 1) { __asm__ ("incw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decw %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addw %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49309; case 4UL: ; if (pao_ID_____2 == 1) { __asm__ ("incl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decl %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addl %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "ri" (1UL)); } goto ldv_49309; case 8UL: ; if (pao_ID_____2 == 1) { __asm__ ("incq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else if (pao_ID_____2 == -1) { __asm__ ("decq %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat])); } else { __asm__ ("addq %1, %%gs:%P0": "+m" ((server->io_stats)->events[(unsigned int )stat]): "re" (1UL)); } goto ldv_49309; default: __bad_percpu_size(); } ldv_49309: ; goto ldv_49283; default: __bad_size_call_parameter(); goto ldv_49283; } ldv_49283: ; return; } } __inline static void nfs_inc_stats(struct inode const *inode , enum nfs_stat_eventcounters stat ) { struct nfs_server *tmp ; { tmp = NFS_SERVER(inode); nfs_inc_server_stats___0((struct nfs_server const *)tmp, stat); return; } } static spinlock_t pnfs_spinlock = {{{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "pnfs_spinlock", 0, 0UL}}}}; static struct list_head pnfs_modules_tbl = {& pnfs_modules_tbl, & pnfs_modules_tbl}; static struct pnfs_layoutdriver_type *find_pnfs_driver_locked(u32 id ) { struct pnfs_layoutdriver_type *local ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; long tmp ; { __mptr = (struct list_head const *)pnfs_modules_tbl.next; local = (struct pnfs_layoutdriver_type *)__mptr; goto ldv_49439; ldv_49438: ; if ((unsigned int )local->id == id) { goto out; } else { } __mptr___0 = (struct list_head const *)local->pnfs_tblid.next; local = (struct pnfs_layoutdriver_type *)__mptr___0; ldv_49439: ; if ((unsigned long )(& local->pnfs_tblid) != (unsigned long )(& pnfs_modules_tbl)) { goto ldv_49438; } else { } local = 0; out: tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: Searching for id %u, found %p\n", "find_pnfs_driver_locked", id, local); } else { } return (local); } } static struct 
pnfs_layoutdriver_type *find_pnfs_driver(u32 id ) { struct pnfs_layoutdriver_type *local ; long tmp ; bool tmp___0 ; int tmp___1 ; { spin_lock(& pnfs_spinlock); local = find_pnfs_driver_locked(id); if ((unsigned long )local != (unsigned long )((struct pnfs_layoutdriver_type *)0)) { tmp___0 = try_module_get(local->owner); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: Could not grab reference on module\n", "find_pnfs_driver"); } else { } local = 0; } else { } } else { } spin_unlock(& pnfs_spinlock); return (local); } } void unset_pnfs_layoutdriver(struct nfs_server *nfss ) { int tmp ; { if ((unsigned long )nfss->pnfs_curr_ld != (unsigned long )((struct pnfs_layoutdriver_type *)0)) { if ((unsigned long )(nfss->pnfs_curr_ld)->clear_layoutdriver != (unsigned long )((int (*)(struct nfs_server * ))0)) { (*((nfss->pnfs_curr_ld)->clear_layoutdriver))(nfss); } else { } tmp = atomic_dec_and_test(& (nfss->nfs_client)->cl_mds_count); if (tmp != 0) { nfs4_deviceid_purge_client((struct nfs_client const *)nfss->nfs_client); } else { } module_put((nfss->pnfs_curr_ld)->owner); } else { } nfss->pnfs_curr_ld = 0; return; } } void set_pnfs_layoutdriver(struct nfs_server *server , struct nfs_fh const *mntfh , u32 id ) { struct pnfs_layoutdriver_type *ld_type ; long tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { ld_type = 0; if (id == 0U) { goto out_no_driver; } else { } if (((server->nfs_client)->cl_exchange_flags & 196608U) == 0U) { printk("\vNFS: %s: id %u cl_exchange_flags 0x%x\n", "set_pnfs_layoutdriver", id, (server->nfs_client)->cl_exchange_flags); goto out_no_driver; } else { } ld_type = find_pnfs_driver(id); if ((unsigned long )ld_type == (unsigned long )((struct pnfs_layoutdriver_type *)0)) { __request_module(1, "%s-%u", (char *)"nfs-layouttype4", id); ld_type = find_pnfs_driver(id); if ((unsigned long )ld_type == (unsigned long )((struct pnfs_layoutdriver_type *)0)) { tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: No pNFS module found for %u.\n", "set_pnfs_layoutdriver", id); } else { } goto out_no_driver; } else { } } else { } server->pnfs_curr_ld = ld_type; if ((unsigned long )ld_type->set_layoutdriver != (unsigned long )((int (*)(struct nfs_server * , struct nfs_fh const * ))0)) { tmp___0 = (*(ld_type->set_layoutdriver))(server, mntfh); if (tmp___0 != 0) { printk("\vNFS: %s: Error initializing pNFS layout driver %u.\n", "set_pnfs_layoutdriver", id); module_put(ld_type->owner); goto out_no_driver; } else { } } else { } atomic_inc(& (server->nfs_client)->cl_mds_count); tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: pNFS module for %u set\n", "set_pnfs_layoutdriver", id); } else { } return; out_no_driver: tmp___2 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: Using NFSv4 I/O\n", "set_pnfs_layoutdriver"); } else { } server->pnfs_curr_ld = 0; return; } } int pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type ) { int status ; struct pnfs_layoutdriver_type *tmp ; long tmp___0 ; { status = -22; if ((unsigned int )ld_type->id == 0U) { printk("\vNFS: %s id 0 is reserved\n", "pnfs_register_layoutdriver"); return (status); } else { } if ((unsigned long )ld_type->alloc_lseg == (unsigned long )((struct pnfs_layout_segment *(*)(struct pnfs_layout_hdr * , struct nfs4_layoutget_res * , gfp_t ))0) || (unsigned long )ld_type->free_lseg == 
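/* both alloc_lseg and free_lseg are mandatory layout-driver operations; registration is rejected if either hook is missing */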
(unsigned long )((void (*)(struct pnfs_layout_segment * ))0)) { printk("\vNFS: %s Layout driver must provide alloc_lseg and free_lseg.\n", "pnfs_register_layoutdriver"); return (status); } else { } spin_lock(& pnfs_spinlock); tmp = find_pnfs_driver_locked(ld_type->id); if ((unsigned long )tmp == (unsigned long )((struct pnfs_layoutdriver_type *)0)) { list_add(& ld_type->pnfs_tblid, & pnfs_modules_tbl); status = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s Registering id:%u name:%s\n", "pnfs_register_layoutdriver", ld_type->id, ld_type->name); } else { } } else { printk("\vNFS: %s Module with id %d already loaded!\n", "pnfs_register_layoutdriver", ld_type->id); } spin_unlock(& pnfs_spinlock); return (status); } } void pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type ) { long tmp ; { tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s Deregistering id:%u\n", "pnfs_unregister_layoutdriver", ld_type->id); } else { } spin_lock(& pnfs_spinlock); list_del(& ld_type->pnfs_tblid); spin_unlock(& pnfs_spinlock); return; } } void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo ) { { atomic_inc(& lo->plh_refcount); return; } } static struct pnfs_layout_hdr *pnfs_alloc_layout_hdr(struct inode *ino , gfp_t gfp_flags ) { struct pnfs_layoutdriver_type *ld ; struct nfs_server *tmp ; struct pnfs_layout_hdr *tmp___0 ; { tmp = NFS_SERVER((struct inode const *)ino); ld = tmp->pnfs_curr_ld; tmp___0 = (*(ld->alloc_layout_hdr))(ino, gfp_flags); return (tmp___0); } } static void pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo ) { struct nfs_server *server ; struct nfs_server *tmp ; struct pnfs_layoutdriver_type *ld ; struct nfs_client *clp ; int tmp___0 ; { tmp = NFS_SERVER((struct inode const *)lo->plh_inode); server = tmp; ld = server->pnfs_curr_ld; tmp___0 = list_empty((struct list_head const *)(& lo->plh_layouts)); if (tmp___0 == 0) { clp = server->nfs_client; spin_lock(& clp->cl_lock); list_del_init(& lo->plh_layouts); spin_unlock(& clp->cl_lock); } else { } put_rpccred(lo->plh_lc_cred); return; } } static void pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; long tmp___0 ; { tmp = NFS_I((struct inode const *)lo->plh_inode); nfsi = tmp; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: freeing layout cache %p\n", "pnfs_detach_layout_hdr", lo); } else { } nfsi->layout = 0; nfsi->write_io = 0ULL; nfsi->read_io = 0ULL; return; } } void pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo ) { struct inode *inode ; int tmp ; { inode = lo->plh_inode; tmp = _atomic_dec_and_lock(& lo->plh_refcount, & inode->i_lock); if (tmp != 0) { pnfs_detach_layout_hdr(lo); spin_unlock(& inode->i_lock); pnfs_free_layout_hdr(lo); } else { } return; } } static int pnfs_iomode_to_fail_bit(u32 iomode ) { { return (iomode == 2U); } } static void pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo , int fail_bit ) { int tmp ; { lo->plh_retry_timestamp = jiffies; tmp = test_and_set_bit(fail_bit, (unsigned long volatile *)(& lo->plh_flags)); if (tmp != 0) { atomic_inc(& lo->plh_refcount); } else { } return; } } static void pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo , int fail_bit ) { int tmp ; { tmp = test_and_clear_bit(fail_bit, (unsigned long volatile *)(& lo->plh_flags)); if (tmp != 0) { atomic_dec(& lo->plh_refcount); } else { } return; } } static void pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo , u32 iomode 
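/* pnfs_layout_io_set_failed(): mark this iomode as failed on the layout header and invalidate every layout segment matching that iomode over the whole file */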
) { struct inode *inode ; struct pnfs_layout_range range ; struct list_head head ; int tmp ; long tmp___0 ; { inode = lo->plh_inode; range.iomode = iomode; range.offset = 0ULL; range.length = 0xffffffffffffffffULL; head.next = & head; head.prev = & head; spin_lock(& inode->i_lock); tmp = pnfs_iomode_to_fail_bit(iomode); pnfs_layout_set_fail_bit(lo, tmp); pnfs_mark_matching_lsegs_invalid(lo, & head, & range); spin_unlock(& inode->i_lock); pnfs_free_lseg_list(& head); tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s Setting layout IOMODE_%s fail bit\n", "pnfs_layout_io_set_failed", iomode == 2U ? (char *)"RW" : (char *)"READ"); } else { } return; } } static bool pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo , u32 iomode ) { unsigned long start ; unsigned long end ; int fail_bit ; int tmp ; int tmp___0 ; { tmp = pnfs_iomode_to_fail_bit(iomode); fail_bit = tmp; tmp___0 = variable_test_bit(fail_bit, (unsigned long const volatile *)(& lo->plh_flags)); if (tmp___0 == 0) { return (0); } else { } end = jiffies; start = end - 30000UL; if ((long )lo->plh_retry_timestamp - (long )start < 0L || (long )end - (long )lo->plh_retry_timestamp < 0L) { pnfs_layout_clear_fail_bit(lo, fail_bit); return (0); } else { } return (1); } } static void init_lseg(struct pnfs_layout_hdr *lo , struct pnfs_layout_segment *lseg ) { { INIT_LIST_HEAD(& lseg->pls_list); INIT_LIST_HEAD(& lseg->pls_lc_list); atomic_set(& lseg->pls_refcount, 1); __asm__ volatile ("mfence": : : "memory"); set_bit(0U, (unsigned long volatile *)(& lseg->pls_flags)); lseg->pls_layout = lo; return; } } static void pnfs_free_lseg(struct pnfs_layout_segment *lseg ) { struct inode *ino ; struct nfs_server *tmp ; { ino = (lseg->pls_layout)->plh_inode; tmp = NFS_SERVER((struct inode const *)ino); (*((tmp->pnfs_curr_ld)->free_lseg))(lseg); return; } } static void pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo , struct pnfs_layout_segment *lseg ) { struct inode *inode ; int __ret_warn_on ; int tmp ; long tmp___0 ; int tmp___1 ; struct nfs_server *tmp___2 ; { inode = lo->plh_inode; tmp = constant_test_bit(0U, (unsigned long const volatile *)(& lseg->pls_flags)); __ret_warn_on = tmp != 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/pnfs.c.prepared", 394); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); list_del_init(& lseg->pls_list); atomic_dec(& lo->plh_refcount); tmp___1 = list_empty((struct list_head const *)(& lo->plh_segs)); if (tmp___1 != 0) { clear_bit(2, (unsigned long volatile *)(& lo->plh_flags)); } else { } tmp___2 = NFS_SERVER((struct inode const *)inode); rpc_wake_up(& tmp___2->roc_rpcwaitq); return; } } void pnfs_put_lseg(struct pnfs_layout_segment *lseg ) { struct pnfs_layout_hdr *lo ; struct inode *inode ; int tmp ; int tmp___0 ; long tmp___1 ; int tmp___2 ; { if ((unsigned long )lseg == (unsigned long )((struct pnfs_layout_segment *)0)) { return; } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { tmp = constant_test_bit(0U, (unsigned long const volatile *)(& lseg->pls_flags)); tmp___0 = atomic_read((atomic_t const *)(& lseg->pls_refcount)); printk("\001d%s: lseg %p ref %d valid %d\n", "pnfs_put_lseg", lseg, tmp___0, tmp); } else { } lo = 
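/* drop a reference on the lseg; on the final put, detach it from the layout header under i_lock and free it through the layout driver */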
lseg->pls_layout; inode = lo->plh_inode; tmp___2 = _atomic_dec_and_lock(& lseg->pls_refcount, & inode->i_lock); if (tmp___2 != 0) { pnfs_get_layout_hdr(lo); pnfs_layout_remove_lseg(lo, lseg); spin_unlock(& inode->i_lock); pnfs_free_lseg(lseg); pnfs_put_layout_hdr(lo); } else { } return; } } __inline static u64 end_offset(u64 start , u64 len ) { u64 end ; { end = start + len; return (end >= start ? end : 0xffffffffffffffffULL); } } __inline static int lo_seg_contained(struct pnfs_layout_range *l1 , struct pnfs_layout_range *l2 ) { u64 start1 ; u64 end1 ; u64 tmp ; u64 start2 ; u64 end2 ; u64 tmp___0 ; { start1 = l1->offset; tmp = end_offset(start1, l1->length); end1 = tmp; start2 = l2->offset; tmp___0 = end_offset(start2, l2->length); end2 = tmp___0; return (start1 <= start2 && end1 >= end2); } } __inline static int lo_seg_intersecting(struct pnfs_layout_range *l1 , struct pnfs_layout_range *l2 ) { u64 start1 ; u64 end1 ; u64 tmp ; u64 start2 ; u64 end2 ; u64 tmp___0 ; { start1 = l1->offset; tmp = end_offset(start1, l1->length); end1 = tmp; start2 = l2->offset; tmp___0 = end_offset(start2, l2->length); end2 = tmp___0; return ((end1 == 0xffffffffffffffffULL || end1 > start2) && (end2 == 0xffffffffffffffffULL || end2 > start1)); } } static bool should_free_lseg(struct pnfs_layout_range *lseg_range , struct pnfs_layout_range *recall_range ) { int tmp ; int tmp___0 ; { if (recall_range->iomode == 3U || lseg_range->iomode == recall_range->iomode) { tmp = lo_seg_intersecting(lseg_range, recall_range); if (tmp != 0) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } return ((bool )tmp___0); } } static int mark_lseg_invalid(struct pnfs_layout_segment *lseg , struct list_head *tmp_list ) { int rv ; int tmp ; long tmp___0 ; int tmp___1 ; int tmp___2 ; { rv = 0; tmp___2 = test_and_clear_bit(0, (unsigned long volatile *)(& lseg->pls_flags)); if (tmp___2 != 0) { tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { tmp = atomic_read((atomic_t const *)(& lseg->pls_refcount)); printk("\001d%s: lseg %p ref %d\n", "mark_lseg_invalid", lseg, tmp); } else { } tmp___1 = atomic_dec_and_test(& lseg->pls_refcount); if (tmp___1 != 0) { pnfs_layout_remove_lseg(lseg->pls_layout, lseg); list_add(& lseg->pls_list, tmp_list); rv = 1; } else { } } else { } return (rv); } } int pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo , struct list_head *tmp_list , struct pnfs_layout_range *recall_range ) { struct pnfs_layout_segment *lseg ; struct pnfs_layout_segment *next ; int invalid ; int removed ; long tmp ; int tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; long tmp___1 ; int tmp___2 ; bool tmp___3 ; struct list_head const *__mptr___1 ; long tmp___4 ; { invalid = 0; removed = 0; tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s:Begin lo %p\n", "pnfs_mark_matching_lsegs_invalid", lo); } else { } tmp___0 = list_empty((struct list_head const *)(& lo->plh_segs)); if (tmp___0 != 0) { return (0); } else { } __mptr = (struct list_head const *)lo->plh_segs.next; lseg = (struct pnfs_layout_segment *)__mptr; __mptr___0 = (struct list_head const *)lseg->pls_list.next; next = (struct pnfs_layout_segment *)__mptr___0; goto ldv_49616; ldv_49615: ; if ((unsigned long )recall_range == (unsigned long )((struct pnfs_layout_range *)0)) { goto _L; } else { tmp___3 = should_free_lseg(& lseg->pls_range, recall_range); if ((int )tmp___3) { _L: /* CIL Label */ tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); 
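/* this segment matches the recall range (or no range was given): count it and try to move it onto tmp_list via mark_lseg_invalid() */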
if (tmp___1 != 0L) { printk("\001d%s: freeing lseg %p iomode %d offset %llu length %llu\n", "pnfs_mark_matching_lsegs_invalid", lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); } else { } invalid = invalid + 1; tmp___2 = mark_lseg_invalid(lseg, tmp_list); removed = tmp___2 + removed; } else { } } lseg = next; __mptr___1 = (struct list_head const *)next->pls_list.next; next = (struct pnfs_layout_segment *)__mptr___1; ldv_49616: ; if ((unsigned long )(& lseg->pls_list) != (unsigned long )(& lo->plh_segs)) { goto ldv_49615; } else { } tmp___4 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s:Return %i\n", "pnfs_mark_matching_lsegs_invalid", invalid - removed); } else { } return (invalid - removed); } } void pnfs_free_lseg_list(struct list_head *free_me ) { struct pnfs_layout_segment *lseg ; struct pnfs_layout_segment *tmp ; int tmp___0 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { tmp___0 = list_empty((struct list_head const *)free_me); if (tmp___0 != 0) { return; } else { } __mptr = (struct list_head const *)free_me->next; lseg = (struct pnfs_layout_segment *)__mptr; __mptr___0 = (struct list_head const *)lseg->pls_list.next; tmp = (struct pnfs_layout_segment *)__mptr___0; goto ldv_49630; ldv_49629: list_del(& lseg->pls_list); pnfs_free_lseg(lseg); lseg = tmp; __mptr___1 = (struct list_head const *)tmp->pls_list.next; tmp = (struct pnfs_layout_segment *)__mptr___1; ldv_49630: ; if ((unsigned long )(& lseg->pls_list) != (unsigned long )free_me) { goto ldv_49629; } else { } return; } } void pnfs_destroy_layout(struct nfs_inode *nfsi ) { struct pnfs_layout_hdr *lo ; struct list_head tmp_list ; { tmp_list.next = & tmp_list; tmp_list.prev = & tmp_list; spin_lock(& nfsi->vfs_inode.i_lock); lo = nfsi->layout; if ((unsigned long )lo != (unsigned long )((struct pnfs_layout_hdr *)0)) { lo->plh_block_lgets = lo->plh_block_lgets + 1UL; pnfs_mark_matching_lsegs_invalid(lo, & tmp_list, 0); pnfs_get_layout_hdr(lo); pnfs_layout_clear_fail_bit(lo, 0); pnfs_layout_clear_fail_bit(lo, 1); spin_unlock(& nfsi->vfs_inode.i_lock); pnfs_free_lseg_list(& tmp_list); pnfs_put_layout_hdr(lo); } else { spin_unlock(& nfsi->vfs_inode.i_lock); } return; } } void pnfs_destroy_all_layouts(struct nfs_client *clp ) { struct nfs_server *server ; struct pnfs_layout_hdr *lo ; struct list_head tmp_list ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; struct list_head const *__mptr___1 ; long tmp___2 ; struct nfs_inode *tmp___3 ; int tmp___4 ; { tmp_list.next = & tmp_list; tmp_list.prev = & tmp_list; nfs4_deviceid_mark_client_invalid(clp); nfs4_deviceid_purge_client((struct nfs_client const *)clp); spin_lock(& clp->cl_lock); rcu_read_lock___8(); __ptr = clp->cl_superblocks.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; server = (struct nfs_server *)__mptr + 0xfffffffffffffff8UL; goto ldv_49664; ldv_49663: tmp___0 = list_empty((struct list_head const *)(& server->layouts)); if (tmp___0 == 0) { list_splice_init(& server->layouts, & tmp_list); } else { } __ptr___0 = server->client_link.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; server = (struct nfs_server *)__mptr___0 + 0xfffffffffffffff8UL; ldv_49664: ; if ((unsigned long )(& server->client_link) != (unsigned long )(& clp->cl_superblocks)) { goto ldv_49663; } else { } rcu_read_unlock___8(); spin_unlock(& clp->cl_lock); goto ldv_49670; ldv_49669: __mptr___1 = (struct list_head const *)tmp_list.next; lo = (struct pnfs_layout_hdr *)__mptr___1 + 0xfffffffffffffff8UL; tmp___2 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s freeing layout for inode %lu\n", "pnfs_destroy_all_layouts", (lo->plh_inode)->i_ino); } else { } list_del_init(& lo->plh_layouts); tmp___3 = NFS_I((struct inode const *)lo->plh_inode); pnfs_destroy_layout(tmp___3); ldv_49670: tmp___4 = list_empty((struct list_head const *)(& tmp_list)); if (tmp___4 == 0) { goto ldv_49669; } else { } return; } } static bool pnfs_seqid_is_newer(u32 s1 , u32 s2 ) { { return ((int )s1 - (int )s2 > 0); } } void pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo , nfs4_stateid const *new , bool update_barrier ) { u32 oldseq ; u32 newseq ; u32 new_barrier ; int empty ; int tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; int tmp___3 ; bool tmp___4 ; bool tmp___5 ; { tmp = list_empty((struct list_head const *)(& lo->plh_segs)); empty = tmp; tmp___0 = __fswab32(lo->plh_stateid.seqid); oldseq = tmp___0; tmp___1 = __fswab32(new->seqid); newseq = tmp___1; if (empty != 0) { goto _L; } else { tmp___5 = pnfs_seqid_is_newer(newseq, oldseq); if ((int )tmp___5) { _L: /* CIL Label */ nfs4_stateid_copy(& lo->plh_stateid, new); if ((int )update_barrier) { tmp___2 = __fswab32(new->seqid); new_barrier = tmp___2; } else { tmp___3 = atomic_read((atomic_t const *)(& lo->plh_outstanding)); new_barrier = newseq - (u32 )tmp___3; } if (empty != 0) { lo->plh_barrier = new_barrier; } else { tmp___4 = pnfs_seqid_is_newer(new_barrier, lo->plh_barrier); if ((int )tmp___4) { lo->plh_barrier = new_barrier; } else { } } } else { } } return; } } static bool pnfs_layout_stateid_blocked(struct pnfs_layout_hdr const *lo , nfs4_stateid const *stateid ) { u32 seqid ; __u32 tmp ; bool tmp___0 ; int tmp___1 ; { tmp = __fswab32(stateid->seqid); seqid = tmp; tmp___0 = pnfs_seqid_is_newer(seqid, lo->plh_barrier); if ((int )tmp___0 != 0) { tmp___1 = 0; } else { tmp___1 = 1; } return ((bool )tmp___1); } } static bool pnfs_layoutgets_blocked(struct pnfs_layout_hdr const *lo , int lget ) { int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { if ((unsigned long )lo->plh_block_lgets != 0UL) { tmp___2 = 1; } else { tmp = constant_test_bit(2U, (unsigned long const volatile *)(& lo->plh_flags)); if (tmp != 0) { tmp___2 = 1; } else { tmp___0 = list_empty(& lo->plh_segs); if (tmp___0 != 0) { tmp___1 = atomic_read(& lo->plh_outstanding); if (tmp___1 > lget) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } } } return ((bool )tmp___2); } } int pnfs_choose_layoutget_stateid(nfs4_stateid *dst , struct pnfs_layout_hdr *lo , 
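/* pnfs_choose_layoutget_stateid(): pick the stateid for a LAYOUTGET; returns -EAGAIN (-11) while layoutgets are blocked, copies the open stateid when the layout has no segments yet, otherwise copies the current layout stateid */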
struct nfs4_state *open_state ) { int status ; long tmp ; int seq ; unsigned int tmp___0 ; int tmp___1 ; int tmp___2 ; bool tmp___3 ; long tmp___4 ; { status = 0; tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d--> %s\n", "pnfs_choose_layoutget_stateid"); } else { } spin_lock(& (lo->plh_inode)->i_lock); tmp___3 = pnfs_layoutgets_blocked((struct pnfs_layout_hdr const *)lo, 1); if ((int )tmp___3) { status = -11; } else { tmp___2 = list_empty((struct list_head const *)(& lo->plh_segs)); if (tmp___2 != 0) { ldv_49702: tmp___0 = read_seqbegin((seqlock_t const *)(& open_state->seqlock)); seq = (int )tmp___0; nfs4_stateid_copy(dst, (nfs4_stateid const *)(& open_state->stateid)); tmp___1 = read_seqretry((seqlock_t const *)(& open_state->seqlock), (unsigned int )seq); if (tmp___1 != 0) { goto ldv_49702; } else { } } else { nfs4_stateid_copy(dst, (nfs4_stateid const *)(& lo->plh_stateid)); } } spin_unlock(& (lo->plh_inode)->i_lock); tmp___4 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d<-- %s\n", "pnfs_choose_layoutget_stateid"); } else { } return (status); } } static struct pnfs_layout_segment *send_layoutget(struct pnfs_layout_hdr *lo , struct nfs_open_context *ctx , struct pnfs_layout_range *range , gfp_t gfp_flags ) { struct inode *ino ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs4_layoutget *lgp ; struct pnfs_layout_segment *lseg ; long tmp___0 ; void *tmp___1 ; long tmp___2 ; long tmp___3 ; { ino = lo->plh_inode; tmp = NFS_SERVER((struct inode const *)ino); server = tmp; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d--> %s\n", "send_layoutget"); } else { } tmp___1 = kzalloc(216UL, gfp_flags); lgp = (struct nfs4_layoutget *)tmp___1; if ((unsigned long )lgp == (unsigned long )((struct nfs4_layoutget *)0)) { return (0); } else { } lgp->args.minlength = 4096ULL; if (lgp->args.minlength > range->length) { lgp->args.minlength = range->length; } else { } lgp->args.maxcount = 4096U; lgp->args.range = *range; lgp->args.type = (server->pnfs_curr_ld)->id; lgp->args.inode = ino; lgp->args.ctx = get_nfs_open_context(ctx); lgp->gfp_flags = gfp_flags; lseg = nfs4_proc_layoutget(lgp, gfp_flags); tmp___3 = IS_ERR((void const *)lseg); if (tmp___3 != 0L) { tmp___2 = PTR_ERR((void const *)lseg); switch (tmp___2) { case -12L: ; case -512L: ; goto ldv_49717; default: pnfs_layout_io_set_failed(lo, range->iomode); } ldv_49717: ; return (0); } else { } return (lseg); } } int _pnfs_return_layout(struct inode *ino ) { struct pnfs_layout_hdr *lo ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct list_head tmp_list ; struct nfs4_layoutreturn *lrp ; nfs4_stateid stateid ; int status ; int empty ; long tmp___0 ; long tmp___1 ; long tmp___2 ; int __ret_warn_on ; int tmp___3 ; long tmp___4 ; void *tmp___5 ; long tmp___6 ; struct nfs_server *tmp___7 ; struct nfs_server *tmp___8 ; long tmp___9 ; { lo = 0; tmp = NFS_I((struct inode const *)ino); nfsi = tmp; tmp_list.next = & tmp_list; tmp_list.prev = & tmp_list; status = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001dNFS: %s for inode %lu\n", "_pnfs_return_layout", ino->i_ino); } else { } spin_lock(& ino->i_lock); lo = nfsi->layout; if ((unsigned long )lo == (unsigned long )((struct pnfs_layout_hdr *)0)) { spin_unlock(& ino->i_lock); tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001dNFS: %s no layout to return\n", 
"_pnfs_return_layout"); } else { } goto out; } else { } stateid = (nfsi->layout)->plh_stateid; pnfs_get_layout_hdr(lo); empty = list_empty((struct list_head const *)(& lo->plh_segs)); pnfs_mark_matching_lsegs_invalid(lo, & tmp_list, 0); if (empty != 0) { spin_unlock(& ino->i_lock); pnfs_put_layout_hdr(lo); tmp___2 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001dNFS: %s no layout segments to return\n", "_pnfs_return_layout"); } else { } goto out; } else { } lo->plh_block_lgets = lo->plh_block_lgets + 1UL; spin_unlock(& ino->i_lock); pnfs_free_lseg_list(& tmp_list); tmp___3 = constant_test_bit(9U, (unsigned long const volatile *)(& nfsi->flags)); __ret_warn_on = tmp___3 != 0; tmp___4 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___4 != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/pnfs.c.prepared", 777); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___5 = kzalloc(136UL, 208U); lrp = (struct nfs4_layoutreturn *)tmp___5; tmp___6 = ldv__builtin_expect((unsigned long )lrp == (unsigned long )((struct nfs4_layoutreturn *)0), 0L); if (tmp___6 != 0L) { status = -12; spin_lock(& ino->i_lock); lo->plh_block_lgets = lo->plh_block_lgets - 1UL; spin_unlock(& ino->i_lock); pnfs_put_layout_hdr(lo); goto out; } else { } lrp->args.stateid = stateid; tmp___7 = NFS_SERVER((struct inode const *)ino); lrp->args.layout_type = (tmp___7->pnfs_curr_ld)->id; lrp->args.inode = ino; lrp->args.layout = lo; tmp___8 = NFS_SERVER((struct inode const *)ino); lrp->clp = tmp___8->nfs_client; status = nfs4_proc_layoutreturn(lrp); out: tmp___9 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___9 != 0L) { printk("\001d<-- %s status: %d\n", "_pnfs_return_layout", status); } else { } return (status); } } bool pnfs_roc(struct inode *ino ) { struct pnfs_layout_hdr *lo ; struct pnfs_layout_segment *lseg ; struct pnfs_layout_segment *tmp ; struct list_head tmp_list ; bool found ; struct nfs_inode *tmp___0 ; int tmp___1 ; int tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; int tmp___3 ; struct list_head const *__mptr___1 ; { tmp_list.next = & tmp_list; tmp_list.prev = & tmp_list; found = 0; spin_lock(& ino->i_lock); tmp___0 = NFS_I((struct inode const *)ino); lo = tmp___0->layout; if ((unsigned long )lo == (unsigned long )((struct pnfs_layout_hdr *)0)) { goto out_nolayout; } else { tmp___1 = test_and_clear_bit(3, (unsigned long volatile *)(& lo->plh_flags)); if (tmp___1 == 0) { goto out_nolayout; } else { tmp___2 = constant_test_bit(2U, (unsigned long const volatile *)(& lo->plh_flags)); if (tmp___2 != 0) { goto out_nolayout; } else { } } } __mptr = (struct list_head const *)lo->plh_segs.next; lseg = (struct pnfs_layout_segment *)__mptr; __mptr___0 = (struct list_head const *)lseg->pls_list.next; tmp = (struct pnfs_layout_segment *)__mptr___0; goto ldv_49755; ldv_49754: tmp___3 = constant_test_bit(1U, (unsigned long const volatile *)(& lseg->pls_flags)); if (tmp___3 != 0) { mark_lseg_invalid(lseg, & tmp_list); found = 1; } else { } lseg = tmp; __mptr___1 = (struct list_head const *)tmp->pls_list.next; tmp = (struct pnfs_layout_segment *)__mptr___1; ldv_49755: ; if ((unsigned long )(& lseg->pls_list) != (unsigned long )(& lo->plh_segs)) { goto ldv_49754; } else { } if (! 
found) { goto out_nolayout; } else { } lo->plh_block_lgets = lo->plh_block_lgets + 1UL; pnfs_get_layout_hdr(lo); spin_unlock(& ino->i_lock); pnfs_free_lseg_list(& tmp_list); return (1); out_nolayout: spin_unlock(& ino->i_lock); return (0); } } void pnfs_roc_release(struct inode *ino ) { struct pnfs_layout_hdr *lo ; struct nfs_inode *tmp ; int tmp___0 ; { spin_lock(& ino->i_lock); tmp = NFS_I((struct inode const *)ino); lo = tmp->layout; lo->plh_block_lgets = lo->plh_block_lgets - 1UL; tmp___0 = atomic_dec_and_test(& lo->plh_refcount); if (tmp___0 != 0) { pnfs_detach_layout_hdr(lo); spin_unlock(& ino->i_lock); pnfs_free_layout_hdr(lo); } else { spin_unlock(& ino->i_lock); } return; } } void pnfs_roc_set_barrier(struct inode *ino , u32 barrier ) { struct pnfs_layout_hdr *lo ; struct nfs_inode *tmp ; bool tmp___0 ; { spin_lock(& ino->i_lock); tmp = NFS_I((struct inode const *)ino); lo = tmp->layout; tmp___0 = pnfs_seqid_is_newer(barrier, lo->plh_barrier); if ((int )tmp___0) { lo->plh_barrier = barrier; } else { } spin_unlock(& ino->i_lock); return; } } bool pnfs_roc_drain(struct inode *ino , u32 *barrier , struct rpc_task *task ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct pnfs_layout_hdr *lo ; struct pnfs_layout_segment *lseg ; u32 current_seqid ; bool found ; struct list_head const *__mptr ; struct nfs_server *tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; __u32 tmp___2 ; int tmp___3 ; { tmp = NFS_I((struct inode const *)ino); nfsi = tmp; found = 0; spin_lock(& ino->i_lock); __mptr = (struct list_head const *)(nfsi->layout)->plh_segs.next; lseg = (struct pnfs_layout_segment *)__mptr; goto ldv_49782; ldv_49781: tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& lseg->pls_flags)); if (tmp___1 != 0) { tmp___0 = NFS_SERVER((struct inode const *)ino); rpc_sleep_on(& tmp___0->roc_rpcwaitq, task, 0); found = 1; goto out; } else { } __mptr___0 = (struct list_head const *)lseg->pls_list.next; lseg = (struct pnfs_layout_segment *)__mptr___0; ldv_49782: ; if ((unsigned long )(& lseg->pls_list) != (unsigned long )(& (nfsi->layout)->plh_segs)) { goto ldv_49781; } else { } lo = nfsi->layout; tmp___2 = __fswab32(lo->plh_stateid.seqid); current_seqid = tmp___2; tmp___3 = atomic_read((atomic_t const *)(& lo->plh_outstanding)); *barrier = (u32 )tmp___3 + current_seqid; out: spin_unlock(& ino->i_lock); return (found); } } static s64 cmp_layout(struct pnfs_layout_range *l1 , struct pnfs_layout_range *l2 ) { s64 d ; { d = (s64 )(l1->offset - l2->offset); if (d != 0LL) { return (d); } else { } d = (s64 )(l2->length - l1->length); if (d != 0LL) { return (d); } else { } return ((s64 )((l1->iomode == 1U) - (l2->iomode == 1U))); } } static void pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo , struct pnfs_layout_segment *lseg ) { struct pnfs_layout_segment *lp ; long tmp ; struct list_head const *__mptr ; s64 tmp___0 ; long tmp___1 ; struct list_head const *__mptr___0 ; long tmp___2 ; long tmp___3 ; { tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s:Begin\n", "pnfs_layout_insert_lseg"); } else { } __mptr = (struct list_head const *)lo->plh_segs.next; lp = (struct pnfs_layout_segment *)__mptr; goto ldv_49802; ldv_49801: tmp___0 = cmp_layout(& lseg->pls_range, & lp->pls_range); if (tmp___0 > 0LL) { goto ldv_49799; } else { } list_add_tail(& lseg->pls_list, & lp->pls_list); tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s: inserted lseg %p iomode %d offset %llu length %llu before lp 
%p iomode %d offset %llu length %llu\n", "pnfs_layout_insert_lseg", lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length, lp, lp->pls_range.iomode, lp->pls_range.offset, lp->pls_range.length); } else { } goto out; ldv_49799: __mptr___0 = (struct list_head const *)lp->pls_list.next; lp = (struct pnfs_layout_segment *)__mptr___0; ldv_49802: ; if ((unsigned long )(& lp->pls_list) != (unsigned long )(& lo->plh_segs)) { goto ldv_49801; } else { } list_add_tail(& lseg->pls_list, & lo->plh_segs); tmp___2 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: inserted lseg %p iomode %d offset %llu length %llu at tail\n", "pnfs_layout_insert_lseg", lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); } else { } out: pnfs_get_layout_hdr(lo); tmp___3 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s:Return\n", "pnfs_layout_insert_lseg"); } else { } return; } } static struct pnfs_layout_hdr *alloc_init_layout_hdr(struct inode *ino , struct nfs_open_context *ctx , gfp_t gfp_flags ) { struct pnfs_layout_hdr *lo ; { lo = pnfs_alloc_layout_hdr(ino, gfp_flags); if ((unsigned long )lo == (unsigned long )((struct pnfs_layout_hdr *)0)) { return (0); } else { } atomic_set(& lo->plh_refcount, 1); INIT_LIST_HEAD(& lo->plh_layouts); INIT_LIST_HEAD(& lo->plh_segs); INIT_LIST_HEAD(& lo->plh_bulk_recall); lo->plh_inode = ino; lo->plh_lc_cred = get_rpccred(((ctx->state)->owner)->so_cred); return (lo); } } static struct pnfs_layout_hdr *pnfs_find_alloc_layout(struct inode *ino , struct nfs_open_context *ctx , gfp_t gfp_flags ) { struct nfs_inode *nfsi ; struct nfs_inode *tmp ; struct pnfs_layout_hdr *new ; long tmp___0 ; long tmp___1 ; { tmp = NFS_I((struct inode const *)ino); nfsi = tmp; new = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s Begin ino=%p layout=%p\n", "pnfs_find_alloc_layout", ino, nfsi->layout); } else { } if ((unsigned long )nfsi->layout != (unsigned long )((struct pnfs_layout_hdr *)0)) { goto out_existing; } else { } spin_unlock(& ino->i_lock); new = alloc_init_layout_hdr(ino, ctx, gfp_flags); spin_lock(& ino->i_lock); tmp___1 = ldv__builtin_expect((unsigned long )nfsi->layout == (unsigned long )((struct pnfs_layout_hdr *)0), 1L); if (tmp___1 != 0L) { nfsi->layout = new; return (new); } else if ((unsigned long )new != (unsigned long )((struct pnfs_layout_hdr *)0)) { pnfs_free_layout_hdr(new); } else { } out_existing: pnfs_get_layout_hdr(nfsi->layout); return (nfsi->layout); } } static int is_matching_lseg(struct pnfs_layout_range *ls_range , struct pnfs_layout_range *range ) { struct pnfs_layout_range range1 ; int tmp ; int tmp___0 ; { if (range->iomode == 2U && ls_range->iomode != 2U) { return (0); } else { tmp = lo_seg_intersecting(ls_range, range); if (tmp == 0) { return (0); } else { } } range1 = *range; range1.length = 1ULL; tmp___0 = lo_seg_contained(ls_range, & range1); return (tmp___0); } } static struct pnfs_layout_segment *pnfs_find_lseg(struct pnfs_layout_hdr *lo , struct pnfs_layout_range *range ) { struct pnfs_layout_segment *lseg ; struct pnfs_layout_segment *ret ; long tmp ; struct list_head const *__mptr ; int tmp___0 ; int tmp___1 ; struct list_head const *__mptr___0 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; { ret = 0; tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s:Begin\n", "pnfs_find_lseg"); } else { } __mptr = (struct list_head const *)lo->plh_segs.next; 
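/* walk the offset-sorted plh_segs list for a valid segment that matches the requested range; stop once segments start beyond range->offset */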
lseg = (struct pnfs_layout_segment *)__mptr; goto ldv_49837; ldv_49836: tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& lseg->pls_flags)); if (tmp___0 != 0) { tmp___1 = is_matching_lseg(& lseg->pls_range, range); if (tmp___1 != 0) { ret = pnfs_get_lseg(lseg); goto ldv_49835; } else { } } else { } if (lseg->pls_range.offset > range->offset) { goto ldv_49835; } else { } __mptr___0 = (struct list_head const *)lseg->pls_list.next; lseg = (struct pnfs_layout_segment *)__mptr___0; ldv_49837: ; if ((unsigned long )(& lseg->pls_list) != (unsigned long )(& lo->plh_segs)) { goto ldv_49836; } else { } ldv_49835: tmp___4 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___4 != 0L) { if ((unsigned long )ret != (unsigned long )((struct pnfs_layout_segment *)0)) { tmp___2 = atomic_read((atomic_t const *)(& ret->pls_refcount)); tmp___3 = tmp___2; } else { tmp___3 = 0; } printk("\001d%s:Return lseg %p ref %d\n", "pnfs_find_lseg", ret, tmp___3); } else { } return (ret); } } static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx , struct inode *ino , int iomode ) { struct nfs4_threshold *t ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; loff_t fsize ; loff_t tmp___0 ; bool size ; bool size_set ; bool io ; bool io_set ; bool ret ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; long tmp___5 ; long tmp___6 ; { t = ctx->mdsthreshold; tmp = NFS_I((struct inode const *)ino); nfsi = tmp; tmp___0 = i_size_read((struct inode const *)ino); fsize = tmp___0; size = 0; size_set = 0; io = 0; io_set = 0; ret = 0; if ((unsigned long )t == (unsigned long )((struct nfs4_threshold *)0)) { return (ret); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n", "pnfs_within_mdsthreshold", t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz); } else { } switch (iomode) { case 1: ; if ((int )t->bm & 1) { tmp___2 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s fsize %llu\n", "pnfs_within_mdsthreshold", fsize); } else { } size_set = 1; if ((unsigned long long )fsize < t->rd_sz) { size = 1; } else { } } else { } if (((unsigned long )t->bm & 4UL) != 0UL) { tmp___3 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s nfsi->read_io %llu\n", "pnfs_within_mdsthreshold", nfsi->read_io); } else { } io_set = 1; if (nfsi->read_io < t->rd_io_sz) { io = 1; } else { } } else { } goto ldv_49853; case 2: ; if (((unsigned long )t->bm & 2UL) != 0UL) { tmp___4 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s fsize %llu\n", "pnfs_within_mdsthreshold", fsize); } else { } size_set = 1; if ((unsigned long long )fsize < t->wr_sz) { size = 1; } else { } } else { } if (((unsigned long )t->bm & 8UL) != 0UL) { tmp___5 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___5 != 0L) { printk("\001d%s nfsi->write_io %llu\n", "pnfs_within_mdsthreshold", nfsi->write_io); } else { } io_set = 1; if (nfsi->write_io < t->wr_io_sz) { io = 1; } else { } } else { } goto ldv_49853; } ldv_49853: ; if ((int )size_set && (int )io_set) { if ((int )size && (int )io) { ret = 1; } else if ((int )size || (int )io) { ret = 1; } else { } } else { } tmp___6 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001d<-- %s size %d io %d ret %d\n", "pnfs_within_mdsthreshold", (int )size, (int )io, (int )ret); } else { } return (ret); } } struct pnfs_layout_segment 
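/* pnfs_update_layout(): obtain a layout segment for an I/O. Falls back to MDS I/O when pNFS is disabled, the mdsthreshold says to stay on the MDS, the layout was recalled, or this iomode recently failed; otherwise a cached matching segment is reused or a page-aligned LAYOUTGET is sent. */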
*pnfs_update_layout(struct inode *ino , struct nfs_open_context *ctx , loff_t pos , u64 count , enum pnfs_iomode iomode , gfp_t gfp_flags ) { struct pnfs_layout_range arg ; unsigned int pg_offset ; struct nfs_server *server ; struct nfs_server *tmp ; struct nfs_client *clp ; struct pnfs_layout_hdr *lo ; struct pnfs_layout_segment *lseg ; bool first ; struct nfs_server *tmp___0 ; int tmp___1 ; bool tmp___2 ; long tmp___3 ; int tmp___4 ; bool tmp___5 ; bool tmp___6 ; int tmp___7 ; __u64 tmp___8 ; long tmp___9 ; { arg.iomode = iomode; arg.offset = (unsigned long long )pos; arg.length = count; tmp = NFS_SERVER((struct inode const *)ino); server = tmp; clp = server->nfs_client; lseg = 0; first = 0; tmp___0 = NFS_SERVER((struct inode const *)ino); tmp___1 = pnfs_enabled_sb(tmp___0); if (tmp___1 == 0) { goto out; } else { } tmp___2 = pnfs_within_mdsthreshold(ctx, ino, (int )iomode); if ((int )tmp___2) { goto out; } else { } spin_lock(& ino->i_lock); lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); if ((unsigned long )lo == (unsigned long )((struct pnfs_layout_hdr *)0)) { spin_unlock(& ino->i_lock); goto out; } else { } tmp___4 = constant_test_bit(2U, (unsigned long const volatile *)(& lo->plh_flags)); if (tmp___4 != 0) { tmp___3 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s matches recall, use MDS\n", "pnfs_update_layout"); } else { } goto out_unlock; } else { } tmp___5 = pnfs_layout_io_test_failed(lo, (u32 )iomode); if ((int )tmp___5) { goto out_unlock; } else { } lseg = pnfs_find_lseg(lo, & arg); if ((unsigned long )lseg != (unsigned long )((struct pnfs_layout_segment *)0)) { goto out_unlock; } else { } tmp___6 = pnfs_layoutgets_blocked((struct pnfs_layout_hdr const *)lo, 0); if ((int )tmp___6) { goto out_unlock; } else { } atomic_inc(& lo->plh_outstanding); tmp___7 = list_empty((struct list_head const *)(& lo->plh_segs)); if (tmp___7 != 0) { first = 1; } else { } spin_unlock(& ino->i_lock); if ((int )first) { spin_lock(& clp->cl_lock); list_add_tail(& lo->plh_layouts, & server->layouts); spin_unlock(& clp->cl_lock); } else { } pg_offset = (unsigned int )arg.offset & 4095U; if (pg_offset != 0U) { arg.offset = arg.offset - (u64 )pg_offset; arg.length = arg.length + (u64 )pg_offset; } else { } if (arg.length != 0xffffffffffffffffULL) { arg.length = (arg.length + 4095ULL) & 0xfffffffffffff000ULL; } else { } lseg = send_layoutget(lo, ctx, & arg, gfp_flags); atomic_dec(& lo->plh_outstanding); out_put_layout_hdr: pnfs_put_layout_hdr(lo); out: tmp___9 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___9 != 0L) { tmp___8 = NFS_FILEID((struct inode const *)ino); printk("\001d%s: inode %s/%llu pNFS layout segment %s for (%s, offset: %llu, length: %llu)\n", "pnfs_update_layout", (char *)(& (ino->i_sb)->s_id), tmp___8, (unsigned long )lseg == (unsigned long )((struct pnfs_layout_segment *)0) ? (char *)"not found" : (char *)"found", (unsigned int )iomode == 2U ? 
(char *)"read/write" : (char *)"read-only", (unsigned long long )pos, count); } else { } return (lseg); out_unlock: spin_unlock(& ino->i_lock); goto out_put_layout_hdr; } } struct pnfs_layout_segment *pnfs_layout_process(struct nfs4_layoutget *lgp ) { struct pnfs_layout_hdr *lo ; struct nfs_inode *tmp ; struct nfs4_layoutget_res *res ; struct pnfs_layout_segment *lseg ; struct inode *ino ; int status ; struct nfs_server *tmp___0 ; long tmp___1 ; long tmp___2 ; long tmp___3 ; long tmp___4 ; int tmp___5 ; long tmp___6 ; bool tmp___7 ; bool tmp___8 ; void *tmp___9 ; struct nfs_server *tmp___10 ; { tmp = NFS_I((struct inode const *)lgp->args.inode); lo = tmp->layout; res = & lgp->res; ino = lo->plh_inode; status = 0; tmp___0 = NFS_SERVER((struct inode const *)ino); lseg = (*((tmp___0->pnfs_curr_ld)->alloc_lseg))(lo, res, lgp->gfp_flags); if ((unsigned long )lseg == (unsigned long )((struct pnfs_layout_segment *)0)) { goto _L; } else { tmp___3 = IS_ERR((void const *)lseg); if (tmp___3 != 0L) { _L: /* CIL Label */ if ((unsigned long )lseg == (unsigned long )((struct pnfs_layout_segment *)0)) { status = -12; } else { tmp___1 = PTR_ERR((void const *)lseg); status = (int )tmp___1; } tmp___2 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___2 != 0L) { printk("\001d%s: Could not allocate layout: error %d\n", "pnfs_layout_process", status); } else { } goto out; } else { } } spin_lock(& ino->i_lock); tmp___5 = constant_test_bit(2U, (unsigned long const volatile *)(& lo->plh_flags)); if (tmp___5 != 0) { tmp___4 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___4 != 0L) { printk("\001d%s forget reply due to recall\n", "pnfs_layout_process"); } else { } goto out_forget_reply; } else { } tmp___7 = pnfs_layoutgets_blocked((struct pnfs_layout_hdr const *)lo, 1); if ((int )tmp___7) { goto _L___0; } else { tmp___8 = pnfs_layout_stateid_blocked((struct pnfs_layout_hdr const *)lo, (nfs4_stateid const *)(& res->stateid)); if ((int )tmp___8) { _L___0: /* CIL Label */ tmp___6 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___6 != 0L) { printk("\001d%s forget reply due to state\n", "pnfs_layout_process"); } else { } goto out_forget_reply; } else { } } pnfs_set_layout_stateid(lo, (nfs4_stateid const *)(& res->stateid), 0); init_lseg(lo, lseg); lseg->pls_range = res->range; pnfs_get_lseg(lseg); pnfs_layout_insert_lseg(lo, lseg); if (res->return_on_close != 0U) { set_bit(1U, (unsigned long volatile *)(& lseg->pls_flags)); set_bit(3U, (unsigned long volatile *)(& lo->plh_flags)); } else { } spin_unlock(& ino->i_lock); return (lseg); out: tmp___9 = ERR_PTR((long )status); return ((struct pnfs_layout_segment *)tmp___9); out_forget_reply: spin_unlock(& ino->i_lock); lseg->pls_layout = lo; tmp___10 = NFS_SERVER((struct inode const *)ino); (*((tmp___10->pnfs_curr_ld)->free_lseg))(lseg); goto out; } } void pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio , struct nfs_page *req ) { u64 rd_size ; bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; loff_t tmp___2 ; loff_t tmp___3 ; ssize_t tmp___4 ; loff_t tmp___5 ; { rd_size = (u64 )req->wb_bytes; __ret_warn_once = (unsigned long )pgio->pg_lseg != (unsigned long )((struct pnfs_layout_segment *)0); tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! 
__warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/pnfs.c.prepared", 1276); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); if (req->wb_offset != req->wb_pgbase) { nfs_pageio_reset_read_mds(pgio); return; } else { } if ((unsigned long )pgio->pg_dreq == (unsigned long )((struct nfs_direct_req *)0)) { tmp___2 = i_size_read((struct inode const *)pgio->pg_inode); tmp___3 = req_offset(req); rd_size = (u64 )(tmp___2 - tmp___3); } else { tmp___4 = nfs_dreq_bytes_left(pgio->pg_dreq); rd_size = (u64 )tmp___4; } tmp___5 = req_offset(req); pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, req->wb_context, tmp___5, rd_size, 1, 208U); if ((unsigned long )pgio->pg_lseg == (unsigned long )((struct pnfs_layout_segment *)0)) { nfs_pageio_reset_read_mds(pgio); } else { } return; } } void pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio , struct nfs_page *req , u64 wb_size ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; loff_t tmp___2 ; { __ret_warn_once = (unsigned long )pgio->pg_lseg != (unsigned long )((struct pnfs_layout_segment *)0); tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/home/mikhail/launches/cpachecker-regression2/launcher-working-dir/ldv-manager-work-dir/work/current--X--fs/nfs/nfsv4.ko--X--regression-testlinux-3.8-rc1--X--32_7a--X--cpachecker/linux-3.8-rc1/csd_deg_dscv/58/dscv_tempdir/dscv/ri/32_7a/fs/nfs/pnfs.c.prepared", 1305); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); if (req->wb_offset != req->wb_pgbase) { nfs_pageio_reset_write_mds(pgio); return; } else { } tmp___2 = req_offset(req); pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, req->wb_context, tmp___2, wb_size, 2, 80U); if ((unsigned long )pgio->pg_lseg == (unsigned long )((struct pnfs_layout_segment *)0)) { nfs_pageio_reset_write_mds(pgio); } else { } return; } } void pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio , struct inode *inode , struct nfs_pgio_completion_ops const *compl_ops ) { struct nfs_server *server ; struct nfs_server *tmp ; struct pnfs_layoutdriver_type *ld ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; ld = server->pnfs_curr_ld; if ((unsigned long )ld == (unsigned long )((struct pnfs_layoutdriver_type *)0)) { nfs_pageio_init_read(pgio, inode, compl_ops); } else { nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, (size_t )server->rsize, 0); } return; } } void pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio , struct inode *inode , int ioflags , struct nfs_pgio_completion_ops const *compl_ops ) { struct nfs_server *server ; struct nfs_server *tmp ; struct pnfs_layoutdriver_type *ld ; { tmp = NFS_SERVER((struct inode const *)inode); server = tmp; ld = server->pnfs_curr_ld; if ((unsigned long )ld == (unsigned long )((struct pnfs_layoutdriver_type *)0)) { nfs_pageio_init_write(pgio, inode, ioflags, compl_ops); } else { 
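/* a layout driver is attached: initialize the descriptor with the driver's pg_write_ops and the server wsize */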
nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, (size_t )server->wsize, ioflags); } return; } } bool pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio , struct nfs_page *prev , struct nfs_page *req ) { bool tmp ; loff_t tmp___0 ; u64 tmp___1 ; { if ((unsigned long )pgio->pg_lseg == (unsigned long )((struct pnfs_layout_segment *)0)) { tmp = nfs_generic_pg_test(pgio, prev, req); return (tmp); } else { } tmp___0 = req_offset(req); tmp___1 = end_offset((pgio->pg_lseg)->pls_range.offset, (pgio->pg_lseg)->pls_range.length); return ((unsigned long long )tmp___0 < tmp___1); } } int pnfs_write_done_resend_to_mds(struct inode *inode , struct list_head *head , struct nfs_pgio_completion_ops const *compl_ops ) { struct nfs_pageio_descriptor pgio ; struct list_head failed ; struct nfs_page *req ; struct nfs_page *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { failed.next = & failed; failed.prev = & failed; nfs_pageio_init_write(& pgio, inode, 4, compl_ops); goto ldv_49968; ldv_49967: tmp = nfs_list_entry(head->next); req = tmp; nfs_list_remove_request(req); tmp___0 = nfs_pageio_add_request(& pgio, req); if (tmp___0 == 0) { nfs_list_add_request(req, & failed); } else { } ldv_49968: tmp___1 = list_empty((struct list_head const *)head); if (tmp___1 == 0) { goto ldv_49967; } else { } nfs_pageio_complete(& pgio); tmp___2 = list_empty((struct list_head const *)(& failed)); if (tmp___2 == 0) { list_move(& failed, head); return (-5); } else { } return (0); } } static void pnfs_ld_handle_write_error(struct nfs_write_data *data ) { struct nfs_pgio_header *hdr ; long tmp ; struct nfs_inode *tmp___0 ; struct nfs_server *tmp___1 ; int tmp___2 ; { hdr = data->header; tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001dpnfs write error = %d\n", hdr->pnfs_error); } else { } tmp___1 = NFS_SERVER((struct inode const *)hdr->inode); if (((tmp___1->pnfs_curr_ld)->flags & 2U) != 0U) { tmp___0 = NFS_I((struct inode const *)hdr->inode); clear_bit(9, (unsigned long volatile *)(& tmp___0->flags)); pnfs_return_layout(hdr->inode); } else { } tmp___2 = test_and_set_bit(2, (unsigned long volatile *)(& hdr->flags)); if (tmp___2 == 0) { data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode, & hdr->pages, hdr->completion_ops); } else { } return; } } void pnfs_ld_write_done(struct nfs_write_data *data ) { struct nfs_pgio_header *hdr ; { hdr = data->header; if (hdr->pnfs_error == 0) { pnfs_set_layoutcommit(data); (*((hdr->mds_ops)->rpc_call_done))(& data->task, (void *)data); } else { pnfs_ld_handle_write_error(data); } (*((hdr->mds_ops)->rpc_release))((void *)data); return; } } static void pnfs_write_through_mds(struct nfs_pageio_descriptor *desc , struct nfs_write_data *data ) { struct nfs_pgio_header *hdr ; int tmp ; { hdr = data->header; tmp = test_and_set_bit(2, (unsigned long volatile *)(& hdr->flags)); if (tmp == 0) { list_splice_tail_init(& hdr->pages, & desc->pg_list); nfs_pageio_reset_write_mds(desc); desc->pg_recoalesce = 1U; } else { } nfs_writedata_release(data); return; } } static enum pnfs_try_status pnfs_try_to_write_data(struct nfs_write_data *wdata , struct rpc_call_ops const *call_ops , struct pnfs_layout_segment *lseg , int how ) { struct nfs_pgio_header *hdr ; struct inode *inode ; enum pnfs_try_status trypnfs ; struct nfs_server *nfss ; struct nfs_server *tmp ; long tmp___0 ; long tmp___1 ; { hdr = wdata->header; inode = hdr->inode; tmp = NFS_SERVER((struct inode const *)inode); nfss = tmp; hdr->mds_ops = call_ops; tmp___0 = 
ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: Writing ino:%lu %u@%llu (how %d)\n", "pnfs_try_to_write_data", inode->i_ino, wdata->args.count, wdata->args.offset, how); } else { } trypnfs = (*((nfss->pnfs_curr_ld)->write_pagelist))(wdata, how); if ((unsigned int )trypnfs != 1U) { nfs_inc_stats((struct inode const *)inode, 26); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s End (trypnfs:%d)\n", "pnfs_try_to_write_data", (unsigned int )trypnfs); } else { } return (trypnfs); } } static void pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc , struct list_head *head , int how ) { struct nfs_write_data *data ; struct rpc_call_ops const *call_ops ; struct pnfs_layout_segment *lseg ; enum pnfs_try_status trypnfs ; struct list_head const *__mptr ; int tmp ; { call_ops = desc->pg_rpc_callops; lseg = desc->pg_lseg; desc->pg_lseg = 0; goto ldv_50020; ldv_50019: __mptr = (struct list_head const *)head->next; data = (struct nfs_write_data *)__mptr + 0xfffffffffffffff8UL; list_del_init(& data->list); trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how); if ((unsigned int )trypnfs == 1U) { pnfs_write_through_mds(desc, data); } else { } ldv_50020: tmp = list_empty((struct list_head const *)head); if (tmp == 0) { goto ldv_50019; } else { } pnfs_put_lseg(lseg); return; } } static void pnfs_writehdr_free(struct nfs_pgio_header *hdr ) { { pnfs_put_lseg(hdr->lseg); nfs_writehdr_free(hdr); return; } } int pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc ) { struct nfs_write_header *whdr ; struct nfs_pgio_header *hdr ; int ret ; int tmp ; { whdr = nfs_writehdr_alloc(); if ((unsigned long )whdr == (unsigned long )((struct nfs_write_header *)0)) { (*((desc->pg_completion_ops)->error_cleanup))(& desc->pg_list); pnfs_put_lseg(desc->pg_lseg); desc->pg_lseg = 0; return (-12); } else { } hdr = & whdr->header; nfs_pgheader_init(desc, hdr, & pnfs_writehdr_free); hdr->lseg = pnfs_get_lseg(desc->pg_lseg); atomic_inc(& hdr->refcnt); ret = nfs_generic_flush(desc, hdr); if (ret != 0) { pnfs_put_lseg(desc->pg_lseg); desc->pg_lseg = 0; } else { pnfs_do_multiple_writes(desc, & hdr->rpc_list, desc->pg_ioflags); } tmp = atomic_dec_and_test(& hdr->refcnt); if (tmp != 0) { (*((hdr->completion_ops)->completion))(hdr); } else { } return (ret); } } int pnfs_read_done_resend_to_mds(struct inode *inode , struct list_head *head , struct nfs_pgio_completion_ops const *compl_ops ) { struct nfs_pageio_descriptor pgio ; struct list_head failed ; struct nfs_page *req ; struct nfs_page *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { failed.next = & failed; failed.prev = & failed; nfs_pageio_init_read(& pgio, inode, compl_ops); goto ldv_50052; ldv_50051: tmp = nfs_list_entry(head->next); req = tmp; nfs_list_remove_request(req); tmp___0 = nfs_pageio_add_request(& pgio, req); if (tmp___0 == 0) { nfs_list_add_request(req, & failed); } else { } ldv_50052: tmp___1 = list_empty((struct list_head const *)head); if (tmp___1 == 0) { goto ldv_50051; } else { } nfs_pageio_complete(& pgio); tmp___2 = list_empty((struct list_head const *)(& failed)); if (tmp___2 == 0) { list_move(& failed, head); return (-5); } else { } return (0); } } static void pnfs_ld_handle_read_error(struct nfs_read_data *data ) { struct nfs_pgio_header *hdr ; long tmp ; struct nfs_inode *tmp___0 ; struct nfs_server *tmp___1 ; int tmp___2 ; { hdr = data->header; tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { 
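/* pnfs_ld_handle_read_error(): log the error, return the layout if the driver's flags request it (flags & 2), and resend the pages through the MDS unless a resend was already triggered */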
printk("\001dpnfs read error = %d\n", hdr->pnfs_error); } else { } tmp___1 = NFS_SERVER((struct inode const *)hdr->inode); if (((tmp___1->pnfs_curr_ld)->flags & 2U) != 0U) { tmp___0 = NFS_I((struct inode const *)hdr->inode); clear_bit(9, (unsigned long volatile *)(& tmp___0->flags)); pnfs_return_layout(hdr->inode); } else { } tmp___2 = test_and_set_bit(2, (unsigned long volatile *)(& hdr->flags)); if (tmp___2 == 0) { data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode, & hdr->pages, hdr->completion_ops); } else { } return; } } void pnfs_ld_read_done(struct nfs_read_data *data ) { struct nfs_pgio_header *hdr ; long tmp ; { hdr = data->header; tmp = ldv__builtin_expect(hdr->pnfs_error == 0, 1L); if (tmp != 0L) { __nfs4_read_done_cb(data); (*((hdr->mds_ops)->rpc_call_done))(& data->task, (void *)data); } else { pnfs_ld_handle_read_error(data); } (*((hdr->mds_ops)->rpc_release))((void *)data); return; } } static void pnfs_read_through_mds(struct nfs_pageio_descriptor *desc , struct nfs_read_data *data ) { struct nfs_pgio_header *hdr ; int tmp ; { hdr = data->header; tmp = test_and_set_bit(2, (unsigned long volatile *)(& hdr->flags)); if (tmp == 0) { list_splice_tail_init(& hdr->pages, & desc->pg_list); nfs_pageio_reset_read_mds(desc); desc->pg_recoalesce = 1U; } else { } nfs_readdata_release(data); return; } } static enum pnfs_try_status pnfs_try_to_read_data(struct nfs_read_data *rdata , struct rpc_call_ops const *call_ops , struct pnfs_layout_segment *lseg ) { struct nfs_pgio_header *hdr ; struct inode *inode ; struct nfs_server *nfss ; struct nfs_server *tmp ; enum pnfs_try_status trypnfs ; long tmp___0 ; long tmp___1 ; { hdr = rdata->header; inode = hdr->inode; tmp = NFS_SERVER((struct inode const *)inode); nfss = tmp; hdr->mds_ops = call_ops; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: Reading ino:%lu %u@%llu\n", "pnfs_try_to_read_data", inode->i_ino, rdata->args.count, rdata->args.offset); } else { } trypnfs = (*((nfss->pnfs_curr_ld)->read_pagelist))(rdata); if ((unsigned int )trypnfs != 1U) { nfs_inc_stats((struct inode const *)inode, 25); } else { } tmp___1 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___1 != 0L) { printk("\001d%s End (trypnfs:%d)\n", "pnfs_try_to_read_data", (unsigned int )trypnfs); } else { } return (trypnfs); } } static void pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc , struct list_head *head ) { struct nfs_read_data *data ; struct rpc_call_ops const *call_ops ; struct pnfs_layout_segment *lseg ; enum pnfs_try_status trypnfs ; struct list_head const *__mptr ; int tmp ; { call_ops = desc->pg_rpc_callops; lseg = desc->pg_lseg; desc->pg_lseg = 0; goto ldv_50102; ldv_50101: __mptr = (struct list_head const *)head->next; data = (struct nfs_read_data *)__mptr + 0xfffffffffffffff8UL; list_del_init(& data->list); trypnfs = pnfs_try_to_read_data(data, call_ops, lseg); if ((unsigned int )trypnfs == 1U) { pnfs_read_through_mds(desc, data); } else { } ldv_50102: tmp = list_empty((struct list_head const *)head); if (tmp == 0) { goto ldv_50101; } else { } pnfs_put_lseg(lseg); return; } } static void pnfs_readhdr_free(struct nfs_pgio_header *hdr ) { { pnfs_put_lseg(hdr->lseg); nfs_readhdr_free(hdr); return; } } int pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc ) { struct nfs_read_header *rhdr ; struct nfs_pgio_header *hdr ; int ret ; int tmp ; { rhdr = nfs_readhdr_alloc(); if ((unsigned long )rhdr == (unsigned long )((struct nfs_read_header *)0)) { 
(*((desc->pg_completion_ops)->error_cleanup))(& desc->pg_list); ret = -12; pnfs_put_lseg(desc->pg_lseg); desc->pg_lseg = 0; return (ret); } else { } hdr = & rhdr->header; nfs_pgheader_init(desc, hdr, & pnfs_readhdr_free); hdr->lseg = pnfs_get_lseg(desc->pg_lseg); atomic_inc(& hdr->refcnt); ret = nfs_generic_pagein(desc, hdr); if (ret != 0) { pnfs_put_lseg(desc->pg_lseg); desc->pg_lseg = 0; } else { pnfs_do_multiple_reads(desc, & hdr->rpc_list); } tmp = atomic_dec_and_test(& hdr->refcnt); if (tmp != 0) { (*((hdr->completion_ops)->completion))(hdr); } else { } return (ret); } } static void pnfs_list_write_lseg(struct inode *inode , struct list_head *listp ) { struct pnfs_layout_segment *lseg ; struct list_head const *__mptr ; struct nfs_inode *tmp ; int tmp___0 ; struct list_head const *__mptr___0 ; struct nfs_inode *tmp___1 ; { tmp = NFS_I((struct inode const *)inode); __mptr = (struct list_head const *)(tmp->layout)->plh_segs.next; lseg = (struct pnfs_layout_segment *)__mptr; goto ldv_50135; ldv_50134: ; if (lseg->pls_range.iomode == 2U) { tmp___0 = constant_test_bit(2U, (unsigned long const volatile *)(& lseg->pls_flags)); if (tmp___0 != 0) { list_add(& lseg->pls_lc_list, listp); } else { } } else { } __mptr___0 = (struct list_head const *)lseg->pls_list.next; lseg = (struct pnfs_layout_segment *)__mptr___0; ldv_50135: tmp___1 = NFS_I((struct inode const *)inode); if ((unsigned long )(& lseg->pls_list) != (unsigned long )(& (tmp___1->layout)->plh_segs)) { goto ldv_50134; } else { } return; } } void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg ) { { pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode); return; } } void pnfs_set_layoutcommit(struct nfs_write_data *wdata ) { struct nfs_pgio_header *hdr ; struct inode *inode ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; loff_t end_pos ; bool mark_as_dirty ; long tmp___0 ; int tmp___1 ; int tmp___2 ; long tmp___3 ; { hdr = wdata->header; inode = hdr->inode; tmp = NFS_I((struct inode const *)inode); nfsi = tmp; end_pos = (loff_t )(wdata->mds_offset + (__u64 )wdata->res.count); mark_as_dirty = 0; spin_lock(& inode->i_lock); tmp___1 = test_and_set_bit(9, (unsigned long volatile *)(& nfsi->flags)); if (tmp___1 == 0) { mark_as_dirty = 1; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s: Set layoutcommit for inode %lu ", "pnfs_set_layoutcommit", inode->i_ino); } else { } } else { } tmp___2 = test_and_set_bit(2, (unsigned long volatile *)(& (hdr->lseg)->pls_flags)); if (tmp___2 == 0) { pnfs_get_lseg(hdr->lseg); } else { } if ((nfsi->layout)->plh_lwb < end_pos) { (nfsi->layout)->plh_lwb = end_pos; } else { } spin_unlock(& inode->i_lock); tmp___3 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___3 != 0L) { printk("\001d%s: lseg %p end_pos %llu\n", "pnfs_set_layoutcommit", hdr->lseg, (nfsi->layout)->plh_lwb); } else { } if ((int )mark_as_dirty) { mark_inode_dirty_sync(inode); } else { } return; } } void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data ) { struct nfs_server *nfss ; struct nfs_server *tmp ; { tmp = NFS_SERVER((struct inode const *)data->args.inode); nfss = tmp; if ((unsigned long )(nfss->pnfs_curr_ld)->cleanup_layoutcommit != (unsigned long )((void (*)(struct nfs4_layoutcommit_data * ))0)) { (*((nfss->pnfs_curr_ld)->cleanup_layoutcommit))(data); } else { } return; } } int pnfs_layoutcommit_inode(struct inode *inode , bool sync ) { struct nfs4_layoutcommit_data *data ; struct nfs_inode *nfsi ; struct nfs_inode *tmp ; loff_t end_pos ; int 
status ; long tmp___0 ; int tmp___1 ; void *tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; struct nfs_server *tmp___6 ; struct nfs_server *tmp___7 ; long tmp___8 ; { tmp = NFS_I((struct inode const *)inode); nfsi = tmp; status = 0; tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d--> %s inode %lu\n", "pnfs_layoutcommit_inode", inode->i_ino); } else { } tmp___1 = constant_test_bit(9U, (unsigned long const volatile *)(& nfsi->flags)); if (tmp___1 == 0) { return (0); } else { } tmp___2 = kzalloc(592UL, 80U); data = (struct nfs4_layoutcommit_data *)tmp___2; if ((unsigned long )data == (unsigned long )((struct nfs4_layoutcommit_data *)0)) { status = -12; goto out; } else { } tmp___3 = constant_test_bit(9U, (unsigned long const volatile *)(& nfsi->flags)); if (tmp___3 == 0) { goto out_free; } else { } tmp___4 = test_and_set_bit(10, (unsigned long volatile *)(& nfsi->flags)); if (tmp___4 != 0) { if (! sync) { status = -11; goto out_free; } else { } status = wait_on_bit_lock((void *)(& nfsi->flags), 10, & nfs_wait_bit_killable, 130U); if (status != 0) { goto out_free; } else { } } else { } INIT_LIST_HEAD(& data->lseg_list); spin_lock(& inode->i_lock); tmp___5 = test_and_clear_bit(9, (unsigned long volatile *)(& nfsi->flags)); if (tmp___5 == 0) { clear_bit(10, (unsigned long volatile *)(& nfsi->flags)); spin_unlock(& inode->i_lock); wake_up_bit((void *)(& nfsi->flags), 10); goto out_free; } else { } pnfs_list_write_lseg(inode, & data->lseg_list); end_pos = (nfsi->layout)->plh_lwb; (nfsi->layout)->plh_lwb = 0LL; nfs4_stateid_copy(& data->args.stateid, (nfs4_stateid const *)(& (nfsi->layout)->plh_stateid)); spin_unlock(& inode->i_lock); data->args.inode = inode; data->cred = get_rpccred((nfsi->layout)->plh_lc_cred); nfs_fattr_init(& data->fattr); tmp___6 = NFS_SERVER((struct inode const *)inode); data->args.bitmask = (u32 const *)(& tmp___6->cache_consistency_bitmask); data->res.fattr = & data->fattr; data->args.lastbytewritten = (__u64 )(end_pos + -1LL); tmp___7 = NFS_SERVER((struct inode const *)inode); data->res.server = (struct nfs_server const *)tmp___7; status = nfs4_proc_layoutcommit(data, (int )sync); out: ; if (status != 0) { mark_inode_dirty_sync(inode); } else { } tmp___8 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___8 != 0L) { printk("\001d<-- %s status %d\n", "pnfs_layoutcommit_inode", status); } else { } return (status); out_free: kfree((void const *)data); goto out; } } struct nfs4_threshold *pnfs_mdsthreshold_alloc(void) { struct nfs4_threshold *thp ; void *tmp ; long tmp___0 ; { tmp = kzalloc(40UL, 80U); thp = (struct nfs4_threshold *)tmp; if ((unsigned long )thp == (unsigned long )((struct nfs4_threshold *)0)) { tmp___0 = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp___0 != 0L) { printk("\001d%s mdsthreshold allocation failed\n", "pnfs_mdsthreshold_alloc"); } else { } return (0); } else { } return (thp); } } void ldv_main16_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_50200; ldv_50199: tmp = __VERIFIER_nondet_int(); switch (tmp) { default: ; goto ldv_50198; } ldv_50198: ; ldv_50200: tmp___0 = __VERIFIER_nondet_int(); if (tmp___0 != 0) { goto ldv_50199; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_245(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_246(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); 
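/* ldv_mutex_unlock_246() is one of the numbered LDV wrappers generated for
 * each mutex_lock()/mutex_unlock()/mutex_trylock() call site: the verifier's
 * per-mutex model is updated first (ldv_mutex_unlock_lock() above) and only
 * then is the real mutex_unlock() below invoked, so the state machine defined
 * toward the end of this file tracks every acquire and release performed by
 * the driver code. */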
mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_247(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_248(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_249(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_250(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_251(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static void INIT_HLIST_NODE(struct hlist_node *h ) { { h->next = 0; h->pprev = 0; return; } } __inline static int hlist_unhashed(struct hlist_node const *h ) { { return ((unsigned long )h->pprev == (unsigned long )((struct hlist_node **/* const */)0)); } } __inline static int hlist_empty(struct hlist_head const *h ) { { return ((unsigned long )h->first == (unsigned long )((struct hlist_node */* const */)0)); } } __inline static void __hlist_del(struct hlist_node *n ) { struct hlist_node *next ; struct hlist_node **pprev ; { next = n->next; pprev = n->pprev; *pprev = next; if ((unsigned long )next != (unsigned long )((struct hlist_node *)0)) { next->pprev = pprev; } else { } return; } } __inline static void hlist_del(struct hlist_node *n ) { { __hlist_del(n); n->next = 0xdead000000100100UL; n->pprev = 0xdead000000200200UL; return; } } __inline static void hlist_add_head(struct hlist_node *n , struct hlist_head *h ) { struct hlist_node *first ; { first = h->first; n->next = first; if ((unsigned long )first != (unsigned long )((struct hlist_node *)0)) { first->pprev = & n->next; } else { } h->first = n; n->pprev = & h->first; return; } } int ldv_mutex_trylock_262(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_260(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_263(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_265(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_259(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_261(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_264(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___9(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6452; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6452; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6452; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6452; default: __bad_percpu_size(); } ldv_6452: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern void synchronize_sched(void) ; __inline static void __rcu_read_lock___9(void) { struct thread_info *tmp ; { tmp = current_thread_info___9(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___9(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___9(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } 
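/* From here on the file models the pNFS device-ID cache (the code appears to
 * come from fs/nfs/pnfs_dev.c): a 32-bucket hash table, nfs4_deviceid_cache[32],
 * keyed by nfs4_deviceid_hash(); lookups run under rcu_read_lock() while
 * insertion, deletion and per-client purge take the global nfs4_deviceid_lock
 * spinlock and call synchronize_rcu() before freeing a node. A condensed
 * sketch of the hash used below (hypothetical helper name, same arithmetic as
 * the generated nfs4_deviceid_hash()):
 *
 *   static u32 devid_hash_sketch(const unsigned char *data)
 *   {
 *       u32 x = 0;
 *       for (unsigned int i = 0; i < 16; i++)
 *           x = x * 37 + data[i];   // fold the 16 device-id bytes
 *       return x & 31;              // 32 buckets
 *   }
 */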
__inline static void synchronize_rcu(void) { { synchronize_sched(); return; } } __inline static void rcu_read_lock___9(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___9(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 763, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___9(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 784, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___9(); return; } } __inline static void hlist_del_init_rcu(struct hlist_node *n ) { int tmp ; { tmp = hlist_unhashed((struct hlist_node const *)n); if (tmp == 0) { __hlist_del(n); n->pprev = 0; } else { } return; } } __inline static void hlist_add_head_rcu(struct hlist_node *n , struct hlist_head *h ) { struct hlist_node *first ; { first = h->first; n->next = first; n->pprev = & h->first; __asm__ volatile ("": : : "memory"); h->first = n; if ((unsigned long )first != (unsigned long )((struct hlist_node *)0)) { first->pprev = & n->next; } else { } return; } } struct nfs4_deviceid_node *nfs4_find_get_deviceid(struct pnfs_layoutdriver_type const *ld , struct nfs_client const *clp , struct nfs4_deviceid const *id ) ; void nfs4_init_deviceid_node(struct nfs4_deviceid_node *d , struct pnfs_layoutdriver_type const *ld , struct nfs_client const *nfs_client , struct nfs4_deviceid const *id ) ; struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new ) ; bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *d ) ; void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node ) ; bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node ) ; void nfs4_print_deviceid(struct nfs4_deviceid const *id ) ; static struct hlist_head nfs4_deviceid_cache[32U] ; static spinlock_t nfs4_deviceid_lock = {{{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "nfs4_deviceid_lock", 0, 0UL}}}}; void nfs4_print_deviceid(struct nfs4_deviceid const *id ) { u32 *p ; long tmp ; { p = (u32 *)id; tmp = ldv__builtin_expect((nfs_debug & 4096U) != 0U, 0L); if (tmp != 0L) { printk("\001d%s: device id= [%x%x%x%x]\n", "nfs4_print_deviceid", *p, *(p + 1UL), *(p + 2UL), *(p + 3UL)); } else { } return; } } __inline static u32 nfs4_deviceid_hash(struct nfs4_deviceid const *id ) { unsigned char *cptr ; unsigned int nbytes ; u32 x ; unsigned char *tmp ; unsigned int tmp___0 ; { cptr = (unsigned char *)(& id->data); nbytes = 16U; x = 0U; goto ldv_47415; ldv_47414: x = x * 37U; tmp = cptr; cptr = cptr + 1; x = (u32 )*tmp + x; ldv_47415: tmp___0 = nbytes; nbytes = nbytes - 1U; if (tmp___0 != 0U) { goto ldv_47414; } else { } return (x & 31U); } } static struct nfs4_deviceid_node *_lookup_deviceid(struct pnfs_layoutdriver_type const *ld , struct nfs_client const *clp , struct nfs4_deviceid const *id , long hash ) { struct nfs4_deviceid_node *d ; struct hlist_node *n ; struct hlist_node *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; struct hlist_node *_________p1___0 ; bool __warned___0 ; int tmp___2 ; struct hlist_node const *__mptr ; { _________p1 = *((struct hlist_node * volatile *)(& 
((struct hlist_head *)(& nfs4_deviceid_cache) + (unsigned long )hash)->first)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } n = _________p1; goto ldv_47436; ldv_47435: ; if ((unsigned long )d->ld == (unsigned long )ld && (unsigned long )d->nfs_client == (unsigned long )clp) { tmp___1 = memcmp((void const *)(& d->deviceid), (void const *)id, 16UL); if (tmp___1 == 0) { tmp___0 = atomic_read((atomic_t const *)(& d->ref)); if (tmp___0 != 0) { return (d); } else { goto ldv_47434; } } else { } } else { } ldv_47434: _________p1___0 = *((struct hlist_node * volatile *)(& n->next)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } n = _________p1___0; ldv_47436: ; if ((unsigned long )n != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)n; d = (struct nfs4_deviceid_node *)__mptr; goto ldv_47435; } else { } return (0); } } static struct nfs4_deviceid_node *_find_get_deviceid(struct pnfs_layoutdriver_type const *ld , struct nfs_client const *clp , struct nfs4_deviceid const *id , long hash ) { struct nfs4_deviceid_node *d ; { rcu_read_lock___9(); d = _lookup_deviceid(ld, clp, id, hash); if ((unsigned long )d != (unsigned long )((struct nfs4_deviceid_node *)0)) { atomic_inc(& d->ref); } else { } rcu_read_unlock___9(); return (d); } } struct nfs4_deviceid_node *nfs4_find_get_deviceid(struct pnfs_layoutdriver_type const *ld , struct nfs_client const *clp , struct nfs4_deviceid const *id ) { u32 tmp ; struct nfs4_deviceid_node *tmp___0 ; { tmp = nfs4_deviceid_hash(id); tmp___0 = _find_get_deviceid(ld, clp, id, (long )tmp); return (tmp___0); } } void nfs4_delete_deviceid(struct pnfs_layoutdriver_type const *ld , struct nfs_client const *clp , struct nfs4_deviceid const *id ) { struct nfs4_deviceid_node *d ; u32 tmp ; int tmp___0 ; { spin_lock(& nfs4_deviceid_lock); rcu_read_lock___9(); tmp = nfs4_deviceid_hash(id); d = _lookup_deviceid(ld, clp, id, (long )tmp); rcu_read_unlock___9(); if ((unsigned long )d == (unsigned long )((struct nfs4_deviceid_node *)0)) { spin_unlock(& nfs4_deviceid_lock); return; } else { } hlist_del_init_rcu(& d->node); spin_unlock(& nfs4_deviceid_lock); synchronize_rcu(); tmp___0 = atomic_dec_and_test(& d->ref); if (tmp___0 != 0) { (*((d->ld)->free_deviceid_node))(d); } else { } return; } } void nfs4_init_deviceid_node(struct nfs4_deviceid_node *d , struct pnfs_layoutdriver_type const *ld , struct nfs_client const *nfs_client , struct nfs4_deviceid const *id ) { { INIT_HLIST_NODE(& d->node); INIT_HLIST_NODE(& d->tmpnode); d->ld = ld; d->nfs_client = nfs_client; d->flags = 0UL; d->deviceid = *id; atomic_set(& d->ref, 1); return; } } struct nfs4_deviceid_node *nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new ) { struct nfs4_deviceid_node *d ; long hash ; u32 tmp ; { spin_lock(& nfs4_deviceid_lock); tmp = nfs4_deviceid_hash((struct nfs4_deviceid const *)(& new->deviceid)); hash = (long )tmp; d = _find_get_deviceid(new->ld, new->nfs_client, (struct nfs4_deviceid const *)(& new->deviceid), hash); if ((unsigned long )d != (unsigned long )((struct nfs4_deviceid_node *)0)) { spin_unlock(& nfs4_deviceid_lock); return (d); } else { } hlist_add_head_rcu(& new->node, (struct hlist_head *)(& nfs4_deviceid_cache) + (unsigned long )hash); spin_unlock(& nfs4_deviceid_lock); atomic_inc(& new->ref); return (new); } } bool nfs4_put_deviceid_node(struct nfs4_deviceid_node *d ) { int tmp ; { tmp = atomic_dec_and_test(& d->ref); if (tmp == 0) { return 
(0); } else { } (*((d->ld)->free_deviceid_node))(d); return (1); } } void nfs4_mark_deviceid_unavailable(struct nfs4_deviceid_node *node ) { { node->timestamp_unavailable = jiffies; set_bit(1U, (unsigned long volatile *)(& node->flags)); return; } } bool nfs4_test_deviceid_unavailable(struct nfs4_deviceid_node *node ) { unsigned long start ; unsigned long end ; int tmp ; { tmp = constant_test_bit(1U, (unsigned long const volatile *)(& node->flags)); if (tmp != 0) { end = jiffies; start = end - 30000UL; if ((long )node->timestamp_unavailable - (long )start >= 0L && (long )end - (long )node->timestamp_unavailable >= 0L) { return (1); } else { } clear_bit(1, (unsigned long volatile *)(& node->flags)); } else { } return (0); } } static void _deviceid_purge_client(struct nfs_client const *clp , long hash ) { struct nfs4_deviceid_node *d ; struct hlist_node *n ; struct hlist_head tmp ; struct hlist_node *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; struct hlist_node *_________p1___0 ; bool __warned___0 ; int tmp___2 ; struct hlist_node const *__mptr ; int tmp___3 ; struct hlist_node const *__mptr___0 ; int tmp___4 ; int tmp___5 ; { tmp.first = 0; spin_lock(& nfs4_deviceid_lock); rcu_read_lock___9(); _________p1 = *((struct hlist_node * volatile *)(& ((struct hlist_head *)(& nfs4_deviceid_cache) + (unsigned long )hash)->first)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { rcu_read_lock_held(); } else { } n = _________p1; goto ldv_47556; ldv_47555: ; if ((unsigned long )d->nfs_client == (unsigned long )clp) { tmp___1 = atomic_read((atomic_t const *)(& d->ref)); if (tmp___1 != 0) { hlist_del_init_rcu(& d->node); hlist_add_head(& d->tmpnode, & tmp); } else { } } else { } _________p1___0 = *((struct hlist_node * volatile *)(& n->next)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } n = _________p1___0; ldv_47556: ; if ((unsigned long )n != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)n; d = (struct nfs4_deviceid_node *)__mptr; goto ldv_47555; } else { } rcu_read_unlock___9(); spin_unlock(& nfs4_deviceid_lock); tmp___3 = hlist_empty((struct hlist_head const *)(& tmp)); if (tmp___3 != 0) { return; } else { } synchronize_rcu(); goto ldv_47561; ldv_47560: __mptr___0 = (struct hlist_node const *)tmp.first; d = (struct nfs4_deviceid_node *)__mptr___0 + 0xfffffffffffffff0UL; hlist_del(& d->tmpnode); tmp___4 = atomic_dec_and_test(& d->ref); if (tmp___4 != 0) { (*((d->ld)->free_deviceid_node))(d); } else { } ldv_47561: tmp___5 = hlist_empty((struct hlist_head const *)(& tmp)); if (tmp___5 == 0) { goto ldv_47560; } else { } return; } } void nfs4_deviceid_purge_client(struct nfs_client const *clp ) { long h ; { if (((unsigned int )clp->cl_exchange_flags & 131072U) == 0U) { return; } else { } h = 0L; goto ldv_47568; ldv_47567: _deviceid_purge_client(clp, h); h = h + 1L; ldv_47568: ; if (h <= 31L) { goto ldv_47567; } else { } return; } } void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp ) { struct nfs4_deviceid_node *d ; struct hlist_node *n ; int i ; struct hlist_node *_________p1 ; bool __warned ; int tmp ; struct hlist_node *_________p1___0 ; bool __warned___0 ; int tmp___0 ; struct hlist_node const *__mptr ; { rcu_read_lock___9(); i = 0; goto ldv_47589; ldv_47588: _________p1 = *((struct hlist_node * volatile *)(& ((struct hlist_head *)(& nfs4_deviceid_cache) + (unsigned long )i)->first)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { rcu_read_lock_held(); } else { } n = _________p1; goto ldv_47586; ldv_47585: ; if ((unsigned long )d->nfs_client == (unsigned long )((struct nfs_client const *)clp)) { set_bit(0U, (unsigned long volatile *)(& d->flags)); } else { } _________p1___0 = *((struct hlist_node * volatile *)(& n->next)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } n = _________p1___0; ldv_47586: ; if ((unsigned long )n != (unsigned long )((struct hlist_node *)0)) { __mptr = (struct hlist_node const *)n; d = (struct nfs4_deviceid_node *)__mptr; goto ldv_47585; } else { } i = i + 1; ldv_47589: ; if (i <= 31) { goto ldv_47588; } else { } rcu_read_unlock___9(); return; } } void ldv_mutex_lock_259(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_260(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_261(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_262(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_263(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_264(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_265(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static void ldv_error(void) __attribute__((__no_instrument_function__)) ; __inline static void ldv_error(void) { { ERROR: {reach_error();abort();} } } extern int __VERIFIER_nondet_int(void) ; long ldv__builtin_expect(long exp , long c ) { { return (exp); } } static int ldv_mutex_cred_guard_mutex ; int ldv_mutex_lock_interruptible_cred_guard_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cred_guard_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_cred_guard_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_cred_guard_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cred_guard_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_cred_guard_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_cred_guard_mutex(struct mutex *lock ) { { if (ldv_mutex_cred_guard_mutex == 1) { } else { ldv_error(); } ldv_mutex_cred_guard_mutex = 2; return; } } int ldv_mutex_trylock_cred_guard_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_cred_guard_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_cred_guard_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_cred_guard_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_cred_guard_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_cred_guard_mutex = 2; return (1); } else 
{ } return (0); } } int ldv_mutex_is_locked_cred_guard_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cred_guard_mutex == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_cred_guard_mutex(struct mutex *lock ) { { if (ldv_mutex_cred_guard_mutex == 2) { } else { ldv_error(); } ldv_mutex_cred_guard_mutex = 1; return; } } static int ldv_mutex_i_mutex ; int ldv_mutex_lock_interruptible_i_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_i_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_i_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_i_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_i_mutex(struct mutex *lock ) { { if (ldv_mutex_i_mutex == 1) { } else { ldv_error(); } ldv_mutex_i_mutex = 2; return; } } int ldv_mutex_trylock_i_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_i_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_i_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_i_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_i_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_i_mutex = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_i_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_i_mutex == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_i_mutex(struct mutex *lock ) { { if (ldv_mutex_i_mutex == 2) { } else { ldv_error(); } ldv_mutex_i_mutex = 1; return; } } static int ldv_mutex_idmap_mutex ; int ldv_mutex_lock_interruptible_idmap_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_idmap_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_idmap_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_idmap_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_idmap_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_idmap_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_idmap_mutex(struct mutex *lock ) { { if (ldv_mutex_idmap_mutex == 1) { } else { ldv_error(); } ldv_mutex_idmap_mutex = 2; return; } } int ldv_mutex_trylock_idmap_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_idmap_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_idmap_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_idmap_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_idmap_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_idmap_mutex = 2; return (1); } else { } return (0); } } int 
ldv_mutex_is_locked_idmap_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_idmap_mutex == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_idmap_mutex(struct mutex *lock ) { { if (ldv_mutex_idmap_mutex == 2) { } else { ldv_error(); } ldv_mutex_idmap_mutex = 1; return; } } static int ldv_mutex_lock ; int ldv_mutex_lock_interruptible_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_lock = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_lock = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_lock(struct mutex *lock ) { { if (ldv_mutex_lock == 1) { } else { ldv_error(); } ldv_mutex_lock = 2; return; } } int ldv_mutex_trylock_lock(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_lock = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_lock = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_lock(struct mutex *lock ) { { if (ldv_mutex_lock == 2) { } else { ldv_error(); } ldv_mutex_lock = 1; return; } } static int ldv_mutex_mutex ; int ldv_mutex_lock_interruptible_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_mutex(struct mutex *lock ) { { if (ldv_mutex_mutex == 1) { } else { ldv_error(); } ldv_mutex_mutex = 2; return; } } int ldv_mutex_trylock_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_mutex = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void 
ldv_mutex_unlock_mutex(struct mutex *lock ) { { if (ldv_mutex_mutex == 2) { } else { ldv_error(); } ldv_mutex_mutex = 1; return; } } static int ldv_mutex_nfs_callback_mutex ; int ldv_mutex_lock_interruptible_nfs_callback_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_nfs_callback_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_nfs_callback_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_nfs_callback_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_nfs_callback_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_nfs_callback_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_nfs_callback_mutex(struct mutex *lock ) { { if (ldv_mutex_nfs_callback_mutex == 1) { } else { ldv_error(); } ldv_mutex_nfs_callback_mutex = 2; return; } } int ldv_mutex_trylock_nfs_callback_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_nfs_callback_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_nfs_callback_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_nfs_callback_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_nfs_callback_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_nfs_callback_mutex = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_nfs_callback_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_nfs_callback_mutex == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_nfs_callback_mutex(struct mutex *lock ) { { if (ldv_mutex_nfs_callback_mutex == 2) { } else { ldv_error(); } ldv_mutex_nfs_callback_mutex = 1; return; } } static int ldv_mutex_nfs_clid_init_mutex ; int ldv_mutex_lock_interruptible_nfs_clid_init_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_nfs_clid_init_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_nfs_clid_init_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_nfs_clid_init_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_nfs_clid_init_mutex == 1) { } else { ldv_error(); } nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { ldv_mutex_nfs_clid_init_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_nfs_clid_init_mutex(struct mutex *lock ) { { if (ldv_mutex_nfs_clid_init_mutex == 1) { } else { ldv_error(); } ldv_mutex_nfs_clid_init_mutex = 2; return; } } int ldv_mutex_trylock_nfs_clid_init_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_nfs_clid_init_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = __VERIFIER_nondet_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_nfs_clid_init_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_nfs_clid_init_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_nfs_clid_init_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = __VERIFIER_nondet_int(); if (atomic_value_after_dec == 0) { ldv_mutex_nfs_clid_init_mutex = 2; return (1); } else { } 
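/* Every ldv_mutex_*_<name> family above and below implements the same
 * two-state model per tracked mutex: 1 = unlocked, 2 = locked. Acquiring from
 * any state other than 1, or releasing from any state other than 2, calls
 * ldv_error() and thus reach_error(). In illustrative pseudo-C:
 *
 *   lock:    assert(state == 1); state = 2;
 *   trylock: assert(state == 1); if (nondet) return 0; state = 2; return 1;
 *   unlock:  assert(state == 2); state = 1;
 *
 * ldv_atomic_dec_and_mutex_lock_* only takes the lock on the nondeterministic
 * "counter reached zero" branch, which is why the fall-through just below
 * returns 0 without touching the state. */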
return (0); } }
int ldv_mutex_is_locked_nfs_clid_init_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_nfs_clid_init_mutex == 1) { nondetermined = __VERIFIER_nondet_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } }
void ldv_mutex_unlock_nfs_clid_init_mutex(struct mutex *lock ) { { if (ldv_mutex_nfs_clid_init_mutex == 2) { } else { ldv_error(); } ldv_mutex_nfs_clid_init_mutex = 1; return; } }
void ldv_initialize(void) { { ldv_mutex_cred_guard_mutex = 1; ldv_mutex_i_mutex = 1; ldv_mutex_idmap_mutex = 1; ldv_mutex_lock = 1; ldv_mutex_mutex = 1; ldv_mutex_nfs_callback_mutex = 1; ldv_mutex_nfs_clid_init_mutex = 1; return; } }
void ldv_check_final_state(void) { { if (ldv_mutex_cred_guard_mutex == 1) { } else { ldv_error(); } if (ldv_mutex_i_mutex == 1) { } else { ldv_error(); } if (ldv_mutex_idmap_mutex == 1) { } else { ldv_error(); } if (ldv_mutex_lock == 1) { } else { ldv_error(); } if (ldv_mutex_mutex == 1) { } else { ldv_error(); } if (ldv_mutex_nfs_callback_mutex == 1) { } else { ldv_error(); } if (ldv_mutex_nfs_clid_init_mutex == 1) { } else { ldv_error(); } return; } }
#include "model/32_7a_cilled_true-unreach-call_linux-3.8-rc1-32_7a-fs--nfs--nfsv4.ko-ldv_main4_sequence_infinite_withcheck_stateful.env.c"
#include "model/common.env.c"
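/* Harness sketch: ldv_main16_sequence_infinite_withcheck_stateful(), defined
 * earlier in this translation unit, drives the model. A condensed,
 * illustrative view of its control flow:
 *
 *   LDV_IN_INTERRUPT = 1;
 *   ldv_initialize();                  // all tracked mutexes start unlocked (1)
 *   while (__VERIFIER_nondet_int()) {
 *       switch (__VERIFIER_nondet_int()) {
 *       default: break;                // no driver callbacks are exercised here
 *       }
 *   }
 *   ldv_check_final_state();           // every mutex must be unlocked again
 *
 * The two model files included above supply the environment definitions this
 * harness relies on. */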