extern void abort(void); extern void __assert_fail(const char *, const char *, unsigned int, const char *) __attribute__ ((__nothrow__ , __leaf__)) __attribute__ ((__noreturn__)); void reach_error() { __assert_fail("0", "drivers--block--drbd--drbd.ko_942.ed635cb0.32_7a.cil_true-unreach-call.i", 3, "reach_error"); } /* Generated by CIL v. 1.5.1 */ /* print_CIL_Input is false */ typedef signed char __s8; typedef unsigned char __u8; typedef short __s16; typedef unsigned short __u16; typedef int __s32; typedef unsigned int __u32; typedef long long __s64; typedef unsigned long long __u64; typedef signed char s8; typedef unsigned char u8; typedef unsigned short u16; typedef int s32; typedef unsigned int u32; typedef long long s64; typedef unsigned long long u64; typedef long __kernel_long_t; typedef unsigned long __kernel_ulong_t; typedef int __kernel_pid_t; typedef unsigned int __kernel_uid32_t; typedef unsigned int __kernel_gid32_t; typedef __kernel_ulong_t __kernel_size_t; typedef __kernel_long_t __kernel_ssize_t; typedef __kernel_long_t __kernel_off_t; typedef long long __kernel_loff_t; typedef __kernel_long_t __kernel_time_t; typedef __kernel_long_t __kernel_clock_t; typedef int __kernel_timer_t; typedef int __kernel_clockid_t; typedef __u16 __be16; typedef __u32 __be32; typedef __u32 __wsum; typedef __u32 __kernel_dev_t; typedef __kernel_dev_t dev_t; typedef unsigned short umode_t; typedef __u32 nlink_t; typedef __kernel_off_t off_t; typedef __kernel_pid_t pid_t; typedef __kernel_clockid_t clockid_t; typedef _Bool bool; typedef __kernel_uid32_t uid_t; typedef __kernel_gid32_t gid_t; typedef __kernel_loff_t loff_t; typedef __kernel_size_t size_t; typedef __kernel_ssize_t ssize_t; typedef __kernel_time_t time_t; typedef __s32 int32_t; typedef __u8 uint8_t; typedef __u32 uint32_t; typedef __u64 uint64_t; typedef unsigned long sector_t; typedef unsigned long blkcnt_t; typedef u64 dma_addr_t; typedef unsigned int gfp_t; typedef unsigned int fmode_t; struct __anonstruct_atomic_t_6 { int counter ; }; typedef struct __anonstruct_atomic_t_6 atomic_t; struct __anonstruct_atomic64_t_7 { long counter ; }; typedef struct __anonstruct_atomic64_t_7 atomic64_t; struct list_head { struct list_head *next ; struct list_head *prev ; }; struct hlist_node; struct hlist_head { struct hlist_node *first ; }; struct hlist_node { struct hlist_node *next ; struct hlist_node **pprev ; }; struct callback_head { struct callback_head *next ; void (*func)(struct callback_head * ) ; }; struct module; struct file_operations; struct _ddebug { char const *modname ; char const *function ; char const *filename ; char const *format ; unsigned int lineno : 18 ; unsigned char flags ; }; struct device; struct net_device; struct completion; struct pt_regs; struct pid; typedef u16 __ticket_t; typedef u32 __ticketpair_t; struct __raw_tickets { __ticket_t head ; __ticket_t tail ; }; union __anonunion_ldv_2023_8 { __ticketpair_t head_tail ; struct __raw_tickets tickets ; }; struct arch_spinlock { union __anonunion_ldv_2023_8 ldv_2023 ; }; typedef struct arch_spinlock arch_spinlock_t; struct __anonstruct_ldv_2030_10 { u32 read ; s32 write ; }; union __anonunion_arch_rwlock_t_9 { s64 lock ; struct __anonstruct_ldv_2030_10 ldv_2030 ; }; typedef union __anonunion_arch_rwlock_t_9 arch_rwlock_t; struct task_struct; struct lockdep_map; struct mm_struct; struct pt_regs { unsigned long r15 ; unsigned long r14 ; unsigned long r13 ; unsigned long r12 ; unsigned long bp ; unsigned long bx ; unsigned long r11 ; unsigned long r10 ; unsigned long r9 ; 
unsigned long r8 ; unsigned long ax ; unsigned long cx ; unsigned long dx ; unsigned long si ; unsigned long di ; unsigned long orig_ax ; unsigned long ip ; unsigned long cs ; unsigned long flags ; unsigned long sp ; unsigned long ss ; }; struct __anonstruct_ldv_2147_12 { unsigned int a ; unsigned int b ; }; struct __anonstruct_ldv_2162_13 { u16 limit0 ; u16 base0 ; unsigned char base1 ; unsigned char type : 4 ; unsigned char s : 1 ; unsigned char dpl : 2 ; unsigned char p : 1 ; unsigned char limit : 4 ; unsigned char avl : 1 ; unsigned char l : 1 ; unsigned char d : 1 ; unsigned char g : 1 ; unsigned char base2 ; }; union __anonunion_ldv_2163_11 { struct __anonstruct_ldv_2147_12 ldv_2147 ; struct __anonstruct_ldv_2162_13 ldv_2162 ; }; struct desc_struct { union __anonunion_ldv_2163_11 ldv_2163 ; }; typedef unsigned long pgdval_t; typedef unsigned long pgprotval_t; struct pgprot { pgprotval_t pgprot ; }; typedef struct pgprot pgprot_t; struct __anonstruct_pgd_t_15 { pgdval_t pgd ; }; typedef struct __anonstruct_pgd_t_15 pgd_t; struct page; typedef struct page *pgtable_t; struct file; struct seq_file; struct thread_struct; struct cpumask; struct kernel_vm86_regs { struct pt_regs pt ; unsigned short es ; unsigned short __esh ; unsigned short ds ; unsigned short __dsh ; unsigned short fs ; unsigned short __fsh ; unsigned short gs ; unsigned short __gsh ; }; union __anonunion_ldv_2766_18 { struct pt_regs *regs ; struct kernel_vm86_regs *vm86 ; }; struct math_emu_info { long ___orig_eip ; union __anonunion_ldv_2766_18 ldv_2766 ; }; struct cpumask { unsigned long bits[64U] ; }; typedef struct cpumask cpumask_t; typedef struct cpumask *cpumask_var_t; struct exec_domain; struct map_segment; struct exec_domain { char const *name ; void (*handler)(int , struct pt_regs * ) ; unsigned char pers_low ; unsigned char pers_high ; unsigned long *signal_map ; unsigned long *signal_invmap ; struct map_segment *err_map ; struct map_segment *socktype_map ; struct map_segment *sockopt_map ; struct map_segment *af_map ; struct module *module ; struct exec_domain *next ; }; struct seq_operations; struct i387_fsave_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u32 status ; }; struct __anonstruct_ldv_5121_23 { u64 rip ; u64 rdp ; }; struct __anonstruct_ldv_5127_24 { u32 fip ; u32 fcs ; u32 foo ; u32 fos ; }; union __anonunion_ldv_5128_22 { struct __anonstruct_ldv_5121_23 ldv_5121 ; struct __anonstruct_ldv_5127_24 ldv_5127 ; }; union __anonunion_ldv_5137_25 { u32 padding1[12U] ; u32 sw_reserved[12U] ; }; struct i387_fxsave_struct { u16 cwd ; u16 swd ; u16 twd ; u16 fop ; union __anonunion_ldv_5128_22 ldv_5128 ; u32 mxcsr ; u32 mxcsr_mask ; u32 st_space[32U] ; u32 xmm_space[64U] ; u32 padding[12U] ; union __anonunion_ldv_5137_25 ldv_5137 ; }; struct i387_soft_struct { u32 cwd ; u32 swd ; u32 twd ; u32 fip ; u32 fcs ; u32 foo ; u32 fos ; u32 st_space[20U] ; u8 ftop ; u8 changed ; u8 lookahead ; u8 no_update ; u8 rm ; u8 alimit ; struct math_emu_info *info ; u32 entry_eip ; }; struct ymmh_struct { u32 ymmh_space[64U] ; }; struct xsave_hdr_struct { u64 xstate_bv ; u64 reserved1[2U] ; u64 reserved2[5U] ; }; struct xsave_struct { struct i387_fxsave_struct i387 ; struct xsave_hdr_struct xsave_hdr ; struct ymmh_struct ymmh ; }; union thread_xstate { struct i387_fsave_struct fsave ; struct i387_fxsave_struct fxsave ; struct i387_soft_struct soft ; struct xsave_struct xsave ; }; struct fpu { unsigned int last_cpu ; unsigned int has_fpu ; union thread_xstate *state ; }; 
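/* Note on the harness at the top of this file: this is an SV-COMP
 * "unreach-call" verification task produced by CIL from the drbd kernel
 * module, and the "_true-unreach-call" suffix in the file name records the
 * expected verdict (the error call is unreachable). reach_error(), defined
 * above, is the verification target: the safety property holds iff no
 * execution of the program can reach a call to it. A hedged sketch of how
 * such a call is typically guarded in these benchmarks (bad_state is a
 * hypothetical predicate, not a name from this file):
 *
 *     if (bad_state)        // hypothetical error predicate
 *         reach_error();    // property violated iff this call is reachable
 */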
struct kmem_cache; struct perf_event; struct thread_struct { struct desc_struct tls_array[3U] ; unsigned long sp0 ; unsigned long sp ; unsigned long usersp ; unsigned short es ; unsigned short ds ; unsigned short fsindex ; unsigned short gsindex ; unsigned long fs ; unsigned long gs ; struct perf_event *ptrace_bps[4U] ; unsigned long debugreg6 ; unsigned long ptrace_dr7 ; unsigned long cr2 ; unsigned long trap_nr ; unsigned long error_code ; struct fpu fpu ; unsigned long *io_bitmap_ptr ; unsigned long iopl ; unsigned int io_bitmap_max ; }; struct __anonstruct_mm_segment_t_27 { unsigned long seg ; }; typedef struct __anonstruct_mm_segment_t_27 mm_segment_t; typedef atomic64_t atomic_long_t; struct stack_trace { unsigned int nr_entries ; unsigned int max_entries ; unsigned long *entries ; int skip ; }; struct lockdep_subclass_key { char __one_byte ; } __attribute__((__packed__)) ; struct lock_class_key { struct lockdep_subclass_key subkeys[8U] ; }; struct lock_class { struct list_head hash_entry ; struct list_head lock_entry ; struct lockdep_subclass_key *key ; unsigned int subclass ; unsigned int dep_gen_id ; unsigned long usage_mask ; struct stack_trace usage_traces[13U] ; struct list_head locks_after ; struct list_head locks_before ; unsigned int version ; unsigned long ops ; char const *name ; int name_version ; unsigned long contention_point[4U] ; unsigned long contending_point[4U] ; }; struct lockdep_map { struct lock_class_key *key ; struct lock_class *class_cache[2U] ; char const *name ; int cpu ; unsigned long ip ; }; struct held_lock { u64 prev_chain_key ; unsigned long acquire_ip ; struct lockdep_map *instance ; struct lockdep_map *nest_lock ; u64 waittime_stamp ; u64 holdtime_stamp ; unsigned short class_idx : 13 ; unsigned char irq_context : 2 ; unsigned char trylock : 1 ; unsigned char read : 2 ; unsigned char check : 2 ; unsigned char hardirqs_off : 1 ; unsigned short references : 11 ; }; struct raw_spinlock { arch_spinlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct raw_spinlock raw_spinlock_t; struct __anonstruct_ldv_5956_29 { u8 __padding[24U] ; struct lockdep_map dep_map ; }; union __anonunion_ldv_5957_28 { struct raw_spinlock rlock ; struct __anonstruct_ldv_5956_29 ldv_5956 ; }; struct spinlock { union __anonunion_ldv_5957_28 ldv_5957 ; }; typedef struct spinlock spinlock_t; struct __anonstruct_rwlock_t_30 { arch_rwlock_t raw_lock ; unsigned int magic ; unsigned int owner_cpu ; void *owner ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_rwlock_t_30 rwlock_t; struct mutex { atomic_t count ; spinlock_t wait_lock ; struct list_head wait_list ; struct task_struct *owner ; char const *name ; void *magic ; struct lockdep_map dep_map ; }; struct mutex_waiter { struct list_head list ; struct task_struct *task ; void *magic ; }; struct timespec; struct compat_timespec; struct __anonstruct_futex_32 { u32 *uaddr ; u32 val ; u32 flags ; u32 bitset ; u64 time ; u32 *uaddr2 ; }; struct __anonstruct_nanosleep_33 { clockid_t clockid ; struct timespec *rmtp ; struct compat_timespec *compat_rmtp ; u64 expires ; }; struct pollfd; struct __anonstruct_poll_34 { struct pollfd *ufds ; int nfds ; int has_timeout ; unsigned long tv_sec ; unsigned long tv_nsec ; }; union __anonunion_ldv_6276_31 { struct __anonstruct_futex_32 futex ; struct __anonstruct_nanosleep_33 nanosleep ; struct __anonstruct_poll_34 poll ; }; struct restart_block { long (*fn)(struct restart_block * ) ; union __anonunion_ldv_6276_31 
ldv_6276 ; }; struct thread_info { struct task_struct *task ; struct exec_domain *exec_domain ; __u32 flags ; __u32 status ; __u32 cpu ; int preempt_count ; mm_segment_t addr_limit ; struct restart_block restart_block ; void *sysenter_return ; unsigned char sig_on_uaccess_error : 1 ; unsigned char uaccess_err : 1 ; }; struct vm_area_struct; struct __anonstruct_seqlock_t_35 { unsigned int sequence ; spinlock_t lock ; }; typedef struct __anonstruct_seqlock_t_35 seqlock_t; struct seqcount { unsigned int sequence ; }; typedef struct seqcount seqcount_t; struct timespec { __kernel_time_t tv_sec ; long tv_nsec ; }; union ktime { s64 tv64 ; }; typedef union ktime ktime_t; struct tvec_base; struct timer_list { struct list_head entry ; unsigned long expires ; struct tvec_base *base ; void (*function)(unsigned long ) ; unsigned long data ; int slack ; int start_pid ; void *start_site ; char start_comm[16U] ; struct lockdep_map lockdep_map ; }; struct hrtimer; enum hrtimer_restart; struct work_struct; struct work_struct { atomic_long_t data ; struct list_head entry ; void (*func)(struct work_struct * ) ; struct lockdep_map lockdep_map ; }; struct delayed_work { struct work_struct work ; struct timer_list timer ; int cpu ; }; struct call_single_data { struct list_head list ; void (*func)(void * ) ; void *info ; u16 flags ; u16 priv ; }; struct __wait_queue; typedef struct __wait_queue wait_queue_t; struct __wait_queue { unsigned int flags ; void *private ; int (*func)(wait_queue_t * , unsigned int , int , void * ) ; struct list_head task_list ; }; struct __wait_queue_head { spinlock_t lock ; struct list_head task_list ; }; typedef struct __wait_queue_head wait_queue_head_t; struct completion { unsigned int done ; wait_queue_head_t wait ; }; struct pm_message { int event ; }; typedef struct pm_message pm_message_t; struct dev_pm_ops { int (*prepare)(struct device * ) ; void (*complete)(struct device * ) ; int (*suspend)(struct device * ) ; int (*resume)(struct device * ) ; int (*freeze)(struct device * ) ; int (*thaw)(struct device * ) ; int (*poweroff)(struct device * ) ; int (*restore)(struct device * ) ; int (*suspend_late)(struct device * ) ; int (*resume_early)(struct device * ) ; int (*freeze_late)(struct device * ) ; int (*thaw_early)(struct device * ) ; int (*poweroff_late)(struct device * ) ; int (*restore_early)(struct device * ) ; int (*suspend_noirq)(struct device * ) ; int (*resume_noirq)(struct device * ) ; int (*freeze_noirq)(struct device * ) ; int (*thaw_noirq)(struct device * ) ; int (*poweroff_noirq)(struct device * ) ; int (*restore_noirq)(struct device * ) ; int (*runtime_suspend)(struct device * ) ; int (*runtime_resume)(struct device * ) ; int (*runtime_idle)(struct device * ) ; }; enum rpm_status { RPM_ACTIVE = 0, RPM_RESUMING = 1, RPM_SUSPENDED = 2, RPM_SUSPENDING = 3 } ; enum rpm_request { RPM_REQ_NONE = 0, RPM_REQ_IDLE = 1, RPM_REQ_SUSPEND = 2, RPM_REQ_AUTOSUSPEND = 3, RPM_REQ_RESUME = 4 } ; struct wakeup_source; struct pm_subsys_data { spinlock_t lock ; unsigned int refcount ; }; struct dev_pm_qos_request; struct pm_qos_constraints; struct dev_pm_info { pm_message_t power_state ; unsigned char can_wakeup : 1 ; unsigned char async_suspend : 1 ; bool is_prepared ; bool is_suspended ; bool ignore_children ; bool early_init ; spinlock_t lock ; struct list_head entry ; struct completion completion ; struct wakeup_source *wakeup ; bool wakeup_path ; bool syscore ; struct timer_list suspend_timer ; unsigned long timer_expires ; struct work_struct work ; wait_queue_head_t wait_queue 
; atomic_t usage_count ; atomic_t child_count ; unsigned char disable_depth : 3 ; unsigned char idle_notification : 1 ; unsigned char request_pending : 1 ; unsigned char deferred_resume : 1 ; unsigned char run_wake : 1 ; unsigned char runtime_auto : 1 ; unsigned char no_callbacks : 1 ; unsigned char irq_safe : 1 ; unsigned char use_autosuspend : 1 ; unsigned char timer_autosuspends : 1 ; enum rpm_request request ; enum rpm_status runtime_status ; int runtime_error ; int autosuspend_delay ; unsigned long last_busy ; unsigned long active_jiffies ; unsigned long suspended_jiffies ; unsigned long accounting_timestamp ; struct dev_pm_qos_request *pq_req ; struct pm_subsys_data *subsys_data ; struct pm_qos_constraints *constraints ; }; struct dev_pm_domain { struct dev_pm_ops ops ; }; struct __anonstruct_nodemask_t_100 { unsigned long bits[16U] ; }; typedef struct __anonstruct_nodemask_t_100 nodemask_t; struct __anonstruct_mm_context_t_101 { void *ldt ; int size ; unsigned short ia32_compat ; struct mutex lock ; void *vdso ; }; typedef struct __anonstruct_mm_context_t_101 mm_context_t; struct bio_vec; struct rw_semaphore; struct rw_semaphore { long count ; raw_spinlock_t wait_lock ; struct list_head wait_list ; struct lockdep_map dep_map ; }; struct notifier_block; struct notifier_block { int (*notifier_call)(struct notifier_block * , unsigned long , void * ) ; struct notifier_block *next ; int priority ; }; struct blocking_notifier_head { struct rw_semaphore rwsem ; struct notifier_block *head ; }; struct ctl_table; struct proc_dir_entry; struct hlist_nulls_node; struct hlist_nulls_head { struct hlist_nulls_node *first ; }; struct hlist_nulls_node { struct hlist_nulls_node *next ; struct hlist_nulls_node **pprev ; }; struct plist_head { struct list_head node_list ; }; struct plist_node { int prio ; struct list_head prio_list ; struct list_head node_list ; }; struct sock; struct kobject; enum kobj_ns_type { KOBJ_NS_TYPE_NONE = 0, KOBJ_NS_TYPE_NET = 1, KOBJ_NS_TYPES = 2 } ; struct kobj_ns_type_operations { enum kobj_ns_type type ; void *(*grab_current_ns)(void) ; void const *(*netlink_ns)(struct sock * ) ; void const *(*initial_ns)(void) ; void (*drop_ns)(void * ) ; }; struct attribute { char const *name ; umode_t mode ; bool ignore_lockdep ; struct lock_class_key *key ; struct lock_class_key skey ; }; struct attribute_group { char const *name ; umode_t (*is_visible)(struct kobject * , struct attribute * , int ) ; struct attribute **attrs ; }; struct bin_attribute { struct attribute attr ; size_t size ; void *private ; ssize_t (*read)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; ssize_t (*write)(struct file * , struct kobject * , struct bin_attribute * , char * , loff_t , size_t ) ; int (*mmap)(struct file * , struct kobject * , struct bin_attribute * , struct vm_area_struct * ) ; }; struct sysfs_ops { ssize_t (*show)(struct kobject * , struct attribute * , char * ) ; ssize_t (*store)(struct kobject * , struct attribute * , char const * , size_t ) ; void const *(*namespace)(struct kobject * , struct attribute const * ) ; }; struct sysfs_dirent; struct kref { atomic_t refcount ; }; struct kset; struct kobj_type; struct kobject { char const *name ; struct list_head entry ; struct kobject *parent ; struct kset *kset ; struct kobj_type *ktype ; struct sysfs_dirent *sd ; struct kref kref ; unsigned char state_initialized : 1 ; unsigned char state_in_sysfs : 1 ; unsigned char state_add_uevent_sent : 1 ; unsigned char state_remove_uevent_sent : 1 ; unsigned 
char uevent_suppress : 1 ; }; struct kobj_type { void (*release)(struct kobject * ) ; struct sysfs_ops const *sysfs_ops ; struct attribute **default_attrs ; struct kobj_ns_type_operations const *(*child_ns_type)(struct kobject * ) ; void const *(*namespace)(struct kobject * ) ; }; struct kobj_uevent_env { char *envp[32U] ; int envp_idx ; char buf[2048U] ; int buflen ; }; struct kset_uevent_ops { int (* const filter)(struct kset * , struct kobject * ) ; char const *(* const name)(struct kset * , struct kobject * ) ; int (* const uevent)(struct kset * , struct kobject * , struct kobj_uevent_env * ) ; }; struct kset { struct list_head list ; spinlock_t list_lock ; struct kobject kobj ; struct kset_uevent_ops const *uevent_ops ; }; struct klist_node; struct klist_node { void *n_klist ; struct list_head n_node ; struct kref n_ref ; }; struct ratelimit_state { raw_spinlock_t lock ; int interval ; int burst ; int printed ; int missed ; unsigned long begin ; }; struct dma_map_ops; struct dev_archdata { void *acpi_handle ; struct dma_map_ops *dma_ops ; void *iommu ; }; struct device_private; struct device_driver; struct driver_private; struct class; struct subsys_private; struct bus_type; struct device_node; struct iommu_ops; struct iommu_group; struct bus_attribute { struct attribute attr ; ssize_t (*show)(struct bus_type * , char * ) ; ssize_t (*store)(struct bus_type * , char const * , size_t ) ; }; struct device_attribute; struct driver_attribute; struct bus_type { char const *name ; char const *dev_name ; struct device *dev_root ; struct bus_attribute *bus_attrs ; struct device_attribute *dev_attrs ; struct driver_attribute *drv_attrs ; int (*match)(struct device * , struct device_driver * ) ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct dev_pm_ops const *pm ; struct iommu_ops *iommu_ops ; struct subsys_private *p ; }; struct device_type; struct of_device_id; struct device_driver { char const *name ; struct bus_type *bus ; struct module *owner ; char const *mod_name ; bool suppress_bind_attrs ; struct of_device_id const *of_match_table ; int (*probe)(struct device * ) ; int (*remove)(struct device * ) ; void (*shutdown)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct attribute_group const **groups ; struct dev_pm_ops const *pm ; struct driver_private *p ; }; struct driver_attribute { struct attribute attr ; ssize_t (*show)(struct device_driver * , char * ) ; ssize_t (*store)(struct device_driver * , char const * , size_t ) ; }; struct class_attribute; struct class { char const *name ; struct module *owner ; struct class_attribute *class_attrs ; struct device_attribute *dev_attrs ; struct bin_attribute *dev_bin_attrs ; struct kobject *dev_kobj ; int (*dev_uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*class_release)(struct class * ) ; void (*dev_release)(struct device * ) ; int (*suspend)(struct device * , pm_message_t ) ; int (*resume)(struct device * ) ; struct kobj_ns_type_operations const *ns_type ; void const *(*namespace)(struct device * ) ; struct dev_pm_ops const *pm ; struct subsys_private *p ; }; struct class_attribute { struct attribute attr ; ssize_t (*show)(struct class * , struct class_attribute * , char * ) ; ssize_t (*store)(struct class * , 
struct class_attribute * , char const * , size_t ) ; void const *(*namespace)(struct class * , struct class_attribute const * ) ; }; struct device_type { char const *name ; struct attribute_group const **groups ; int (*uevent)(struct device * , struct kobj_uevent_env * ) ; char *(*devnode)(struct device * , umode_t * ) ; void (*release)(struct device * ) ; struct dev_pm_ops const *pm ; }; struct device_attribute { struct attribute attr ; ssize_t (*show)(struct device * , struct device_attribute * , char * ) ; ssize_t (*store)(struct device * , struct device_attribute * , char const * , size_t ) ; }; struct device_dma_parameters { unsigned int max_segment_size ; unsigned long segment_boundary_mask ; }; struct dma_coherent_mem; struct device { struct device *parent ; struct device_private *p ; struct kobject kobj ; char const *init_name ; struct device_type const *type ; struct mutex mutex ; struct bus_type *bus ; struct device_driver *driver ; void *platform_data ; struct dev_pm_info power ; struct dev_pm_domain *pm_domain ; int numa_node ; u64 *dma_mask ; u64 coherent_dma_mask ; struct device_dma_parameters *dma_parms ; struct list_head dma_pools ; struct dma_coherent_mem *dma_mem ; struct dev_archdata archdata ; struct device_node *of_node ; dev_t devt ; u32 id ; spinlock_t devres_lock ; struct list_head devres_head ; struct klist_node knode_class ; struct class *class ; struct attribute_group const **groups ; void (*release)(struct device * ) ; struct iommu_group *iommu_group ; }; struct wakeup_source { char const *name ; struct list_head entry ; spinlock_t lock ; struct timer_list timer ; unsigned long timer_expires ; ktime_t total_time ; ktime_t max_time ; ktime_t last_time ; ktime_t start_prevent_time ; ktime_t prevent_sleep_time ; unsigned long event_count ; unsigned long active_count ; unsigned long relax_count ; unsigned long expire_count ; unsigned long wakeup_count ; bool active ; bool autosleep_enabled ; }; struct pm_qos_request { struct plist_node node ; int pm_qos_class ; struct delayed_work work ; }; struct dev_pm_qos_request { struct plist_node node ; struct device *dev ; }; enum pm_qos_type { PM_QOS_UNITIALIZED = 0, PM_QOS_MAX = 1, PM_QOS_MIN = 2 } ; struct pm_qos_constraints { struct plist_head list ; s32 target_value ; s32 default_value ; enum pm_qos_type type ; struct blocking_notifier_head *notifiers ; }; struct iovec { void *iov_base ; __kernel_size_t iov_len ; }; struct rb_node { unsigned long __rb_parent_color ; struct rb_node *rb_right ; struct rb_node *rb_left ; }; struct rb_root { struct rb_node *rb_node ; }; struct inode; struct arch_uprobe_task { unsigned long saved_scratch_register ; unsigned int saved_trap_nr ; unsigned int saved_tf ; }; enum uprobe_task_state { UTASK_RUNNING = 0, UTASK_SSTEP = 1, UTASK_SSTEP_ACK = 2, UTASK_SSTEP_TRAPPED = 3 } ; struct uprobe; struct uprobe_task { enum uprobe_task_state state ; struct arch_uprobe_task autask ; struct uprobe *active_uprobe ; unsigned long xol_vaddr ; unsigned long vaddr ; }; struct xol_area { wait_queue_head_t wq ; atomic_t slot_count ; unsigned long *bitmap ; struct page *page ; unsigned long vaddr ; }; struct uprobes_state { struct xol_area *xol_area ; }; struct address_space; union __anonunion_ldv_14711_130 { unsigned long index ; void *freelist ; bool pfmemalloc ; }; struct __anonstruct_ldv_14721_134 { unsigned short inuse ; unsigned short objects : 15 ; unsigned char frozen : 1 ; }; union __anonunion_ldv_14723_133 { atomic_t _mapcount ; struct __anonstruct_ldv_14721_134 ldv_14721 ; int units ; }; struct 
__anonstruct_ldv_14725_132 { union __anonunion_ldv_14723_133 ldv_14723 ; atomic_t _count ; }; union __anonunion_ldv_14726_131 { unsigned long counters ; struct __anonstruct_ldv_14725_132 ldv_14725 ; }; struct __anonstruct_ldv_14727_129 { union __anonunion_ldv_14711_130 ldv_14711 ; union __anonunion_ldv_14726_131 ldv_14726 ; }; struct __anonstruct_ldv_14734_136 { struct page *next ; int pages ; int pobjects ; }; struct slab; struct __anonstruct_ldv_14740_137 { struct kmem_cache *slab_cache ; struct slab *slab_page ; }; union __anonunion_ldv_14741_135 { struct list_head lru ; struct __anonstruct_ldv_14734_136 ldv_14734 ; struct list_head list ; struct __anonstruct_ldv_14740_137 ldv_14740 ; }; union __anonunion_ldv_14746_138 { unsigned long private ; struct kmem_cache *slab ; struct page *first_page ; }; struct page { unsigned long flags ; struct address_space *mapping ; struct __anonstruct_ldv_14727_129 ldv_14727 ; union __anonunion_ldv_14741_135 ldv_14741 ; union __anonunion_ldv_14746_138 ldv_14746 ; unsigned long debug_flags ; }; struct page_frag { struct page *page ; __u32 offset ; __u32 size ; }; struct __anonstruct_linear_140 { struct rb_node rb ; unsigned long rb_subtree_last ; }; union __anonunion_shared_139 { struct __anonstruct_linear_140 linear ; struct list_head nonlinear ; }; struct anon_vma; struct vm_operations_struct; struct mempolicy; struct vm_area_struct { struct mm_struct *vm_mm ; unsigned long vm_start ; unsigned long vm_end ; struct vm_area_struct *vm_next ; struct vm_area_struct *vm_prev ; pgprot_t vm_page_prot ; unsigned long vm_flags ; struct rb_node vm_rb ; union __anonunion_shared_139 shared ; struct list_head anon_vma_chain ; struct anon_vma *anon_vma ; struct vm_operations_struct const *vm_ops ; unsigned long vm_pgoff ; struct file *vm_file ; void *vm_private_data ; struct mempolicy *vm_policy ; }; struct core_thread { struct task_struct *task ; struct core_thread *next ; }; struct core_state { atomic_t nr_threads ; struct core_thread dumper ; struct completion startup ; }; struct mm_rss_stat { atomic_long_t count[3U] ; }; struct linux_binfmt; struct mmu_notifier_mm; struct mm_struct { struct vm_area_struct *mmap ; struct rb_root mm_rb ; struct vm_area_struct *mmap_cache ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; void (*unmap_area)(struct mm_struct * , unsigned long ) ; unsigned long mmap_base ; unsigned long task_size ; unsigned long cached_hole_size ; unsigned long free_area_cache ; pgd_t *pgd ; atomic_t mm_users ; atomic_t mm_count ; int map_count ; spinlock_t page_table_lock ; struct rw_semaphore mmap_sem ; struct list_head mmlist ; unsigned long hiwater_rss ; unsigned long hiwater_vm ; unsigned long total_vm ; unsigned long locked_vm ; unsigned long pinned_vm ; unsigned long shared_vm ; unsigned long exec_vm ; unsigned long stack_vm ; unsigned long def_flags ; unsigned long nr_ptes ; unsigned long start_code ; unsigned long end_code ; unsigned long start_data ; unsigned long end_data ; unsigned long start_brk ; unsigned long brk ; unsigned long start_stack ; unsigned long arg_start ; unsigned long arg_end ; unsigned long env_start ; unsigned long env_end ; unsigned long saved_auxv[44U] ; struct mm_rss_stat rss_stat ; struct linux_binfmt *binfmt ; cpumask_var_t cpu_vm_mask_var ; mm_context_t context ; unsigned long flags ; struct core_state *core_state ; spinlock_t ioctx_lock ; struct hlist_head ioctx_list ; struct task_struct *owner ; struct file *exe_file ; struct mmu_notifier_mm 
*mmu_notifier_mm ; pgtable_t pmd_huge_pte ; struct cpumask cpumask_allocation ; struct uprobes_state uprobes_state ; }; struct shrink_control { gfp_t gfp_mask ; unsigned long nr_to_scan ; }; struct shrinker { int (*shrink)(struct shrinker * , struct shrink_control * ) ; int seeks ; long batch ; struct list_head list ; atomic_long_t nr_in_batch ; }; struct file_ra_state; struct user_struct; struct writeback_control; struct vm_fault { unsigned int flags ; unsigned long pgoff ; void *virtual_address ; struct page *page ; }; struct vm_operations_struct { void (*open)(struct vm_area_struct * ) ; void (*close)(struct vm_area_struct * ) ; int (*fault)(struct vm_area_struct * , struct vm_fault * ) ; int (*page_mkwrite)(struct vm_area_struct * , struct vm_fault * ) ; int (*access)(struct vm_area_struct * , unsigned long , void * , int , int ) ; int (*set_policy)(struct vm_area_struct * , struct mempolicy * ) ; struct mempolicy *(*get_policy)(struct vm_area_struct * , unsigned long ) ; int (*migrate)(struct vm_area_struct * , nodemask_t const * , nodemask_t const * , unsigned long ) ; int (*remap_pages)(struct vm_area_struct * , unsigned long , unsigned long , unsigned long ) ; }; struct scatterlist { unsigned long sg_magic ; unsigned long page_link ; unsigned int offset ; unsigned int length ; dma_addr_t dma_address ; unsigned int dma_length ; }; struct sg_table { struct scatterlist *sgl ; unsigned int nents ; unsigned int orig_nents ; }; typedef s32 dma_cookie_t; struct dql { unsigned int num_queued ; unsigned int adj_limit ; unsigned int last_obj_cnt ; unsigned int limit ; unsigned int num_completed ; unsigned int prev_ovlimit ; unsigned int prev_num_queued ; unsigned int prev_last_obj_cnt ; unsigned int lowest_slack ; unsigned long slack_start_time ; unsigned int max_limit ; unsigned int min_limit ; unsigned int slack_hold_time ; }; struct user_namespace; typedef uid_t kuid_t; typedef gid_t kgid_t; struct kstat { u64 ino ; dev_t dev ; umode_t mode ; unsigned int nlink ; kuid_t uid ; kgid_t gid ; dev_t rdev ; loff_t size ; struct timespec atime ; struct timespec mtime ; struct timespec ctime ; unsigned long blksize ; unsigned long long blocks ; }; struct sem_undo_list; struct sysv_sem { struct sem_undo_list *undo_list ; }; typedef unsigned short __kernel_sa_family_t; struct __kernel_sockaddr_storage { __kernel_sa_family_t ss_family ; char __data[126U] ; }; struct cred; typedef __kernel_sa_family_t sa_family_t; struct sockaddr { sa_family_t sa_family ; char sa_data[14U] ; }; struct msghdr { void *msg_name ; int msg_namelen ; struct iovec *msg_iov ; __kernel_size_t msg_iovlen ; void *msg_control ; __kernel_size_t msg_controllen ; unsigned int msg_flags ; }; struct __anonstruct_sync_serial_settings_142 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; }; typedef struct __anonstruct_sync_serial_settings_142 sync_serial_settings; struct __anonstruct_te1_settings_143 { unsigned int clock_rate ; unsigned int clock_type ; unsigned short loopback ; unsigned int slot_map ; }; typedef struct __anonstruct_te1_settings_143 te1_settings; struct __anonstruct_raw_hdlc_proto_144 { unsigned short encoding ; unsigned short parity ; }; typedef struct __anonstruct_raw_hdlc_proto_144 raw_hdlc_proto; struct __anonstruct_fr_proto_145 { unsigned int t391 ; unsigned int t392 ; unsigned int n391 ; unsigned int n392 ; unsigned int n393 ; unsigned short lmi ; unsigned short dce ; }; typedef struct __anonstruct_fr_proto_145 fr_proto; struct __anonstruct_fr_proto_pvc_146 { unsigned int dlci ; }; 
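/* CIL emits otherwise-anonymous aggregates under synthetic names of the form
 * __anonstruct_<tag>_<id> / __anonunion_<tag>_<id> and then typedefs them
 * back to their original kernel names, as with the generic-HDLC ioctl
 * payload types (sync_serial_settings, te1_settings, fr_proto, fr_proto_pvc,
 * ...) defined around this point. */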
typedef struct __anonstruct_fr_proto_pvc_146 fr_proto_pvc; struct __anonstruct_fr_proto_pvc_info_147 { unsigned int dlci ; char master[16U] ; }; typedef struct __anonstruct_fr_proto_pvc_info_147 fr_proto_pvc_info; struct __anonstruct_cisco_proto_148 { unsigned int interval ; unsigned int timeout ; }; typedef struct __anonstruct_cisco_proto_148 cisco_proto; struct ifmap { unsigned long mem_start ; unsigned long mem_end ; unsigned short base_addr ; unsigned char irq ; unsigned char dma ; unsigned char port ; }; union __anonunion_ifs_ifsu_149 { raw_hdlc_proto *raw_hdlc ; cisco_proto *cisco ; fr_proto *fr ; fr_proto_pvc *fr_pvc ; fr_proto_pvc_info *fr_pvc_info ; sync_serial_settings *sync ; te1_settings *te1 ; }; struct if_settings { unsigned int type ; unsigned int size ; union __anonunion_ifs_ifsu_149 ifs_ifsu ; }; union __anonunion_ifr_ifrn_150 { char ifrn_name[16U] ; }; union __anonunion_ifr_ifru_151 { struct sockaddr ifru_addr ; struct sockaddr ifru_dstaddr ; struct sockaddr ifru_broadaddr ; struct sockaddr ifru_netmask ; struct sockaddr ifru_hwaddr ; short ifru_flags ; int ifru_ivalue ; int ifru_mtu ; struct ifmap ifru_map ; char ifru_slave[16U] ; char ifru_newname[16U] ; void *ifru_data ; struct if_settings ifru_settings ; }; struct ifreq { union __anonunion_ifr_ifrn_150 ifr_ifrn ; union __anonunion_ifr_ifru_151 ifr_ifru ; }; struct hlist_bl_node; struct hlist_bl_head { struct hlist_bl_node *first ; }; struct hlist_bl_node { struct hlist_bl_node *next ; struct hlist_bl_node **pprev ; }; struct nameidata; struct path; struct vfsmount; struct __anonstruct_ldv_18552_154 { u32 hash ; u32 len ; }; union __anonunion_ldv_18554_153 { struct __anonstruct_ldv_18552_154 ldv_18552 ; u64 hash_len ; }; struct qstr { union __anonunion_ldv_18554_153 ldv_18554 ; unsigned char const *name ; }; struct dentry_operations; struct super_block; union __anonunion_d_u_155 { struct list_head d_child ; struct callback_head d_rcu ; }; struct dentry { unsigned int d_flags ; seqcount_t d_seq ; struct hlist_bl_node d_hash ; struct dentry *d_parent ; struct qstr d_name ; struct inode *d_inode ; unsigned char d_iname[32U] ; unsigned int d_count ; spinlock_t d_lock ; struct dentry_operations const *d_op ; struct super_block *d_sb ; unsigned long d_time ; void *d_fsdata ; struct list_head d_lru ; union __anonunion_d_u_155 d_u ; struct list_head d_subdirs ; struct hlist_node d_alias ; }; struct dentry_operations { int (*d_revalidate)(struct dentry * , unsigned int ) ; int (*d_hash)(struct dentry const * , struct inode const * , struct qstr * ) ; int (*d_compare)(struct dentry const * , struct inode const * , struct dentry const * , struct inode const * , unsigned int , char const * , struct qstr const * ) ; int (*d_delete)(struct dentry const * ) ; void (*d_release)(struct dentry * ) ; void (*d_prune)(struct dentry * ) ; void (*d_iput)(struct dentry * , struct inode * ) ; char *(*d_dname)(struct dentry * , char * , int ) ; struct vfsmount *(*d_automount)(struct path * ) ; int (*d_manage)(struct dentry * , bool ) ; }; struct path { struct vfsmount *mnt ; struct dentry *dentry ; }; struct radix_tree_node; struct radix_tree_root { unsigned int height ; gfp_t gfp_mask ; struct radix_tree_node *rnode ; }; enum pid_type { PIDTYPE_PID = 0, PIDTYPE_PGID = 1, PIDTYPE_SID = 2, PIDTYPE_MAX = 3 } ; struct pid_namespace; struct upid { int nr ; struct pid_namespace *ns ; struct hlist_node pid_chain ; }; struct pid { atomic_t count ; unsigned int level ; struct hlist_head tasks[3U] ; struct callback_head rcu ; struct upid numbers[1U] ; 
}; struct pid_link { struct hlist_node node ; struct pid *pid ; }; struct kernel_cap_struct { __u32 cap[2U] ; }; typedef struct kernel_cap_struct kernel_cap_t; struct fiemap_extent { __u64 fe_logical ; __u64 fe_physical ; __u64 fe_length ; __u64 fe_reserved64[2U] ; __u32 fe_flags ; __u32 fe_reserved[3U] ; }; enum migrate_mode { MIGRATE_ASYNC = 0, MIGRATE_SYNC_LIGHT = 1, MIGRATE_SYNC = 2 } ; struct percpu_rw_semaphore { unsigned int *counters ; bool locked ; struct mutex mtx ; }; struct bio_set; struct bio; struct bio_integrity_payload; struct block_device; struct io_context; struct cgroup_subsys_state; typedef void bio_end_io_t(struct bio * , int ); struct bio_vec { struct page *bv_page ; unsigned int bv_len ; unsigned int bv_offset ; }; struct bio { sector_t bi_sector ; struct bio *bi_next ; struct block_device *bi_bdev ; unsigned long bi_flags ; unsigned long bi_rw ; unsigned short bi_vcnt ; unsigned short bi_idx ; unsigned int bi_phys_segments ; unsigned int bi_size ; unsigned int bi_seg_front_size ; unsigned int bi_seg_back_size ; bio_end_io_t *bi_end_io ; void *bi_private ; struct io_context *bi_ioc ; struct cgroup_subsys_state *bi_css ; struct bio_integrity_payload *bi_integrity ; unsigned int bi_max_vecs ; atomic_t bi_cnt ; struct bio_vec *bi_io_vec ; struct bio_set *bi_pool ; struct bio_vec bi_inline_vecs[0U] ; }; struct export_operations; struct hd_geometry; struct kiocb; struct pipe_inode_info; struct poll_table_struct; struct kstatfs; struct swap_info_struct; struct iattr { unsigned int ia_valid ; umode_t ia_mode ; kuid_t ia_uid ; kgid_t ia_gid ; loff_t ia_size ; struct timespec ia_atime ; struct timespec ia_mtime ; struct timespec ia_ctime ; struct file *ia_file ; }; struct percpu_counter { raw_spinlock_t lock ; s64 count ; struct list_head list ; s32 *counters ; }; struct fs_disk_quota { __s8 d_version ; __s8 d_flags ; __u16 d_fieldmask ; __u32 d_id ; __u64 d_blk_hardlimit ; __u64 d_blk_softlimit ; __u64 d_ino_hardlimit ; __u64 d_ino_softlimit ; __u64 d_bcount ; __u64 d_icount ; __s32 d_itimer ; __s32 d_btimer ; __u16 d_iwarns ; __u16 d_bwarns ; __s32 d_padding2 ; __u64 d_rtb_hardlimit ; __u64 d_rtb_softlimit ; __u64 d_rtbcount ; __s32 d_rtbtimer ; __u16 d_rtbwarns ; __s16 d_padding3 ; char d_padding4[8U] ; }; struct fs_qfilestat { __u64 qfs_ino ; __u64 qfs_nblks ; __u32 qfs_nextents ; }; typedef struct fs_qfilestat fs_qfilestat_t; struct fs_quota_stat { __s8 qs_version ; __u16 qs_flags ; __s8 qs_pad ; fs_qfilestat_t qs_uquota ; fs_qfilestat_t qs_gquota ; __u32 qs_incoredqs ; __s32 qs_btimelimit ; __s32 qs_itimelimit ; __s32 qs_rtbtimelimit ; __u16 qs_bwarnlimit ; __u16 qs_iwarnlimit ; }; struct dquot; typedef __kernel_uid32_t projid_t; typedef projid_t kprojid_t; struct if_dqinfo { __u64 dqi_bgrace ; __u64 dqi_igrace ; __u32 dqi_flags ; __u32 dqi_valid ; }; enum quota_type { USRQUOTA = 0, GRPQUOTA = 1, PRJQUOTA = 2 } ; typedef long long qsize_t; union __anonunion_ldv_19713_157 { kuid_t uid ; kgid_t gid ; kprojid_t projid ; }; struct kqid { union __anonunion_ldv_19713_157 ldv_19713 ; enum quota_type type ; }; struct mem_dqblk { qsize_t dqb_bhardlimit ; qsize_t dqb_bsoftlimit ; qsize_t dqb_curspace ; qsize_t dqb_rsvspace ; qsize_t dqb_ihardlimit ; qsize_t dqb_isoftlimit ; qsize_t dqb_curinodes ; time_t dqb_btime ; time_t dqb_itime ; }; struct quota_format_type; struct mem_dqinfo { struct quota_format_type *dqi_format ; int dqi_fmt_id ; struct list_head dqi_dirty_list ; unsigned long dqi_flags ; unsigned int dqi_bgrace ; unsigned int dqi_igrace ; qsize_t dqi_maxblimit ; qsize_t 
dqi_maxilimit ; void *dqi_priv ; }; struct dquot { struct hlist_node dq_hash ; struct list_head dq_inuse ; struct list_head dq_free ; struct list_head dq_dirty ; struct mutex dq_lock ; atomic_t dq_count ; wait_queue_head_t dq_wait_unused ; struct super_block *dq_sb ; struct kqid dq_id ; loff_t dq_off ; unsigned long dq_flags ; struct mem_dqblk dq_dqb ; }; struct quota_format_ops { int (*check_quota_file)(struct super_block * , int ) ; int (*read_file_info)(struct super_block * , int ) ; int (*write_file_info)(struct super_block * , int ) ; int (*free_file_info)(struct super_block * , int ) ; int (*read_dqblk)(struct dquot * ) ; int (*commit_dqblk)(struct dquot * ) ; int (*release_dqblk)(struct dquot * ) ; }; struct dquot_operations { int (*write_dquot)(struct dquot * ) ; struct dquot *(*alloc_dquot)(struct super_block * , int ) ; void (*destroy_dquot)(struct dquot * ) ; int (*acquire_dquot)(struct dquot * ) ; int (*release_dquot)(struct dquot * ) ; int (*mark_dirty)(struct dquot * ) ; int (*write_info)(struct super_block * , int ) ; qsize_t *(*get_reserved_space)(struct inode * ) ; }; struct quotactl_ops { int (*quota_on)(struct super_block * , int , int , struct path * ) ; int (*quota_on_meta)(struct super_block * , int , int ) ; int (*quota_off)(struct super_block * , int ) ; int (*quota_sync)(struct super_block * , int ) ; int (*get_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*set_info)(struct super_block * , int , struct if_dqinfo * ) ; int (*get_dqblk)(struct super_block * , struct kqid , struct fs_disk_quota * ) ; int (*set_dqblk)(struct super_block * , struct kqid , struct fs_disk_quota * ) ; int (*get_xstate)(struct super_block * , struct fs_quota_stat * ) ; int (*set_xstate)(struct super_block * , unsigned int , int ) ; }; struct quota_format_type { int qf_fmt_id ; struct quota_format_ops const *qf_ops ; struct module *qf_owner ; struct quota_format_type *qf_next ; }; struct quota_info { unsigned int flags ; struct mutex dqio_mutex ; struct mutex dqonoff_mutex ; struct rw_semaphore dqptr_sem ; struct inode *files[2U] ; struct mem_dqinfo info[2U] ; struct quota_format_ops const *ops[2U] ; }; union __anonunion_arg_159 { char *buf ; void *data ; }; struct __anonstruct_read_descriptor_t_158 { size_t written ; size_t count ; union __anonunion_arg_159 arg ; int error ; }; typedef struct __anonstruct_read_descriptor_t_158 read_descriptor_t; struct address_space_operations { int (*writepage)(struct page * , struct writeback_control * ) ; int (*readpage)(struct file * , struct page * ) ; int (*writepages)(struct address_space * , struct writeback_control * ) ; int (*set_page_dirty)(struct page * ) ; int (*readpages)(struct file * , struct address_space * , struct list_head * , unsigned int ) ; int (*write_begin)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page ** , void ** ) ; int (*write_end)(struct file * , struct address_space * , loff_t , unsigned int , unsigned int , struct page * , void * ) ; sector_t (*bmap)(struct address_space * , sector_t ) ; void (*invalidatepage)(struct page * , unsigned long ) ; int (*releasepage)(struct page * , gfp_t ) ; void (*freepage)(struct page * ) ; ssize_t (*direct_IO)(int , struct kiocb * , struct iovec const * , loff_t , unsigned long ) ; int (*get_xip_mem)(struct address_space * , unsigned long , int , void ** , unsigned long * ) ; int (*migratepage)(struct address_space * , struct page * , struct page * , enum migrate_mode ) ; int (*launder_page)(struct page * ) ; int 
(*is_partially_uptodate)(struct page * , read_descriptor_t * , unsigned long ) ; int (*error_remove_page)(struct address_space * , struct page * ) ; int (*swap_activate)(struct swap_info_struct * , struct file * , sector_t * ) ; void (*swap_deactivate)(struct file * ) ; }; struct backing_dev_info; struct address_space { struct inode *host ; struct radix_tree_root page_tree ; spinlock_t tree_lock ; unsigned int i_mmap_writable ; struct rb_root i_mmap ; struct list_head i_mmap_nonlinear ; struct mutex i_mmap_mutex ; unsigned long nrpages ; unsigned long writeback_index ; struct address_space_operations const *a_ops ; unsigned long flags ; struct backing_dev_info *backing_dev_info ; spinlock_t private_lock ; struct list_head private_list ; struct address_space *assoc_mapping ; }; struct request_queue; struct hd_struct; struct gendisk; struct block_device { dev_t bd_dev ; int bd_openers ; struct inode *bd_inode ; struct super_block *bd_super ; struct mutex bd_mutex ; struct list_head bd_inodes ; void *bd_claiming ; void *bd_holder ; int bd_holders ; bool bd_write_holder ; struct list_head bd_holder_disks ; struct block_device *bd_contains ; unsigned int bd_block_size ; struct hd_struct *bd_part ; unsigned int bd_part_count ; int bd_invalidated ; struct gendisk *bd_disk ; struct request_queue *bd_queue ; struct list_head bd_list ; unsigned long bd_private ; int bd_fsfreeze_count ; struct mutex bd_fsfreeze_mutex ; struct percpu_rw_semaphore bd_block_size_semaphore ; }; struct posix_acl; struct inode_operations; union __anonunion_ldv_20148_160 { unsigned int const i_nlink ; unsigned int __i_nlink ; }; union __anonunion_ldv_20168_161 { struct hlist_head i_dentry ; struct callback_head i_rcu ; }; struct file_lock; struct cdev; union __anonunion_ldv_20184_162 { struct pipe_inode_info *i_pipe ; struct block_device *i_bdev ; struct cdev *i_cdev ; }; struct inode { umode_t i_mode ; unsigned short i_opflags ; kuid_t i_uid ; kgid_t i_gid ; unsigned int i_flags ; struct posix_acl *i_acl ; struct posix_acl *i_default_acl ; struct inode_operations const *i_op ; struct super_block *i_sb ; struct address_space *i_mapping ; void *i_security ; unsigned long i_ino ; union __anonunion_ldv_20148_160 ldv_20148 ; dev_t i_rdev ; loff_t i_size ; struct timespec i_atime ; struct timespec i_mtime ; struct timespec i_ctime ; spinlock_t i_lock ; unsigned short i_bytes ; unsigned int i_blkbits ; blkcnt_t i_blocks ; unsigned long i_state ; struct mutex i_mutex ; unsigned long dirtied_when ; struct hlist_node i_hash ; struct list_head i_wb_list ; struct list_head i_lru ; struct list_head i_sb_list ; union __anonunion_ldv_20168_161 ldv_20168 ; u64 i_version ; atomic_t i_count ; atomic_t i_dio_count ; atomic_t i_writecount ; struct file_operations const *i_fop ; struct file_lock *i_flock ; struct address_space i_data ; struct dquot *i_dquot[2U] ; struct list_head i_devices ; union __anonunion_ldv_20184_162 ldv_20184 ; __u32 i_generation ; __u32 i_fsnotify_mask ; struct hlist_head i_fsnotify_marks ; atomic_t i_readcount ; void *i_private ; }; struct fown_struct { rwlock_t lock ; struct pid *pid ; enum pid_type pid_type ; kuid_t uid ; kuid_t euid ; int signum ; }; struct file_ra_state { unsigned long start ; unsigned int size ; unsigned int async_size ; unsigned int ra_pages ; unsigned int mmap_miss ; loff_t prev_pos ; }; union __anonunion_f_u_163 { struct list_head fu_list ; struct callback_head fu_rcuhead ; }; struct file { union __anonunion_f_u_163 f_u ; struct path f_path ; struct file_operations const *f_op ; spinlock_t 
f_lock ; int f_sb_list_cpu ; atomic_long_t f_count ; unsigned int f_flags ; fmode_t f_mode ; loff_t f_pos ; struct fown_struct f_owner ; struct cred const *f_cred ; struct file_ra_state f_ra ; u64 f_version ; void *f_security ; void *private_data ; struct list_head f_ep_links ; struct list_head f_tfile_llink ; struct address_space *f_mapping ; unsigned long f_mnt_write_state ; }; struct files_struct; typedef struct files_struct *fl_owner_t; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock * , struct file_lock * ) ; void (*fl_release_private)(struct file_lock * ) ; }; struct lock_manager_operations { int (*lm_compare_owner)(struct file_lock * , struct file_lock * ) ; void (*lm_notify)(struct file_lock * ) ; int (*lm_grant)(struct file_lock * , struct file_lock * , int ) ; void (*lm_break)(struct file_lock * ) ; int (*lm_change)(struct file_lock ** , int ) ; }; struct net; struct nlm_lockowner; struct nfs_lock_info { u32 state ; struct nlm_lockowner *owner ; struct list_head list ; }; struct nfs4_lock_state; struct nfs4_lock_info { struct nfs4_lock_state *owner ; }; struct fasync_struct; struct __anonstruct_afs_165 { struct list_head link ; int state ; }; union __anonunion_fl_u_164 { struct nfs_lock_info nfs_fl ; struct nfs4_lock_info nfs4_fl ; struct __anonstruct_afs_165 afs ; }; struct file_lock { struct file_lock *fl_next ; struct list_head fl_link ; struct list_head fl_block ; fl_owner_t fl_owner ; unsigned int fl_flags ; unsigned char fl_type ; unsigned int fl_pid ; struct pid *fl_nspid ; wait_queue_head_t fl_wait ; struct file *fl_file ; loff_t fl_start ; loff_t fl_end ; struct fasync_struct *fl_fasync ; unsigned long fl_break_time ; unsigned long fl_downgrade_time ; struct file_lock_operations const *fl_ops ; struct lock_manager_operations const *fl_lmops ; union __anonunion_fl_u_164 fl_u ; }; struct fasync_struct { spinlock_t fa_lock ; int magic ; int fa_fd ; struct fasync_struct *fa_next ; struct file *fa_file ; struct callback_head fa_rcu ; }; struct sb_writers { struct percpu_counter counter[3U] ; wait_queue_head_t wait ; int frozen ; wait_queue_head_t wait_unfrozen ; struct lockdep_map lock_map[3U] ; }; struct file_system_type; struct super_operations; struct xattr_handler; struct mtd_info; struct super_block { struct list_head s_list ; dev_t s_dev ; unsigned char s_blocksize_bits ; unsigned long s_blocksize ; loff_t s_maxbytes ; struct file_system_type *s_type ; struct super_operations const *s_op ; struct dquot_operations const *dq_op ; struct quotactl_ops const *s_qcop ; struct export_operations const *s_export_op ; unsigned long s_flags ; unsigned long s_magic ; struct dentry *s_root ; struct rw_semaphore s_umount ; int s_count ; atomic_t s_active ; void *s_security ; struct xattr_handler const **s_xattr ; struct list_head s_inodes ; struct hlist_bl_head s_anon ; struct list_head *s_files ; struct list_head s_mounts ; struct list_head s_dentry_lru ; int s_nr_dentry_unused ; spinlock_t s_inode_lru_lock ; struct list_head s_inode_lru ; int s_nr_inodes_unused ; struct block_device *s_bdev ; struct backing_dev_info *s_bdi ; struct mtd_info *s_mtd ; struct hlist_node s_instances ; struct quota_info s_dquot ; struct sb_writers s_writers ; char s_id[32U] ; u8 s_uuid[16U] ; void *s_fs_info ; unsigned int s_max_links ; fmode_t s_mode ; u32 s_time_gran ; struct mutex s_vfs_rename_mutex ; char *s_subtype ; char *s_options ; struct dentry_operations const *s_d_op ; int cleancache_poolid ; struct shrinker s_shrink ; atomic_long_t s_remove_count ; int s_readonly_remount 
; }; struct fiemap_extent_info { unsigned int fi_flags ; unsigned int fi_extents_mapped ; unsigned int fi_extents_max ; struct fiemap_extent *fi_extents_start ; }; struct block_device_operations; struct file_operations { struct module *owner ; loff_t (*llseek)(struct file * , loff_t , int ) ; ssize_t (*read)(struct file * , char * , size_t , loff_t * ) ; ssize_t (*write)(struct file * , char const * , size_t , loff_t * ) ; ssize_t (*aio_read)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; ssize_t (*aio_write)(struct kiocb * , struct iovec const * , unsigned long , loff_t ) ; int (*readdir)(struct file * , void * , int (*)(void * , char const * , int , loff_t , u64 , unsigned int ) ) ; unsigned int (*poll)(struct file * , struct poll_table_struct * ) ; long (*unlocked_ioctl)(struct file * , unsigned int , unsigned long ) ; long (*compat_ioctl)(struct file * , unsigned int , unsigned long ) ; int (*mmap)(struct file * , struct vm_area_struct * ) ; int (*open)(struct inode * , struct file * ) ; int (*flush)(struct file * , fl_owner_t ) ; int (*release)(struct inode * , struct file * ) ; int (*fsync)(struct file * , loff_t , loff_t , int ) ; int (*aio_fsync)(struct kiocb * , int ) ; int (*fasync)(int , struct file * , int ) ; int (*lock)(struct file * , int , struct file_lock * ) ; ssize_t (*sendpage)(struct file * , struct page * , int , size_t , loff_t * , int ) ; unsigned long (*get_unmapped_area)(struct file * , unsigned long , unsigned long , unsigned long , unsigned long ) ; int (*check_flags)(int ) ; int (*flock)(struct file * , int , struct file_lock * ) ; ssize_t (*splice_write)(struct pipe_inode_info * , struct file * , loff_t * , size_t , unsigned int ) ; ssize_t (*splice_read)(struct file * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; int (*setlease)(struct file * , long , struct file_lock ** ) ; long (*fallocate)(struct file * , int , loff_t , loff_t ) ; }; struct inode_operations { struct dentry *(*lookup)(struct inode * , struct dentry * , unsigned int ) ; void *(*follow_link)(struct dentry * , struct nameidata * ) ; int (*permission)(struct inode * , int ) ; struct posix_acl *(*get_acl)(struct inode * , int ) ; int (*readlink)(struct dentry * , char * , int ) ; void (*put_link)(struct dentry * , struct nameidata * , void * ) ; int (*create)(struct inode * , struct dentry * , umode_t , bool ) ; int (*link)(struct dentry * , struct inode * , struct dentry * ) ; int (*unlink)(struct inode * , struct dentry * ) ; int (*symlink)(struct inode * , struct dentry * , char const * ) ; int (*mkdir)(struct inode * , struct dentry * , umode_t ) ; int (*rmdir)(struct inode * , struct dentry * ) ; int (*mknod)(struct inode * , struct dentry * , umode_t , dev_t ) ; int (*rename)(struct inode * , struct dentry * , struct inode * , struct dentry * ) ; void (*truncate)(struct inode * ) ; int (*setattr)(struct dentry * , struct iattr * ) ; int (*getattr)(struct vfsmount * , struct dentry * , struct kstat * ) ; int (*setxattr)(struct dentry * , char const * , void const * , size_t , int ) ; ssize_t (*getxattr)(struct dentry * , char const * , void * , size_t ) ; ssize_t (*listxattr)(struct dentry * , char * , size_t ) ; int (*removexattr)(struct dentry * , char const * ) ; int (*fiemap)(struct inode * , struct fiemap_extent_info * , u64 , u64 ) ; int (*update_time)(struct inode * , struct timespec * , int ) ; int (*atomic_open)(struct inode * , struct dentry * , struct file * , unsigned int , umode_t , int * ) ; }; struct super_operations { struct inode 
*(*alloc_inode)(struct super_block * ) ; void (*destroy_inode)(struct inode * ) ; void (*dirty_inode)(struct inode * , int ) ; int (*write_inode)(struct inode * , struct writeback_control * ) ; int (*drop_inode)(struct inode * ) ; void (*evict_inode)(struct inode * ) ; void (*put_super)(struct super_block * ) ; int (*sync_fs)(struct super_block * , int ) ; int (*freeze_fs)(struct super_block * ) ; int (*unfreeze_fs)(struct super_block * ) ; int (*statfs)(struct dentry * , struct kstatfs * ) ; int (*remount_fs)(struct super_block * , int * , char * ) ; void (*umount_begin)(struct super_block * ) ; int (*show_options)(struct seq_file * , struct dentry * ) ; int (*show_devname)(struct seq_file * , struct dentry * ) ; int (*show_path)(struct seq_file * , struct dentry * ) ; int (*show_stats)(struct seq_file * , struct dentry * ) ; ssize_t (*quota_read)(struct super_block * , int , char * , size_t , loff_t ) ; ssize_t (*quota_write)(struct super_block * , int , char const * , size_t , loff_t ) ; int (*bdev_try_to_free_page)(struct super_block * , struct page * , gfp_t ) ; int (*nr_cached_objects)(struct super_block * ) ; void (*free_cached_objects)(struct super_block * , int ) ; }; struct file_system_type { char const *name ; int fs_flags ; struct dentry *(*mount)(struct file_system_type * , int , char const * , void * ) ; void (*kill_sb)(struct super_block * ) ; struct module *owner ; struct file_system_type *next ; struct hlist_head fs_supers ; struct lock_class_key s_lock_key ; struct lock_class_key s_umount_key ; struct lock_class_key s_vfs_rename_key ; struct lock_class_key s_writers_key[3U] ; struct lock_class_key i_lock_key ; struct lock_class_key i_mutex_key ; struct lock_class_key i_mutex_dir_key ; }; struct io_event { __u64 data ; __u64 obj ; __s64 res ; __s64 res2 ; }; typedef unsigned long cputime_t; struct siginfo; struct __anonstruct_sigset_t_166 { unsigned long sig[1U] ; }; typedef struct __anonstruct_sigset_t_166 sigset_t; typedef void __signalfn_t(int ); typedef __signalfn_t *__sighandler_t; typedef void __restorefn_t(void); typedef __restorefn_t *__sigrestore_t; struct sigaction { __sighandler_t sa_handler ; unsigned long sa_flags ; __sigrestore_t sa_restorer ; sigset_t sa_mask ; }; struct k_sigaction { struct sigaction sa ; }; union sigval { int sival_int ; void *sival_ptr ; }; typedef union sigval sigval_t; struct __anonstruct__kill_168 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; }; struct __anonstruct__timer_169 { __kernel_timer_t _tid ; int _overrun ; char _pad[0U] ; sigval_t _sigval ; int _sys_private ; }; struct __anonstruct__rt_170 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; sigval_t _sigval ; }; struct __anonstruct__sigchld_171 { __kernel_pid_t _pid ; __kernel_uid32_t _uid ; int _status ; __kernel_clock_t _utime ; __kernel_clock_t _stime ; }; struct __anonstruct__sigfault_172 { void *_addr ; short _addr_lsb ; }; struct __anonstruct__sigpoll_173 { long _band ; int _fd ; }; struct __anonstruct__sigsys_174 { void *_call_addr ; int _syscall ; unsigned int _arch ; }; union __anonunion__sifields_167 { int _pad[28U] ; struct __anonstruct__kill_168 _kill ; struct __anonstruct__timer_169 _timer ; struct __anonstruct__rt_170 _rt ; struct __anonstruct__sigchld_171 _sigchld ; struct __anonstruct__sigfault_172 _sigfault ; struct __anonstruct__sigpoll_173 _sigpoll ; struct __anonstruct__sigsys_174 _sigsys ; }; struct siginfo { int si_signo ; int si_errno ; int si_code ; union __anonunion__sifields_167 _sifields ; }; typedef struct siginfo siginfo_t; struct sigpending { 
struct list_head list ; sigset_t signal ; }; struct seccomp_filter; struct seccomp { int mode ; struct seccomp_filter *filter ; }; struct rt_mutex_waiter; struct rlimit { unsigned long rlim_cur ; unsigned long rlim_max ; }; struct timerqueue_node { struct rb_node node ; ktime_t expires ; }; struct timerqueue_head { struct rb_root head ; struct timerqueue_node *next ; }; struct hrtimer_clock_base; struct hrtimer_cpu_base; enum hrtimer_restart { HRTIMER_NORESTART = 0, HRTIMER_RESTART = 1 } ; struct hrtimer { struct timerqueue_node node ; ktime_t _softexpires ; enum hrtimer_restart (*function)(struct hrtimer * ) ; struct hrtimer_clock_base *base ; unsigned long state ; int start_pid ; void *start_site ; char start_comm[16U] ; }; struct hrtimer_clock_base { struct hrtimer_cpu_base *cpu_base ; int index ; clockid_t clockid ; struct timerqueue_head active ; ktime_t resolution ; ktime_t (*get_time)(void) ; ktime_t softirq_time ; ktime_t offset ; }; struct hrtimer_cpu_base { raw_spinlock_t lock ; unsigned int active_bases ; unsigned int clock_was_set ; ktime_t expires_next ; int hres_active ; int hang_detected ; unsigned long nr_events ; unsigned long nr_retries ; unsigned long nr_hangs ; ktime_t max_hang_time ; struct hrtimer_clock_base clock_base[3U] ; }; struct task_io_accounting { u64 rchar ; u64 wchar ; u64 syscr ; u64 syscw ; u64 read_bytes ; u64 write_bytes ; u64 cancelled_write_bytes ; }; struct latency_record { unsigned long backtrace[12U] ; unsigned int count ; unsigned long time ; unsigned long max ; }; struct nsproxy; struct ctl_table_root; struct ctl_table_header; struct ctl_dir; typedef int proc_handler(struct ctl_table * , int , void * , size_t * , loff_t * ); struct ctl_table_poll { atomic_t event ; wait_queue_head_t wait ; }; struct ctl_table { char const *procname ; void *data ; int maxlen ; umode_t mode ; struct ctl_table *child ; proc_handler *proc_handler ; struct ctl_table_poll *poll ; void *extra1 ; void *extra2 ; }; struct ctl_node { struct rb_node node ; struct ctl_table_header *header ; }; struct __anonstruct_ldv_23754_178 { struct ctl_table *ctl_table ; int used ; int count ; int nreg ; }; union __anonunion_ldv_23756_177 { struct __anonstruct_ldv_23754_178 ldv_23754 ; struct callback_head rcu ; }; struct ctl_table_set; struct ctl_table_header { union __anonunion_ldv_23756_177 ldv_23756 ; struct completion *unregistering ; struct ctl_table *ctl_table_arg ; struct ctl_table_root *root ; struct ctl_table_set *set ; struct ctl_dir *parent ; struct ctl_node *node ; }; struct ctl_dir { struct ctl_table_header header ; struct rb_root root ; }; struct ctl_table_set { int (*is_seen)(struct ctl_table_set * ) ; struct ctl_dir dir ; }; struct ctl_table_root { struct ctl_table_set default_set ; struct ctl_table_set *(*lookup)(struct ctl_table_root * , struct nsproxy * ) ; int (*permissions)(struct ctl_table_root * , struct nsproxy * , struct ctl_table * ) ; }; typedef int32_t key_serial_t; typedef uint32_t key_perm_t; struct key; struct signal_struct; struct key_type; struct keyring_list; union __anonunion_ldv_23835_179 { struct list_head graveyard_link ; struct rb_node serial_node ; }; struct key_user; union __anonunion_ldv_23844_180 { time_t expiry ; time_t revoked_at ; }; union __anonunion_type_data_181 { struct list_head link ; unsigned long x[2U] ; void *p[2U] ; int reject_error ; }; union __anonunion_payload_182 { unsigned long value ; void *rcudata ; void *data ; struct keyring_list *subscriptions ; }; struct key { atomic_t usage ; key_serial_t serial ; union 
__anonunion_ldv_23835_179 ldv_23835 ; struct key_type *type ; struct rw_semaphore sem ; struct key_user *user ; void *security ; union __anonunion_ldv_23844_180 ldv_23844 ; time_t last_used_at ; kuid_t uid ; kgid_t gid ; key_perm_t perm ; unsigned short quotalen ; unsigned short datalen ; unsigned long flags ; char *description ; union __anonunion_type_data_181 type_data ; union __anonunion_payload_182 payload ; }; struct audit_context; struct group_info { atomic_t usage ; int ngroups ; int nblocks ; kgid_t small_block[32U] ; kgid_t *blocks[0U] ; }; struct thread_group_cred { atomic_t usage ; pid_t tgid ; spinlock_t lock ; struct key *session_keyring ; struct key *process_keyring ; struct callback_head rcu ; }; struct cred { atomic_t usage ; atomic_t subscribers ; void *put_addr ; unsigned int magic ; kuid_t uid ; kgid_t gid ; kuid_t suid ; kgid_t sgid ; kuid_t euid ; kgid_t egid ; kuid_t fsuid ; kgid_t fsgid ; unsigned int securebits ; kernel_cap_t cap_inheritable ; kernel_cap_t cap_permitted ; kernel_cap_t cap_effective ; kernel_cap_t cap_bset ; unsigned char jit_keyring ; struct key *thread_keyring ; struct key *request_key_auth ; struct thread_group_cred *tgcred ; void *security ; struct user_struct *user ; struct user_namespace *user_ns ; struct group_info *group_info ; struct callback_head rcu ; }; struct llist_node; struct llist_node { struct llist_node *next ; }; struct futex_pi_state; struct robust_list_head; struct bio_list; struct fs_struct; struct perf_event_context; struct blk_plug; struct cfs_rq; struct task_group; struct kioctx; union __anonunion_ki_obj_183 { void *user ; struct task_struct *tsk ; }; struct eventfd_ctx; struct kiocb { struct list_head ki_run_list ; unsigned long ki_flags ; int ki_users ; unsigned int ki_key ; struct file *ki_filp ; struct kioctx *ki_ctx ; int (*ki_cancel)(struct kiocb * , struct io_event * ) ; ssize_t (*ki_retry)(struct kiocb * ) ; void (*ki_dtor)(struct kiocb * ) ; union __anonunion_ki_obj_183 ki_obj ; __u64 ki_user_data ; loff_t ki_pos ; void *private ; unsigned short ki_opcode ; size_t ki_nbytes ; char *ki_buf ; size_t ki_left ; struct iovec ki_inline_vec ; struct iovec *ki_iovec ; unsigned long ki_nr_segs ; unsigned long ki_cur_seg ; struct list_head ki_list ; struct list_head ki_batch ; struct eventfd_ctx *ki_eventfd ; }; struct aio_ring_info { unsigned long mmap_base ; unsigned long mmap_size ; struct page **ring_pages ; spinlock_t ring_lock ; long nr_pages ; unsigned int nr ; unsigned int tail ; struct page *internal_pages[8U] ; }; struct kioctx { atomic_t users ; int dead ; struct mm_struct *mm ; unsigned long user_id ; struct hlist_node list ; wait_queue_head_t wait ; spinlock_t ctx_lock ; int reqs_active ; struct list_head active_reqs ; struct list_head run_list ; unsigned int max_reqs ; struct aio_ring_info ring_info ; struct delayed_work wq ; struct callback_head callback_head ; }; struct sighand_struct { atomic_t count ; struct k_sigaction action[64U] ; spinlock_t siglock ; wait_queue_head_t signalfd_wqh ; }; struct pacct_struct { int ac_flag ; long ac_exitcode ; unsigned long ac_mem ; cputime_t ac_utime ; cputime_t ac_stime ; unsigned long ac_minflt ; unsigned long ac_majflt ; }; struct cpu_itimer { cputime_t expires ; cputime_t incr ; u32 error ; u32 incr_error ; }; struct task_cputime { cputime_t utime ; cputime_t stime ; unsigned long long sum_exec_runtime ; }; struct thread_group_cputimer { struct task_cputime cputime ; int running ; raw_spinlock_t lock ; }; struct autogroup; struct tty_struct; struct taskstats; struct 
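/* [editor's note] struct cred above is reference-counted through its atomic_t
 * usage field and freed via the embedded callback_head once the count drops
 * to zero; live creds are never modified in place -- a task prepares a copy
 * and swaps the pointer.  Sketch of the usual lifetime helpers, assuming
 * get_cred()/put_cred()/current_cred() as in the mainline kernel (not
 * declared in this unit):
 *
 *   const struct cred *c = get_cred(current_cred());  // usage++
 *   // ... act on behalf of c ...
 *   put_cred(c);                              // usage--, RCU-freed at zero
 */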
tty_audit_buf; struct signal_struct { atomic_t sigcnt ; atomic_t live ; int nr_threads ; wait_queue_head_t wait_chldexit ; struct task_struct *curr_target ; struct sigpending shared_pending ; int group_exit_code ; int notify_count ; struct task_struct *group_exit_task ; int group_stop_count ; unsigned int flags ; unsigned char is_child_subreaper : 1 ; unsigned char has_child_subreaper : 1 ; struct list_head posix_timers ; struct hrtimer real_timer ; struct pid *leader_pid ; ktime_t it_real_incr ; struct cpu_itimer it[2U] ; struct thread_group_cputimer cputimer ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct pid *tty_old_pgrp ; int leader ; struct tty_struct *tty ; struct autogroup *autogroup ; cputime_t utime ; cputime_t stime ; cputime_t cutime ; cputime_t cstime ; cputime_t gtime ; cputime_t cgtime ; cputime_t prev_utime ; cputime_t prev_stime ; unsigned long nvcsw ; unsigned long nivcsw ; unsigned long cnvcsw ; unsigned long cnivcsw ; unsigned long min_flt ; unsigned long maj_flt ; unsigned long cmin_flt ; unsigned long cmaj_flt ; unsigned long inblock ; unsigned long oublock ; unsigned long cinblock ; unsigned long coublock ; unsigned long maxrss ; unsigned long cmaxrss ; struct task_io_accounting ioac ; unsigned long long sum_sched_runtime ; struct rlimit rlim[16U] ; struct pacct_struct pacct ; struct taskstats *stats ; unsigned int audit_tty ; struct tty_audit_buf *tty_audit_buf ; struct rw_semaphore group_rwsem ; int oom_score_adj ; int oom_score_adj_min ; struct mutex cred_guard_mutex ; }; struct user_struct { atomic_t __count ; atomic_t processes ; atomic_t files ; atomic_t sigpending ; atomic_t inotify_watches ; atomic_t inotify_devs ; atomic_t fanotify_listeners ; atomic_long_t epoll_watches ; unsigned long mq_bytes ; unsigned long locked_shm ; struct key *uid_keyring ; struct key *session_keyring ; struct hlist_node uidhash_node ; kuid_t uid ; atomic_long_t locked_vm ; }; struct reclaim_state; struct sched_info { unsigned long pcount ; unsigned long long run_delay ; unsigned long long last_arrival ; unsigned long long last_queued ; }; struct task_delay_info { spinlock_t lock ; unsigned int flags ; struct timespec blkio_start ; struct timespec blkio_end ; u64 blkio_delay ; u64 swapin_delay ; u32 blkio_count ; u32 swapin_count ; struct timespec freepages_start ; struct timespec freepages_end ; u64 freepages_delay ; u32 freepages_count ; }; struct uts_namespace; struct rq; struct sched_class { struct sched_class const *next ; void (*enqueue_task)(struct rq * , struct task_struct * , int ) ; void (*dequeue_task)(struct rq * , struct task_struct * , int ) ; void (*yield_task)(struct rq * ) ; bool (*yield_to_task)(struct rq * , struct task_struct * , bool ) ; void (*check_preempt_curr)(struct rq * , struct task_struct * , int ) ; struct task_struct *(*pick_next_task)(struct rq * ) ; void (*put_prev_task)(struct rq * , struct task_struct * ) ; int (*select_task_rq)(struct task_struct * , int , int ) ; void (*pre_schedule)(struct rq * , struct task_struct * ) ; void (*post_schedule)(struct rq * ) ; void (*task_waking)(struct task_struct * ) ; void (*task_woken)(struct rq * , struct task_struct * ) ; void (*set_cpus_allowed)(struct task_struct * , struct cpumask const * ) ; void (*rq_online)(struct rq * ) ; void (*rq_offline)(struct rq * ) ; void (*set_curr_task)(struct rq * ) ; void (*task_tick)(struct rq * , struct task_struct * , int ) ; void (*task_fork)(struct task_struct * ) ; void (*switched_from)(struct rq * , struct task_struct * ) ; void 
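/* [editor's note] The table being defined here, struct sched_class, is the
 * per-policy method set (fair, realtime, idle, ...); the instances form a
 * singly linked priority chain through ->next, and the scheduler core picks
 * work by walking that chain.  Schematic of the dispatch, with
 * highest_sched_class standing in for whatever head the core uses (a
 * hypothetical name):
 *
 *   const struct sched_class *class;
 *   struct task_struct *p;
 *   for (class = highest_sched_class; class; class = class->next) {
 *           p = class->pick_next_task(rq);
 *           if (p)
 *                   break;          // first class with runnable work wins
 *   }
 */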
(*switched_to)(struct rq * , struct task_struct * ) ; void (*prio_changed)(struct rq * , struct task_struct * , int ) ; unsigned int (*get_rr_interval)(struct rq * , struct task_struct * ) ; void (*task_move_group)(struct task_struct * , int ) ; }; struct load_weight { unsigned long weight ; unsigned long inv_weight ; }; struct sched_statistics { u64 wait_start ; u64 wait_max ; u64 wait_count ; u64 wait_sum ; u64 iowait_count ; u64 iowait_sum ; u64 sleep_start ; u64 sleep_max ; s64 sum_sleep_runtime ; u64 block_start ; u64 block_max ; u64 exec_max ; u64 slice_max ; u64 nr_migrations_cold ; u64 nr_failed_migrations_affine ; u64 nr_failed_migrations_running ; u64 nr_failed_migrations_hot ; u64 nr_forced_migrations ; u64 nr_wakeups ; u64 nr_wakeups_sync ; u64 nr_wakeups_migrate ; u64 nr_wakeups_local ; u64 nr_wakeups_remote ; u64 nr_wakeups_affine ; u64 nr_wakeups_affine_attempts ; u64 nr_wakeups_passive ; u64 nr_wakeups_idle ; }; struct sched_entity { struct load_weight load ; struct rb_node run_node ; struct list_head group_node ; unsigned int on_rq ; u64 exec_start ; u64 sum_exec_runtime ; u64 vruntime ; u64 prev_sum_exec_runtime ; u64 nr_migrations ; struct sched_statistics statistics ; struct sched_entity *parent ; struct cfs_rq *cfs_rq ; struct cfs_rq *my_q ; }; struct rt_rq; struct sched_rt_entity { struct list_head run_list ; unsigned long timeout ; unsigned int time_slice ; struct sched_rt_entity *back ; struct sched_rt_entity *parent ; struct rt_rq *rt_rq ; struct rt_rq *my_q ; }; struct mem_cgroup; struct memcg_batch_info { int do_batch ; struct mem_cgroup *memcg ; unsigned long nr_pages ; unsigned long memsw_nr_pages ; }; struct css_set; struct compat_robust_list_head; struct task_struct { long volatile state ; void *stack ; atomic_t usage ; unsigned int flags ; unsigned int ptrace ; struct llist_node wake_entry ; int on_cpu ; int on_rq ; int prio ; int static_prio ; int normal_prio ; unsigned int rt_priority ; struct sched_class const *sched_class ; struct sched_entity se ; struct sched_rt_entity rt ; struct task_group *sched_task_group ; struct hlist_head preempt_notifiers ; unsigned char fpu_counter ; unsigned int policy ; int nr_cpus_allowed ; cpumask_t cpus_allowed ; struct sched_info sched_info ; struct list_head tasks ; struct plist_node pushable_tasks ; struct mm_struct *mm ; struct mm_struct *active_mm ; unsigned char brk_randomized : 1 ; int exit_state ; int exit_code ; int exit_signal ; int pdeath_signal ; unsigned int jobctl ; unsigned int personality ; unsigned char did_exec : 1 ; unsigned char in_execve : 1 ; unsigned char in_iowait : 1 ; unsigned char no_new_privs : 1 ; unsigned char sched_reset_on_fork : 1 ; unsigned char sched_contributes_to_load : 1 ; pid_t pid ; pid_t tgid ; unsigned long stack_canary ; struct task_struct *real_parent ; struct task_struct *parent ; struct list_head children ; struct list_head sibling ; struct task_struct *group_leader ; struct list_head ptraced ; struct list_head ptrace_entry ; struct pid_link pids[3U] ; struct list_head thread_group ; struct completion *vfork_done ; int *set_child_tid ; int *clear_child_tid ; cputime_t utime ; cputime_t stime ; cputime_t utimescaled ; cputime_t stimescaled ; cputime_t gtime ; cputime_t prev_utime ; cputime_t prev_stime ; unsigned long nvcsw ; unsigned long nivcsw ; struct timespec start_time ; struct timespec real_start_time ; unsigned long min_flt ; unsigned long maj_flt ; struct task_cputime cputime_expires ; struct list_head cpu_timers[3U] ; struct cred const *real_cred ; struct cred const 
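/* [editor's note] The pair being declared at this point in struct task_struct
 * is deliberate: ->real_cred is the task's objective identity (what others
 * see when acting on it), while ->cred is the subjective identity it
 * currently acts with; they differ only while credentials are temporarily
 * overridden.  Sketch, assuming the mainline override_creds()/revert_creds()
 * helpers (not declared in this unit):
 *
 *   const struct cred *old = override_creds(new_cred); // swaps ->cred only
 *   // ... perform the privileged work ...
 *   revert_creds(old);                                 // restore ->cred
 */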
*cred ; char comm[16U] ; int link_count ; int total_link_count ; struct sysv_sem sysvsem ; unsigned long last_switch_count ; struct thread_struct thread ; struct fs_struct *fs ; struct files_struct *files ; struct nsproxy *nsproxy ; struct signal_struct *signal ; struct sighand_struct *sighand ; sigset_t blocked ; sigset_t real_blocked ; sigset_t saved_sigmask ; struct sigpending pending ; unsigned long sas_ss_sp ; size_t sas_ss_size ; int (*notifier)(void * ) ; void *notifier_data ; sigset_t *notifier_mask ; struct callback_head *task_works ; struct audit_context *audit_context ; kuid_t loginuid ; unsigned int sessionid ; struct seccomp seccomp ; u32 parent_exec_id ; u32 self_exec_id ; spinlock_t alloc_lock ; raw_spinlock_t pi_lock ; struct plist_head pi_waiters ; struct rt_mutex_waiter *pi_blocked_on ; struct mutex_waiter *blocked_on ; unsigned int irq_events ; unsigned long hardirq_enable_ip ; unsigned long hardirq_disable_ip ; unsigned int hardirq_enable_event ; unsigned int hardirq_disable_event ; int hardirqs_enabled ; int hardirq_context ; unsigned long softirq_disable_ip ; unsigned long softirq_enable_ip ; unsigned int softirq_disable_event ; unsigned int softirq_enable_event ; int softirqs_enabled ; int softirq_context ; u64 curr_chain_key ; int lockdep_depth ; unsigned int lockdep_recursion ; struct held_lock held_locks[48U] ; gfp_t lockdep_reclaim_gfp ; void *journal_info ; struct bio_list *bio_list ; struct blk_plug *plug ; struct reclaim_state *reclaim_state ; struct backing_dev_info *backing_dev_info ; struct io_context *io_context ; unsigned long ptrace_message ; siginfo_t *last_siginfo ; struct task_io_accounting ioac ; u64 acct_rss_mem1 ; u64 acct_vm_mem1 ; cputime_t acct_timexpd ; nodemask_t mems_allowed ; seqcount_t mems_allowed_seq ; int cpuset_mem_spread_rotor ; int cpuset_slab_spread_rotor ; struct css_set *cgroups ; struct list_head cg_list ; struct robust_list_head *robust_list ; struct compat_robust_list_head *compat_robust_list ; struct list_head pi_state_list ; struct futex_pi_state *pi_state_cache ; struct perf_event_context *perf_event_ctxp[2U] ; struct mutex perf_event_mutex ; struct list_head perf_event_list ; struct mempolicy *mempolicy ; short il_next ; short pref_node_fork ; struct callback_head rcu ; struct pipe_inode_info *splice_pipe ; struct page_frag task_frag ; struct task_delay_info *delays ; int make_it_fail ; int nr_dirtied ; int nr_dirtied_pause ; unsigned long dirty_paused_when ; int latency_record_count ; struct latency_record latency_record[32U] ; unsigned long timer_slack_ns ; unsigned long default_timer_slack_ns ; unsigned long trace ; unsigned long trace_recursion ; struct memcg_batch_info memcg_batch ; atomic_t ptrace_bp_refcnt ; struct uprobe_task *utask ; }; typedef s32 compat_time_t; typedef s32 compat_long_t; typedef u32 compat_uptr_t; struct compat_timespec { compat_time_t tv_sec ; s32 tv_nsec ; }; struct compat_robust_list { compat_uptr_t next ; }; struct compat_robust_list_head { struct compat_robust_list list ; compat_long_t futex_offset ; compat_uptr_t list_op_pending ; }; enum ldv_22060 { SS_FREE = 0, SS_UNCONNECTED = 1, SS_CONNECTING = 2, SS_CONNECTED = 3, SS_DISCONNECTING = 4 } ; typedef enum ldv_22060 socket_state; struct socket_wq { wait_queue_head_t wait ; struct fasync_struct *fasync_list ; struct callback_head rcu ; }; struct proto_ops; struct socket { socket_state state ; short type ; unsigned long flags ; struct socket_wq *wq ; struct file *file ; struct sock *sk ; struct proto_ops const *ops ; }; struct proto_ops { int 
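/* [editor's note] struct proto_ops, opening above, is the per-family socket
 * method table: every socket system call is forwarded through sock->ops, and
 * in-kernel socket users (DRBD's replication links, for instance) call the
 * same hooks directly.  A sketch of in-kernel use, with demo_connect() a
 * hypothetical wrapper:
 *
 *   static int demo_connect(struct socket *sock, struct sockaddr *addr,
 *                           int addrlen)
 *   {
 *           // last argument is the O_NONBLOCK-style flags word
 *           return sock->ops->connect(sock, addr, addrlen, 0);
 *   }
 */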
family ; struct module *owner ; int (*release)(struct socket * ) ; int (*bind)(struct socket * , struct sockaddr * , int ) ; int (*connect)(struct socket * , struct sockaddr * , int , int ) ; int (*socketpair)(struct socket * , struct socket * ) ; int (*accept)(struct socket * , struct socket * , int ) ; int (*getname)(struct socket * , struct sockaddr * , int * , int ) ; unsigned int (*poll)(struct file * , struct socket * , struct poll_table_struct * ) ; int (*ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct socket * , unsigned int , unsigned long ) ; int (*listen)(struct socket * , int ) ; int (*shutdown)(struct socket * , int ) ; int (*setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct socket * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct socket * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct socket * , int , int , char * , int * ) ; int (*sendmsg)(struct kiocb * , struct socket * , struct msghdr * , size_t ) ; int (*recvmsg)(struct kiocb * , struct socket * , struct msghdr * , size_t , int ) ; int (*mmap)(struct file * , struct socket * , struct vm_area_struct * ) ; ssize_t (*sendpage)(struct socket * , struct page * , int , size_t , int ) ; ssize_t (*splice_read)(struct socket * , loff_t * , struct pipe_inode_info * , size_t , unsigned int ) ; void (*set_peek_off)(struct sock * , int ) ; }; struct kmem_cache_cpu { void **freelist ; unsigned long tid ; struct page *page ; struct page *partial ; unsigned int stat[26U] ; }; struct kmem_cache_node { spinlock_t list_lock ; unsigned long nr_partial ; struct list_head partial ; atomic_long_t nr_slabs ; atomic_long_t total_objects ; struct list_head full ; }; struct kmem_cache_order_objects { unsigned long x ; }; struct kmem_cache { struct kmem_cache_cpu *cpu_slab ; unsigned long flags ; unsigned long min_partial ; int size ; int object_size ; int offset ; int cpu_partial ; struct kmem_cache_order_objects oo ; struct kmem_cache_order_objects max ; struct kmem_cache_order_objects min ; gfp_t allocflags ; int refcount ; void (*ctor)(void * ) ; int inuse ; int align ; int reserved ; char const *name ; struct list_head list ; struct kobject kobj ; int remote_node_defrag_ratio ; struct kmem_cache_node *node[1024U] ; }; struct in6_addr; struct sk_buff; struct dma_attrs { unsigned long flags[1U] ; }; enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3 } ; struct dma_map_ops { void *(*alloc)(struct device * , size_t , dma_addr_t * , gfp_t , struct dma_attrs * ) ; void (*free)(struct device * , size_t , void * , dma_addr_t , struct dma_attrs * ) ; int (*mmap)(struct device * , struct vm_area_struct * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; int (*get_sgtable)(struct device * , struct sg_table * , void * , dma_addr_t , size_t , struct dma_attrs * ) ; dma_addr_t (*map_page)(struct device * , struct page * , unsigned long , size_t , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_page)(struct device * , dma_addr_t , size_t , enum dma_data_direction , struct dma_attrs * ) ; int (*map_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*unmap_sg)(struct device * , struct scatterlist * , int , enum dma_data_direction , struct dma_attrs * ) ; void (*sync_single_for_cpu)(struct device * , dma_addr_t , size_t , enum dma_data_direction ) ; void (*sync_single_for_device)(struct device * , dma_addr_t , size_t 
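/* [editor's note] struct dma_map_ops, mid-definition here, is the indirection
 * layer behind the DMA mapping API: arch wrappers such as dma_map_page()
 * resolve the device's ops table and dispatch through it.  Schematic of that
 * dispatch (get_dma_ops() stands in for however the table is looked up):
 *
 *   struct dma_map_ops *ops = get_dma_ops(dev);
 *   dma_addr_t handle = ops->map_page(dev, page, offset, size,
 *                                     DMA_TO_DEVICE, NULL);
 *   if (ops->mapping_error && ops->mapping_error(dev, handle))
 *           return -ENOMEM;       // mapping failed, nothing to unmap
 *   // ... perform the transfer ...
 *   ops->unmap_page(dev, handle, size, DMA_TO_DEVICE, NULL);
 */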
, enum dma_data_direction ) ; void (*sync_sg_for_cpu)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; void (*sync_sg_for_device)(struct device * , struct scatterlist * , int , enum dma_data_direction ) ; int (*mapping_error)(struct device * , dma_addr_t ) ; int (*dma_supported)(struct device * , u64 ) ; int (*set_dma_mask)(struct device * , u64 ) ; int is_phys ; }; typedef u64 netdev_features_t; struct nf_conntrack { atomic_t use ; }; struct nf_bridge_info { atomic_t use ; unsigned int mask ; struct net_device *physindev ; struct net_device *physoutdev ; unsigned long data[4U] ; }; struct sk_buff_head { struct sk_buff *next ; struct sk_buff *prev ; __u32 qlen ; spinlock_t lock ; }; typedef unsigned int sk_buff_data_t; struct sec_path; struct __anonstruct_ldv_28250_201 { __u16 csum_start ; __u16 csum_offset ; }; union __anonunion_ldv_28251_200 { __wsum csum ; struct __anonstruct_ldv_28250_201 ldv_28250 ; }; union __anonunion_ldv_28289_202 { __u32 mark ; __u32 dropcount ; __u32 avail_size ; }; struct sk_buff { struct sk_buff *next ; struct sk_buff *prev ; ktime_t tstamp ; struct sock *sk ; struct net_device *dev ; char cb[48U] ; unsigned long _skb_refdst ; struct sec_path *sp ; unsigned int len ; unsigned int data_len ; __u16 mac_len ; __u16 hdr_len ; union __anonunion_ldv_28251_200 ldv_28251 ; __u32 priority ; unsigned char local_df : 1 ; unsigned char cloned : 1 ; unsigned char ip_summed : 2 ; unsigned char nohdr : 1 ; unsigned char nfctinfo : 3 ; unsigned char pkt_type : 3 ; unsigned char fclone : 2 ; unsigned char ipvs_property : 1 ; unsigned char peeked : 1 ; unsigned char nf_trace : 1 ; __be16 protocol ; void (*destructor)(struct sk_buff * ) ; struct nf_conntrack *nfct ; struct sk_buff *nfct_reasm ; struct nf_bridge_info *nf_bridge ; int skb_iif ; __u32 rxhash ; __u16 vlan_tci ; __u16 tc_index ; __u16 tc_verd ; __u16 queue_mapping ; unsigned char ndisc_nodetype : 2 ; unsigned char pfmemalloc : 1 ; unsigned char ooo_okay : 1 ; unsigned char l4_rxhash : 1 ; unsigned char wifi_acked_valid : 1 ; unsigned char wifi_acked : 1 ; unsigned char no_fcs : 1 ; unsigned char head_frag : 1 ; dma_cookie_t dma_cookie ; __u32 secmark ; union __anonunion_ldv_28289_202 ldv_28289 ; sk_buff_data_t transport_header ; sk_buff_data_t network_header ; sk_buff_data_t mac_header ; sk_buff_data_t tail ; sk_buff_data_t end ; unsigned char *head ; unsigned char *data ; unsigned int truesize ; atomic_t users ; }; struct dst_entry; struct rtable; struct ethhdr { unsigned char h_dest[6U] ; unsigned char h_source[6U] ; __be16 h_proto ; }; struct ethtool_cmd { __u32 cmd ; __u32 supported ; __u32 advertising ; __u16 speed ; __u8 duplex ; __u8 port ; __u8 phy_address ; __u8 transceiver ; __u8 autoneg ; __u8 mdio_support ; __u32 maxtxpkt ; __u32 maxrxpkt ; __u16 speed_hi ; __u8 eth_tp_mdix ; __u8 eth_tp_mdix_ctrl ; __u32 lp_advertising ; __u32 reserved[2U] ; }; struct ethtool_drvinfo { __u32 cmd ; char driver[32U] ; char version[32U] ; char fw_version[32U] ; char bus_info[32U] ; char reserved1[32U] ; char reserved2[12U] ; __u32 n_priv_flags ; __u32 n_stats ; __u32 testinfo_len ; __u32 eedump_len ; __u32 regdump_len ; }; struct ethtool_wolinfo { __u32 cmd ; __u32 supported ; __u32 wolopts ; __u8 sopass[6U] ; }; struct ethtool_regs { __u32 cmd ; __u32 version ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eeprom { __u32 cmd ; __u32 magic ; __u32 offset ; __u32 len ; __u8 data[0U] ; }; struct ethtool_eee { __u32 cmd ; __u32 supported ; __u32 advertised ; __u32 lp_advertised ; __u32 eee_active ; 
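/* [editor's note] In struct sk_buff above, sk_buff_data_t is plain unsigned
 * int, so ->transport_header/->network_header/->mac_header/->tail/->end are
 * byte offsets from ->head rather than pointers (the offset variant of the
 * skb layout).  The linear buffer invariant is head <= data <= tail <= end,
 * e.g.:
 *
 *   unsigned char *tail = skb->head + skb->tail;   // skb_tail_pointer()
 *   // skb_put(skb, n) appends n bytes: write at tail, then
 *   //   skb->tail += n;  skb->len += n;   (must not run past skb->end)
 */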
__u32 eee_enabled ; __u32 tx_lpi_enabled ; __u32 tx_lpi_timer ; __u32 reserved[2U] ; }; struct ethtool_modinfo { __u32 cmd ; __u32 type ; __u32 eeprom_len ; __u32 reserved[8U] ; }; struct ethtool_coalesce { __u32 cmd ; __u32 rx_coalesce_usecs ; __u32 rx_max_coalesced_frames ; __u32 rx_coalesce_usecs_irq ; __u32 rx_max_coalesced_frames_irq ; __u32 tx_coalesce_usecs ; __u32 tx_max_coalesced_frames ; __u32 tx_coalesce_usecs_irq ; __u32 tx_max_coalesced_frames_irq ; __u32 stats_block_coalesce_usecs ; __u32 use_adaptive_rx_coalesce ; __u32 use_adaptive_tx_coalesce ; __u32 pkt_rate_low ; __u32 rx_coalesce_usecs_low ; __u32 rx_max_coalesced_frames_low ; __u32 tx_coalesce_usecs_low ; __u32 tx_max_coalesced_frames_low ; __u32 pkt_rate_high ; __u32 rx_coalesce_usecs_high ; __u32 rx_max_coalesced_frames_high ; __u32 tx_coalesce_usecs_high ; __u32 tx_max_coalesced_frames_high ; __u32 rate_sample_interval ; }; struct ethtool_ringparam { __u32 cmd ; __u32 rx_max_pending ; __u32 rx_mini_max_pending ; __u32 rx_jumbo_max_pending ; __u32 tx_max_pending ; __u32 rx_pending ; __u32 rx_mini_pending ; __u32 rx_jumbo_pending ; __u32 tx_pending ; }; struct ethtool_channels { __u32 cmd ; __u32 max_rx ; __u32 max_tx ; __u32 max_other ; __u32 max_combined ; __u32 rx_count ; __u32 tx_count ; __u32 other_count ; __u32 combined_count ; }; struct ethtool_pauseparam { __u32 cmd ; __u32 autoneg ; __u32 rx_pause ; __u32 tx_pause ; }; struct ethtool_test { __u32 cmd ; __u32 flags ; __u32 reserved ; __u32 len ; __u64 data[0U] ; }; struct ethtool_stats { __u32 cmd ; __u32 n_stats ; __u64 data[0U] ; }; struct ethtool_tcpip4_spec { __be32 ip4src ; __be32 ip4dst ; __be16 psrc ; __be16 pdst ; __u8 tos ; }; struct ethtool_ah_espip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 spi ; __u8 tos ; }; struct ethtool_usrip4_spec { __be32 ip4src ; __be32 ip4dst ; __be32 l4_4_bytes ; __u8 tos ; __u8 ip_ver ; __u8 proto ; }; union ethtool_flow_union { struct ethtool_tcpip4_spec tcp_ip4_spec ; struct ethtool_tcpip4_spec udp_ip4_spec ; struct ethtool_tcpip4_spec sctp_ip4_spec ; struct ethtool_ah_espip4_spec ah_ip4_spec ; struct ethtool_ah_espip4_spec esp_ip4_spec ; struct ethtool_usrip4_spec usr_ip4_spec ; struct ethhdr ether_spec ; __u8 hdata[60U] ; }; struct ethtool_flow_ext { __be16 vlan_etype ; __be16 vlan_tci ; __be32 data[2U] ; }; struct ethtool_rx_flow_spec { __u32 flow_type ; union ethtool_flow_union h_u ; struct ethtool_flow_ext h_ext ; union ethtool_flow_union m_u ; struct ethtool_flow_ext m_ext ; __u64 ring_cookie ; __u32 location ; }; struct ethtool_rxnfc { __u32 cmd ; __u32 flow_type ; __u64 data ; struct ethtool_rx_flow_spec fs ; __u32 rule_cnt ; __u32 rule_locs[0U] ; }; struct ethtool_flash { __u32 cmd ; __u32 region ; char data[128U] ; }; struct ethtool_dump { __u32 cmd ; __u32 version ; __u32 flag ; __u32 len ; __u8 data[0U] ; }; struct ethtool_ts_info { __u32 cmd ; __u32 so_timestamping ; __s32 phc_index ; __u32 tx_types ; __u32 tx_reserved[3U] ; __u32 rx_filters ; __u32 rx_reserved[3U] ; }; enum ethtool_phys_id_state { ETHTOOL_ID_INACTIVE = 0, ETHTOOL_ID_ACTIVE = 1, ETHTOOL_ID_ON = 2, ETHTOOL_ID_OFF = 3 } ; struct ethtool_ops { int (*get_settings)(struct net_device * , struct ethtool_cmd * ) ; int (*set_settings)(struct net_device * , struct ethtool_cmd * ) ; void (*get_drvinfo)(struct net_device * , struct ethtool_drvinfo * ) ; int (*get_regs_len)(struct net_device * ) ; void (*get_regs)(struct net_device * , struct ethtool_regs * , void * ) ; void (*get_wol)(struct net_device * , struct ethtool_wolinfo * ) ; int 
(*set_wol)(struct net_device * , struct ethtool_wolinfo * ) ; u32 (*get_msglevel)(struct net_device * ) ; void (*set_msglevel)(struct net_device * , u32 ) ; int (*nway_reset)(struct net_device * ) ; u32 (*get_link)(struct net_device * ) ; int (*get_eeprom_len)(struct net_device * ) ; int (*get_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*set_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; int (*set_coalesce)(struct net_device * , struct ethtool_coalesce * ) ; void (*get_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; int (*set_ringparam)(struct net_device * , struct ethtool_ringparam * ) ; void (*get_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; int (*set_pauseparam)(struct net_device * , struct ethtool_pauseparam * ) ; void (*self_test)(struct net_device * , struct ethtool_test * , u64 * ) ; void (*get_strings)(struct net_device * , u32 , u8 * ) ; int (*set_phys_id)(struct net_device * , enum ethtool_phys_id_state ) ; void (*get_ethtool_stats)(struct net_device * , struct ethtool_stats * , u64 * ) ; int (*begin)(struct net_device * ) ; void (*complete)(struct net_device * ) ; u32 (*get_priv_flags)(struct net_device * ) ; int (*set_priv_flags)(struct net_device * , u32 ) ; int (*get_sset_count)(struct net_device * , int ) ; int (*get_rxnfc)(struct net_device * , struct ethtool_rxnfc * , u32 * ) ; int (*set_rxnfc)(struct net_device * , struct ethtool_rxnfc * ) ; int (*flash_device)(struct net_device * , struct ethtool_flash * ) ; int (*reset)(struct net_device * , u32 * ) ; u32 (*get_rxfh_indir_size)(struct net_device * ) ; int (*get_rxfh_indir)(struct net_device * , u32 * ) ; int (*set_rxfh_indir)(struct net_device * , u32 const * ) ; void (*get_channels)(struct net_device * , struct ethtool_channels * ) ; int (*set_channels)(struct net_device * , struct ethtool_channels * ) ; int (*get_dump_flag)(struct net_device * , struct ethtool_dump * ) ; int (*get_dump_data)(struct net_device * , struct ethtool_dump * , void * ) ; int (*set_dump)(struct net_device * , struct ethtool_dump * ) ; int (*get_ts_info)(struct net_device * , struct ethtool_ts_info * ) ; int (*get_module_info)(struct net_device * , struct ethtool_modinfo * ) ; int (*get_module_eeprom)(struct net_device * , struct ethtool_eeprom * , u8 * ) ; int (*get_eee)(struct net_device * , struct ethtool_eee * ) ; int (*set_eee)(struct net_device * , struct ethtool_eee * ) ; }; struct prot_inuse; struct netns_core { struct ctl_table_header *sysctl_hdr ; int sysctl_somaxconn ; struct prot_inuse *inuse ; }; struct u64_stats_sync { }; struct ipstats_mib { u64 mibs[31U] ; struct u64_stats_sync syncp ; }; struct icmp_mib { unsigned long mibs[27U] ; }; struct icmpmsg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6_mib { unsigned long mibs[5U] ; }; struct icmpv6_mib_device { atomic_long_t mibs[5U] ; }; struct icmpv6msg_mib { atomic_long_t mibs[512U] ; }; struct icmpv6msg_mib_device { atomic_long_t mibs[512U] ; }; struct tcp_mib { unsigned long mibs[15U] ; }; struct udp_mib { unsigned long mibs[7U] ; }; struct linux_mib { unsigned long mibs[92U] ; }; struct linux_xfrm_mib { unsigned long mibs[27U] ; }; struct netns_mib { struct tcp_mib *tcp_statistics[1U] ; struct ipstats_mib *ip_statistics[1U] ; struct linux_mib *net_statistics[1U] ; struct udp_mib *udp_statistics[1U] ; struct udp_mib *udplite_statistics[1U] ; struct icmp_mib *icmp_statistics[1U] ; struct icmpmsg_mib *icmpmsg_statistics ; 
struct proc_dir_entry *proc_net_devsnmp6 ; struct udp_mib *udp_stats_in6[1U] ; struct udp_mib *udplite_stats_in6[1U] ; struct ipstats_mib *ipv6_statistics[1U] ; struct icmpv6_mib *icmpv6_statistics[1U] ; struct icmpv6msg_mib *icmpv6msg_statistics ; struct linux_xfrm_mib *xfrm_statistics[1U] ; }; struct netns_unix { int sysctl_max_dgram_qlen ; struct ctl_table_header *ctl ; }; struct netns_packet { struct mutex sklist_lock ; struct hlist_head sklist ; }; struct netns_frags { int nqueues ; atomic_t mem ; struct list_head lru_list ; int timeout ; int high_thresh ; int low_thresh ; }; struct tcpm_hash_bucket; struct ipv4_devconf; struct fib_rules_ops; struct fib_table; struct inet_peer_base; struct xt_table; struct netns_ipv4 { struct ctl_table_header *forw_hdr ; struct ctl_table_header *frags_hdr ; struct ctl_table_header *ipv4_hdr ; struct ctl_table_header *route_hdr ; struct ipv4_devconf *devconf_all ; struct ipv4_devconf *devconf_dflt ; struct fib_rules_ops *rules_ops ; bool fib_has_custom_rules ; struct fib_table *fib_local ; struct fib_table *fib_main ; struct fib_table *fib_default ; int fib_num_tclassid_users ; struct hlist_head *fib_table_hash ; struct sock *fibnl ; struct sock **icmp_sk ; struct inet_peer_base *peers ; struct tcpm_hash_bucket *tcp_metrics_hash ; unsigned int tcp_metrics_hash_log ; struct netns_frags frags ; struct xt_table *iptable_filter ; struct xt_table *iptable_mangle ; struct xt_table *iptable_raw ; struct xt_table *arptable_filter ; struct xt_table *iptable_security ; struct xt_table *nat_table ; int sysctl_icmp_echo_ignore_all ; int sysctl_icmp_echo_ignore_broadcasts ; int sysctl_icmp_ignore_bogus_error_responses ; int sysctl_icmp_ratelimit ; int sysctl_icmp_ratemask ; int sysctl_icmp_errors_use_inbound_ifaddr ; kgid_t sysctl_ping_group_range[2U] ; long sysctl_tcp_mem[3U] ; atomic_t dev_addr_genid ; struct list_head mr_tables ; struct fib_rules_ops *mr_rules_ops ; }; struct neighbour; struct dst_ops { unsigned short family ; __be16 protocol ; unsigned int gc_thresh ; int (*gc)(struct dst_ops * ) ; struct dst_entry *(*check)(struct dst_entry * , __u32 ) ; unsigned int (*default_advmss)(struct dst_entry const * ) ; unsigned int (*mtu)(struct dst_entry const * ) ; u32 *(*cow_metrics)(struct dst_entry * , unsigned long ) ; void (*destroy)(struct dst_entry * ) ; void (*ifdown)(struct dst_entry * , struct net_device * , int ) ; struct dst_entry *(*negative_advice)(struct dst_entry * ) ; void (*link_failure)(struct sk_buff * ) ; void (*update_pmtu)(struct dst_entry * , struct sock * , struct sk_buff * , u32 ) ; void (*redirect)(struct dst_entry * , struct sock * , struct sk_buff * ) ; int (*local_out)(struct sk_buff * ) ; struct neighbour *(*neigh_lookup)(struct dst_entry const * , struct sk_buff * , void const * ) ; struct kmem_cache *kmem_cachep ; struct percpu_counter pcpuc_entries ; }; struct netns_sysctl_ipv6 { struct ctl_table_header *hdr ; struct ctl_table_header *route_hdr ; struct ctl_table_header *icmp_hdr ; struct ctl_table_header *frags_hdr ; int bindv6only ; int flush_delay ; int ip6_rt_max_size ; int ip6_rt_gc_min_interval ; int ip6_rt_gc_timeout ; int ip6_rt_gc_interval ; int ip6_rt_gc_elasticity ; int ip6_rt_mtu_expires ; int ip6_rt_min_advmss ; int icmpv6_time ; }; struct ipv6_devconf; struct rt6_info; struct rt6_statistics; struct fib6_table; struct netns_ipv6 { struct netns_sysctl_ipv6 sysctl ; struct ipv6_devconf *devconf_all ; struct ipv6_devconf *devconf_dflt ; struct inet_peer_base *peers ; struct netns_frags frags ; struct xt_table 
*ip6table_filter ; struct xt_table *ip6table_mangle ; struct xt_table *ip6table_raw ; struct xt_table *ip6table_security ; struct xt_table *ip6table_nat ; struct rt6_info *ip6_null_entry ; struct rt6_statistics *rt6_stats ; struct timer_list ip6_fib_timer ; struct hlist_head *fib_table_hash ; struct fib6_table *fib6_main_tbl ; struct dst_ops ip6_dst_ops ; unsigned int ip6_rt_gc_expire ; unsigned long ip6_rt_last_gc ; struct rt6_info *ip6_prohibit_entry ; struct rt6_info *ip6_blk_hole_entry ; struct fib6_table *fib6_local_tbl ; struct fib_rules_ops *fib6_rules_ops ; struct sock **icmp_sk ; struct sock *ndisc_sk ; struct sock *tcp_sk ; struct sock *igmp_sk ; struct list_head mr6_tables ; struct fib_rules_ops *mr6_rules_ops ; }; struct netns_nf_frag { struct netns_sysctl_ipv6 sysctl ; struct netns_frags frags ; }; struct sctp_mib; struct netns_sctp { struct sctp_mib *sctp_statistics[1U] ; struct proc_dir_entry *proc_net_sctp ; struct ctl_table_header *sysctl_header ; struct sock *ctl_sock ; struct list_head local_addr_list ; struct list_head addr_waitq ; struct timer_list addr_wq_timer ; struct list_head auto_asconf_splist ; spinlock_t addr_wq_lock ; spinlock_t local_addr_lock ; unsigned int rto_initial ; unsigned int rto_min ; unsigned int rto_max ; int rto_alpha ; int rto_beta ; int max_burst ; int cookie_preserve_enable ; unsigned int valid_cookie_life ; unsigned int sack_timeout ; unsigned int hb_interval ; int max_retrans_association ; int max_retrans_path ; int max_retrans_init ; int pf_retrans ; int sndbuf_policy ; int rcvbuf_policy ; int default_auto_asconf ; int addip_enable ; int addip_noauth ; int prsctp_enable ; int auth_enable ; int scope_policy ; int rwnd_upd_shift ; unsigned long max_autoclose ; }; struct netns_dccp { struct sock *v4_ctl_sk ; struct sock *v6_ctl_sk ; }; union __anonunion_in6_u_205 { __u8 u6_addr8[16U] ; __be16 u6_addr16[8U] ; __be32 u6_addr32[4U] ; }; struct in6_addr { union __anonunion_in6_u_205 in6_u ; }; typedef int read_proc_t(char * , char ** , off_t , int , int * , void * ); typedef int write_proc_t(struct file * , char const * , unsigned long , void * ); struct proc_dir_entry { unsigned int low_ino ; umode_t mode ; nlink_t nlink ; kuid_t uid ; kgid_t gid ; loff_t size ; struct inode_operations const *proc_iops ; struct file_operations const *proc_fops ; struct proc_dir_entry *next ; struct proc_dir_entry *parent ; struct proc_dir_entry *subdir ; void *data ; read_proc_t *read_proc ; write_proc_t *write_proc ; atomic_t count ; int pde_users ; struct completion *pde_unload_completion ; struct list_head pde_openers ; spinlock_t pde_unload_lock ; u8 namelen ; char name[] ; }; struct nlattr; struct ebt_table; struct netns_xt { struct list_head tables[13U] ; struct ebt_table *broute_table ; struct ebt_table *frame_filter ; struct ebt_table *frame_nat ; }; struct nf_proto_net { struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; struct ctl_table_header *ctl_compat_header ; struct ctl_table *ctl_compat_table ; unsigned int users ; }; struct nf_generic_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_tcp_net { struct nf_proto_net pn ; unsigned int timeouts[14U] ; unsigned int tcp_loose ; unsigned int tcp_be_liberal ; unsigned int tcp_max_retrans ; }; struct nf_udp_net { struct nf_proto_net pn ; unsigned int timeouts[2U] ; }; struct nf_icmp_net { struct nf_proto_net pn ; unsigned int timeout ; }; struct nf_ip_net { struct nf_generic_net generic ; struct nf_tcp_net tcp ; struct nf_udp_net udp ; struct nf_icmp_net icmp ; 
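/* [editor's note] read_proc_t above is the legacy procfs read hook wired into
 * struct proc_dir_entry's ->read_proc member: the callback fills the supplied
 * page, sets *eof when there is nothing further, and returns the byte count.
 * A minimal implementation matching that typedef (demo_read_proc is a
 * hypothetical name):
 *
 *   static int demo_read_proc(char *page, char **start, off_t off,
 *                             int count, int *eof, void *data)
 *   {
 *           int len = sprintf(page, "demo\n");
 *           *eof = 1;              // single short read, no continuation
 *           return len;
 *   }
 */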
struct nf_icmp_net icmpv6 ; struct ctl_table_header *ctl_table_header ; struct ctl_table *ctl_table ; }; struct ip_conntrack_stat; struct nf_ct_event_notifier; struct nf_exp_event_notifier; struct netns_ct { atomic_t count ; unsigned int expect_count ; unsigned int htable_size ; struct kmem_cache *nf_conntrack_cachep ; struct hlist_nulls_head *hash ; struct hlist_head *expect_hash ; struct hlist_nulls_head unconfirmed ; struct hlist_nulls_head dying ; struct ip_conntrack_stat *stat ; struct nf_ct_event_notifier *nf_conntrack_event_cb ; struct nf_exp_event_notifier *nf_expect_event_cb ; int sysctl_events ; unsigned int sysctl_events_retry_timeout ; int sysctl_acct ; int sysctl_tstamp ; int sysctl_checksum ; unsigned int sysctl_log_invalid ; int sysctl_auto_assign_helper ; bool auto_assign_helper_warned ; struct nf_ip_net nf_ct_proto ; struct hlist_head *nat_bysource ; unsigned int nat_htable_size ; struct ctl_table_header *sysctl_header ; struct ctl_table_header *acct_sysctl_header ; struct ctl_table_header *tstamp_sysctl_header ; struct ctl_table_header *event_sysctl_header ; struct ctl_table_header *helper_sysctl_header ; char *slabname ; }; struct xfrm_policy_hash { struct hlist_head *table ; unsigned int hmask ; }; struct netns_xfrm { struct list_head state_all ; struct hlist_head *state_bydst ; struct hlist_head *state_bysrc ; struct hlist_head *state_byspi ; unsigned int state_hmask ; unsigned int state_num ; struct work_struct state_hash_work ; struct hlist_head state_gc_list ; struct work_struct state_gc_work ; wait_queue_head_t km_waitq ; struct list_head policy_all ; struct hlist_head *policy_byidx ; unsigned int policy_idx_hmask ; struct hlist_head policy_inexact[6U] ; struct xfrm_policy_hash policy_bydst[6U] ; unsigned int policy_count[6U] ; struct work_struct policy_hash_work ; struct sock *nlsk ; struct sock *nlsk_stash ; u32 sysctl_aevent_etime ; u32 sysctl_aevent_rseqth ; int sysctl_larval_drop ; u32 sysctl_acq_expires ; struct ctl_table_header *sysctl_hdr ; struct dst_ops xfrm4_dst_ops ; struct dst_ops xfrm6_dst_ops ; }; struct net_generic; struct netns_ipvs; struct net { atomic_t passive ; atomic_t count ; spinlock_t rules_mod_lock ; struct list_head list ; struct list_head cleanup_list ; struct list_head exit_list ; struct proc_dir_entry *proc_net ; struct proc_dir_entry *proc_net_stat ; struct ctl_table_set sysctls ; struct sock *rtnl ; struct sock *genl_sock ; struct list_head dev_base_head ; struct hlist_head *dev_name_head ; struct hlist_head *dev_index_head ; unsigned int dev_base_seq ; int ifindex ; struct list_head rules_ops ; struct net_device *loopback_dev ; struct netns_core core ; struct netns_mib mib ; struct netns_packet packet ; struct netns_unix unx ; struct netns_ipv4 ipv4 ; struct netns_ipv6 ipv6 ; struct netns_sctp sctp ; struct netns_dccp dccp ; struct netns_xt xt ; struct netns_ct ct ; struct netns_nf_frag nf_frag ; struct sock *nfnl ; struct sock *nfnl_stash ; struct sk_buff_head wext_nlevents ; struct net_generic *gen ; struct netns_xfrm xfrm ; struct netns_ipvs *ipvs ; struct sock *diag_nlsk ; atomic_t rt_genid ; }; struct seq_file { char *buf ; size_t size ; size_t from ; size_t count ; loff_t index ; loff_t read_pos ; u64 version ; struct mutex lock ; struct seq_operations const *op ; int poll_event ; void *private ; }; struct seq_operations { void *(*start)(struct seq_file * , loff_t * ) ; void (*stop)(struct seq_file * , void * ) ; void *(*next)(struct seq_file * , void * , loff_t * ) ; int (*show)(struct seq_file * , void * ) ; }; struct 
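/* [editor's note] struct seq_operations, defined just above, is the iterator
 * contract behind seq_file: ->start positions the cursor, ->next advances it,
 * ->show formats one record, and ->stop releases whatever ->start took.  A
 * minimal single-record instance (the demo_* names are hypothetical):
 *
 *   static void *demo_start(struct seq_file *m, loff_t *pos)
 *   {
 *           return *pos == 0 ? m->private : NULL;   // one record only
 *   }
 *   static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
 *   {
 *           ++*pos;
 *           return NULL;                            // no further records
 *   }
 *   static void demo_stop(struct seq_file *m, void *v) { }
 *   static int demo_show(struct seq_file *m, void *v)
 *   {
 *           return 0;      // would seq_printf(m, ...) the record here
 *   }
 *   static struct seq_operations const demo_seq_ops = {
 *           .start = demo_start, .next = demo_next,
 *           .stop  = demo_stop,  .show = demo_show,
 *   };
 */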
dsa_chip_data { struct device *mii_bus ; int sw_addr ; char *port_names[12U] ; s8 *rtable ; }; struct dsa_platform_data { struct device *netdev ; int nr_chips ; struct dsa_chip_data *chip ; }; struct dsa_switch; struct dsa_switch_tree { struct dsa_platform_data *pd ; struct net_device *master_netdev ; __be16 tag_protocol ; s8 cpu_switch ; s8 cpu_port ; int link_poll_needed ; struct work_struct link_poll_work ; struct timer_list link_poll_timer ; struct dsa_switch *ds[4U] ; }; struct dsa_switch_driver; struct mii_bus; struct dsa_switch { struct dsa_switch_tree *dst ; int index ; struct dsa_chip_data *pd ; struct dsa_switch_driver *drv ; struct mii_bus *master_mii_bus ; u32 dsa_port_mask ; u32 phys_port_mask ; struct mii_bus *slave_mii_bus ; struct net_device *ports[12U] ; }; struct dsa_switch_driver { struct list_head list ; __be16 tag_protocol ; int priv_size ; char *(*probe)(struct mii_bus * , int ) ; int (*setup)(struct dsa_switch * ) ; int (*set_addr)(struct dsa_switch * , u8 * ) ; int (*phy_read)(struct dsa_switch * , int , int ) ; int (*phy_write)(struct dsa_switch * , int , int , u16 ) ; void (*poll_link)(struct dsa_switch * ) ; void (*get_strings)(struct dsa_switch * , int , uint8_t * ) ; void (*get_ethtool_stats)(struct dsa_switch * , int , uint64_t * ) ; int (*get_sset_count)(struct dsa_switch * ) ; }; struct ieee_ets { __u8 willing ; __u8 ets_cap ; __u8 cbs ; __u8 tc_tx_bw[8U] ; __u8 tc_rx_bw[8U] ; __u8 tc_tsa[8U] ; __u8 prio_tc[8U] ; __u8 tc_reco_bw[8U] ; __u8 tc_reco_tsa[8U] ; __u8 reco_prio_tc[8U] ; }; struct ieee_maxrate { __u64 tc_maxrate[8U] ; }; struct ieee_pfc { __u8 pfc_cap ; __u8 pfc_en ; __u8 mbc ; __u16 delay ; __u64 requests[8U] ; __u64 indications[8U] ; }; struct cee_pg { __u8 willing ; __u8 error ; __u8 pg_en ; __u8 tcs_supported ; __u8 pg_bw[8U] ; __u8 prio_pg[8U] ; }; struct cee_pfc { __u8 willing ; __u8 error ; __u8 pfc_en ; __u8 tcs_supported ; }; struct dcb_app { __u8 selector ; __u8 priority ; __u16 protocol ; }; struct dcb_peer_app_info { __u8 willing ; __u8 error ; }; struct dcbnl_rtnl_ops { int (*ieee_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_setets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_getmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_setmaxrate)(struct net_device * , struct ieee_maxrate * ) ; int (*ieee_getpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_setpfc)(struct net_device * , struct ieee_pfc * ) ; int (*ieee_getapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_setapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_delapp)(struct net_device * , struct dcb_app * ) ; int (*ieee_peer_getets)(struct net_device * , struct ieee_ets * ) ; int (*ieee_peer_getpfc)(struct net_device * , struct ieee_pfc * ) ; u8 (*getstate)(struct net_device * ) ; u8 (*setstate)(struct net_device * , u8 ) ; void (*getpermhwaddr)(struct net_device * , u8 * ) ; void (*setpgtccfgtx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgtx)(struct net_device * , int , u8 ) ; void (*setpgtccfgrx)(struct net_device * , int , u8 , u8 , u8 , u8 ) ; void (*setpgbwgcfgrx)(struct net_device * , int , u8 ) ; void (*getpgtccfgtx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgtx)(struct net_device * , int , u8 * ) ; void (*getpgtccfgrx)(struct net_device * , int , u8 * , u8 * , u8 * , u8 * ) ; void (*getpgbwgcfgrx)(struct net_device * , int , u8 * ) ; void (*setpfccfg)(struct net_device * , int , u8 ) ; void (*getpfccfg)(struct net_device * , int , u8 * ) ; u8 
(*setall)(struct net_device * ) ; u8 (*getcap)(struct net_device * , int , u8 * ) ; int (*getnumtcs)(struct net_device * , int , u8 * ) ; int (*setnumtcs)(struct net_device * , int , u8 ) ; u8 (*getpfcstate)(struct net_device * ) ; void (*setpfcstate)(struct net_device * , u8 ) ; void (*getbcncfg)(struct net_device * , int , u32 * ) ; void (*setbcncfg)(struct net_device * , int , u32 ) ; void (*getbcnrp)(struct net_device * , int , u8 * ) ; void (*setbcnrp)(struct net_device * , int , u8 ) ; u8 (*setapp)(struct net_device * , u8 , u16 , u8 ) ; u8 (*getapp)(struct net_device * , u8 , u16 ) ; u8 (*getfeatcfg)(struct net_device * , int , u8 * ) ; u8 (*setfeatcfg)(struct net_device * , int , u8 ) ; u8 (*getdcbx)(struct net_device * ) ; u8 (*setdcbx)(struct net_device * , u8 ) ; int (*peer_getappinfo)(struct net_device * , struct dcb_peer_app_info * , u16 * ) ; int (*peer_getapptable)(struct net_device * , struct dcb_app * ) ; int (*cee_peer_getpg)(struct net_device * , struct cee_pg * ) ; int (*cee_peer_getpfc)(struct net_device * , struct cee_pfc * ) ; }; struct taskstats { __u16 version ; __u32 ac_exitcode ; __u8 ac_flag ; __u8 ac_nice ; __u64 cpu_count ; __u64 cpu_delay_total ; __u64 blkio_count ; __u64 blkio_delay_total ; __u64 swapin_count ; __u64 swapin_delay_total ; __u64 cpu_run_real_total ; __u64 cpu_run_virtual_total ; char ac_comm[32U] ; __u8 ac_sched ; __u8 ac_pad[3U] ; __u32 ac_uid ; __u32 ac_gid ; __u32 ac_pid ; __u32 ac_ppid ; __u32 ac_btime ; __u64 ac_etime ; __u64 ac_utime ; __u64 ac_stime ; __u64 ac_minflt ; __u64 ac_majflt ; __u64 coremem ; __u64 virtmem ; __u64 hiwater_rss ; __u64 hiwater_vm ; __u64 read_char ; __u64 write_char ; __u64 read_syscalls ; __u64 write_syscalls ; __u64 read_bytes ; __u64 write_bytes ; __u64 cancelled_write_bytes ; __u64 nvcsw ; __u64 nivcsw ; __u64 ac_utimescaled ; __u64 ac_stimescaled ; __u64 cpu_scaled_run_real_total ; __u64 freepages_count ; __u64 freepages_delay_total ; }; struct idr_layer { unsigned long bitmap ; struct idr_layer *ary[64U] ; int count ; int layer ; struct callback_head callback_head ; }; struct idr { struct idr_layer *top ; struct idr_layer *id_free ; int layers ; int id_free_cnt ; spinlock_t lock ; }; struct xattr_handler { char const *prefix ; int flags ; size_t (*list)(struct dentry * , char * , size_t , char const * , size_t , int ) ; int (*get)(struct dentry * , char const * , void * , size_t , int ) ; int (*set)(struct dentry * , char const * , void const * , size_t , int , int ) ; }; struct simple_xattrs { struct list_head head ; spinlock_t lock ; }; struct cgroupfs_root; struct cgroup_subsys; struct cgroup; struct css_id; struct cgroup_subsys_state { struct cgroup *cgroup ; atomic_t refcnt ; unsigned long flags ; struct css_id *id ; struct work_struct dput_work ; }; struct cgroup { unsigned long flags ; atomic_t count ; struct list_head sibling ; struct list_head children ; struct list_head files ; struct cgroup *parent ; struct dentry *dentry ; struct cgroup_subsys_state *subsys[12U] ; struct cgroupfs_root *root ; struct cgroup *top_cgroup ; struct list_head css_sets ; struct list_head allcg_node ; struct list_head cft_q_node ; struct list_head release_list ; struct list_head pidlists ; struct mutex pidlist_mutex ; struct callback_head callback_head ; struct list_head event_list ; spinlock_t event_list_lock ; struct simple_xattrs xattrs ; }; struct css_set { atomic_t refcount ; struct hlist_node hlist ; struct list_head tasks ; struct list_head cg_links ; struct cgroup_subsys_state *subsys[12U] ; struct 
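/* [editor's note] struct idr above is a radix tree over small integer IDs:
 * each idr_layer fans out 64 ways (ary[64]), so every level consumes six bits
 * of the ID and ->layer records the level's height.  The lookup walk is
 * essentially:
 *
 *   struct idr_layer *p = some_idr->top;
 *   while (p && p->layer > 0)
 *           p = p->ary[(id >> (p->layer * 6)) & 63];
 *   // at layer 0, p->ary[id & 63] is the stored pointer slot
 */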
callback_head callback_head ; }; struct cgroup_map_cb { int (*fill)(struct cgroup_map_cb * , char const * , u64 ) ; void *state ; }; struct cftype { char name[64U] ; int private ; umode_t mode ; size_t max_write_len ; unsigned int flags ; struct simple_xattrs xattrs ; int (*open)(struct inode * , struct file * ) ; ssize_t (*read)(struct cgroup * , struct cftype * , struct file * , char * , size_t , loff_t * ) ; u64 (*read_u64)(struct cgroup * , struct cftype * ) ; s64 (*read_s64)(struct cgroup * , struct cftype * ) ; int (*read_map)(struct cgroup * , struct cftype * , struct cgroup_map_cb * ) ; int (*read_seq_string)(struct cgroup * , struct cftype * , struct seq_file * ) ; ssize_t (*write)(struct cgroup * , struct cftype * , struct file * , char const * , size_t , loff_t * ) ; int (*write_u64)(struct cgroup * , struct cftype * , u64 ) ; int (*write_s64)(struct cgroup * , struct cftype * , s64 ) ; int (*write_string)(struct cgroup * , struct cftype * , char const * ) ; int (*trigger)(struct cgroup * , unsigned int ) ; int (*release)(struct inode * , struct file * ) ; int (*register_event)(struct cgroup * , struct cftype * , struct eventfd_ctx * , char const * ) ; void (*unregister_event)(struct cgroup * , struct cftype * , struct eventfd_ctx * ) ; }; struct cftype_set { struct list_head node ; struct cftype *cfts ; }; struct cgroup_taskset; struct cgroup_subsys { struct cgroup_subsys_state *(*create)(struct cgroup * ) ; int (*pre_destroy)(struct cgroup * ) ; void (*destroy)(struct cgroup * ) ; int (*can_attach)(struct cgroup * , struct cgroup_taskset * ) ; void (*cancel_attach)(struct cgroup * , struct cgroup_taskset * ) ; void (*attach)(struct cgroup * , struct cgroup_taskset * ) ; void (*fork)(struct task_struct * ) ; void (*exit)(struct cgroup * , struct cgroup * , struct task_struct * ) ; void (*post_clone)(struct cgroup * ) ; void (*bind)(struct cgroup * ) ; int subsys_id ; int active ; int disabled ; int early_init ; bool use_id ; bool __DEPRECATED_clear_css_refs ; bool broken_hierarchy ; bool warned_broken_hierarchy ; char const *name ; struct cgroupfs_root *root ; struct list_head sibling ; struct idr idr ; spinlock_t id_lock ; struct list_head cftsets ; struct cftype *base_cftypes ; struct cftype_set base_cftset ; struct module *module ; }; struct netprio_map { struct callback_head rcu ; u32 priomap_len ; u32 priomap[] ; }; struct xfrm_policy; struct xfrm_state; struct request_sock; struct mnt_namespace; struct ipc_namespace; struct nsproxy { atomic_t count ; struct uts_namespace *uts_ns ; struct ipc_namespace *ipc_ns ; struct mnt_namespace *mnt_ns ; struct pid_namespace *pid_ns ; struct net *net_ns ; }; struct nlmsghdr { __u32 nlmsg_len ; __u16 nlmsg_type ; __u16 nlmsg_flags ; __u32 nlmsg_seq ; __u32 nlmsg_pid ; }; struct nlattr { __u16 nla_len ; __u16 nla_type ; }; struct netlink_callback { struct sk_buff *skb ; struct nlmsghdr const *nlh ; int (*dump)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; void *data ; struct module *module ; u16 family ; u16 min_dump_alloc ; unsigned int prev_seq ; unsigned int seq ; long args[6U] ; }; struct ndmsg { __u8 ndm_family ; __u8 ndm_pad1 ; __u16 ndm_pad2 ; __s32 ndm_ifindex ; __u16 ndm_state ; __u8 ndm_flags ; __u8 ndm_type ; }; struct rtnl_link_stats64 { __u64 rx_packets ; __u64 tx_packets ; __u64 rx_bytes ; __u64 tx_bytes ; __u64 rx_errors ; __u64 tx_errors ; __u64 rx_dropped ; __u64 tx_dropped ; __u64 multicast ; __u64 collisions ; __u64 rx_length_errors ; __u64 rx_over_errors ; __u64 
rx_crc_errors ; __u64 rx_frame_errors ; __u64 rx_fifo_errors ; __u64 rx_missed_errors ; __u64 tx_aborted_errors ; __u64 tx_carrier_errors ; __u64 tx_fifo_errors ; __u64 tx_heartbeat_errors ; __u64 tx_window_errors ; __u64 rx_compressed ; __u64 tx_compressed ; }; struct ifla_vf_info { __u32 vf ; __u8 mac[32U] ; __u32 vlan ; __u32 qos ; __u32 tx_rate ; __u32 spoofchk ; }; struct netpoll_info; struct phy_device; struct wireless_dev; enum netdev_tx { __NETDEV_TX_MIN = (-0x7FFFFFFF-1), NETDEV_TX_OK = 0, NETDEV_TX_BUSY = 16, NETDEV_TX_LOCKED = 32 } ; typedef enum netdev_tx netdev_tx_t; struct net_device_stats { unsigned long rx_packets ; unsigned long tx_packets ; unsigned long rx_bytes ; unsigned long tx_bytes ; unsigned long rx_errors ; unsigned long tx_errors ; unsigned long rx_dropped ; unsigned long tx_dropped ; unsigned long multicast ; unsigned long collisions ; unsigned long rx_length_errors ; unsigned long rx_over_errors ; unsigned long rx_crc_errors ; unsigned long rx_frame_errors ; unsigned long rx_fifo_errors ; unsigned long rx_missed_errors ; unsigned long tx_aborted_errors ; unsigned long tx_carrier_errors ; unsigned long tx_fifo_errors ; unsigned long tx_heartbeat_errors ; unsigned long tx_window_errors ; unsigned long rx_compressed ; unsigned long tx_compressed ; }; struct neigh_parms; struct netdev_hw_addr_list { struct list_head list ; int count ; }; struct hh_cache { u16 hh_len ; u16 __pad ; seqlock_t hh_lock ; unsigned long hh_data[16U] ; }; struct header_ops { int (*create)(struct sk_buff * , struct net_device * , unsigned short , void const * , void const * , unsigned int ) ; int (*parse)(struct sk_buff const * , unsigned char * ) ; int (*rebuild)(struct sk_buff * ) ; int (*cache)(struct neighbour const * , struct hh_cache * , __be16 ) ; void (*cache_update)(struct hh_cache * , struct net_device const * , unsigned char const * ) ; }; enum rx_handler_result { RX_HANDLER_CONSUMED = 0, RX_HANDLER_ANOTHER = 1, RX_HANDLER_EXACT = 2, RX_HANDLER_PASS = 3 } ; typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff ** ); struct Qdisc; struct netdev_queue { struct net_device *dev ; struct Qdisc *qdisc ; struct Qdisc *qdisc_sleeping ; struct kobject kobj ; int numa_node ; spinlock_t _xmit_lock ; int xmit_lock_owner ; unsigned long trans_start ; unsigned long trans_timeout ; unsigned long state ; struct dql dql ; }; struct rps_map { unsigned int len ; struct callback_head rcu ; u16 cpus[0U] ; }; struct rps_dev_flow { u16 cpu ; u16 filter ; unsigned int last_qtail ; }; struct rps_dev_flow_table { unsigned int mask ; struct callback_head rcu ; struct work_struct free_work ; struct rps_dev_flow flows[0U] ; }; struct netdev_rx_queue { struct rps_map *rps_map ; struct rps_dev_flow_table *rps_flow_table ; struct kobject kobj ; struct net_device *dev ; }; struct xps_map { unsigned int len ; unsigned int alloc_len ; struct callback_head rcu ; u16 queues[0U] ; }; struct xps_dev_maps { struct callback_head rcu ; struct xps_map *cpu_map[0U] ; }; struct netdev_tc_txq { u16 count ; u16 offset ; }; struct netdev_fcoe_hbainfo { char manufacturer[64U] ; char serial_number[64U] ; char hardware_version[64U] ; char driver_version[64U] ; char optionrom_version[64U] ; char firmware_version[64U] ; char model[256U] ; char model_description[256U] ; }; struct net_device_ops { int (*ndo_init)(struct net_device * ) ; void (*ndo_uninit)(struct net_device * ) ; int (*ndo_open)(struct net_device * ) ; int (*ndo_stop)(struct net_device * ) ; netdev_tx_t 
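/* [editor's note] The member declared right below, ->ndo_start_xmit, is the
 * driver transmit hook, and the netdev_tx enum above spells out its contract:
 * return NETDEV_TX_OK once the skb has been accepted (the driver now owns and
 * eventually frees it), or NETDEV_TX_BUSY without consuming the skb so the
 * core requeues it.  Sketch (hw_queue_full() is a hypothetical helper):
 *
 *   static netdev_tx_t demo_xmit(struct sk_buff *skb, struct net_device *dev)
 *   {
 *           if (hw_queue_full(dev))
 *                   return NETDEV_TX_BUSY;  // skb untouched, core retries
 *           // ... hand skb to hardware, bump dev->stats.tx_packets ...
 *           return NETDEV_TX_OK;
 *   }
 */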
(*ndo_start_xmit)(struct sk_buff * , struct net_device * ) ; u16 (*ndo_select_queue)(struct net_device * , struct sk_buff * ) ; void (*ndo_change_rx_flags)(struct net_device * , int ) ; void (*ndo_set_rx_mode)(struct net_device * ) ; int (*ndo_set_mac_address)(struct net_device * , void * ) ; int (*ndo_validate_addr)(struct net_device * ) ; int (*ndo_do_ioctl)(struct net_device * , struct ifreq * , int ) ; int (*ndo_set_config)(struct net_device * , struct ifmap * ) ; int (*ndo_change_mtu)(struct net_device * , int ) ; int (*ndo_neigh_setup)(struct net_device * , struct neigh_parms * ) ; void (*ndo_tx_timeout)(struct net_device * ) ; struct rtnl_link_stats64 *(*ndo_get_stats64)(struct net_device * , struct rtnl_link_stats64 * ) ; struct net_device_stats *(*ndo_get_stats)(struct net_device * ) ; int (*ndo_vlan_rx_add_vid)(struct net_device * , unsigned short ) ; int (*ndo_vlan_rx_kill_vid)(struct net_device * , unsigned short ) ; void (*ndo_poll_controller)(struct net_device * ) ; int (*ndo_netpoll_setup)(struct net_device * , struct netpoll_info * , gfp_t ) ; void (*ndo_netpoll_cleanup)(struct net_device * ) ; int (*ndo_set_vf_mac)(struct net_device * , int , u8 * ) ; int (*ndo_set_vf_vlan)(struct net_device * , int , u16 , u8 ) ; int (*ndo_set_vf_tx_rate)(struct net_device * , int , int ) ; int (*ndo_set_vf_spoofchk)(struct net_device * , int , bool ) ; int (*ndo_get_vf_config)(struct net_device * , int , struct ifla_vf_info * ) ; int (*ndo_set_vf_port)(struct net_device * , int , struct nlattr ** ) ; int (*ndo_get_vf_port)(struct net_device * , int , struct sk_buff * ) ; int (*ndo_setup_tc)(struct net_device * , u8 ) ; int (*ndo_fcoe_enable)(struct net_device * ) ; int (*ndo_fcoe_disable)(struct net_device * ) ; int (*ndo_fcoe_ddp_setup)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_ddp_done)(struct net_device * , u16 ) ; int (*ndo_fcoe_ddp_target)(struct net_device * , u16 , struct scatterlist * , unsigned int ) ; int (*ndo_fcoe_get_hbainfo)(struct net_device * , struct netdev_fcoe_hbainfo * ) ; int (*ndo_fcoe_get_wwn)(struct net_device * , u64 * , int ) ; int (*ndo_rx_flow_steer)(struct net_device * , struct sk_buff const * , u16 , u32 ) ; int (*ndo_add_slave)(struct net_device * , struct net_device * ) ; int (*ndo_del_slave)(struct net_device * , struct net_device * ) ; netdev_features_t (*ndo_fix_features)(struct net_device * , netdev_features_t ) ; int (*ndo_set_features)(struct net_device * , netdev_features_t ) ; int (*ndo_neigh_construct)(struct neighbour * ) ; void (*ndo_neigh_destroy)(struct neighbour * ) ; int (*ndo_fdb_add)(struct ndmsg * , struct nlattr ** , struct net_device * , unsigned char const * , u16 ) ; int (*ndo_fdb_del)(struct ndmsg * , struct net_device * , unsigned char const * ) ; int (*ndo_fdb_dump)(struct sk_buff * , struct netlink_callback * , struct net_device * , int ) ; }; struct iw_handler_def; struct iw_public_data; struct vlan_info; struct in_device; struct dn_dev; struct inet6_dev; struct cpu_rmap; struct pcpu_lstats; struct pcpu_tstats; struct pcpu_dstats; union __anonunion_ldv_35542_214 { void *ml_priv ; struct pcpu_lstats *lstats ; struct pcpu_tstats *tstats ; struct pcpu_dstats *dstats ; }; struct garp_port; struct rtnl_link_ops; struct net_device { char name[16U] ; struct hlist_node name_hlist ; char *ifalias ; unsigned long mem_end ; unsigned long mem_start ; unsigned long base_addr ; unsigned int irq ; unsigned long state ; struct list_head dev_list ; struct list_head napi_list ; struct list_head unreg_list ; 
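/* [editor's note] struct net_device, opening above, is always allocated with
 * driver-private state tacked on behind it; drivers size that area at
 * allocation time and reach it through netdev_priv() instead of keeping a
 * separate allocation.  Sketch, assuming the stock alloc_etherdev()/
 * netdev_priv()/register_netdev() helpers (not declared in this unit):
 *
 *   struct demo_priv { spinlock_t lock; };          // hypothetical
 *   struct net_device *dev = alloc_etherdev(sizeof(struct demo_priv));
 *   struct demo_priv *priv = netdev_priv(dev);      // area behind *dev
 *   dev->netdev_ops = &demo_netdev_ops;
 *   err = register_netdev(dev);
 */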
netdev_features_t features ; netdev_features_t hw_features ; netdev_features_t wanted_features ; netdev_features_t vlan_features ; int ifindex ; int iflink ; struct net_device_stats stats ; atomic_long_t rx_dropped ; struct iw_handler_def const *wireless_handlers ; struct iw_public_data *wireless_data ; struct net_device_ops const *netdev_ops ; struct ethtool_ops const *ethtool_ops ; struct header_ops const *header_ops ; unsigned int flags ; unsigned int priv_flags ; unsigned short gflags ; unsigned short padded ; unsigned char operstate ; unsigned char link_mode ; unsigned char if_port ; unsigned char dma ; unsigned int mtu ; unsigned short type ; unsigned short hard_header_len ; unsigned short needed_headroom ; unsigned short needed_tailroom ; unsigned char perm_addr[32U] ; unsigned char addr_assign_type ; unsigned char addr_len ; unsigned char neigh_priv_len ; unsigned short dev_id ; spinlock_t addr_list_lock ; struct netdev_hw_addr_list uc ; struct netdev_hw_addr_list mc ; bool uc_promisc ; unsigned int promiscuity ; unsigned int allmulti ; struct vlan_info *vlan_info ; struct dsa_switch_tree *dsa_ptr ; void *atalk_ptr ; struct in_device *ip_ptr ; struct dn_dev *dn_ptr ; struct inet6_dev *ip6_ptr ; void *ax25_ptr ; struct wireless_dev *ieee80211_ptr ; unsigned long last_rx ; struct net_device *master ; unsigned char *dev_addr ; struct netdev_hw_addr_list dev_addrs ; unsigned char broadcast[32U] ; struct kset *queues_kset ; struct netdev_rx_queue *_rx ; unsigned int num_rx_queues ; unsigned int real_num_rx_queues ; struct cpu_rmap *rx_cpu_rmap ; rx_handler_func_t *rx_handler ; void *rx_handler_data ; struct netdev_queue *ingress_queue ; struct netdev_queue *_tx ; unsigned int num_tx_queues ; unsigned int real_num_tx_queues ; struct Qdisc *qdisc ; unsigned long tx_queue_len ; spinlock_t tx_global_lock ; struct xps_dev_maps *xps_maps ; unsigned long trans_start ; int watchdog_timeo ; struct timer_list watchdog_timer ; int *pcpu_refcnt ; struct list_head todo_list ; struct hlist_node index_hlist ; struct list_head link_watch_list ; unsigned char reg_state ; bool dismantle ; unsigned short rtnl_link_state ; void (*destructor)(struct net_device * ) ; struct netpoll_info *npinfo ; struct net *nd_net ; union __anonunion_ldv_35542_214 ldv_35542 ; struct garp_port *garp_port ; struct device dev ; struct attribute_group const *sysfs_groups[4U] ; struct rtnl_link_ops const *rtnl_link_ops ; unsigned int gso_max_size ; u16 gso_max_segs ; struct dcbnl_rtnl_ops const *dcbnl_ops ; u8 num_tc ; struct netdev_tc_txq tc_to_txq[16U] ; u8 prio_tc_map[16U] ; unsigned int fcoe_ddp_xid ; struct netprio_map *priomap ; struct phy_device *phydev ; struct lock_class_key *qdisc_tx_busylock ; int group ; struct pm_qos_request pm_qos_req ; }; struct res_counter { unsigned long long usage ; unsigned long long max_usage ; unsigned long long limit ; unsigned long long soft_limit ; unsigned long long failcnt ; spinlock_t lock ; struct res_counter *parent ; }; struct sock_filter { __u16 code ; __u8 jt ; __u8 jf ; __u32 k ; }; struct sk_filter { atomic_t refcnt ; unsigned int len ; unsigned int (*bpf_func)(struct sk_buff const * , struct sock_filter const * ) ; struct callback_head rcu ; struct sock_filter insns[0U] ; }; struct pollfd { int fd ; short events ; short revents ; }; struct poll_table_struct { void (*_qproc)(struct file * , wait_queue_head_t * , struct poll_table_struct * ) ; unsigned long _key ; }; struct nla_policy { u16 type ; u16 len ; }; struct rtnl_link_ops { struct list_head list ; char const *kind ; 
size_t priv_size ; void (*setup)(struct net_device * ) ; int maxtype ; struct nla_policy const *policy ; int (*validate)(struct nlattr ** , struct nlattr ** ) ; int (*newlink)(struct net * , struct net_device * , struct nlattr ** , struct nlattr ** ) ; int (*changelink)(struct net_device * , struct nlattr ** , struct nlattr ** ) ; void (*dellink)(struct net_device * , struct list_head * ) ; size_t (*get_size)(struct net_device const * ) ; int (*fill_info)(struct sk_buff * , struct net_device const * ) ; size_t (*get_xstats_size)(struct net_device const * ) ; int (*fill_xstats)(struct sk_buff * , struct net_device const * ) ; unsigned int (*get_num_tx_queues)(void) ; unsigned int (*get_num_rx_queues)(void) ; }; struct neigh_table; struct neigh_parms { struct net *net ; struct net_device *dev ; struct neigh_parms *next ; int (*neigh_setup)(struct neighbour * ) ; void (*neigh_cleanup)(struct neighbour * ) ; struct neigh_table *tbl ; void *sysctl_table ; int dead ; atomic_t refcnt ; struct callback_head callback_head ; int base_reachable_time ; int retrans_time ; int gc_staletime ; int reachable_time ; int delay_probe_time ; int queue_len_bytes ; int ucast_probes ; int app_probes ; int mcast_probes ; int anycast_delay ; int proxy_delay ; int proxy_qlen ; int locktime ; }; struct neigh_statistics { unsigned long allocs ; unsigned long destroys ; unsigned long hash_grows ; unsigned long res_failed ; unsigned long lookups ; unsigned long hits ; unsigned long rcv_probes_mcast ; unsigned long rcv_probes_ucast ; unsigned long periodic_gc_runs ; unsigned long forced_gc_runs ; unsigned long unres_discards ; }; struct neigh_ops; struct neighbour { struct neighbour *next ; struct neigh_table *tbl ; struct neigh_parms *parms ; unsigned long confirmed ; unsigned long updated ; rwlock_t lock ; atomic_t refcnt ; struct sk_buff_head arp_queue ; unsigned int arp_queue_len_bytes ; struct timer_list timer ; unsigned long used ; atomic_t probes ; __u8 flags ; __u8 nud_state ; __u8 type ; __u8 dead ; seqlock_t ha_lock ; unsigned char ha[32U] ; struct hh_cache hh ; int (*output)(struct neighbour * , struct sk_buff * ) ; struct neigh_ops const *ops ; struct callback_head rcu ; struct net_device *dev ; u8 primary_key[0U] ; }; struct neigh_ops { int family ; void (*solicit)(struct neighbour * , struct sk_buff * ) ; void (*error_report)(struct neighbour * , struct sk_buff * ) ; int (*output)(struct neighbour * , struct sk_buff * ) ; int (*connected_output)(struct neighbour * , struct sk_buff * ) ; }; struct pneigh_entry { struct pneigh_entry *next ; struct net *net ; struct net_device *dev ; u8 flags ; u8 key[0U] ; }; struct neigh_hash_table { struct neighbour **hash_buckets ; unsigned int hash_shift ; __u32 hash_rnd[4U] ; struct callback_head rcu ; }; struct neigh_table { struct neigh_table *next ; int family ; int entry_size ; int key_len ; __u32 (*hash)(void const * , struct net_device const * , __u32 * ) ; int (*constructor)(struct neighbour * ) ; int (*pconstructor)(struct pneigh_entry * ) ; void (*pdestructor)(struct pneigh_entry * ) ; void (*proxy_redo)(struct sk_buff * ) ; char *id ; struct neigh_parms parms ; int gc_interval ; int gc_thresh1 ; int gc_thresh2 ; int gc_thresh3 ; unsigned long last_flush ; struct delayed_work gc_work ; struct timer_list proxy_timer ; struct sk_buff_head proxy_queue ; atomic_t entries ; rwlock_t lock ; unsigned long last_rand ; struct neigh_statistics *stats ; struct neigh_hash_table *nht ; struct pneigh_entry **phash_buckets ; }; union __anonunion_ldv_38387_219 { unsigned long 
expires ; struct dst_entry *from ; }; struct dn_route; union __anonunion_ldv_38412_220 { struct dst_entry *next ; struct rtable *rt_next ; struct rt6_info *rt6_next ; struct dn_route *dn_next ; }; struct dst_entry { struct callback_head callback_head ; struct dst_entry *child ; struct net_device *dev ; struct dst_ops *ops ; unsigned long _metrics ; union __anonunion_ldv_38387_219 ldv_38387 ; struct dst_entry *path ; void *__pad0 ; struct xfrm_state *xfrm ; int (*input)(struct sk_buff * ) ; int (*output)(struct sk_buff * ) ; unsigned short flags ; unsigned short pending_confirm ; short error ; short obsolete ; unsigned short header_len ; unsigned short trailer_len ; __u32 tclassid ; long __pad_to_align_refcnt[2U] ; atomic_t __refcnt ; int __use ; unsigned long lastuse ; union __anonunion_ldv_38412_220 ldv_38412 ; }; struct __anonstruct_socket_lock_t_221 { spinlock_t slock ; int owned ; wait_queue_head_t wq ; struct lockdep_map dep_map ; }; typedef struct __anonstruct_socket_lock_t_221 socket_lock_t; struct proto; union __anonunion_ldv_38627_222 { unsigned int skc_hash ; __u16 skc_u16hashes[2U] ; }; union __anonunion_ldv_38635_223 { struct hlist_node skc_bind_node ; struct hlist_nulls_node skc_portaddr_node ; }; union __anonunion_ldv_38642_224 { struct hlist_node skc_node ; struct hlist_nulls_node skc_nulls_node ; }; struct sock_common { __be32 skc_daddr ; __be32 skc_rcv_saddr ; union __anonunion_ldv_38627_222 ldv_38627 ; unsigned short skc_family ; unsigned char volatile skc_state ; unsigned char skc_reuse ; int skc_bound_dev_if ; union __anonunion_ldv_38635_223 ldv_38635 ; struct proto *skc_prot ; struct net *skc_net ; int skc_dontcopy_begin[0U] ; union __anonunion_ldv_38642_224 ldv_38642 ; int skc_tx_queue_mapping ; atomic_t skc_refcnt ; int skc_dontcopy_end[0U] ; }; struct cg_proto; struct __anonstruct_sk_backlog_225 { atomic_t rmem_alloc ; int len ; struct sk_buff *head ; struct sk_buff *tail ; }; struct sock { struct sock_common __sk_common ; socket_lock_t sk_lock ; struct sk_buff_head sk_receive_queue ; struct __anonstruct_sk_backlog_225 sk_backlog ; int sk_forward_alloc ; __u32 sk_rxhash ; atomic_t sk_drops ; int sk_rcvbuf ; struct sk_filter *sk_filter ; struct socket_wq *sk_wq ; struct sk_buff_head sk_async_wait_queue ; struct xfrm_policy *sk_policy[2U] ; unsigned long sk_flags ; struct dst_entry *sk_rx_dst ; struct dst_entry *sk_dst_cache ; spinlock_t sk_dst_lock ; atomic_t sk_wmem_alloc ; atomic_t sk_omem_alloc ; int sk_sndbuf ; struct sk_buff_head sk_write_queue ; unsigned char sk_shutdown : 2 ; unsigned char sk_no_check : 2 ; unsigned char sk_userlocks : 4 ; unsigned char sk_protocol ; unsigned short sk_type ; int sk_wmem_queued ; gfp_t sk_allocation ; netdev_features_t sk_route_caps ; netdev_features_t sk_route_nocaps ; int sk_gso_type ; unsigned int sk_gso_max_size ; u16 sk_gso_max_segs ; int sk_rcvlowat ; unsigned long sk_lingertime ; struct sk_buff_head sk_error_queue ; struct proto *sk_prot_creator ; rwlock_t sk_callback_lock ; int sk_err ; int sk_err_soft ; unsigned short sk_ack_backlog ; unsigned short sk_max_ack_backlog ; __u32 sk_priority ; __u32 sk_cgrp_prioidx ; struct pid *sk_peer_pid ; struct cred const *sk_peer_cred ; long sk_rcvtimeo ; long sk_sndtimeo ; void *sk_protinfo ; struct timer_list sk_timer ; ktime_t sk_stamp ; struct socket *sk_socket ; void *sk_user_data ; struct page_frag sk_frag ; struct sk_buff *sk_send_head ; __s32 sk_peek_off ; int sk_write_pending ; void *sk_security ; __u32 sk_mark ; u32 sk_classid ; struct cg_proto *sk_cgrp ; void 
(*sk_state_change)(struct sock * ) ; void (*sk_data_ready)(struct sock * , int ) ; void (*sk_write_space)(struct sock * ) ; void (*sk_error_report)(struct sock * ) ; int (*sk_backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*sk_destruct)(struct sock * ) ; }; struct request_sock_ops; struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; struct udp_table; union __anonunion_h_226 { struct inet_hashinfo *hashinfo ; struct udp_table *udp_table ; struct raw_hashinfo *raw_hash ; }; struct proto { void (*close)(struct sock * , long ) ; int (*connect)(struct sock * , struct sockaddr * , int ) ; int (*disconnect)(struct sock * , int ) ; struct sock *(*accept)(struct sock * , int , int * ) ; int (*ioctl)(struct sock * , int , unsigned long ) ; int (*init)(struct sock * ) ; void (*destroy)(struct sock * ) ; void (*shutdown)(struct sock * , int ) ; int (*setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_setsockopt)(struct sock * , int , int , char * , unsigned int ) ; int (*compat_getsockopt)(struct sock * , int , int , char * , int * ) ; int (*compat_ioctl)(struct sock * , unsigned int , unsigned long ) ; int (*sendmsg)(struct kiocb * , struct sock * , struct msghdr * , size_t ) ; int (*recvmsg)(struct kiocb * , struct sock * , struct msghdr * , size_t , int , int , int * ) ; int (*sendpage)(struct sock * , struct page * , int , size_t , int ) ; int (*bind)(struct sock * , struct sockaddr * , int ) ; int (*backlog_rcv)(struct sock * , struct sk_buff * ) ; void (*release_cb)(struct sock * ) ; void (*mtu_reduced)(struct sock * ) ; void (*hash)(struct sock * ) ; void (*unhash)(struct sock * ) ; void (*rehash)(struct sock * ) ; int (*get_port)(struct sock * , unsigned short ) ; void (*clear_sk)(struct sock * , int ) ; unsigned int inuse_idx ; void (*enter_memory_pressure)(struct sock * ) ; atomic_long_t *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; int *sysctl_wmem ; int *sysctl_rmem ; int max_header ; bool no_autobind ; struct kmem_cache *slab ; unsigned int obj_size ; int slab_flags ; struct percpu_counter *orphan_count ; struct request_sock_ops *rsk_prot ; struct timewait_sock_ops *twsk_prot ; union __anonunion_h_226 h ; struct module *owner ; char name[32U] ; struct list_head node ; int (*init_cgroup)(struct mem_cgroup * , struct cgroup_subsys * ) ; void (*destroy_cgroup)(struct mem_cgroup * ) ; struct cg_proto *(*proto_cgroup)(struct mem_cgroup * ) ; }; struct cg_proto { void (*enter_memory_pressure)(struct sock * ) ; struct res_counter *memory_allocated ; struct percpu_counter *sockets_allocated ; int *memory_pressure ; long *sysctl_mem ; unsigned long flags ; struct mem_cgroup *memcg ; }; enum drbd_io_error_p { EP_PASS_ON = 0, EP_CALL_HELPER = 1, EP_DETACH = 2 } ; enum drbd_conns { C_STANDALONE = 0, C_DISCONNECTING = 1, C_UNCONNECTED = 2, C_TIMEOUT = 3, C_BROKEN_PIPE = 4, C_NETWORK_FAILURE = 5, C_PROTOCOL_ERROR = 6, C_TEAR_DOWN = 7, C_WF_CONNECTION = 8, C_WF_REPORT_PARAMS = 9, C_CONNECTED = 10, C_STARTING_SYNC_S = 11, C_STARTING_SYNC_T = 12, C_WF_BITMAP_S = 13, C_WF_BITMAP_T = 14, C_WF_SYNC_UUID = 15, C_SYNC_SOURCE = 16, C_SYNC_TARGET = 17, C_VERIFY_S = 18, C_VERIFY_T = 19, C_PAUSED_SYNC_S = 20, C_PAUSED_SYNC_T = 21, C_AHEAD = 22, C_BEHIND = 23, C_MASK = 31 } ; enum drbd_disk_state { D_DISKLESS = 0, D_ATTACHING = 1, D_FAILED = 2, D_NEGOTIATING = 3, D_INCONSISTENT = 4, D_OUTDATED = 5, D_UNKNOWN = 6, D_CONSISTENT = 7, 
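/*
 * The drbd_disk_state values above and below are ordered roughly from
 * worst (D_DISKLESS) to best (D_UP_TO_DATE), which is what lets DRBD
 * code compare disk states numerically; D_MASK = 15 matches the 4-bit
 * "disk" field of union drbd_state defined just below.
 */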
D_UP_TO_DATE = 8, D_MASK = 15 } ; struct __anonstruct_ldv_40024_227 { unsigned char role : 2 ; unsigned char peer : 2 ; unsigned char conn : 5 ; unsigned char disk : 4 ; unsigned char pdsk : 4 ; unsigned char susp : 1 ; unsigned char aftr_isp : 1 ; unsigned char peer_isp : 1 ; unsigned char user_isp : 1 ; unsigned char susp_nod : 1 ; unsigned char susp_fen : 1 ; unsigned short _pad : 9 ; }; union drbd_state { struct __anonstruct_ldv_40024_227 ldv_40024 ; unsigned int i ; }; enum drbd_state_rv { SS_CW_NO_NEED = 4, SS_CW_SUCCESS = 3, SS_NOTHING_TO_DO = 2, SS_SUCCESS = 1, SS_UNKNOWN_ERROR = 0, SS_TWO_PRIMARIES = -1, SS_NO_UP_TO_DATE_DISK = -2, SS_NO_LOCAL_DISK = -4, SS_NO_REMOTE_DISK = -5, SS_CONNECTED_OUTDATES = -6, SS_PRIMARY_NOP = -7, SS_RESYNC_RUNNING = -8, SS_ALREADY_STANDALONE = -9, SS_CW_FAILED_BY_PEER = -10, SS_IS_DISKLESS = -11, SS_DEVICE_IN_USE = -12, SS_NO_NET_CONFIG = -13, SS_NO_VERIFY_ALG = -14, SS_NEED_CONNECTION = -15, SS_LOWER_THAN_OUTDATED = -16, SS_NOT_SUPPORTED = -17, SS_IN_TRANSIENT_STATE = -18, SS_CONCURRENT_ST_CHG = -19, SS_O_VOL_PEER_PRI = -20, SS_AFTER_LAST_ERROR = -21 } ; struct crypto_ablkcipher; struct crypto_async_request; struct crypto_aead; struct crypto_blkcipher; struct crypto_hash; struct crypto_rng; struct crypto_tfm; struct crypto_type; struct aead_givcrypt_request; struct skcipher_givcrypt_request; struct crypto_async_request { struct list_head list ; void (*complete)(struct crypto_async_request * , int ) ; void *data ; struct crypto_tfm *tfm ; u32 flags ; }; struct ablkcipher_request { struct crypto_async_request base ; unsigned int nbytes ; void *info ; struct scatterlist *src ; struct scatterlist *dst ; void *__ctx[] ; }; struct aead_request { struct crypto_async_request base ; unsigned int assoclen ; unsigned int cryptlen ; u8 *iv ; struct scatterlist *assoc ; struct scatterlist *src ; struct scatterlist *dst ; void *__ctx[] ; }; struct blkcipher_desc { struct crypto_blkcipher *tfm ; void *info ; u32 flags ; }; struct hash_desc { struct crypto_hash *tfm ; u32 flags ; }; struct ablkcipher_alg { int (*setkey)(struct crypto_ablkcipher * , u8 const * , unsigned int ) ; int (*encrypt)(struct ablkcipher_request * ) ; int (*decrypt)(struct ablkcipher_request * ) ; int (*givencrypt)(struct skcipher_givcrypt_request * ) ; int (*givdecrypt)(struct skcipher_givcrypt_request * ) ; char const *geniv ; unsigned int min_keysize ; unsigned int max_keysize ; unsigned int ivsize ; }; struct aead_alg { int (*setkey)(struct crypto_aead * , u8 const * , unsigned int ) ; int (*setauthsize)(struct crypto_aead * , unsigned int ) ; int (*encrypt)(struct aead_request * ) ; int (*decrypt)(struct aead_request * ) ; int (*givencrypt)(struct aead_givcrypt_request * ) ; int (*givdecrypt)(struct aead_givcrypt_request * ) ; char const *geniv ; unsigned int ivsize ; unsigned int maxauthsize ; }; struct blkcipher_alg { int (*setkey)(struct crypto_tfm * , u8 const * , unsigned int ) ; int (*encrypt)(struct blkcipher_desc * , struct scatterlist * , struct scatterlist * , unsigned int ) ; int (*decrypt)(struct blkcipher_desc * , struct scatterlist * , struct scatterlist * , unsigned int ) ; char const *geniv ; unsigned int min_keysize ; unsigned int max_keysize ; unsigned int ivsize ; }; struct cipher_alg { unsigned int cia_min_keysize ; unsigned int cia_max_keysize ; int (*cia_setkey)(struct crypto_tfm * , u8 const * , unsigned int ) ; void (*cia_encrypt)(struct crypto_tfm * , u8 * , u8 const * ) ; void (*cia_decrypt)(struct crypto_tfm * , u8 * , u8 const * ) ; }; struct compress_alg { int 
(*coa_compress)(struct crypto_tfm * , u8 const * , unsigned int , u8 * , unsigned int * ) ; int (*coa_decompress)(struct crypto_tfm * , u8 const * , unsigned int , u8 * , unsigned int * ) ; }; struct rng_alg { int (*rng_make_random)(struct crypto_rng * , u8 * , unsigned int ) ; int (*rng_reset)(struct crypto_rng * , u8 * , unsigned int ) ; unsigned int seedsize ; }; union __anonunion_cra_u_228 { struct ablkcipher_alg ablkcipher ; struct aead_alg aead ; struct blkcipher_alg blkcipher ; struct cipher_alg cipher ; struct compress_alg compress ; struct rng_alg rng ; }; struct crypto_alg { struct list_head cra_list ; struct list_head cra_users ; u32 cra_flags ; unsigned int cra_blocksize ; unsigned int cra_ctxsize ; unsigned int cra_alignmask ; int cra_priority ; atomic_t cra_refcnt ; char cra_name[64U] ; char cra_driver_name[64U] ; struct crypto_type const *cra_type ; union __anonunion_cra_u_228 cra_u ; int (*cra_init)(struct crypto_tfm * ) ; void (*cra_exit)(struct crypto_tfm * ) ; void (*cra_destroy)(struct crypto_alg * ) ; struct module *cra_module ; }; struct ablkcipher_tfm { int (*setkey)(struct crypto_ablkcipher * , u8 const * , unsigned int ) ; int (*encrypt)(struct ablkcipher_request * ) ; int (*decrypt)(struct ablkcipher_request * ) ; int (*givencrypt)(struct skcipher_givcrypt_request * ) ; int (*givdecrypt)(struct skcipher_givcrypt_request * ) ; struct crypto_ablkcipher *base ; unsigned int ivsize ; unsigned int reqsize ; }; struct aead_tfm { int (*setkey)(struct crypto_aead * , u8 const * , unsigned int ) ; int (*encrypt)(struct aead_request * ) ; int (*decrypt)(struct aead_request * ) ; int (*givencrypt)(struct aead_givcrypt_request * ) ; int (*givdecrypt)(struct aead_givcrypt_request * ) ; struct crypto_aead *base ; unsigned int ivsize ; unsigned int authsize ; unsigned int reqsize ; }; struct blkcipher_tfm { void *iv ; int (*setkey)(struct crypto_tfm * , u8 const * , unsigned int ) ; int (*encrypt)(struct blkcipher_desc * , struct scatterlist * , struct scatterlist * , unsigned int ) ; int (*decrypt)(struct blkcipher_desc * , struct scatterlist * , struct scatterlist * , unsigned int ) ; }; struct cipher_tfm { int (*cit_setkey)(struct crypto_tfm * , u8 const * , unsigned int ) ; void (*cit_encrypt_one)(struct crypto_tfm * , u8 * , u8 const * ) ; void (*cit_decrypt_one)(struct crypto_tfm * , u8 * , u8 const * ) ; }; struct hash_tfm { int (*init)(struct hash_desc * ) ; int (*update)(struct hash_desc * , struct scatterlist * , unsigned int ) ; int (*final)(struct hash_desc * , u8 * ) ; int (*digest)(struct hash_desc * , struct scatterlist * , unsigned int , u8 * ) ; int (*setkey)(struct crypto_hash * , u8 const * , unsigned int ) ; unsigned int digestsize ; }; struct compress_tfm { int (*cot_compress)(struct crypto_tfm * , u8 const * , unsigned int , u8 * , unsigned int * ) ; int (*cot_decompress)(struct crypto_tfm * , u8 const * , unsigned int , u8 * , unsigned int * ) ; }; struct rng_tfm { int (*rng_gen_random)(struct crypto_rng * , u8 * , unsigned int ) ; int (*rng_reset)(struct crypto_rng * , u8 * , unsigned int ) ; }; union __anonunion_crt_u_229 { struct ablkcipher_tfm ablkcipher ; struct aead_tfm aead ; struct blkcipher_tfm blkcipher ; struct cipher_tfm cipher ; struct hash_tfm hash ; struct compress_tfm compress ; struct rng_tfm rng ; }; struct crypto_tfm { u32 crt_flags ; union __anonunion_crt_u_229 crt_u ; void (*exit)(struct crypto_tfm * ) ; struct crypto_alg *__crt_alg ; void *__crt_ctx[] ; }; struct crypto_ablkcipher { struct crypto_tfm base ; }; struct crypto_aead { 
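/*
 * The crypto_* handle types around this point (crypto_ablkcipher,
 * crypto_aead, crypto_blkcipher, crypto_hash, crypto_rng) are thin
 * wrappers that each embed the common struct crypto_tfm base object.
 * DRBD uses the crypto_hash flavour for its cram-hmac, integrity,
 * csums and verify digests; see the *_tfm members of struct drbd_tconn
 * further down.
 */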
struct crypto_tfm base ; }; struct crypto_blkcipher { struct crypto_tfm base ; }; struct crypto_hash { struct crypto_tfm base ; }; struct crypto_rng { struct crypto_tfm base ; }; struct request_values { }; struct request_sock_ops { int family ; int obj_size ; struct kmem_cache *slab ; char *slab_name ; int (*rtx_syn_ack)(struct sock * , struct request_sock * , struct request_values * ) ; void (*send_ack)(struct sock * , struct sk_buff * , struct request_sock * ) ; void (*send_reset)(struct sock * , struct sk_buff * ) ; void (*destructor)(struct request_sock * ) ; void (*syn_ack_timeout)(struct sock * , struct request_sock * ) ; }; struct request_sock { struct request_sock *dl_next ; u16 mss ; u8 retrans ; u8 cookie_ts ; u32 window_clamp ; u32 rcv_wnd ; u32 ts_recent ; unsigned long expires ; struct request_sock_ops const *rsk_ops ; struct sock *sk ; u32 secid ; u32 peer_secid ; }; struct timewait_sock_ops { struct kmem_cache *twsk_slab ; char *twsk_slab_name ; unsigned int twsk_obj_size ; int (*twsk_unique)(struct sock * , struct sock * , void * ) ; void (*twsk_destructor)(struct sock * ) ; }; struct disk_stats { unsigned long sectors[2U] ; unsigned long ios[2U] ; unsigned long merges[2U] ; unsigned long ticks[2U] ; unsigned long io_ticks ; unsigned long time_in_queue ; }; struct partition_meta_info { u8 uuid[16U] ; u8 volname[64U] ; }; struct hd_struct { sector_t start_sect ; sector_t nr_sects ; seqcount_t nr_sects_seq ; sector_t alignment_offset ; unsigned int discard_alignment ; struct device __dev ; struct kobject *holder_dir ; int policy ; int partno ; struct partition_meta_info *info ; int make_it_fail ; unsigned long stamp ; atomic_t in_flight[2U] ; struct disk_stats *dkstats ; atomic_t ref ; struct callback_head callback_head ; }; struct disk_part_tbl { struct callback_head callback_head ; int len ; struct hd_struct *last_lookup ; struct hd_struct *part[] ; }; struct disk_events; struct timer_rand_state; struct blk_integrity; struct gendisk { int major ; int first_minor ; int minors ; char disk_name[32U] ; char *(*devnode)(struct gendisk * , umode_t * ) ; unsigned int events ; unsigned int async_events ; struct disk_part_tbl *part_tbl ; struct hd_struct part0 ; struct block_device_operations const *fops ; struct request_queue *queue ; void *private_data ; int flags ; struct device *driverfs_dev ; struct kobject *slave_dir ; struct timer_rand_state *random ; atomic_t sync_io ; struct disk_events *ev ; struct blk_integrity *integrity ; int node_id ; }; struct fprop_local_percpu { struct percpu_counter events ; unsigned int period ; raw_spinlock_t lock ; }; enum writeback_sync_modes { WB_SYNC_NONE = 0, WB_SYNC_ALL = 1 } ; struct writeback_control { long nr_to_write ; long pages_skipped ; loff_t range_start ; loff_t range_end ; enum writeback_sync_modes sync_mode ; unsigned char for_kupdate : 1 ; unsigned char for_background : 1 ; unsigned char tagged_writepages : 1 ; unsigned char for_reclaim : 1 ; unsigned char range_cyclic : 1 ; }; struct bdi_writeback; typedef int congested_fn(void * , int ); struct bdi_writeback { struct backing_dev_info *bdi ; unsigned int nr ; unsigned long last_old_flush ; unsigned long last_active ; struct task_struct *task ; struct timer_list wakeup_timer ; struct list_head b_dirty ; struct list_head b_io ; struct list_head b_more_io ; spinlock_t list_lock ; }; struct backing_dev_info { struct list_head bdi_list ; unsigned long ra_pages ; unsigned long state ; unsigned int capabilities ; congested_fn *congested_fn ; void *congested_data ; char *name ; struct 
percpu_counter bdi_stat[4U] ; unsigned long bw_time_stamp ; unsigned long dirtied_stamp ; unsigned long written_stamp ; unsigned long write_bandwidth ; unsigned long avg_write_bandwidth ; unsigned long dirty_ratelimit ; unsigned long balanced_dirty_ratelimit ; struct fprop_local_percpu completions ; int dirty_exceeded ; unsigned int min_ratio ; unsigned int max_ratio ; unsigned int max_prop_frac ; struct bdi_writeback wb ; spinlock_t wb_lock ; struct list_head work_list ; struct device *dev ; struct timer_list laptop_mode_wb_timer ; struct dentry *debug_dir ; struct dentry *debug_stats ; }; typedef void *mempool_alloc_t(gfp_t , void * ); typedef void mempool_free_t(void * , void * ); struct mempool_s { spinlock_t lock ; int min_nr ; int curr_nr ; void **elements ; void *pool_data ; mempool_alloc_t *alloc ; mempool_free_t *free ; wait_queue_head_t wait ; }; typedef struct mempool_s mempool_t; union __anonunion_ldv_43454_236 { struct list_head q_node ; struct kmem_cache *__rcu_icq_cache ; }; union __anonunion_ldv_43458_237 { struct hlist_node ioc_node ; struct callback_head __rcu_head ; }; struct io_cq { struct request_queue *q ; struct io_context *ioc ; union __anonunion_ldv_43454_236 ldv_43454 ; union __anonunion_ldv_43458_237 ldv_43458 ; unsigned int flags ; }; struct io_context { atomic_long_t refcount ; atomic_t active_ref ; atomic_t nr_tasks ; spinlock_t lock ; unsigned short ioprio ; int nr_batch_requests ; unsigned long last_waited ; struct radix_tree_root icq_tree ; struct io_cq *icq_hint ; struct hlist_head icq_list ; struct work_struct release_work ; }; struct bio_integrity_payload { struct bio *bip_bio ; sector_t bip_sector ; void *bip_buf ; bio_end_io_t *bip_end_io ; unsigned int bip_size ; unsigned short bip_slab ; unsigned short bip_vcnt ; unsigned short bip_idx ; struct work_struct bip_work ; struct bio_vec bip_vec[0U] ; }; struct bio_set { struct kmem_cache *bio_slab ; unsigned int front_pad ; mempool_t *bio_pool ; mempool_t *bio_integrity_pool ; mempool_t *bvec_pool ; }; struct bio_list { struct bio *head ; struct bio *tail ; }; struct bsg_class_device { struct device *class_dev ; struct device *parent ; int minor ; struct request_queue *queue ; struct kref ref ; void (*release)(struct device * ) ; }; struct elevator_queue; struct request; struct bsg_job; struct blkcg_gq; typedef void rq_end_io_fn(struct request * , int ); struct request_list { struct request_queue *q ; struct blkcg_gq *blkg ; int count[2U] ; int starved[2U] ; mempool_t *rq_pool ; wait_queue_head_t wait[2U] ; unsigned int flags ; }; enum rq_cmd_type_bits { REQ_TYPE_FS = 1, REQ_TYPE_BLOCK_PC = 2, REQ_TYPE_SENSE = 3, REQ_TYPE_PM_SUSPEND = 4, REQ_TYPE_PM_RESUME = 5, REQ_TYPE_PM_SHUTDOWN = 6, REQ_TYPE_SPECIAL = 7, REQ_TYPE_ATA_TASKFILE = 8, REQ_TYPE_ATA_PC = 9 } ; union __anonunion_ldv_43905_238 { struct rb_node rb_node ; void *completion_data ; }; struct __anonstruct_elv_240 { struct io_cq *icq ; void *priv[2U] ; }; struct __anonstruct_flush_241 { unsigned int seq ; struct list_head list ; rq_end_io_fn *saved_end_io ; }; union __anonunion_ldv_43916_239 { struct __anonstruct_elv_240 elv ; struct __anonstruct_flush_241 flush ; }; struct request { struct list_head queuelist ; struct call_single_data csd ; struct request_queue *q ; unsigned int cmd_flags ; enum rq_cmd_type_bits cmd_type ; unsigned long atomic_flags ; int cpu ; unsigned int __data_len ; sector_t __sector ; struct bio *bio ; struct bio *biotail ; struct hlist_node hash ; union __anonunion_ldv_43905_238 ldv_43905 ; union __anonunion_ldv_43916_239 
ldv_43916 ; struct gendisk *rq_disk ; struct hd_struct *part ; unsigned long start_time ; struct request_list *rl ; unsigned long long start_time_ns ; unsigned long long io_start_time_ns ; unsigned short nr_phys_segments ; unsigned short nr_integrity_segments ; unsigned short ioprio ; int ref_count ; void *special ; char *buffer ; int tag ; int errors ; unsigned char __cmd[16U] ; unsigned char *cmd ; unsigned short cmd_len ; unsigned int extra_len ; unsigned int sense_len ; unsigned int resid_len ; void *sense ; unsigned long deadline ; struct list_head timeout_list ; unsigned int timeout ; int retries ; rq_end_io_fn *end_io ; void *end_io_data ; struct request *next_rq ; }; typedef int elevator_merge_fn(struct request_queue * , struct request ** , struct bio * ); typedef void elevator_merge_req_fn(struct request_queue * , struct request * , struct request * ); typedef void elevator_merged_fn(struct request_queue * , struct request * , int ); typedef int elevator_allow_merge_fn(struct request_queue * , struct request * , struct bio * ); typedef void elevator_bio_merged_fn(struct request_queue * , struct request * , struct bio * ); typedef int elevator_dispatch_fn(struct request_queue * , int ); typedef void elevator_add_req_fn(struct request_queue * , struct request * ); typedef struct request *elevator_request_list_fn(struct request_queue * , struct request * ); typedef void elevator_completed_req_fn(struct request_queue * , struct request * ); typedef int elevator_may_queue_fn(struct request_queue * , int ); typedef void elevator_init_icq_fn(struct io_cq * ); typedef void elevator_exit_icq_fn(struct io_cq * ); typedef int elevator_set_req_fn(struct request_queue * , struct request * , struct bio * , gfp_t ); typedef void elevator_put_req_fn(struct request * ); typedef void elevator_activate_req_fn(struct request_queue * , struct request * ); typedef void elevator_deactivate_req_fn(struct request_queue * , struct request * ); typedef int elevator_init_fn(struct request_queue * ); typedef void elevator_exit_fn(struct elevator_queue * ); struct elevator_ops { elevator_merge_fn *elevator_merge_fn ; elevator_merged_fn *elevator_merged_fn ; elevator_merge_req_fn *elevator_merge_req_fn ; elevator_allow_merge_fn *elevator_allow_merge_fn ; elevator_bio_merged_fn *elevator_bio_merged_fn ; elevator_dispatch_fn *elevator_dispatch_fn ; elevator_add_req_fn *elevator_add_req_fn ; elevator_activate_req_fn *elevator_activate_req_fn ; elevator_deactivate_req_fn *elevator_deactivate_req_fn ; elevator_completed_req_fn *elevator_completed_req_fn ; elevator_request_list_fn *elevator_former_req_fn ; elevator_request_list_fn *elevator_latter_req_fn ; elevator_init_icq_fn *elevator_init_icq_fn ; elevator_exit_icq_fn *elevator_exit_icq_fn ; elevator_set_req_fn *elevator_set_req_fn ; elevator_put_req_fn *elevator_put_req_fn ; elevator_may_queue_fn *elevator_may_queue_fn ; elevator_init_fn *elevator_init_fn ; elevator_exit_fn *elevator_exit_fn ; }; struct elv_fs_entry { struct attribute attr ; ssize_t (*show)(struct elevator_queue * , char * ) ; ssize_t (*store)(struct elevator_queue * , char const * , size_t ) ; }; struct elevator_type { struct kmem_cache *icq_cache ; struct elevator_ops ops ; size_t icq_size ; size_t icq_align ; struct elv_fs_entry *elevator_attrs ; char elevator_name[16U] ; struct module *elevator_owner ; char icq_cache_name[21U] ; struct list_head list ; }; struct elevator_queue { struct elevator_type *type ; void *elevator_data ; struct kobject kobj ; struct mutex sysfs_lock ; struct 
hlist_head *hash ; unsigned char registered : 1 ; }; typedef void request_fn_proc(struct request_queue * ); typedef void make_request_fn(struct request_queue * , struct bio * ); typedef int prep_rq_fn(struct request_queue * , struct request * ); typedef void unprep_rq_fn(struct request_queue * , struct request * ); struct bvec_merge_data { struct block_device *bi_bdev ; sector_t bi_sector ; unsigned int bi_size ; unsigned long bi_rw ; }; typedef int merge_bvec_fn(struct request_queue * , struct bvec_merge_data * , struct bio_vec * ); typedef void softirq_done_fn(struct request * ); typedef int dma_drain_needed_fn(struct request * ); typedef int lld_busy_fn(struct request_queue * ); typedef int bsg_job_fn(struct bsg_job * ); enum blk_eh_timer_return { BLK_EH_NOT_HANDLED = 0, BLK_EH_HANDLED = 1, BLK_EH_RESET_TIMER = 2 } ; typedef enum blk_eh_timer_return rq_timed_out_fn(struct request * ); struct blk_queue_tag { struct request **tag_index ; unsigned long *tag_map ; int busy ; int max_depth ; int real_max_depth ; atomic_t refcnt ; }; struct queue_limits { unsigned long bounce_pfn ; unsigned long seg_boundary_mask ; unsigned int max_hw_sectors ; unsigned int max_sectors ; unsigned int max_segment_size ; unsigned int physical_block_size ; unsigned int alignment_offset ; unsigned int io_min ; unsigned int io_opt ; unsigned int max_discard_sectors ; unsigned int max_write_same_sectors ; unsigned int discard_granularity ; unsigned int discard_alignment ; unsigned short logical_block_size ; unsigned short max_segments ; unsigned short max_integrity_segments ; unsigned char misaligned ; unsigned char discard_misaligned ; unsigned char cluster ; unsigned char discard_zeroes_data ; }; struct throtl_data; struct request_queue { struct list_head queue_head ; struct request *last_merge ; struct elevator_queue *elevator ; int nr_rqs[2U] ; int nr_rqs_elvpriv ; struct request_list root_rl ; request_fn_proc *request_fn ; make_request_fn *make_request_fn ; prep_rq_fn *prep_rq_fn ; unprep_rq_fn *unprep_rq_fn ; merge_bvec_fn *merge_bvec_fn ; softirq_done_fn *softirq_done_fn ; rq_timed_out_fn *rq_timed_out_fn ; dma_drain_needed_fn *dma_drain_needed ; lld_busy_fn *lld_busy_fn ; sector_t end_sector ; struct request *boundary_rq ; struct delayed_work delay_work ; struct backing_dev_info backing_dev_info ; void *queuedata ; unsigned long queue_flags ; int id ; gfp_t bounce_gfp ; spinlock_t __queue_lock ; spinlock_t *queue_lock ; struct kobject kobj ; unsigned long nr_requests ; unsigned int nr_congestion_on ; unsigned int nr_congestion_off ; unsigned int nr_batching ; unsigned int dma_drain_size ; void *dma_drain_buffer ; unsigned int dma_pad_mask ; unsigned int dma_alignment ; struct blk_queue_tag *queue_tags ; struct list_head tag_busy_list ; unsigned int nr_sorted ; unsigned int in_flight[2U] ; unsigned int rq_timeout ; struct timer_list timeout ; struct list_head timeout_list ; struct list_head icq_list ; unsigned long blkcg_pols[1U] ; struct blkcg_gq *root_blkg ; struct list_head blkg_list ; struct queue_limits limits ; unsigned int sg_timeout ; unsigned int sg_reserved_size ; int node ; unsigned int flush_flags ; unsigned char flush_not_queueable : 1 ; unsigned char flush_queue_delayed : 1 ; unsigned char flush_pending_idx : 1 ; unsigned char flush_running_idx : 1 ; unsigned long flush_pending_since ; struct list_head flush_queue[2U] ; struct list_head flush_data_in_flight ; struct request flush_rq ; struct mutex sysfs_lock ; int bypass_depth ; bsg_job_fn *bsg_job_fn ; int bsg_job_size ; struct 
bsg_class_device bsg_dev ; struct list_head all_q_node ; struct throtl_data *td ; }; struct blk_plug { unsigned long magic ; struct list_head list ; struct list_head cb_list ; unsigned int should_sort ; }; struct blk_integrity_exchg { void *prot_buf ; void *data_buf ; sector_t sector ; unsigned int data_size ; unsigned short sector_size ; char const *disk_name ; }; typedef void integrity_gen_fn(struct blk_integrity_exchg * ); typedef int integrity_vrfy_fn(struct blk_integrity_exchg * ); typedef void integrity_set_tag_fn(void * , void * , unsigned int ); typedef void integrity_get_tag_fn(void * , void * , unsigned int ); struct blk_integrity { integrity_gen_fn *generate_fn ; integrity_vrfy_fn *verify_fn ; integrity_set_tag_fn *set_tag_fn ; integrity_get_tag_fn *get_tag_fn ; unsigned short flags ; unsigned short tuple_size ; unsigned short sector_size ; unsigned short tag_size ; char const *name ; struct kobject kobj ; }; struct block_device_operations { int (*open)(struct block_device * , fmode_t ) ; int (*release)(struct gendisk * , fmode_t ) ; int (*ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*compat_ioctl)(struct block_device * , fmode_t , unsigned int , unsigned long ) ; int (*direct_access)(struct block_device * , sector_t , void ** , unsigned long * ) ; unsigned int (*check_events)(struct gendisk * , unsigned int ) ; int (*media_changed)(struct gendisk * ) ; void (*unlock_native_capacity)(struct gendisk * ) ; int (*revalidate_disk)(struct gendisk * ) ; int (*getgeo)(struct block_device * , struct hd_geometry * ) ; void (*swap_slot_free_notify)(struct block_device * , unsigned long ) ; struct module *owner ; }; struct ipv6_devconf { __s32 forwarding ; __s32 hop_limit ; __s32 mtu6 ; __s32 accept_ra ; __s32 accept_redirects ; __s32 autoconf ; __s32 dad_transmits ; __s32 rtr_solicits ; __s32 rtr_solicit_interval ; __s32 rtr_solicit_delay ; __s32 force_mld_version ; __s32 use_tempaddr ; __s32 temp_valid_lft ; __s32 temp_prefered_lft ; __s32 regen_max_retry ; __s32 max_desync_factor ; __s32 max_addresses ; __s32 accept_ra_defrtr ; __s32 accept_ra_pinfo ; __s32 accept_ra_rtr_pref ; __s32 rtr_probe_interval ; __s32 accept_ra_rt_info_max_plen ; __s32 proxy_ndp ; __s32 accept_source_route ; __s32 optimistic_dad ; __s32 mc_forwarding ; __s32 disable_ipv6 ; __s32 accept_dad ; __s32 force_tllao ; void *sysctl ; }; struct ip6_sf_list { struct ip6_sf_list *sf_next ; struct in6_addr sf_addr ; unsigned long sf_count[2U] ; unsigned char sf_gsresp ; unsigned char sf_oldin ; unsigned char sf_crcount ; }; struct ifmcaddr6 { struct in6_addr mca_addr ; struct inet6_dev *idev ; struct ifmcaddr6 *next ; struct ip6_sf_list *mca_sources ; struct ip6_sf_list *mca_tomb ; unsigned int mca_sfmode ; unsigned char mca_crcount ; unsigned long mca_sfcount[2U] ; struct timer_list mca_timer ; unsigned int mca_flags ; int mca_users ; atomic_t mca_refcnt ; spinlock_t mca_lock ; unsigned long mca_cstamp ; unsigned long mca_tstamp ; }; struct ifacaddr6 { struct in6_addr aca_addr ; struct inet6_dev *aca_idev ; struct rt6_info *aca_rt ; struct ifacaddr6 *aca_next ; int aca_users ; atomic_t aca_refcnt ; spinlock_t aca_lock ; unsigned long aca_cstamp ; unsigned long aca_tstamp ; }; struct ipv6_devstat { struct proc_dir_entry *proc_dir_entry ; struct ipstats_mib *ipv6[1U] ; struct icmpv6_mib_device *icmpv6dev ; struct icmpv6msg_mib_device *icmpv6msgdev ; }; struct inet6_dev { struct net_device *dev ; struct list_head addr_list ; struct ifmcaddr6 *mc_list ; struct ifmcaddr6 *mc_tomb ; spinlock_t 
mc_lock ; unsigned char mc_qrv ; unsigned char mc_gq_running ; unsigned char mc_ifc_count ; unsigned long mc_v1_seen ; unsigned long mc_maxdelay ; struct timer_list mc_gq_timer ; struct timer_list mc_ifc_timer ; struct ifacaddr6 *ac_list ; rwlock_t lock ; atomic_t refcnt ; __u32 if_flags ; int dead ; u8 rndid[8U] ; struct timer_list regen_timer ; struct list_head tempaddr_list ; struct neigh_parms *nd_parms ; struct inet6_dev *next ; struct ipv6_devconf cnf ; struct ipv6_devstat stats ; unsigned long tstamp ; struct callback_head rcu ; }; union __anonunion_ldv_46571_248 { __be32 a4 ; __be32 a6[4U] ; }; struct inetpeer_addr_base { union __anonunion_ldv_46571_248 ldv_46571 ; }; struct inetpeer_addr { struct inetpeer_addr_base addr ; __u16 family ; }; union __anonunion_ldv_46586_249 { struct list_head gc_list ; struct callback_head gc_rcu ; }; struct __anonstruct_ldv_46591_251 { atomic_t rid ; atomic_t ip_id_count ; }; union __anonunion_ldv_46594_250 { struct __anonstruct_ldv_46591_251 ldv_46591 ; struct callback_head rcu ; struct inet_peer *gc_next ; }; struct inet_peer { struct inet_peer *avl_left ; struct inet_peer *avl_right ; struct inetpeer_addr daddr ; __u32 avl_height ; u32 metrics[14U] ; u32 rate_tokens ; unsigned long rate_last ; union __anonunion_ldv_46586_249 ldv_46586 ; union __anonunion_ldv_46594_250 ldv_46594 ; __u32 dtime ; atomic_t refcnt ; }; struct inet_peer_base { struct inet_peer *root ; seqlock_t lock ; u32 flush_seq ; int total ; }; struct rtable { struct dst_entry dst ; int rt_genid ; unsigned int rt_flags ; __u16 rt_type ; __u8 rt_is_input ; __u8 rt_uses_gateway ; int rt_iif ; __be32 rt_gateway ; u32 rt_pmtu ; struct list_head rt_uncached ; }; struct inet_ehash_bucket { struct hlist_nulls_head chain ; struct hlist_nulls_head twchain ; }; struct inet_bind_hashbucket { spinlock_t lock ; struct hlist_head chain ; }; struct inet_listen_hashbucket { spinlock_t lock ; struct hlist_nulls_head head ; }; struct inet_hashinfo { struct inet_ehash_bucket *ehash ; spinlock_t *ehash_locks ; unsigned int ehash_mask ; unsigned int ehash_locks_mask ; struct inet_bind_hashbucket *bhash ; unsigned int bhash_size ; struct kmem_cache *bind_bucket_cachep ; struct inet_listen_hashbucket listening_hash[32U] ; atomic_t bsockets ; }; struct lc_element { struct hlist_node colision ; struct list_head list ; unsigned int refcnt ; unsigned int lc_index ; unsigned int lc_number ; unsigned int lc_new_number ; }; struct lru_cache { struct list_head lru ; struct list_head free ; struct list_head in_use ; struct list_head to_be_changed ; struct kmem_cache *lc_cache ; size_t element_size ; size_t element_off ; unsigned int nr_elements ; unsigned int max_pending_changes ; unsigned int pending_changes ; unsigned int used ; unsigned long hits ; unsigned long misses ; unsigned long starving ; unsigned long locked ; unsigned long changed ; unsigned long flags ; void *lc_private ; char const *name ; struct hlist_head *lc_slot ; struct lc_element **lc_element ; }; struct disk_conf { char backing_dev[128U] ; __u32 backing_dev_len ; char meta_dev[128U] ; __u32 meta_dev_len ; __s32 meta_dev_idx ; __u64 disk_size ; __u32 max_bio_bvecs ; __u32 on_io_error ; __u32 fencing ; __u32 resync_rate ; __s32 resync_after ; __u32 al_extents ; __u32 c_plan_ahead ; __u32 c_delay_target ; __u32 c_fill_target ; __u32 c_max_rate ; __u32 c_min_rate ; char disk_barrier ; char disk_flushes ; char disk_drain ; char md_flushes ; __u32 disk_timeout ; __u32 read_balancing ; char al_updates ; }; struct res_opts { char cpu_mask[32U] ; 
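/*
 * struct disk_conf above and struct res_opts / struct net_conf around
 * this point are DRBD's configuration records: fixed-size char arrays
 * and __u32/__s32 scalars mirroring the netlink attribute payloads.
 * Each string field travels with an explicit length (here cpu_mask
 * with cpu_mask_len just below) instead of relying on NUL termination.
 */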
__u32 cpu_mask_len ; __u32 on_no_data ; }; struct net_conf { char shared_secret[64U] ; __u32 shared_secret_len ; char cram_hmac_alg[64U] ; __u32 cram_hmac_alg_len ; char integrity_alg[64U] ; __u32 integrity_alg_len ; char verify_alg[64U] ; __u32 verify_alg_len ; char csums_alg[64U] ; __u32 csums_alg_len ; __u32 wire_protocol ; __u32 connect_int ; __u32 timeout ; __u32 ping_int ; __u32 ping_timeo ; __u32 sndbuf_size ; __u32 rcvbuf_size ; __u32 ko_count ; __u32 max_buffers ; __u32 max_epoch_size ; __u32 unplug_watermark ; __u32 after_sb_0p ; __u32 after_sb_1p ; __u32 after_sb_2p ; __u32 rr_conflict ; __u32 on_congestion ; __u32 cong_fill ; __u32 cong_extents ; char two_primaries ; char discard_my_data ; char tcp_cork ; char always_asbp ; char tentative ; char use_rle ; }; struct drbd_conf; struct drbd_tconn; enum chg_state_flags { CS_HARD = 1, CS_VERBOSE = 2, CS_WAIT_COMPLETE = 4, CS_SERIALIZE = 8, CS_ORDERED = 12, CS_LOCAL_ONLY = 16, CS_DC_ROLE = 32, CS_DC_PEER = 64, CS_DC_CONN = 128, CS_DC_DISK = 256, CS_DC_PDSK = 512, CS_DC_SUSP = 1024, CS_DC_MASK = 992, CS_IGN_OUTD_FAIL = 2048 } ; struct __anonstruct_ldv_49522_255 { unsigned char role : 2 ; unsigned char peer : 2 ; unsigned char conn : 5 ; unsigned char disk : 4 ; unsigned char pdsk : 4 ; unsigned char _unused : 1 ; unsigned char aftr_isp : 1 ; unsigned char peer_isp : 1 ; unsigned char user_isp : 1 ; unsigned short _pad : 11 ; }; union drbd_dev_state { struct __anonstruct_ldv_49522_255 ldv_49522 ; unsigned int i ; }; enum drbd_thread_state { NONE = 0, RUNNING = 1, EXITING = 2, RESTARTING = 3 } ; struct drbd_thread { spinlock_t t_lock ; struct task_struct *task ; struct completion stop ; enum drbd_thread_state t_state ; int (*function)(struct drbd_thread * ) ; struct drbd_tconn *tconn ; int reset_cpu_mask ; char name[9U] ; }; union __anonunion_ldv_49807_256 { struct drbd_conf *mdev ; struct drbd_tconn *tconn ; }; struct drbd_work { struct list_head list ; int (*cb)(struct drbd_work * , int ) ; union __anonunion_ldv_49807_256 ldv_49807 ; }; struct drbd_epoch { struct drbd_tconn *tconn ; struct list_head list ; unsigned int barrier_nr ; atomic_t epoch_size ; atomic_t active ; unsigned long flags ; }; struct drbd_bitmap; enum bm_flag { BM_P_VMALLOCED = 65536, BM_LOCKED_MASK = 15, BM_DONT_CLEAR = 1, BM_DONT_SET = 2, BM_DONT_TEST = 4, BM_IS_LOCKED = 8, BM_LOCKED_TEST_ALLOWED = 11, BM_LOCKED_SET_ALLOWED = 9, BM_LOCKED_CHANGE_ALLOWED = 8 } ; struct drbd_work_queue { struct list_head q ; spinlock_t q_lock ; wait_queue_head_t q_wait ; }; struct drbd_socket { struct mutex mutex ; struct socket *socket ; void *sbuf ; void *rbuf ; }; struct drbd_md { u64 md_offset ; u64 la_size_sect ; spinlock_t uuid_lock ; u64 uuid[4U] ; u64 device_uuid ; u32 flags ; u32 md_size_sect ; s32 al_offset ; s32 bm_offset ; }; struct drbd_backing_dev { struct block_device *backing_bdev ; struct block_device *md_bdev ; struct drbd_md md ; struct disk_conf *disk_conf ; sector_t known_size ; }; struct drbd_md_io { unsigned int done ; int error ; }; struct bm_io_work { struct drbd_work w ; char *why ; enum bm_flag flags ; int (*io_fn)(struct drbd_conf * ) ; void (*done)(struct drbd_conf * , int ) ; }; enum write_ordering_e { WO_none = 0, WO_drain_io = 1, WO_bdev_flush = 2 } ; struct fifo_buffer { unsigned int head_index ; unsigned int size ; int total ; int values[0U] ; }; struct __anonstruct_send_258 { bool seen_any_write_yet ; int current_epoch_nr ; unsigned int current_epoch_writes ; }; struct drbd_tconn { char *name ; struct list_head all_tconn ; struct kref kref ; struct 
idr volumes ; enum drbd_conns cstate ; unsigned char susp : 1 ; unsigned char susp_nod : 1 ; unsigned char susp_fen : 1 ; struct mutex cstate_mutex ; unsigned long flags ; struct net_conf *net_conf ; struct mutex conf_update ; wait_queue_head_t ping_wait ; struct res_opts res_opts ; struct __kernel_sockaddr_storage my_addr ; int my_addr_len ; struct __kernel_sockaddr_storage peer_addr ; int peer_addr_len ; struct drbd_socket data ; struct drbd_socket meta ; int agreed_pro_version ; unsigned long last_received ; unsigned int ko_count ; spinlock_t req_lock ; struct list_head transfer_log ; struct crypto_hash *cram_hmac_tfm ; struct crypto_hash *integrity_tfm ; struct crypto_hash *peer_integrity_tfm ; struct crypto_hash *csums_tfm ; struct crypto_hash *verify_tfm ; void *int_dig_in ; void *int_dig_vv ; struct drbd_epoch *current_epoch ; spinlock_t epoch_lock ; unsigned int epochs ; enum write_ordering_e write_ordering ; atomic_t current_tle_nr ; unsigned int current_tle_writes ; unsigned long last_reconnect_jif ; struct drbd_thread receiver ; struct drbd_thread worker ; struct drbd_thread asender ; cpumask_var_t cpu_mask ; struct drbd_work_queue sender_work ; struct __anonstruct_send_258 send ; }; struct drbd_conf { struct drbd_tconn *tconn ; int vnr ; struct kref kref ; unsigned long flags ; struct drbd_backing_dev *ldev ; sector_t p_size ; struct request_queue *rq_queue ; struct block_device *this_bdev ; struct gendisk *vdisk ; unsigned long last_reattach_jif ; struct drbd_work resync_work ; struct drbd_work unplug_work ; struct drbd_work go_diskless ; struct drbd_work md_sync_work ; struct drbd_work start_resync_work ; struct timer_list resync_timer ; struct timer_list md_sync_timer ; struct timer_list start_resync_timer ; struct timer_list request_timer ; union drbd_state new_state_tmp ; union drbd_dev_state state ; wait_queue_head_t misc_wait ; wait_queue_head_t state_wait ; unsigned int send_cnt ; unsigned int recv_cnt ; unsigned int read_cnt ; unsigned int writ_cnt ; unsigned int al_writ_cnt ; unsigned int bm_writ_cnt ; atomic_t ap_bio_cnt ; atomic_t ap_pending_cnt ; atomic_t rs_pending_cnt ; atomic_t unacked_cnt ; atomic_t local_cnt ; struct rb_root read_requests ; struct rb_root write_requests ; unsigned long rs_total ; unsigned long rs_failed ; unsigned long rs_start ; unsigned long rs_paused ; unsigned long rs_same_csum ; unsigned long rs_mark_left[8U] ; unsigned long rs_mark_time[8U] ; int rs_last_mark ; unsigned long rs_last_bcast ; sector_t ov_start_sector ; sector_t ov_stop_sector ; sector_t ov_position ; sector_t ov_last_oos_start ; sector_t ov_last_oos_size ; unsigned long ov_left ; struct drbd_bitmap *bitmap ; unsigned long bm_resync_fo ; struct lru_cache *resync ; unsigned int resync_locked ; unsigned int resync_wenr ; int open_cnt ; u64 *p_uuid ; struct list_head active_ee ; struct list_head sync_ee ; struct list_head done_ee ; struct list_head read_ee ; struct list_head net_ee ; int next_barrier_nr ; struct list_head resync_reads ; atomic_t pp_in_use ; atomic_t pp_in_use_by_net ; wait_queue_head_t ee_wait ; struct page *md_io_page ; struct drbd_md_io md_io ; atomic_t md_io_in_use ; spinlock_t al_lock ; wait_queue_head_t al_wait ; struct lru_cache *act_log ; unsigned int al_tr_number ; int al_tr_cycle ; int al_tr_pos ; wait_queue_head_t seq_wait ; atomic_t packet_seq ; unsigned int peer_seq ; spinlock_t peer_seq_lock ; unsigned int minor ; unsigned long comm_bm_set ; struct bm_io_work bm_io_work ; u64 ed_uuid ; struct mutex own_state_mutex ; struct mutex *state_mutex ; 
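/*
 * struct drbd_conf, whose definition this comment sits inside, is the
 * per-minor (per-volume) device object; struct drbd_tconn above is the
 * per-connection object its volumes share (threads, sockets, transfer
 * log, crypto transforms).  The many atomic_t members (ap_bio_cnt,
 * ap_pending_cnt, unacked_cnt, ...) are counters driven by the
 * kernel's atomic_* helpers; an illustrative (not verbatim) use of the
 * application-bio counter would be:
 *
 *     atomic_inc(&mdev->ap_bio_cnt);
 *     ...
 *     if (atomic_dec_and_test(&mdev->ap_bio_cnt))
 *         wake_up(&mdev->misc_wait);
 *
 * where mdev is the conventional name for a struct drbd_conf pointer
 * and the last decrement wakes anyone sleeping on misc_wait.
 */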
char congestion_reason ; atomic_t rs_sect_in ; atomic_t rs_sect_ev ; int rs_last_sect_ev ; int rs_last_events ; int c_sync_rate ; struct fifo_buffer *rs_plan_s ; int rs_in_flight ; atomic_t ap_in_flight ; unsigned int peer_max_bio_size ; unsigned int local_max_bio_size ; }; enum drbd_force_detach_flags { DRBD_READ_ERROR = 0, DRBD_WRITE_ERROR = 1, DRBD_META_IO_ERROR = 2, DRBD_FORCE_DETACH = 3 } ; struct drbd_bitmap { struct page **bm_pages ; spinlock_t bm_lock ; unsigned long bm_set ; unsigned long bm_bits ; size_t bm_words ; size_t bm_number_of_pages ; sector_t bm_dev_capacity ; struct mutex bm_change ; wait_queue_head_t bm_io_wait ; enum bm_flag bm_flags ; char *bm_why ; struct task_struct *bm_task ; }; struct bm_aio_ctx { struct drbd_conf *mdev ; atomic_t in_flight ; unsigned int done ; unsigned int flags ; int error ; struct kref kref ; }; typedef int ldv_func_ret_type___2; typedef int ldv_func_ret_type___10; typedef short s16; typedef void (*ctor_fn_t)(void); struct bug_entry { int bug_addr_disp ; int file_disp ; unsigned short line ; unsigned short flags ; }; struct static_key; enum hrtimer_restart; typedef __u64 Elf64_Addr; typedef __u16 Elf64_Half; typedef __u32 Elf64_Word; typedef __u64 Elf64_Xword; struct elf64_sym { Elf64_Word st_name ; unsigned char st_info ; unsigned char st_other ; Elf64_Half st_shndx ; Elf64_Addr st_value ; Elf64_Xword st_size ; }; typedef struct elf64_sym Elf64_Sym; struct kernel_param; struct kernel_param_ops { int (*set)(char const * , struct kernel_param const * ) ; int (*get)(char * , struct kernel_param const * ) ; void (*free)(void * ) ; }; struct kparam_string; struct kparam_array; union __anonunion_ldv_13750_134 { void *arg ; struct kparam_string const *str ; struct kparam_array const *arr ; }; struct kernel_param { char const *name ; struct kernel_param_ops const *ops ; u16 perm ; s16 level ; union __anonunion_ldv_13750_134 ldv_13750 ; }; struct kparam_string { unsigned int maxlen ; char *string ; }; struct kparam_array { unsigned int max ; unsigned int elemsize ; unsigned int *num ; struct kernel_param_ops const *ops ; void *elem ; }; struct static_key { atomic_t enabled ; }; struct tracepoint; struct tracepoint_func { void *func ; void *data ; }; struct tracepoint { char const *name ; struct static_key key ; void (*regfunc)(void) ; void (*unregfunc)(void) ; struct tracepoint_func *funcs ; }; struct kernel_symbol { unsigned long value ; char const *name ; }; struct mod_arch_specific { }; struct module_param_attrs; struct module_kobject { struct kobject kobj ; struct module *mod ; struct kobject *drivers_dir ; struct module_param_attrs *mp ; }; struct module_attribute { struct attribute attr ; ssize_t (*show)(struct module_attribute * , struct module_kobject * , char * ) ; ssize_t (*store)(struct module_attribute * , struct module_kobject * , char const * , size_t ) ; void (*setup)(struct module * , char const * ) ; int (*test)(struct module * ) ; void (*free)(struct module * ) ; }; struct exception_table_entry; enum module_state { MODULE_STATE_LIVE = 0, MODULE_STATE_COMING = 1, MODULE_STATE_GOING = 2 } ; struct module_ref { unsigned long incs ; unsigned long decs ; }; struct module_sect_attrs; struct module_notes_attrs; struct ftrace_event_call; struct module { enum module_state state ; struct list_head list ; char name[56U] ; struct module_kobject mkobj ; struct module_attribute *modinfo_attrs ; char const *version ; char const *srcversion ; struct kobject *holders_dir ; struct kernel_symbol const *syms ; unsigned long const *crcs ; unsigned int 
num_syms ; struct kernel_param *kp ; unsigned int num_kp ; unsigned int num_gpl_syms ; struct kernel_symbol const *gpl_syms ; unsigned long const *gpl_crcs ; struct kernel_symbol const *unused_syms ; unsigned long const *unused_crcs ; unsigned int num_unused_syms ; unsigned int num_unused_gpl_syms ; struct kernel_symbol const *unused_gpl_syms ; unsigned long const *unused_gpl_crcs ; struct kernel_symbol const *gpl_future_syms ; unsigned long const *gpl_future_crcs ; unsigned int num_gpl_future_syms ; unsigned int num_exentries ; struct exception_table_entry *extable ; int (*init)(void) ; void *module_init ; void *module_core ; unsigned int init_size ; unsigned int core_size ; unsigned int init_text_size ; unsigned int core_text_size ; unsigned int init_ro_size ; unsigned int core_ro_size ; struct mod_arch_specific arch ; unsigned int taints ; unsigned int num_bugs ; struct list_head bug_list ; struct bug_entry *bug_table ; Elf64_Sym *symtab ; Elf64_Sym *core_symtab ; unsigned int num_symtab ; unsigned int core_num_syms ; char *strtab ; char *core_strtab ; struct module_sect_attrs *sect_attrs ; struct module_notes_attrs *notes_attrs ; char *args ; void *percpu ; unsigned int percpu_size ; unsigned int num_tracepoints ; struct tracepoint * const *tracepoints_ptrs ; unsigned int num_trace_bprintk_fmt ; char const **trace_bprintk_fmt_start ; struct ftrace_event_call **trace_events ; unsigned int num_trace_events ; struct list_head source_list ; struct list_head target_list ; struct task_struct *waiter ; void (*exit)(void) ; struct module_ref *refptr ; ctor_fn_t (**ctors)(void) ; unsigned int num_ctors ; }; struct exception_table_entry { int insn ; int fixup ; }; struct proc_ns_operations { char const *name ; int type ; void *(*get)(struct task_struct * ) ; void (*put)(void * ) ; int (*install)(struct nsproxy * , void * ) ; }; union proc_op { int (*proc_get_link)(struct dentry * , struct path * ) ; int (*proc_read)(struct task_struct * , char * ) ; int (*proc_show)(struct seq_file * , struct pid_namespace * , struct pid * , struct task_struct * ) ; }; struct proc_inode { struct pid *pid ; int fd ; union proc_op op ; struct proc_dir_entry *pde ; struct ctl_table_header *sysctl ; struct ctl_table *sysctl_entry ; void *ns ; struct proc_ns_operations const *ns_ops ; struct inode vfs_inode ; }; enum drbd_role { R_UNKNOWN = 0, R_PRIMARY = 1, R_SECONDARY = 2, R_MASK = 3 } ; struct bm_extent { int rs_left ; int rs_failed ; unsigned long flags ; struct lc_element lce ; }; typedef int ldv_func_ret_type___6; enum hrtimer_restart; enum drbd_ret_code { ERR_CODE_BASE = 100, NO_ERROR = 101, ERR_LOCAL_ADDR = 102, ERR_PEER_ADDR = 103, ERR_OPEN_DISK = 104, ERR_OPEN_MD_DISK = 105, ERR_DISK_NOT_BDEV = 107, ERR_MD_NOT_BDEV = 108, ERR_DISK_TOO_SMALL = 111, ERR_MD_DISK_TOO_SMALL = 112, ERR_BDCLAIM_DISK = 114, ERR_BDCLAIM_MD_DISK = 115, ERR_MD_IDX_INVALID = 116, ERR_IO_MD_DISK = 118, ERR_MD_INVALID = 119, ERR_AUTH_ALG = 120, ERR_AUTH_ALG_ND = 121, ERR_NOMEM = 122, ERR_DISCARD_IMPOSSIBLE = 123, ERR_DISK_CONFIGURED = 124, ERR_NET_CONFIGURED = 125, ERR_MANDATORY_TAG = 126, ERR_MINOR_INVALID = 127, ERR_INTR = 129, ERR_RESIZE_RESYNC = 130, ERR_NO_PRIMARY = 131, ERR_RESYNC_AFTER = 132, ERR_RESYNC_AFTER_CYCLE = 133, ERR_PAUSE_IS_SET = 134, ERR_PAUSE_IS_CLEAR = 135, ERR_PACKET_NR = 137, ERR_NO_DISK = 138, ERR_NOT_PROTO_C = 139, ERR_NOMEM_BITMAP = 140, ERR_INTEGRITY_ALG = 141, ERR_INTEGRITY_ALG_ND = 142, ERR_CPU_MASK_PARSE = 143, ERR_CSUMS_ALG = 144, ERR_CSUMS_ALG_ND = 145, ERR_VERIFY_ALG = 146, ERR_VERIFY_ALG_ND = 147, 
ERR_CSUMS_RESYNC_RUNNING = 148, ERR_VERIFY_RUNNING = 149, ERR_DATA_NOT_CURRENT = 150, ERR_CONNECTED = 151, ERR_PERM = 152, ERR_NEED_APV_93 = 153, ERR_STONITH_AND_PROT_A = 154, ERR_CONG_NOT_PROTO_A = 155, ERR_PIC_AFTER_DEP = 156, ERR_PIC_PEER_DEP = 157, ERR_RES_NOT_KNOWN = 158, ERR_RES_IN_USE = 159, ERR_MINOR_CONFIGURED = 160, ERR_MINOR_EXISTS = 161, ERR_INVALID_REQUEST = 162, ERR_NEED_APV_100 = 163, ERR_NEED_ALLOW_TWO_PRI = 164, ERR_MD_UNCLEAN = 165, AFTER_LAST_ERR_CODE = 166 } ; enum drbd_packet { P_DATA = 0, P_DATA_REPLY = 1, P_RS_DATA_REPLY = 2, P_BARRIER = 3, P_BITMAP = 4, P_BECOME_SYNC_TARGET = 5, P_BECOME_SYNC_SOURCE = 6, P_UNPLUG_REMOTE = 7, P_DATA_REQUEST = 8, P_RS_DATA_REQUEST = 9, P_SYNC_PARAM = 10, P_PROTOCOL = 11, P_UUIDS = 12, P_SIZES = 13, P_STATE = 14, P_SYNC_UUID = 15, P_AUTH_CHALLENGE = 16, P_AUTH_RESPONSE = 17, P_STATE_CHG_REQ = 18, P_PING = 19, P_PING_ACK = 20, P_RECV_ACK = 21, P_WRITE_ACK = 22, P_RS_WRITE_ACK = 23, P_SUPERSEDED = 24, P_NEG_ACK = 25, P_NEG_DREPLY = 26, P_NEG_RS_DREPLY = 27, P_BARRIER_ACK = 28, P_STATE_CHG_REPLY = 29, P_OV_REQUEST = 30, P_OV_REPLY = 31, P_OV_RESULT = 32, P_CSUM_RS_REQUEST = 33, P_RS_IS_IN_SYNC = 34, P_SYNC_PARAM89 = 35, P_COMPRESSED_BITMAP = 36, P_DELAY_PROBE = 39, P_OUT_OF_SYNC = 40, P_RS_CANCEL = 41, P_CONN_ST_CHG_REQ = 42, P_CONN_ST_CHG_REPLY = 43, P_RETRY_WRITE = 44, P_PROTOCOL_UPDATE = 45, P_MAY_IGNORE = 256, P_MAX_OPT_CMD = 257, P_INITIAL_META = 65521, P_INITIAL_DATA = 65522, P_CONNECTION_FEATURES = 65534 } ; struct p_barrier { u32 barrier ; u32 pad ; }; struct drbd_interval { struct rb_node rb ; sector_t sector ; unsigned int size ; sector_t end ; signed char local : 1 ; signed char waiting : 1 ; }; struct drbd_request { struct drbd_work w ; struct bio *private_bio ; struct drbd_interval i ; unsigned int epoch ; struct list_head tl_requests ; struct bio *master_bio ; unsigned long start_time ; atomic_t completion_ref ; struct kref kref ; unsigned int rq_state ; }; struct drbd_wq_barrier { struct drbd_work w ; struct completion done ; }; struct digest_info { int digest_size ; void *digest ; }; union __anonunion_ldv_50726_262 { u64 block_id ; struct digest_info *digest ; }; struct drbd_peer_request { struct drbd_work w ; struct drbd_epoch *epoch ; struct page *pages ; atomic_t pending_bios ; struct drbd_interval i ; unsigned long flags ; union __anonunion_ldv_50726_262 ldv_50726 ; }; enum drbd_req_event { CREATED = 0, TO_BE_SENT = 1, TO_BE_SUBMITTED = 2, QUEUE_FOR_NET_WRITE = 3, QUEUE_FOR_NET_READ = 4, QUEUE_FOR_SEND_OOS = 5, SEND_CANCELED = 6, SEND_FAILED = 7, HANDED_OVER_TO_NETWORK = 8, OOS_HANDED_TO_NETWORK = 9, CONNECTION_LOST_WHILE_PENDING = 10, READ_RETRY_REMOTE_CANCELED = 11, RECV_ACKED_BY_PEER = 12, WRITE_ACKED_BY_PEER = 13, WRITE_ACKED_BY_PEER_AND_SIS = 14, CONFLICT_RESOLVED = 15, POSTPONE_WRITE = 16, NEG_ACKED = 17, BARRIER_ACKED = 18, DATA_RECEIVED = 19, READ_COMPLETED_WITH_ERROR = 20, READ_AHEAD_COMPLETED_WITH_ERROR = 21, WRITE_COMPLETED_WITH_ERROR = 22, ABORT_DISK_IO = 23, COMPLETED_OK = 24, RESEND = 25, FAIL_FROZEN_DISK_IO = 26, RESTART_FROZEN_DISK_IO = 27, NOTHING = 28 } ; struct bio_and_error { struct bio *bio ; int error ; }; typedef int ldv_func_ret_type___12; enum hrtimer_restart; struct kvec { void *iov_base ; size_t iov_len ; }; struct in_addr { __be32 s_addr ; }; struct sockaddr_in { __kernel_sa_family_t sin_family ; __be16 sin_port ; struct in_addr sin_addr ; unsigned char __pad[8U] ; }; struct sockaddr_in6 { unsigned short sin6_family ; __be16 sin6_port ; __be32 sin6_flowinfo ; struct in6_addr sin6_addr ; 
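/*
 * The structures from p_header80 onward, a little further below, are
 * DRBD's on-wire packet formats: one fixed-size header per protocol
 * generation (p_header80, p_header95, p_header100) followed by
 * fixed-layout payloads (p_data, p_block_ack, p_sizes, ...).  Their
 * u32/u64 fields hold big-endian values on the wire and are converted
 * explicitly with the cpu_to_beNN / beNN_to_cpu helpers at send and
 * receive time rather than being typed __be32/__be64.
 */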
__u32 sin6_scope_id ; }; enum drbd_after_sb_p { ASB_DISCONNECT = 0, ASB_DISCARD_YOUNGER_PRI = 1, ASB_DISCARD_OLDER_PRI = 2, ASB_DISCARD_ZERO_CHG = 3, ASB_DISCARD_LEAST_CHG = 4, ASB_DISCARD_LOCAL = 5, ASB_DISCARD_REMOTE = 6, ASB_CONSENSUS = 7, ASB_DISCARD_SECONDARY = 8, ASB_CALL_HELPER = 9, ASB_VIOLENTLY = 10 } ; struct bm_xfer_ctx { unsigned long bm_bits ; unsigned long bm_words ; unsigned long bit_offset ; unsigned long word_offset ; unsigned int packets[2U] ; unsigned int bytes[2U] ; }; struct p_header80 { u32 magic ; u16 command ; u16 length ; }; struct p_header95 { u16 magic ; u16 command ; u32 length ; }; struct p_header100 { u32 magic ; u16 volume ; u16 command ; u32 length ; u32 pad ; }; struct p_data { u64 sector ; u64 block_id ; u32 seq_num ; u32 dp_flags ; }; struct p_block_ack { u64 sector ; u64 block_id ; u32 blksize ; u32 seq_num ; }; struct p_block_req { u64 sector ; u64 block_id ; u32 blksize ; u32 pad ; }; struct p_connection_features { u32 protocol_min ; u32 feature_flags ; u32 protocol_max ; u32 _pad ; u64 reserved[7U] ; }; struct p_barrier_ack { u32 barrier ; u32 set_size ; }; struct p_rs_param_95 { u32 resync_rate ; char verify_alg[64U] ; char csums_alg[64U] ; u32 c_plan_ahead ; u32 c_delay_target ; u32 c_fill_target ; u32 c_max_rate ; }; struct p_protocol { u32 protocol ; u32 after_sb_0p ; u32 after_sb_1p ; u32 after_sb_2p ; u32 conn_flags ; u32 two_primaries ; char integrity_alg[0U] ; }; struct p_uuids { u64 uuid[6U] ; }; struct p_rs_uuid { u64 uuid ; }; struct p_sizes { u64 d_size ; u64 u_size ; u64 c_size ; u32 max_bio_size ; u16 queue_order_type ; u16 dds_flags ; }; struct p_state { u32 state ; }; struct p_req_state { u32 mask ; u32 val ; }; struct p_req_state_reply { u32 retcode ; }; struct p_block_desc { u64 sector ; u32 blksize ; u32 pad ; }; enum drbd_bitmap_code { RLE_VLI_Bits = 2 } ; struct p_compressed_bm { u8 encoding ; u8 code[0U] ; }; enum epoch_event { EV_PUT = 0, EV_GOT_BARRIER_NR = 1, EV_BECAME_LAST = 2, EV_CLEANUP = 32 } ; enum dds_flags { DDSF_FORCED = 1, DDSF_NO_RESYNC = 2 } ; enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 } ; struct bitstream_cursor { u8 *b ; unsigned int bit ; }; struct bitstream { struct bitstream_cursor cur ; unsigned char *buf ; size_t buf_len ; unsigned int pad_bits ; }; struct packet_info { enum drbd_packet cmd ; unsigned int size ; unsigned int vnr ; void *data ; }; enum finish_epoch { FE_STILL_LIVE = 0, FE_DESTROYED = 1, FE_RECYCLED = 2 } ; struct accept_wait_data { struct drbd_tconn *tconn ; struct socket *s_listen ; struct completion door_bell ; void (*original_sk_state_change)(struct sock * ) ; }; struct data_cmd { int expect_payload ; size_t pkt_size ; int (*fn)(struct drbd_tconn * , struct packet_info * ) ; }; struct asender_cmd { size_t pkt_size ; int (*fn)(struct drbd_tconn * , struct packet_info * ) ; }; typedef int ldv_func_ret_type___25; typedef int ldv_func_ret_type___26; enum hrtimer_restart; enum drbd_on_congestion { OC_BLOCK = 0, OC_PULL_AHEAD = 1, OC_DISCONNECT = 2 } ; enum drbd_read_balancing { RB_PREFER_LOCAL = 0, RB_PREFER_REMOTE = 1, RB_ROUND_ROBIN = 2, RB_LEAST_PENDING = 3, RB_CONGESTED_REMOTE = 4, RB_32K_STRIPING = 5, RB_64K_STRIPING = 6, RB_128K_STRIPING = 7, RB_256K_STRIPING = 8, RB_512K_STRIPING = 9, RB_1M_STRIPING = 10 } ; enum hrtimer_restart; enum drbd_state_info_bcast_reason { SIB_GET_STATUS_REPLY = 1, SIB_STATE_CHANGE = 2, SIB_HELPER_PRE = 3, SIB_HELPER_POST = 4, SIB_SYNC_PROGRESS = 5 } ; struct __anonstruct_ldv_50737_260 { char *helper_name ; unsigned int 
helper_exit_code ; }; struct __anonstruct_ldv_50741_261 { union drbd_state os ; union drbd_state ns ; }; union __anonunion_ldv_50742_259 { struct __anonstruct_ldv_50737_260 ldv_50737 ; struct __anonstruct_ldv_50741_261 ldv_50741 ; }; struct sib_info { enum drbd_state_info_bcast_reason sib_reason ; union __anonunion_ldv_50742_259 ldv_50742 ; }; struct al_transaction_on_disk { __be32 magic ; __be32 tr_number ; __be32 crc32c ; __be16 transaction_type ; __be16 n_updates ; __be16 context_size ; __be16 context_start_slot_nr ; __be32 __reserved[4U] ; __be16 update_slot_nr[64U] ; __be32 update_extent_nr[64U] ; __be32 context[919U] ; }; struct update_odbm_work { struct drbd_work w ; unsigned int enr ; }; struct update_al_work { struct drbd_work w ; struct completion event ; int err ; }; enum hrtimer_restart; struct workqueue_struct; enum sock_shutdown_cmd { SHUT_RD = 0, SHUT_WR = 1, SHUT_RDWR = 2 } ; struct retry_worker { struct workqueue_struct *wq ; struct work_struct worker ; spinlock_t lock ; struct list_head writes ; }; struct meta_data_on_disk { u64 la_size ; u64 uuid[4U] ; u64 device_uuid ; u64 reserved_u64_1 ; u32 flags ; u32 magic ; u32 md_size_sect ; u32 al_offset ; u32 al_nr_extents ; u32 bm_offset ; u32 bm_bytes_per_bit ; u32 la_peer_max_bio_size ; u32 reserved_u32[3U] ; }; struct fault_random_state { unsigned long state ; unsigned long count ; }; enum hrtimer_restart; enum hrtimer_restart; struct subprocess_info { struct work_struct work ; struct completion *complete ; char *path ; char **argv ; char **envp ; int wait ; int retval ; int (*init)(struct subprocess_info * , struct cred * ) ; void (*cleanup)(struct subprocess_info * ) ; void *data ; }; enum kobject_action { KOBJ_ADD = 0, KOBJ_REMOVE = 1, KOBJ_CHANGE = 2, KOBJ_MOVE = 3, KOBJ_ONLINE = 4, KOBJ_OFFLINE = 5, KOBJ_MAX = 6 } ; struct scm_creds { u32 pid ; kuid_t uid ; kgid_t gid ; }; struct netlink_skb_parms { struct scm_creds creds ; __u32 portid ; __u32 dst_group ; struct sock *ssk ; }; enum drbd_fencing_p { FP_NOT_AVAIL = -1, FP_DONT_CARE = 0, FP_RESOURCE = 1, FP_STONITH = 2 } ; union __anonunion_ldv_49826_259 { __u32 flags ; __s32 ret_code ; }; struct drbd_genlmsghdr { __u32 minor ; union __anonunion_ldv_49826_259 ldv_49826 ; }; struct genlmsghdr { __u8 cmd ; __u8 version ; __u16 reserved ; }; struct drbd_cfg_context { __u32 ctx_volume ; char ctx_resource_name[128U] ; __u32 ctx_resource_name_len ; char ctx_my_addr[128U] ; __u32 ctx_my_addr_len ; char ctx_peer_addr[128U] ; __u32 ctx_peer_addr_len ; }; struct set_role_parms { char assume_uptodate ; }; struct resize_parms { __u64 resize_size ; char resize_force ; char no_resync ; }; struct state_info { __u32 sib_reason ; __u32 current_state ; __u64 capacity ; __u64 ed_uuid ; __u32 prev_state ; __u32 new_state ; char uuids[32U] ; __u32 uuids_len ; __u32 disk_flags ; __u64 bits_total ; __u64 bits_oos ; __u64 bits_rs_total ; __u64 bits_rs_failed ; char helper[32U] ; __u32 helper_len ; __u32 helper_exit_code ; __u64 send_cnt ; __u64 recv_cnt ; __u64 read_cnt ; __u64 writ_cnt ; __u64 al_writ_cnt ; __u64 bm_writ_cnt ; __u32 ap_bio_cnt ; __u32 ap_pending_cnt ; __u32 rs_pending_cnt ; }; struct start_ov_parms { __u64 ov_start_sector ; __u64 ov_stop_sector ; }; struct new_c_uuid_parms { char clear_bm ; }; struct timeout_parms { __u32 timeout_type ; }; struct disconnect_parms { char force_disconnect ; }; struct detach_parms { char force_detach ; }; struct genl_family; struct genl_multicast_group { struct genl_family *family ; struct list_head list ; char name[16U] ; u32 id ; }; struct 
genl_ops; struct genl_info; struct genl_family { unsigned int id ; unsigned int hdrsize ; char name[16U] ; unsigned int version ; unsigned int maxattr ; bool netnsok ; int (*pre_doit)(struct genl_ops * , struct sk_buff * , struct genl_info * ) ; void (*post_doit)(struct genl_ops * , struct sk_buff * , struct genl_info * ) ; struct nlattr **attrbuf ; struct list_head ops_list ; struct list_head family_list ; struct list_head mcast_groups ; }; struct genl_info { u32 snd_seq ; u32 snd_portid ; struct nlmsghdr *nlhdr ; struct genlmsghdr *genlhdr ; void *userhdr ; struct nlattr **attrs ; struct net *_net ; void *user_ptr[2U] ; }; struct genl_ops { u8 cmd ; u8 internal_flags ; unsigned int flags ; struct nla_policy const *policy ; int (*doit)(struct sk_buff * , struct genl_info * ) ; int (*dumpit)(struct sk_buff * , struct netlink_callback * ) ; int (*done)(struct netlink_callback * ) ; struct list_head ops_list ; }; struct drbd_config_context { unsigned int minor ; unsigned int volume ; char *resource_name ; struct nlattr *my_addr ; struct nlattr *peer_addr ; struct sk_buff *reply_skb ; struct drbd_genlmsghdr *reply_dh ; struct drbd_conf *mdev ; struct drbd_tconn *tconn ; }; struct crypto { struct crypto_hash *verify_tfm ; struct crypto_hash *csums_tfm ; struct crypto_hash *cram_hmac_tfm ; struct crypto_hash *integrity_tfm ; }; struct rb_augment_callbacks { void (*propagate)(struct rb_node * , struct rb_node * ) ; void (*copy)(struct rb_node * , struct rb_node * ) ; void (*rotate)(struct rb_node * , struct rb_node * ) ; }; enum hrtimer_restart; struct after_state_chg_work { struct drbd_work w ; union drbd_state os ; union drbd_state ns ; enum chg_state_flags flags ; struct completion *done ; }; enum sanitize_state_warnings { NO_WARNING = 0, ABORTED_ONLINE_VERIFY = 1, ABORTED_RESYNC = 2, CONNECTION_LOST_NEGOTIATING = 3, IMPLICITLY_UPGRADED_DISK = 4, IMPLICITLY_UPGRADED_PDSK = 5 } ; struct after_conn_state_chg_work { struct drbd_work w ; enum drbd_conns oc ; union drbd_state ns_min ; union drbd_state ns_max ; enum chg_state_flags flags ; }; typedef int ldv_func_ret_type___4; enum hrtimer_restart; void *memcpy(void * , void const * , unsigned long ) ; long ldv__builtin_expect(long exp , long c ) ; __inline static void set_bit(unsigned int nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return; } } __inline static void clear_bit(int nr , unsigned long volatile *addr ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %1,%0": "+m" (*((long volatile *)addr)): "Ir" (nr)); return; } } __inline static void clear_bit_unlock(unsigned int nr , unsigned long volatile *addr ) { { __asm__ volatile ("": : : "memory"); clear_bit((int )nr, addr); return; } } __inline static int test_and_set_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; bts %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return (oldbit); } } __inline static int __test_and_set_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ ("bts %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr)); return (oldbit); } } __inline static int test_and_clear_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile 
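/* The bit operations in this stretch are the x86 forms CIL preserved:
   lock-prefixed bts/btr plus sbb to materialize the previous bit value,
   with .smp_locks section entries so the kernel can patch away the lock
   prefixes on uniprocessor systems. */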
(".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btr %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return (oldbit); } } __inline static int __test_and_clear_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile ("btr %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr)); return (oldbit); } } __inline static int constant_test_bit(unsigned int nr , unsigned long const volatile *addr ) { { return ((int )((unsigned long )*(addr + (unsigned long )(nr / 64U)) >> ((int )nr & 63)) & 1); } } __inline static int variable_test_bit(int nr , unsigned long const volatile *addr ) { int oldbit ; { __asm__ volatile ("bt %2,%1\n\tsbb %0,%0": "=r" (oldbit): "m" (*((unsigned long *)addr)), "Ir" (nr)); return (oldbit); } } extern unsigned long find_next_bit(unsigned long const * , unsigned long , unsigned long ) ; extern unsigned long find_next_zero_bit(unsigned long const * , unsigned long , unsigned long ) ; __inline static unsigned long __arch_hweight64(__u64 w ) { unsigned long res ; { res = 0UL; __asm__ ("661:\n\tcall __sw_hweight64\n662:\n.pushsection .altinstructions,\"a\"\n .long 661b - .\n .long 6631f - .\n .word (4*32+23)\n .byte 662b-661b\n .byte 6641f-6631f\n.popsection\n.pushsection .discard,\"aw\",@progbits\n .byte 0xff + (6641f-6631f) - (662b-661b)\n.popsection\n.pushsection .altinstr_replacement, \"ax\"\n6631:\n\t.byte 0xf3,0x48,0x0f,0xb8,0xc7\n6641:\n\t.popsection": "=a" (res): "D" (w)); return (res); } } __inline static unsigned long find_next_zero_bit_le(void const *addr , unsigned long size , unsigned long offset ) { unsigned long tmp ; { tmp = find_next_zero_bit((unsigned long const *)addr, size, offset); return (tmp); } } __inline static unsigned long find_next_bit_le(void const *addr , unsigned long size , unsigned long offset ) { unsigned long tmp ; { tmp = find_next_bit((unsigned long const *)addr, size, offset); return (tmp); } } __inline static int test_bit_le(int nr , void const *addr ) { int tmp ; { tmp = variable_test_bit(nr, (unsigned long const volatile *)addr); return (tmp); } } __inline static int __test_and_set_bit_le(int nr , void *addr ) { int tmp ; { tmp = __test_and_set_bit(nr, (unsigned long volatile *)addr); return (tmp); } } __inline static int __test_and_clear_bit_le(int nr , void *addr ) { int tmp ; { tmp = __test_and_clear_bit(nr, (unsigned long volatile *)addr); return (tmp); } } __inline static unsigned long hweight_long(unsigned long w ) { unsigned long tmp ; { tmp = __arch_hweight64((__u64 )w); return (tmp); } } extern int printk(char const * , ...) ; extern int __dynamic_dev_dbg(struct _ddebug * , struct device const * , char const * , ...) 
; extern void __might_sleep(char const * , int , int ) ; extern void __bad_percpu_size(void) ; extern struct task_struct *current_task ; __inline static struct task_struct *get_current(void) { struct task_struct *pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& current_task)); goto ldv_2860; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2860; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2860; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& current_task)); goto ldv_2860; default: __bad_percpu_size(); } ldv_2860: ; return (pfo_ret__); } } extern void *memcpy(void * , void const * , size_t ) ; extern void *memset(void * , int , size_t ) ; extern void warn_slowpath_null(char const * , int const ) ; extern void __xadd_wrong_size(void) ; __inline static int atomic_read(atomic_t const *v ) { { return ((int )*((int volatile *)(& v->counter))); } } __inline static void atomic_add(int i , atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; addl %1,%0": "+m" (v->counter): "ir" (i)); return; } } __inline static int atomic_sub_and_test(int i , atomic_t *v ) { unsigned char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %2,%0; sete %1": "+m" (v->counter), "=qm" (c): "ir" (i): "memory"); return ((int )c); } } __inline static void atomic_inc(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; incl %0": "+m" (v->counter)); return; } } __inline static int atomic_dec_and_test(atomic_t *v ) { unsigned char c ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0; sete %1": "+m" (v->counter), "=qm" (c): : "memory"); return ((unsigned int )c != 0U); } } __inline static int atomic_add_return(int i , atomic_t *v ) { int __ret ; { __ret = i; switch (4UL) { case 1UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5470; case 2UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5470; case 4UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5470; case 8UL: __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; xaddq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5470; default: __xadd_wrong_size(); } ldv_5470: ; return (__ret + i); } } __inline static int atomic_sub_return(int i , atomic_t *v ) { int tmp ; { tmp = atomic_add_return(- i, v); return (tmp); } } extern void lock_acquire(struct lockdep_map * , unsigned int , int , int , int , struct lockdep_map * , unsigned long ) ; extern void lock_release(struct lockdep_map * , int , unsigned long ) ; extern int lock_is_held(struct lockdep_map * ) ; extern void lockdep_rcu_suspicious(char const * , int const , char const * ) ; extern void __mutex_init(struct mutex * , char const * , struct lock_class_key * ) ; extern int mutex_trylock(struct mutex * ) ; int ldv_mutex_trylock_4(struct mutex 
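/* The numbered ldv_mutex_* prototypes that follow are Linux Driver
   Verification (LDV) instrumentation: per-call-site and per-lock-class
   stand-ins for mutex_lock/mutex_trylock/mutex_unlock, so the checker can
   track lock/unlock pairing (e.g. on bm_change of drbd_bitmap). */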
*ldv_func_arg1 ) ; int ldv_mutex_trylock_12(struct mutex *ldv_func_arg1 ) ; extern void mutex_unlock(struct mutex * ) ; void ldv_mutex_unlock_2(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_5(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_7(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_9(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_11(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_14(struct mutex *ldv_func_arg1 ) ; extern void mutex_lock(struct mutex * ) ; void ldv_mutex_lock_1(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_3(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_6(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_8(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_10(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_13(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_bm_change_of_drbd_bitmap(struct mutex *lock ) ; int ldv_mutex_trylock_bm_change_of_drbd_bitmap(struct mutex *lock ) ; void ldv_mutex_unlock_bm_change_of_drbd_bitmap(struct mutex *lock ) ; void ldv_mutex_lock_cred_guard_mutex_of_signal_struct(struct mutex *lock ) ; void ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(struct mutex *lock ) ; void ldv_mutex_lock_lock(struct mutex *lock ) ; void ldv_mutex_unlock_lock(struct mutex *lock ) ; void ldv_mutex_lock_mtx_of_percpu_rw_semaphore(struct mutex *lock ) ; void ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(struct mutex *lock ) ; void ldv_mutex_lock_mutex_of_device(struct mutex *lock ) ; int ldv_mutex_trylock_mutex_of_device(struct mutex *lock ) ; void ldv_mutex_unlock_mutex_of_device(struct mutex *lock ) ; extern unsigned long kernel_stack ; __inline static struct thread_info *current_thread_info(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6299; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6299; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6299; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6299; default: __bad_percpu_size(); } ldv_6299: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern void __raw_spin_lock_init(raw_spinlock_t * , char const * , struct lock_class_key * ) ; extern void _raw_spin_lock_irq(raw_spinlock_t * ) ; extern unsigned long _raw_spin_lock_irqsave(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irq(raw_spinlock_t * ) ; extern void _raw_spin_unlock_irqrestore(raw_spinlock_t * , unsigned long ) ; extern void _raw_read_lock(rwlock_t * ) ; extern void _raw_read_unlock(rwlock_t * ) ; __inline static raw_spinlock_t *spinlock_check(spinlock_t *lock ) { { return (& lock->ldv_5957.rlock); } } __inline static void spin_lock_irq(spinlock_t *lock ) { { _raw_spin_lock_irq(& lock->ldv_5957.rlock); return; } } __inline static void spin_unlock_irq(spinlock_t *lock ) { { _raw_spin_unlock_irq(& lock->ldv_5957.rlock); return; } } __inline static void spin_unlock_irqrestore(spinlock_t *lock , unsigned long flags ) { { _raw_spin_unlock_irqrestore(& lock->ldv_5957.rlock, flags); return; } } extern void *__vmalloc(unsigned long , gfp_t , pgprot_t ) ; extern void vfree(void const * ) ; extern unsigned long volatile jiffies ; extern void __init_waitqueue_head(wait_queue_head_t * , char const * , struct lock_class_key * ) ; extern void __wake_up(wait_queue_head_t * , unsigned int , int , void * ) ; extern void prepare_to_wait(wait_queue_head_t * 
, wait_queue_t * , int ) ; extern void finish_wait(wait_queue_head_t * , wait_queue_t * ) ; extern int autoremove_wake_function(wait_queue_t * , unsigned int , int , void * ) ; __inline static void __rcu_read_lock(void) { struct thread_info *tmp ; { tmp = current_thread_info(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } extern int rcu_is_cpu_idle(void) ; extern bool rcu_lockdep_current_cpu_online(void) ; __inline static void rcu_lock_acquire(struct lockdep_map *map ) { { lock_acquire(map, 0U, 0, 2, 1, 0, (unsigned long )((void *)0)); return; } } __inline static void rcu_lock_release(struct lockdep_map *map ) { { lock_release(map, 1, (unsigned long )((void *)0)); return; } } extern struct lockdep_map rcu_lock_map ; extern int debug_lockdep_rcu_enabled(void) ; __inline static int rcu_read_lock_held(void) { int tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp == 0) { return (1); } else { } tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { return (0); } else { } tmp___1 = rcu_lockdep_current_cpu_online(); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } tmp___3 = lock_is_held(& rcu_lock_map); return (tmp___3); } } __inline static void rcu_read_lock(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock(); return; } } extern struct page *alloc_pages_current(gfp_t , unsigned int ) ; __inline static struct page *alloc_pages(gfp_t gfp_mask , unsigned int order ) { struct page *tmp ; { tmp = alloc_pages_current(gfp_mask, order); return (tmp); } } extern void __free_pages(struct page * , unsigned int ) ; __inline static int kref_sub(struct kref *kref , unsigned int count , void (*release)(struct kref * ) ) { int __ret_warn_on ; long tmp ; int tmp___0 ; { __ret_warn_on = (unsigned long )release == (unsigned long )((void (*)(struct kref * ))0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/kref.h", 67); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = atomic_sub_and_test((int )count, & kref->refcount); if (tmp___0 != 0) { (*release)(kref); return (1); } else { } return (0); } } __inline static int kref_put(struct kref *kref , void (*release)(struct kref * ) ) { int tmp ; { tmp = kref_sub(kref, 1U, release); return (tmp); } } extern int ___ratelimit(struct ratelimit_state * , char const * ) ; extern int dev_alert(struct device const * , char const * , ...) ; extern int dev_err(struct device const * , char const * , ...) 
; extern int dev_warn(struct device const * , char const * , ...) ; extern int _dev_info(struct device const * , char const * , ...) ; __inline static void *lowmem_page_address(struct page const *page ) { { return ((void *)((unsigned long )((unsigned long long )(((long )page + 24189255811072L) / 64L) << 12) + 0xffff880000000000UL)); } } extern void submit_bio(int , struct bio * ) ; extern void schedule(void) ; extern int _cond_resched(void) ; extern void kfree(void const * ) ; extern void *__kmalloc(size_t , gfp_t ) ; __inline static void *kmalloc(size_t size , gfp_t flags ) { void *tmp___2 ; { tmp___2 = __kmalloc(size, flags); return (tmp___2); } } __inline static void *kzalloc(size_t size , gfp_t flags ) { void *tmp ; { tmp = kmalloc(size, flags | 32768U); return (tmp); } } __inline static void *kmap_atomic(struct page *page ) { void *tmp ; { __rcu_read_lock(); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void __kunmap_atomic(void *addr ) { { __rcu_read_unlock(); return; } } extern void *mempool_alloc(mempool_t * , gfp_t ) ; extern void mempool_free(void * , mempool_t * ) ; extern void bio_put(struct bio * ) ; extern void bio_endio(struct bio * , int ) ; extern int bio_add_page(struct bio * , struct page * , unsigned int , unsigned int ) ; extern int blkdev_issue_flush(struct block_device * , gfp_t , sector_t * ) ; enum drbd_state_rv __drbd_set_state(struct drbd_conf *mdev , union drbd_state ns , enum chg_state_flags flags , struct completion *done ) ; int enable_faults ; int fault_rate ; unsigned int _drbd_insert_fault(struct drbd_conf *mdev , unsigned int type ) ; __inline static int drbd_insert_fault(struct drbd_conf *mdev , unsigned int type ) { unsigned int tmp ; int tmp___0 ; { if (fault_rate != 0 && (enable_faults >> (int )type) & 1) { tmp = _drbd_insert_fault(mdev, type); if (tmp != 0U) { tmp___0 = 1; } else { tmp___0 = 0; } } else { tmp___0 = 0; } return (tmp___0); } } struct ratelimit_state drbd_ratelimit_state ; char *drbd_task_to_thread_name(struct drbd_tconn *tconn , struct task_struct *task ) ; void drbd_go_diskless(struct drbd_conf *mdev ) ; void drbd_ldev_destroy(struct drbd_conf *mdev ) ; int drbd_bm_init(struct drbd_conf *mdev ) ; int drbd_bm_resize(struct drbd_conf *mdev , sector_t capacity , int set_new_bits ) ; void drbd_bm_cleanup(struct drbd_conf *mdev ) ; void drbd_bm_set_all(struct drbd_conf *mdev ) ; void drbd_bm_clear_all(struct drbd_conf *mdev ) ; int drbd_bm_set_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) ; int drbd_bm_clear_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) ; int drbd_bm_count_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) ; void _drbd_bm_set_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) ; int drbd_bm_test_bit(struct drbd_conf *mdev , unsigned long const bitnr ) ; int drbd_bm_e_weight(struct drbd_conf *mdev , unsigned long enr ) ; int drbd_bm_write_page(struct drbd_conf *mdev , unsigned int idx ) ; int drbd_bm_read(struct drbd_conf *mdev ) ; void drbd_bm_mark_for_writeout(struct drbd_conf *mdev , int page_nr ) ; int drbd_bm_write(struct drbd_conf *mdev ) ; int drbd_bm_write_hinted(struct drbd_conf *mdev ) ; int drbd_bm_write_all(struct drbd_conf *mdev ) ; int drbd_bm_write_copy_pages(struct drbd_conf *mdev ) ; size_t drbd_bm_words(struct drbd_conf *mdev ) ; unsigned long drbd_bm_bits(struct drbd_conf *mdev ) ; sector_t drbd_bm_capacity(struct drbd_conf *mdev ) ; unsigned long 
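/* Declarations of the drbd_bitmap.c API (drbd_bm_*) continue here; the
   function definitions themselves start below with __bm_print_lock_info(). */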
drbd_bm_find_next(struct drbd_conf *mdev , unsigned long bm_fo ) ; unsigned long _drbd_bm_find_next(struct drbd_conf *mdev , unsigned long bm_fo ) ; unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev , unsigned long bm_fo ) ; unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev ) ; unsigned long drbd_bm_total_weight(struct drbd_conf *mdev ) ; void drbd_bm_merge_lel(struct drbd_conf *mdev , size_t offset , size_t number , unsigned long *buffer ) ; void drbd_bm_get_lel(struct drbd_conf *mdev , size_t offset , size_t number , unsigned long *buffer ) ; void drbd_bm_lock(struct drbd_conf *mdev , char *why , enum bm_flag flags ) ; void drbd_bm_unlock(struct drbd_conf *mdev ) ; mempool_t *drbd_md_io_page_pool ; struct bio *bio_alloc_drbd(gfp_t gfp_mask ) ; rwlock_t global_state_lock ; char *ppsize(char *buf , unsigned long long size ) ; void wait_until_done_or_force_detached(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , unsigned int *done ) ; __inline static enum drbd_state_rv _drbd_set_state(struct drbd_conf *mdev , union drbd_state ns , enum chg_state_flags flags , struct completion *done ) { enum drbd_state_rv rv ; { _raw_read_lock(& global_state_lock); rv = __drbd_set_state(mdev, ns, flags, done); _raw_read_unlock(& global_state_lock); return (rv); } } __inline static union drbd_state drbd_read_state(struct drbd_conf *mdev ) { union drbd_state rv ; { rv.i = mdev->state.i; rv.ldv_40024.susp = (mdev->tconn)->susp; rv.ldv_40024.susp_nod = (mdev->tconn)->susp_nod; rv.ldv_40024.susp_fen = (mdev->tconn)->susp_fen; return (rv); } } __inline static void __drbd_chk_io_error_(struct drbd_conf *mdev , enum drbd_force_detach_flags df , char const *where ) { enum drbd_io_error_p ep ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; union drbd_state __ns ; union drbd_state __ns___0 ; { rcu_read_lock(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1682, "suspicious rcu_dereference_check() usage"); } else { } } else { } ep = (enum drbd_io_error_p )_________p1->on_io_error; rcu_read_unlock(); switch ((unsigned int )ep) { case 0U: ; if ((unsigned int )df == 0U || (unsigned int )df == 1U) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "__drbd_chk_io_error_"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s.\n", where); } else { } if ((int )mdev->state.ldv_49522.disk > 4) { __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 4U; _drbd_set_state(mdev, __ns, CS_HARD, 0); } else { } goto ldv_50794; } else { } case 2U: ; case 1U: set_bit(12U, (unsigned long volatile *)(& mdev->flags)); if ((unsigned int )df == 0U) { set_bit(13U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((unsigned int )df == 3U) { set_bit(14U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((int )mdev->state.ldv_49522.disk > 2) { __ns___0 = drbd_read_state(mdev); __ns___0.ldv_40024.disk = 2U; _drbd_set_state(mdev, __ns___0, CS_HARD, 0); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s. 
Detaching...\n", where); } else { } goto ldv_50794; } ldv_50794: ; return; } } __inline static void drbd_chk_io_error_(struct drbd_conf *mdev , int error , enum drbd_force_detach_flags forcedetach , char const *where ) { unsigned long flags ; raw_spinlock_t *tmp ; { if (error != 0) { tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); __drbd_chk_io_error_(mdev, forcedetach, where); spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); } else { } return; } } __inline static sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev ) { int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; { rcu_read_lock(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1790, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock(); switch (meta_dev_idx) { case -1: ; case -3: ; return ((sector_t )(bdev->md.md_offset + 7ULL)); case -2: ; default: ; return ((sector_t )(bdev->md.md_offset + (u64 )bdev->md.md_size_sect)); } } } __inline static void put_ldev(struct drbd_conf *mdev ) { int i ; int tmp ; { tmp = atomic_sub_return(1, & mdev->local_cnt); i = tmp; if (i < 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( i >= 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 2051); } else { } if (i == 0) { if ((unsigned int )*((unsigned char *)mdev + 749UL) == 0U) { drbd_ldev_destroy(mdev); } else { } if ((unsigned int )*((unsigned char *)mdev + 749UL) == 4U) { drbd_go_diskless(mdev); } else { } __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } return; } } __inline static int _get_ldev_if_state(struct drbd_conf *mdev , enum drbd_disk_state mins ) { int io_allowed ; { if ((unsigned int )*((unsigned char *)mdev + 749UL) == 0U) { return (0); } else { } atomic_inc(& mdev->local_cnt); io_allowed = (unsigned int )mdev->state.ldv_49522.disk >= (unsigned int )mins; if (io_allowed == 0) { put_ldev(mdev); } else { } return (io_allowed); } } __inline static void drbd_md_flush(struct drbd_conf *mdev ) { int r ; int tmp ; { if ((unsigned long )mdev->ldev == (unsigned long )((struct drbd_backing_dev *)0)) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "mdev->ldev == NULL in drbd_md_flush\n"); return; } else { } tmp = constant_test_bit(7U, (unsigned long const volatile *)(& mdev->flags)); if (tmp != 0) { return; } else { } r = blkdev_issue_flush((mdev->ldev)->md_bdev, 16U, 0); if (r != 0) { set_bit(7U, (unsigned long volatile *)(& mdev->flags)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "meta data flush failed with status %d, disabling md-flushes\n", r); } else { } return; } } static void __bm_print_lock_info(struct drbd_conf *mdev , char const *func ) { struct drbd_bitmap *b ; int tmp ; char *tmp___0 ; struct task_struct *tmp___1 ; char *tmp___2 ; { b = mdev->bitmap; tmp = ___ratelimit(& drbd_ratelimit_state, "__bm_print_lock_info"); if (tmp == 0) { return; } else { } tmp___0 = drbd_task_to_thread_name(mdev->tconn, b->bm_task); tmp___1 = get_current(); tmp___2 = drbd_task_to_thread_name(mdev->tconn, tmp___1); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME %s in %s, bitmap locked 
for \'%s\' by %s\n", tmp___2, func, (unsigned long )b->bm_why != (unsigned long )((char *)0) ? b->bm_why : (char *)"?", tmp___0); return; } } void drbd_bm_lock(struct drbd_conf *mdev , char *why , enum bm_flag flags ) { struct drbd_bitmap *b ; int trylock_failed ; int tmp ; char *tmp___0 ; struct task_struct *tmp___1 ; char *tmp___2 ; { b = mdev->bitmap; if ((unsigned long )b == (unsigned long )((struct drbd_bitmap *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME no bitmap in drbd_bm_lock!?\n"); return; } else { } tmp = ldv_mutex_trylock_12(& b->bm_change); trylock_failed = tmp == 0; if (trylock_failed != 0) { tmp___0 = drbd_task_to_thread_name(mdev->tconn, b->bm_task); tmp___1 = get_current(); tmp___2 = drbd_task_to_thread_name(mdev->tconn, tmp___1); dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s going to \'%s\' but bitmap already locked for \'%s\' by %s\n", tmp___2, why, (unsigned long )b->bm_why != (unsigned long )((char *)0) ? b->bm_why : (char *)"?", tmp___0); ldv_mutex_lock_13(& b->bm_change); } else { } if (((unsigned int )b->bm_flags & 15U) != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME bitmap already locked in bm_lock\n"); } else { } b->bm_flags = (enum bm_flag )((unsigned int )b->bm_flags | ((unsigned int )flags & 15U)); b->bm_why = why; b->bm_task = get_current(); return; } } void drbd_bm_unlock(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; { b = mdev->bitmap; if ((unsigned long )b == (unsigned long )((struct drbd_bitmap *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME no bitmap in drbd_bm_unlock!?\n"); return; } else { } if (((unsigned int )(mdev->bitmap)->bm_flags & 15U) == 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME bitmap not locked in bm_unlock\n"); } else { } b->bm_flags = (enum bm_flag )((unsigned int )b->bm_flags & 4294967280U); b->bm_why = 0; b->bm_task = 0; ldv_mutex_unlock_14(& b->bm_change); return; } } static void bm_store_page_idx(struct page *page , unsigned long idx ) { long tmp ; { tmp = ldv__builtin_expect((idx & 0xffffffffff000000UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"), "i" (289), "i" (12UL)); ldv_51111: ; goto ldv_51111; } else { } page->ldv_14746.private = idx; return; } } static unsigned long bm_page_to_idx(struct page *page ) { { return (page->ldv_14746.private & 16777215UL); } } static void bm_page_lock_io(struct drbd_conf *mdev , int page_nr ) { struct drbd_bitmap *b ; void *addr ; int tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; int tmp___1 ; { b = mdev->bitmap; addr = (void *)(& (*(b->bm_pages + (unsigned long )page_nr))->ldv_14746.private); tmp = test_and_set_bit(31, (unsigned long volatile *)addr); if (tmp == 0) { goto ldv_51121; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51124: prepare_to_wait(& b->bm_io_wait, & __wait, 2); tmp___1 = test_and_set_bit(31, (unsigned long volatile *)addr); if (tmp___1 == 0) { goto ldv_51123; } else { } schedule(); goto ldv_51124; ldv_51123: 
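/* Exit of the open-coded wait_event() above: bit 31 of page->private (the
   per-page IO lock) is now ours, so detach from the bm_io_wait queue.
   The bm_*_page_* helpers below pack state into each bitmap page's private
   word: the low 24 bits (mask 16777215UL == 0xFFFFFF) hold the page index,
   the high bits are flags -- 27 hint-writeout, 28 lazy-writeout, 29
   need-writeout, 30 IO error, 31 IO lock; 805306368UL is bits 28|29, i.e.
   the "page changed" test mask. */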
finish_wait(& b->bm_io_wait, & __wait); ldv_51121: ; return; } } static void bm_page_unlock_io(struct drbd_conf *mdev , int page_nr ) { struct drbd_bitmap *b ; void *addr ; { b = mdev->bitmap; addr = (void *)(& (*(b->bm_pages + (unsigned long )page_nr))->ldv_14746.private); clear_bit_unlock(31U, (unsigned long volatile *)addr); __wake_up(& (mdev->bitmap)->bm_io_wait, 3U, 1, 0); return; } } static void bm_set_page_unchanged(struct page *page ) { { clear_bit(29, (unsigned long volatile *)(& page->ldv_14746.private)); clear_bit(28, (unsigned long volatile *)(& page->ldv_14746.private)); return; } } static void bm_set_page_need_writeout(struct page *page ) { { set_bit(29U, (unsigned long volatile *)(& page->ldv_14746.private)); return; } } void drbd_bm_mark_for_writeout(struct drbd_conf *mdev , int page_nr ) { struct page *page ; { if ((size_t )page_nr >= (mdev->bitmap)->bm_number_of_pages) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "BAD: page_nr: %u, number_of_pages: %u\n", page_nr, (int )(mdev->bitmap)->bm_number_of_pages); return; } else { } page = *((mdev->bitmap)->bm_pages + (unsigned long )page_nr); set_bit(27U, (unsigned long volatile *)(& page->ldv_14746.private)); return; } } static int bm_test_page_unchanged(struct page *page ) { unsigned long const volatile *addr ; { addr = (unsigned long const volatile *)(& page->ldv_14746.private); return (((unsigned long )*addr & 805306368UL) == 0UL); } } static void bm_set_page_io_err(struct page *page ) { { set_bit(30U, (unsigned long volatile *)(& page->ldv_14746.private)); return; } } static void bm_clear_page_io_err(struct page *page ) { { clear_bit(30, (unsigned long volatile *)(& page->ldv_14746.private)); return; } } static void bm_set_page_lazy_writeout(struct page *page ) { { set_bit(28U, (unsigned long volatile *)(& page->ldv_14746.private)); return; } } static int bm_test_page_lazy_writeout(struct page *page ) { int tmp ; { tmp = constant_test_bit(28U, (unsigned long const volatile *)(& page->ldv_14746.private)); return (tmp); } } static unsigned int bm_word_to_page_idx(struct drbd_bitmap *b , unsigned long long_nr ) { unsigned int page_nr ; long tmp ; { page_nr = (unsigned int )(long_nr >> 9); tmp = ldv__builtin_expect((size_t )page_nr >= b->bm_number_of_pages, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"), "i" (382), "i" (12UL)); ldv_51163: ; goto ldv_51163; } else { } return (page_nr); } } static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b , u64 bitnr ) { unsigned int page_nr ; long tmp ; { page_nr = (unsigned int )(bitnr >> 15); tmp = ldv__builtin_expect((size_t )page_nr >= b->bm_number_of_pages, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"), "i" (390), "i" (12UL)); ldv_51169: ; goto ldv_51169; } else { } return (page_nr); } } static unsigned long *__bm_map_pidx(struct drbd_bitmap *b , unsigned int idx ) { struct page *page ; void *tmp ; { page = 
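/* Map the idx-th bitmap page.  In this extracted model kmap_atomic()
   reduces to lowmem_page_address() under __rcu_read_lock(); __bm_unmap()
   undoes that, while the plain bm_unmap() below is an empty stub. */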
*(b->bm_pages + (unsigned long )idx); tmp = kmap_atomic(page); return ((unsigned long *)tmp); } } static unsigned long *bm_map_pidx(struct drbd_bitmap *b , unsigned int idx ) { unsigned long *tmp ; { tmp = __bm_map_pidx(b, idx); return (tmp); } } static void __bm_unmap(unsigned long *p_addr ) { { __kunmap_atomic((void *)p_addr); return; } } static void bm_unmap(unsigned long *p_addr ) { { return; } } static void bm_free_pages(struct page **pages , unsigned long number ) { unsigned long i ; { if ((unsigned long )pages == (unsigned long )((struct page **)0)) { return; } else { } i = 0UL; goto ldv_51192; ldv_51191: ; if ((unsigned long )*(pages + i) == (unsigned long )((struct page *)0)) { printk("\tdrbd: bm_free_pages tried to free a NULL pointer; i=%lu n=%lu\n", i, number); goto ldv_51190; } else { } __free_pages(*(pages + i), 0U); *(pages + i) = 0; ldv_51190: i = i + 1UL; ldv_51192: ; if (i < number) { goto ldv_51191; } else { } return; } } static void bm_vk_free(void *ptr , int v ) { { if (v != 0) { vfree((void const *)ptr); } else { kfree((void const *)ptr); } return; } } static struct page **bm_realloc_pages(struct drbd_bitmap *b , unsigned long want ) { struct page **old_pages ; struct page **new_pages ; struct page *page ; unsigned int i ; unsigned int bytes ; unsigned int vmalloced ; unsigned long have ; long tmp ; long tmp___0 ; long tmp___1 ; long tmp___2 ; void *tmp___3 ; pgprot_t __constr_expr_0 ; void *tmp___4 ; { old_pages = b->bm_pages; vmalloced = 0U; have = b->bm_number_of_pages; tmp = ldv__builtin_expect(have == 0UL, 0L); if (tmp != 0L) { tmp___0 = ldv__builtin_expect((unsigned long )old_pages != (unsigned long )((struct page **)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"), "i" (471), "i" (12UL)); ldv_51209: ; goto ldv_51209; } else { } } else { } tmp___1 = ldv__builtin_expect(have != 0UL, 0L); if (tmp___1 != 0L) { tmp___2 = ldv__builtin_expect((unsigned long )old_pages == (unsigned long )((struct page **)0), 0L); if (tmp___2 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"), "i" (472), "i" (12UL)); ldv_51210: ; goto ldv_51210; } else { } } else { } if (have == want) { return (old_pages); } else { } bytes = (unsigned int )want * 8U; tmp___3 = kzalloc((size_t )bytes, 16U); new_pages = (struct page **)tmp___3; if ((unsigned long )new_pages == (unsigned long )((struct page **)0)) { __constr_expr_0.pgprot = 0x8000000000000163UL; tmp___4 = __vmalloc((unsigned long )bytes, 32786U, __constr_expr_0); new_pages = (struct page **)tmp___4; if ((unsigned long )new_pages == (unsigned long )((struct page **)0)) { return (0); } else { } vmalloced = 1U; } else { } if (want >= have) { i = 0U; goto ldv_51213; ldv_51212: *(new_pages + (unsigned long )i) = *(old_pages + (unsigned long )i); i = i + 1U; ldv_51213: ; if ((unsigned long )i < have) { goto ldv_51212; } else { } goto ldv_51216; ldv_51215: page = alloc_pages(18U, 0U); if ((unsigned long )page == 
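/* alloc_pages() failed while growing: free the pages added so far and
   release the new pointer array (kzalloc'd, or __vmalloc'd when
   vmalloced != 0U). */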
(unsigned long )((struct page *)0)) { bm_free_pages(new_pages + have, (unsigned long )i - have); bm_vk_free((void *)new_pages, (int )vmalloced); return (0); } else { } bm_store_page_idx(page, (unsigned long )i); *(new_pages + (unsigned long )i) = page; i = i + 1U; ldv_51216: ; if ((unsigned long )i < want) { goto ldv_51215; } else { } } else { i = 0U; goto ldv_51219; ldv_51218: *(new_pages + (unsigned long )i) = *(old_pages + (unsigned long )i); i = i + 1U; ldv_51219: ; if ((unsigned long )i < want) { goto ldv_51218; } else { } } if (vmalloced != 0U) { b->bm_flags = (enum bm_flag )((unsigned int )b->bm_flags | 65536U); } else { b->bm_flags = (enum bm_flag )((unsigned int )b->bm_flags & 4294901759U); } return (new_pages); } } int drbd_bm_init(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; int __ret_warn_on ; long tmp ; void *tmp___0 ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; { b = mdev->bitmap; __ret_warn_on = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 531); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); tmp___0 = kzalloc(400UL, 208U); b = (struct drbd_bitmap *)tmp___0; if ((unsigned long )b == (unsigned long )((struct drbd_bitmap *)0)) { return (-12); } else { } spinlock_check(& b->bm_lock); __raw_spin_lock_init(& b->bm_lock.ldv_5957.rlock, "&(&b->bm_lock)->rlock", & __key); __mutex_init(& b->bm_change, "&b->bm_change", & __key___0); __init_waitqueue_head(& b->bm_io_wait, "&b->bm_io_wait", & __key___1); mdev->bitmap = b; return (0); } } sector_t drbd_bm_capacity(struct drbd_conf *mdev ) { bool _bool ; int tmp ; { _bool = (unsigned long )mdev->bitmap != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"mdev->bitmap", "drbd_bm_capacity"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0UL); } else { } return ((mdev->bitmap)->bm_dev_capacity); } } void drbd_bm_cleanup(struct drbd_conf *mdev ) { bool _bool ; int tmp ; { _bool = (unsigned long )mdev->bitmap != (unsigned long )((struct drbd_bitmap *)0); if (! 
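/* drbd_bm_cleanup() below frees the page array and the bitmap itself.
   bm_clear_surplus()/bm_set_surplus() then clear or set the bits past
   bm_bits in the last word of the last bitmap page; their curious
   "mask = mask;" is presumably what CIL left of a cpu_to_lel() that is
   the identity on little-endian. */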
_bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"mdev->bitmap", "drbd_bm_cleanup"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return; } else { } bm_free_pages((mdev->bitmap)->bm_pages, (mdev->bitmap)->bm_number_of_pages); bm_vk_free((void *)(mdev->bitmap)->bm_pages, (int )(mdev->bitmap)->bm_flags & 65536); kfree((void const *)mdev->bitmap); mdev->bitmap = 0; return; } } static int bm_clear_surplus(struct drbd_bitmap *b ) { unsigned long mask ; unsigned long *p_addr ; unsigned long *bm ; int tmp ; int cleared ; unsigned long tmp___0 ; { cleared = 0; tmp = (int )b->bm_bits & 32767; mask = (1UL << (tmp & 63)) - 1UL; mask = mask; p_addr = bm_map_pidx(b, (unsigned int )b->bm_number_of_pages - 1U); bm = p_addr + (unsigned long )(tmp / 64); if (mask != 0UL) { tmp___0 = hweight_long(*bm & ~ mask); cleared = (int )tmp___0; *bm = *bm & mask; bm = bm + 1; } else { } bm_unmap(p_addr); return (cleared); } } static void bm_set_surplus(struct drbd_bitmap *b ) { unsigned long mask ; unsigned long *p_addr ; unsigned long *bm ; int tmp ; { tmp = (int )b->bm_bits & 32767; mask = (1UL << (tmp & 63)) - 1UL; mask = mask; p_addr = bm_map_pidx(b, (unsigned int )b->bm_number_of_pages - 1U); bm = p_addr + (unsigned long )(tmp / 64); if (mask != 0UL) { *bm = *bm | ~ mask; bm = bm + 1; } else { } bm_unmap(p_addr); return; } } static unsigned long bm_count_bits(struct drbd_bitmap *b ) { unsigned long *p_addr ; unsigned long bits ; unsigned long mask ; int idx ; int i ; int last_word ; unsigned long tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; { bits = 0UL; mask = (1UL << ((int )b->bm_bits & 63)) - 1UL; idx = 0; goto ldv_51271; ldv_51270: p_addr = __bm_map_pidx(b, (unsigned int )idx); i = 0; goto ldv_51267; ldv_51266: tmp = hweight_long(*(p_addr + (unsigned long )i)); bits = tmp + bits; i = i + 1; ldv_51267: ; if ((unsigned int )i <= 511U) { goto ldv_51266; } else { } __bm_unmap(p_addr); __might_sleep("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 656, 0); _cond_resched(); idx = idx + 1; ldv_51271: ; if ((size_t )idx < b->bm_number_of_pages - 1UL) { goto ldv_51270; } else { } last_word = (int )(((b->bm_bits - 1UL) & 32767UL) >> 6); p_addr = __bm_map_pidx(b, (unsigned int )idx); i = 0; goto ldv_51274; ldv_51273: tmp___0 = hweight_long(*(p_addr + (unsigned long )i)); bits = tmp___0 + bits; i = i + 1; ldv_51274: ; if (i < last_word) { goto ldv_51273; } else { } *(p_addr + (unsigned long )last_word) = (unsigned long )((unsigned long long )*(p_addr + (unsigned long )last_word) & (unsigned long long )mask); tmp___1 = hweight_long(*(p_addr + (unsigned long )last_word)); bits = tmp___1 + bits; __bm_unmap(p_addr); return (bits); } } static void bm_memset(struct drbd_bitmap *b , size_t offset , int c , size_t len ) { unsigned long *p_addr ; unsigned long *bm ; unsigned int idx ; size_t do_now ; size_t end ; size_t __min1 ; size_t __min2 ; { end = offset + len; if (b->bm_words < end) { printk("\tdrbd: bm_memset end > bm_words\n"); return; } else { } goto ldv_51291; ldv_51290: __min1 = (offset + 512UL) & 0xfffffffffffffe00UL; __min2 = end; do_now = (__min1 < __min2 ? 
__min1 : __min2) - offset; idx = bm_word_to_page_idx(b, offset); p_addr = bm_map_pidx(b, idx); bm = p_addr + (offset & 511UL); if ((unsigned long )(bm + do_now) > (unsigned long )(p_addr + 512UL)) { printk("\tdrbd: BUG BUG BUG! p_addr:%p bm:%p do_now:%d\n", p_addr, bm, (int )do_now); } else { memset((void *)bm, c, do_now * 8UL); } bm_unmap(p_addr); bm_set_page_need_writeout(*(b->bm_pages + (unsigned long )idx)); offset = offset + do_now; ldv_51291: ; if (offset < end) { goto ldv_51290; } else { } return; } } int drbd_bm_resize(struct drbd_conf *mdev , sector_t capacity , int set_new_bits ) { struct drbd_bitmap *b ; unsigned long bits ; unsigned long words ; unsigned long owords ; unsigned long obits ; unsigned long want ; unsigned long have ; unsigned long onpages ; struct page **npages ; struct page **opages ; int err ; int growing ; int opages_vmalloced ; bool _bool ; int tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; size_t tmp___2 ; sector_t tmp___3 ; u64 bits_on_disk ; int tmp___4 ; int tmp___5 ; { b = mdev->bitmap; opages = 0; err = 0; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_resize"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (-12); } else { } drbd_bm_lock(mdev, (char *)"resize", BM_LOCKED_MASK); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_bm_resize called with capacity == %llu\n", (unsigned long long )capacity); if (b->bm_dev_capacity == capacity) { goto out; } else { } opages_vmalloced = (int )b->bm_flags & 65536; if (capacity == 0UL) { spin_lock_irq(& b->bm_lock); opages = b->bm_pages; onpages = b->bm_number_of_pages; owords = b->bm_words; b->bm_pages = 0; tmp___3 = 0UL; b->bm_dev_capacity = tmp___3; tmp___2 = tmp___3; b->bm_words = tmp___2; tmp___1 = tmp___2; b->bm_bits = tmp___1; tmp___0 = tmp___1; b->bm_set = tmp___0; b->bm_number_of_pages = tmp___0; spin_unlock_irq(& b->bm_lock); bm_free_pages(opages, onpages); bm_vk_free((void *)opages, opages_vmalloced); goto out; } else { } bits = (capacity + 7UL) >> 3; words = (bits + 63UL) >> 6; tmp___4 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___4 != 0) { bits_on_disk = ((unsigned long long )(mdev->ldev)->md.md_size_sect - 72ULL) << 12; put_ldev(mdev); if ((unsigned long long )bits > bits_on_disk) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "bits = %lu\n", bits); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "bits_on_disk = %llu\n", bits_on_disk); err = -28; goto out; } else { } } else { } want = (words * 8UL + 4095UL) >> 12; have = b->bm_number_of_pages; if (want == have) { if ((unsigned long )b->bm_pages == (unsigned long )((struct page **)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( b->bm_pages != NULL ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 771); } else { } npages = b->bm_pages; } else { tmp___5 = drbd_insert_fault(mdev, 7U); if (tmp___5 != 0) { npages = 0; } else { npages = bm_realloc_pages(b, want); } } if ((unsigned long )npages == (unsigned long )((struct page **)0)) { err = -12; goto out; } else { } spin_lock_irq(& b->bm_lock); opages = b->bm_pages; owords = b->bm_words; obits = b->bm_bits; growing = bits > obits; if (((unsigned long )opages 
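/* Growing with set_new_bits: pre-set the surplus bits of the old last
   word so the newly exposed region starts fully out of sync. */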
!= (unsigned long )((struct page **)0) && growing != 0) && set_new_bits != 0) { bm_set_surplus(b); } else { } b->bm_pages = npages; b->bm_number_of_pages = want; b->bm_bits = bits; b->bm_words = words; b->bm_dev_capacity = capacity; if (growing != 0) { if (set_new_bits != 0) { bm_memset(b, owords, 255, words - owords); b->bm_set = b->bm_set + (bits - obits); } else { bm_memset(b, owords, 0, words - owords); } } else { } if (want < have) { bm_free_pages(opages + want, have - want); } else { } bm_clear_surplus(b); spin_unlock_irq(& b->bm_lock); if ((unsigned long )opages != (unsigned long )npages) { bm_vk_free((void *)opages, opages_vmalloced); } else { } if (growing == 0) { b->bm_set = bm_count_bits(b); } else { } _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want); out: drbd_bm_unlock(mdev); return (err); } } unsigned long _drbd_bm_total_weight(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; unsigned long s ; unsigned long flags ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; raw_spinlock_t *tmp___1 ; { b = mdev->bitmap; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "_drbd_bm_total_weight"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0UL); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "_drbd_bm_total_weight"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0UL); } else { } tmp___1 = spinlock_check(& b->bm_lock); flags = _raw_spin_lock_irqsave(tmp___1); s = b->bm_set; spin_unlock_irqrestore(& b->bm_lock, flags); return (s); } } unsigned long drbd_bm_total_weight(struct drbd_conf *mdev ) { unsigned long s ; int tmp ; { tmp = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp == 0) { return (0UL); } else { } s = _drbd_bm_total_weight(mdev); put_ldev(mdev); return (s); } } size_t drbd_bm_words(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; { b = mdev->bitmap; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_words"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0UL); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_words"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0UL); } else { } return (b->bm_words); } } unsigned long drbd_bm_bits(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; bool _bool ; int tmp ; { b = mdev->bitmap; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! 
_bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_bits"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0UL); } else { } return (b->bm_bits); } } void drbd_bm_merge_lel(struct drbd_conf *mdev , size_t offset , size_t number , unsigned long *buffer ) { struct drbd_bitmap *b ; unsigned long *p_addr ; unsigned long *bm ; unsigned long word ; unsigned long bits ; unsigned int idx ; size_t end ; size_t do_now ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; int __ret_warn_on ; long tmp___1 ; int __ret_warn_on___0 ; long tmp___2 ; size_t __min1 ; size_t __min2 ; unsigned long *tmp___3 ; unsigned long *tmp___4 ; unsigned long tmp___5 ; size_t tmp___6 ; int tmp___7 ; { b = mdev->bitmap; end = offset + number; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_merge_lel"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return; } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_merge_lel"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } if (number == 0UL) { return; } else { } __ret_warn_on = b->bm_words <= offset; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_null("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 907); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); __ret_warn_on___0 = b->bm_words < end; tmp___2 = ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); if (tmp___2 != 0L) { warn_slowpath_null("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 908); } else { } ldv__builtin_expect(__ret_warn_on___0 != 0, 0L); spin_lock_irq(& b->bm_lock); goto ldv_51380; ldv_51379: __min1 = (offset + 512UL) & 0xfffffffffffffe00UL; __min2 = end; do_now = (__min1 < __min2 ? 
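/* Merge in chunks of at most one mapped page, i.e. up to the next
   512-word (4 KiB) boundary per iteration. */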
__min1 : __min2) - offset; idx = bm_word_to_page_idx(b, offset); p_addr = bm_map_pidx(b, idx); bm = p_addr + (offset & 511UL); offset = offset + do_now; goto ldv_51377; ldv_51376: bits = hweight_long(*bm); tmp___3 = buffer; buffer = buffer + 1; word = *bm | *tmp___3; tmp___4 = bm; bm = bm + 1; *tmp___4 = word; tmp___5 = hweight_long(word); b->bm_set = b->bm_set + (tmp___5 - bits); ldv_51377: tmp___6 = do_now; do_now = do_now - (size_t )1; if (tmp___6 != 0UL) { goto ldv_51376; } else { } bm_unmap(p_addr); bm_set_page_need_writeout(*(b->bm_pages + (unsigned long )idx)); ldv_51380: ; if (offset < end) { goto ldv_51379; } else { } if (b->bm_words == end) { tmp___7 = bm_clear_surplus(b); b->bm_set = b->bm_set - (unsigned long )tmp___7; } else { } spin_unlock_irq(& b->bm_lock); return; } } void drbd_bm_get_lel(struct drbd_conf *mdev , size_t offset , size_t number , unsigned long *buffer ) { struct drbd_bitmap *b ; unsigned long *p_addr ; unsigned long *bm ; size_t end ; size_t do_now ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; size_t __min1 ; size_t __min2 ; unsigned int tmp___1 ; unsigned long *tmp___2 ; unsigned long *tmp___3 ; size_t tmp___4 ; { b = mdev->bitmap; end = offset + number; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_get_lel"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return; } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_get_lel"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } spin_lock_irq(& b->bm_lock); if ((b->bm_words <= offset || b->bm_words < end) || number == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "offset=%lu number=%lu bm_words=%lu\n", offset, number, b->bm_words); } else { goto ldv_51405; ldv_51404: __min1 = (offset + 512UL) & 0xfffffffffffffe00UL; __min2 = end; do_now = (__min1 < __min2 ? __min1 : __min2) - offset; tmp___1 = bm_word_to_page_idx(b, offset); p_addr = bm_map_pidx(b, tmp___1); bm = p_addr + (offset & 511UL); offset = offset + do_now; goto ldv_51402; ldv_51401: tmp___2 = buffer; buffer = buffer + 1; tmp___3 = bm; bm = bm + 1; *tmp___2 = *tmp___3; ldv_51402: tmp___4 = do_now; do_now = do_now - (size_t )1; if (tmp___4 != 0UL) { goto ldv_51401; } else { } bm_unmap(p_addr); ldv_51405: ; if (offset < end) { goto ldv_51404; } else { } } spin_unlock_irq(& b->bm_lock); return; } } void drbd_bm_set_all(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; { b = mdev->bitmap; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_set_all"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return; } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! 
_bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_set_all"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } spin_lock_irq(& b->bm_lock); bm_memset(b, 0UL, 255, b->bm_words); bm_clear_surplus(b); b->bm_set = b->bm_bits; spin_unlock_irq(& b->bm_lock); return; } } void drbd_bm_clear_all(struct drbd_conf *mdev ) { struct drbd_bitmap *b ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; { b = mdev->bitmap; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_clear_all"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return; } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_clear_all"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return; } else { } spin_lock_irq(& b->bm_lock); bm_memset(b, 0UL, 0, b->bm_words); b->bm_set = 0UL; spin_unlock_irq(& b->bm_lock); return; } } static void bm_aio_ctx_destroy(struct kref *kref ) { struct bm_aio_ctx *ctx ; struct kref const *__mptr ; { __mptr = (struct kref const *)kref; ctx = (struct bm_aio_ctx *)__mptr + 0xffffffffffffffe8UL; put_ldev(ctx->mdev); kfree((void const *)ctx); return; } } static void bm_async_io_complete(struct bio *bio , int error ) { struct bm_aio_ctx *ctx ; struct drbd_conf *mdev ; struct drbd_bitmap *b ; unsigned int idx ; unsigned long tmp ; int uptodate ; int tmp___0 ; int tmp___1 ; struct _ddebug descriptor ; long tmp___2 ; int tmp___3 ; { ctx = (struct bm_aio_ctx *)bio->bi_private; mdev = ctx->mdev; b = mdev->bitmap; tmp = bm_page_to_idx((bio->bi_io_vec)->bv_page); idx = (unsigned int )tmp; uptodate = (int )bio->bi_flags & 1; if (error == 0 && uptodate == 0) { error = -5; } else { } if ((ctx->flags & 1U) == 0U) { tmp___0 = bm_test_page_unchanged(*(b->bm_pages + (unsigned long )idx)); if (tmp___0 == 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "bitmap page idx %u changed during IO!\n", idx); } else { } } else { } if (error != 0) { ctx->error = error; bm_set_page_io_err(*(b->bm_pages + (unsigned long )idx)); tmp___1 = ___ratelimit(& drbd_ratelimit_state, "bm_async_io_complete"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "IO ERROR %d on bitmap page idx %u\n", error, idx); } else { } } else { bm_clear_page_io_err(*(b->bm_pages + (unsigned long )idx)); descriptor.modname = "drbd"; descriptor.function = "bm_async_io_complete"; descriptor.filename = "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"; descriptor.format = "bitmap page idx %u completed\n"; descriptor.lineno = 1059U; descriptor.flags = 0U; tmp___2 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___2 != 0L) { __dynamic_dev_dbg(& descriptor, (struct device const *)(& (mdev->vdisk)->part0.__dev), "bitmap page idx %u completed\n", idx); } else { } } bm_page_unlock_io(mdev, (int )idx); if ((int )ctx->flags & 1) { mempool_free((void *)(bio->bi_io_vec)->bv_page, drbd_md_io_page_pool); } else { } bio_put(bio); tmp___3 = 
atomic_dec_and_test(& ctx->in_flight); if (tmp___3 != 0) { ctx->done = 1U; __wake_up(& mdev->misc_wait, 3U, 1, 0); kref_put(& ctx->kref, & bm_aio_ctx_destroy); } else { } return; } } static void bm_page_io_async(struct bm_aio_ctx *ctx , int page_nr , int rw ) { struct bio *bio ; struct bio *tmp ; struct drbd_conf *mdev ; struct drbd_bitmap *b ; struct page *page ; unsigned int len ; sector_t on_disk_sector ; unsigned int __min1 ; unsigned int __min2 ; sector_t tmp___0 ; void *src ; void *dest ; void *tmp___1 ; size_t __len ; void *__ret ; int tmp___2 ; { tmp = bio_alloc_drbd(16U); bio = tmp; mdev = ctx->mdev; b = mdev->bitmap; on_disk_sector = (sector_t )((mdev->ldev)->md.md_offset + (u64 )(mdev->ldev)->md.bm_offset); on_disk_sector = ((unsigned long )page_nr << 3) + on_disk_sector; __min1 = 4096U; tmp___0 = drbd_md_last_sector(mdev->ldev); __min2 = (((unsigned int )tmp___0 - (unsigned int )on_disk_sector) + 1U) << 9U; len = __min1 < __min2 ? __min1 : __min2; bm_page_lock_io(mdev, page_nr); bm_set_page_unchanged(*(b->bm_pages + (unsigned long )page_nr)); if ((int )ctx->flags & 1) { tmp___1 = mempool_alloc(drbd_md_io_page_pool, 18U); page = (struct page *)tmp___1; dest = kmap_atomic(page); src = kmap_atomic(*(b->bm_pages + (unsigned long )page_nr)); __len = 4096UL; if (__len > 63UL) { __ret = memcpy(dest, (void const *)src, __len); } else { __ret = memcpy(dest, (void const *)src, __len); } __kunmap_atomic(src); __kunmap_atomic(dest); bm_store_page_idx(page, (unsigned long )page_nr); } else { page = *(b->bm_pages + (unsigned long )page_nr); } bio->bi_bdev = (mdev->ldev)->md_bdev; bio->bi_sector = on_disk_sector; bio_add_page(bio, page, len, 0U); bio->bi_private = (void *)ctx; bio->bi_end_io = & bm_async_io_complete; tmp___2 = drbd_insert_fault(mdev, rw & 1 ? 
0U : 1U); if (tmp___2 != 0) { bio->bi_rw = bio->bi_rw | (unsigned long )rw; bio_endio(bio, -5); } else { submit_bio(rw, bio); atomic_add((int )(len >> 9), & mdev->rs_sect_ev); } return; } } static int bm_rw(struct drbd_conf *mdev , int rw , unsigned int flags , unsigned int lazy_writeout_upper_idx ) { struct bm_aio_ctx *ctx ; struct drbd_bitmap *b ; int num_pages ; int i ; int count ; unsigned long now ; char ppb[10U] ; int err ; void *tmp ; struct bm_aio_ctx __constr_expr_0 ; int tmp___0 ; int __ret_warn_on ; long tmp___1 ; int tmp___2 ; struct _ddebug descriptor ; long tmp___3 ; int tmp___4 ; struct _ddebug descriptor___0 ; long tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; char *tmp___9 ; { b = mdev->bitmap; count = 0; err = 0; tmp = kmalloc(32UL, 16U); ctx = (struct bm_aio_ctx *)tmp; if ((unsigned long )ctx == (unsigned long )((struct bm_aio_ctx *)0)) { return (-12); } else { } __constr_expr_0.mdev = mdev; __constr_expr_0.in_flight.counter = 1; __constr_expr_0.done = 0U; __constr_expr_0.flags = flags; __constr_expr_0.error = 0; __constr_expr_0.kref.refcount.counter = 2; *ctx = __constr_expr_0; tmp___0 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___0 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n"); kfree((void const *)ctx); return (-19); } else { } if (ctx->flags == 0U) { __ret_warn_on = ((unsigned int )b->bm_flags & 15U) == 0U; tmp___1 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___1 != 0L) { warn_slowpath_null("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 1172); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); } else { } num_pages = (int )b->bm_number_of_pages; now = jiffies; i = 0; goto ldv_51492; ldv_51491: ; if (lazy_writeout_upper_idx != 0U && (unsigned int )i == lazy_writeout_upper_idx) { goto ldv_51485; } else { } if (rw & 1) { if ((flags & 2U) != 0U) { tmp___2 = test_and_clear_bit(27, (unsigned long volatile *)(& (*(b->bm_pages + (unsigned long )i))->ldv_14746.private)); if (tmp___2 == 0) { goto ldv_51486; } else { } } else { } if ((flags & 4U) == 0U) { tmp___4 = bm_test_page_unchanged(*(b->bm_pages + (unsigned long )i)); if (tmp___4 != 0) { descriptor.modname = "drbd"; descriptor.function = "bm_rw"; descriptor.filename = "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"; descriptor.format = "skipped bm write for idx %u\n"; descriptor.lineno = 1191U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { __dynamic_dev_dbg(& descriptor, (struct device const *)(& (mdev->vdisk)->part0.__dev), "skipped bm write for idx %u\n", i); } else { } goto ldv_51486; } else { } } else { } if (lazy_writeout_upper_idx != 0U) { tmp___6 = bm_test_page_lazy_writeout(*(b->bm_pages + (unsigned long )i)); if (tmp___6 == 0) { descriptor___0.modname = "drbd"; descriptor___0.function = "bm_rw"; descriptor___0.filename = "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"; descriptor___0.format = "skipped bm lazy write for idx %u\n"; descriptor___0.lineno = 1198U; descriptor___0.flags = 0U; tmp___5 = 
ldv__builtin_expect((long )descriptor___0.flags & 1L, 0L); if (tmp___5 != 0L) { __dynamic_dev_dbg(& descriptor___0, (struct device const *)(& (mdev->vdisk)->part0.__dev), "skipped bm lazy write for idx %u\n", i); } else { } goto ldv_51486; } else { } } else { } } else { } atomic_inc(& ctx->in_flight); bm_page_io_async(ctx, i, rw); count = count + 1; __might_sleep("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 1205, 0); _cond_resched(); ldv_51486: i = i + 1; ldv_51492: ; if (i < num_pages) { goto ldv_51491; } else { } ldv_51485: tmp___7 = atomic_dec_and_test(& ctx->in_flight); if (tmp___7 == 0) { wait_until_done_or_force_detached(mdev, mdev->ldev, & ctx->done); } else { kref_put(& ctx->kref, & bm_aio_ctx_destroy); } if (flags == 0U) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "bitmap %s of %u pages took %lu jiffies\n", rw == 1 ? (char *)"WRITE" : (char *)"READ", count, (unsigned long )jiffies - now); } else { } if (ctx->error != 0) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "we had at least one MD IO ERROR during bitmap IO\n"); drbd_chk_io_error_(mdev, 1, DRBD_META_IO_ERROR, "bm_rw"); err = -5; } else { } tmp___8 = atomic_read((atomic_t const *)(& ctx->in_flight)); if (tmp___8 != 0) { err = -5; } else { } now = jiffies; if (rw == 1) { drbd_md_flush(mdev); } else { b->bm_set = bm_count_bits(b); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "recounting of set bits took additional %lu jiffies\n", (unsigned long )jiffies - now); } now = b->bm_set; if (flags == 0U) { tmp___9 = ppsize((char *)(& ppb), (unsigned long long )(now << 2)); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s (%lu bits) marked out-of-sync by on disk bit-map.\n", tmp___9, now); } else { } kref_put(& ctx->kref, & bm_aio_ctx_destroy); return (err); } } int drbd_bm_read(struct drbd_conf *mdev ) { int tmp ; { tmp = bm_rw(mdev, 0, 0U, 0U); return (tmp); } } int drbd_bm_write(struct drbd_conf *mdev ) { int tmp ; { tmp = bm_rw(mdev, 1, 0U, 0U); return (tmp); } } int drbd_bm_write_all(struct drbd_conf *mdev ) { int tmp ; { tmp = bm_rw(mdev, 1, 4U, 0U); return (tmp); } } int drbd_bm_write_lazy(struct drbd_conf *mdev , unsigned int upper_idx ) { int tmp ; { tmp = bm_rw(mdev, 1, 1U, upper_idx); return (tmp); } } int drbd_bm_write_copy_pages(struct drbd_conf *mdev ) { int tmp ; { tmp = bm_rw(mdev, 1, 1U, 0U); return (tmp); } } int drbd_bm_write_hinted(struct drbd_conf *mdev ) { int tmp ; { tmp = bm_rw(mdev, 1, 3U, 0U); return (tmp); } } int drbd_bm_write_page(struct drbd_conf *mdev , unsigned int idx ) { struct bm_aio_ctx *ctx ; int err ; struct _ddebug descriptor ; long tmp ; int tmp___0 ; void *tmp___1 ; struct bm_aio_ctx __constr_expr_0 ; int tmp___2 ; int tmp___3 ; { tmp___0 = bm_test_page_unchanged(*((mdev->bitmap)->bm_pages + (unsigned long )idx)); if (tmp___0 != 0) { descriptor.modname = "drbd"; descriptor.function = "drbd_bm_write_page"; descriptor.filename = "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared"; descriptor.format = "skipped bm page write for idx %u\n"; descriptor.lineno = 1338U; descriptor.flags = 0U; tmp = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp != 0L) { __dynamic_dev_dbg(& descriptor, (struct device const 
*)(& (mdev->vdisk)->part0.__dev), "skipped bm page write for idx %u\n", idx); } else { } return (0); } else { } tmp___1 = kmalloc(32UL, 16U); ctx = (struct bm_aio_ctx *)tmp___1; if ((unsigned long )ctx == (unsigned long )((struct bm_aio_ctx *)0)) { return (-12); } else { } __constr_expr_0.mdev = mdev; __constr_expr_0.in_flight.counter = 1; __constr_expr_0.done = 0U; __constr_expr_0.flags = 1U; __constr_expr_0.error = 0; __constr_expr_0.kref.refcount.counter = 2; *ctx = __constr_expr_0; tmp___2 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___2 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n"); kfree((void const *)ctx); return (-19); } else { } bm_page_io_async(ctx, (int )idx, 1041); wait_until_done_or_force_detached(mdev, mdev->ldev, & ctx->done); if (ctx->error != 0) { drbd_chk_io_error_(mdev, 1, DRBD_META_IO_ERROR, "drbd_bm_write_page"); } else { } mdev->bm_writ_cnt = mdev->bm_writ_cnt + 1U; tmp___3 = atomic_read((atomic_t const *)(& ctx->in_flight)); err = tmp___3 == 0 ? ctx->error : -5; kref_put(& ctx->kref, & bm_aio_ctx_destroy); return (err); } } static unsigned long __bm_find_next(struct drbd_conf *mdev , unsigned long bm_fo , int const find_zero_bit ) { struct drbd_bitmap *b ; unsigned long *p_addr ; unsigned long bit_offset ; unsigned int i ; unsigned int tmp ; unsigned long tmp___0 ; unsigned long tmp___1 ; { b = mdev->bitmap; if (b->bm_bits < bm_fo) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bm_fo=%lu bm_bits=%lu\n", bm_fo, b->bm_bits); bm_fo = 0xffffffffffffffffUL; } else { goto ldv_51533; ldv_51532: bit_offset = bm_fo & 0xffffffffffff8000UL; tmp = bm_bit_to_page_idx(b, (u64 )bm_fo); p_addr = __bm_map_pidx(b, tmp); if ((int )find_zero_bit != 0) { tmp___0 = find_next_zero_bit_le((void const *)p_addr, 32768UL, bm_fo & 32767UL); i = (unsigned int )tmp___0; } else { tmp___1 = find_next_bit_le((void const *)p_addr, 32768UL, bm_fo & 32767UL); i = (unsigned int )tmp___1; } __bm_unmap(p_addr); if (i <= 32767U) { bm_fo = (unsigned long )i + bit_offset; if (b->bm_bits <= bm_fo) { goto ldv_51530; } else { } goto found; } else { } bm_fo = bit_offset + 32768UL; ldv_51533: ; if (b->bm_bits > bm_fo) { goto ldv_51532; } else { } ldv_51530: bm_fo = 0xffffffffffffffffUL; } found: ; return (bm_fo); } } static unsigned long bm_find_next(struct drbd_conf *mdev , unsigned long bm_fo , int const find_zero_bit ) { struct drbd_bitmap *b ; unsigned long i ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; { b = mdev->bitmap; i = 0xffffffffffffffffUL; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "bm_find_next"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (i); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! 
_bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "bm_find_next"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (i); } else { } spin_lock_irq(& b->bm_lock); if (((unsigned int )b->bm_flags & 4U) != 0U) { __bm_print_lock_info(mdev, "bm_find_next"); } else { } i = __bm_find_next(mdev, bm_fo, find_zero_bit); spin_unlock_irq(& b->bm_lock); return (i); } } unsigned long drbd_bm_find_next(struct drbd_conf *mdev , unsigned long bm_fo ) { unsigned long tmp ; { tmp = bm_find_next(mdev, bm_fo, 0); return (tmp); } } unsigned long _drbd_bm_find_next(struct drbd_conf *mdev , unsigned long bm_fo ) { unsigned long tmp ; { tmp = __bm_find_next(mdev, bm_fo, 0); return (tmp); } } unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev , unsigned long bm_fo ) { unsigned long tmp ; { tmp = __bm_find_next(mdev, bm_fo, 1); return (tmp); } } static int __bm_change_bits_to(struct drbd_conf *mdev , unsigned long const s , unsigned long e , int val ) { struct drbd_bitmap *b ; unsigned long *p_addr ; unsigned long bitnr ; unsigned int last_page_nr ; int c ; int changed_total ; unsigned int page_nr ; unsigned int tmp ; int tmp___0 ; int tmp___1 ; { b = mdev->bitmap; p_addr = 0; last_page_nr = 4294967295U; c = 0; changed_total = 0; if (b->bm_bits <= e) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: bit_s=%lu bit_e=%lu bm_bits=%lu\n", s, e, b->bm_bits); e = b->bm_bits != 0UL ? b->bm_bits - 1UL : 0UL; } else { } bitnr = s; goto ldv_51572; ldv_51571: tmp = bm_bit_to_page_idx(b, (u64 )bitnr); page_nr = tmp; if (page_nr != last_page_nr) { if ((unsigned long )p_addr != (unsigned long )((unsigned long *)0)) { __bm_unmap(p_addr); } else { } if (c < 0) { bm_set_page_lazy_writeout(*(b->bm_pages + (unsigned long )last_page_nr)); } else if (c > 0) { bm_set_page_need_writeout(*(b->bm_pages + (unsigned long )last_page_nr)); } else { } changed_total = changed_total + c; c = 0; p_addr = __bm_map_pidx(b, page_nr); last_page_nr = page_nr; } else { } if (val != 0) { tmp___0 = __test_and_set_bit_le((int )bitnr & 32767, (void *)p_addr); c = (tmp___0 == 0) + c; } else { tmp___1 = __test_and_clear_bit_le((int )bitnr & 32767, (void *)p_addr); c = c - (tmp___1 != 0); } bitnr = bitnr + 1UL; ldv_51572: ; if (bitnr <= e) { goto ldv_51571; } else { } if ((unsigned long )p_addr != (unsigned long )((unsigned long *)0)) { __bm_unmap(p_addr); } else { } if (c < 0) { bm_set_page_lazy_writeout(*(b->bm_pages + (unsigned long )last_page_nr)); } else if (c > 0) { bm_set_page_need_writeout(*(b->bm_pages + (unsigned long )last_page_nr)); } else { } changed_total = changed_total + c; b->bm_set = b->bm_set + (unsigned long )changed_total; return (changed_total); } } static int bm_change_bits_to(struct drbd_conf *mdev , unsigned long const s , unsigned long const e , int val ) { unsigned long flags ; struct drbd_bitmap *b ; int c ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; raw_spinlock_t *tmp___1 ; { b = mdev->bitmap; c = 0; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "bm_change_bits_to"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (1); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! 
_bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "bm_change_bits_to"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = spinlock_check(& b->bm_lock); flags = _raw_spin_lock_irqsave(tmp___1); if (((val != 0 ? 2U : 1U) & (unsigned int )b->bm_flags) != 0U) { __bm_print_lock_info(mdev, "bm_change_bits_to"); } else { } c = __bm_change_bits_to(mdev, s, e, val); spin_unlock_irqrestore(& b->bm_lock, flags); return (c); } } int drbd_bm_set_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) { int tmp ; { tmp = bm_change_bits_to(mdev, s, e, 1); return (tmp); } } int drbd_bm_clear_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) { int tmp ; { tmp = bm_change_bits_to(mdev, s, e, 0); return (- tmp); } } __inline static void bm_set_full_words_within_one_page(struct drbd_bitmap *b , int page_nr , int first_word , int last_word ) { int i ; int bits ; int changed ; unsigned long *paddr ; void *tmp ; unsigned long tmp___0 ; { changed = 0; tmp = kmap_atomic(*(b->bm_pages + (unsigned long )page_nr)); paddr = (unsigned long *)tmp; i = first_word; goto ldv_51612; ldv_51611: tmp___0 = hweight_long(*(paddr + (unsigned long )i)); bits = (int )tmp___0; *(paddr + (unsigned long )i) = 0xffffffffffffffffUL; changed = (64 - bits) + changed; i = i + 1; ldv_51612: ; if (i < last_word) { goto ldv_51611; } else { } __kunmap_atomic((void *)paddr); if (changed != 0) { bm_set_page_lazy_writeout(*(b->bm_pages + (unsigned long )page_nr)); b->bm_set = b->bm_set + (unsigned long )changed; } else { } return; } } void _drbd_bm_set_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) { struct drbd_bitmap *b ; unsigned long sl ; unsigned long el ; int first_page ; int last_page ; int page_nr ; int first_word ; int last_word ; { b = mdev->bitmap; sl = ((unsigned long )s + 63UL) & 0xffffffffffffffc0UL; el = ((unsigned long )e + 1UL) & 0xffffffffffffffc0UL; if ((unsigned long )e - (unsigned long )s <= 192UL) { spin_lock_irq(& b->bm_lock); __bm_change_bits_to(mdev, s, e, 1); spin_unlock_irq(& b->bm_lock); return; } else { } spin_lock_irq(& b->bm_lock); if (sl != 0UL) { __bm_change_bits_to(mdev, s, sl - 1UL, 1); } else { } first_page = (int )(sl >> 15); last_page = (int )(el >> 15); first_word = (int )(sl >> 6) & 511; last_word = 512; page_nr = first_page; goto ldv_51629; ldv_51628: bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word); spin_unlock_irq(& b->bm_lock); __might_sleep("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_bitmap.c.prepared", 1636, 0); _cond_resched(); first_word = 0; spin_lock_irq(& b->bm_lock); page_nr = page_nr + 1; ldv_51629: ; if (page_nr < last_page) { goto ldv_51628; } else { } last_word = (int )(el >> 6) & 511; if (last_word != 0) { bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word); } else { } if (el <= (unsigned long )e) { __bm_change_bits_to(mdev, el, e, 1); } else { } spin_unlock_irq(& b->bm_lock); return; } } int drbd_bm_test_bit(struct drbd_conf *mdev , unsigned long const bitnr ) { unsigned long flags ; struct drbd_bitmap *b ; unsigned long *p_addr ; int i ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned int tmp___2 ; int tmp___3 ; { b = mdev->bitmap; 
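/*
 * drbd_bm_test_bit: query a single bit while holding bm_lock with
 * interrupts disabled. Returns 1 if the bit is set, 0 if it is clear,
 * and -1 for the first bit past the end (bitnr == bm_bits); any bitnr
 * beyond that is reported via dev_err and treated as clear (returns 0).
 */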
_bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_test_bit"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_test_bit"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = spinlock_check(& b->bm_lock); flags = _raw_spin_lock_irqsave(tmp___1); if (((unsigned int )b->bm_flags & 4U) != 0U) { __bm_print_lock_info(mdev, "drbd_bm_test_bit"); } else { } if (b->bm_bits > (unsigned long )bitnr) { tmp___2 = bm_bit_to_page_idx(b, (u64 )bitnr); p_addr = bm_map_pidx(b, tmp___2); tmp___3 = test_bit_le((int )bitnr & 32767, (void const *)p_addr); i = tmp___3 != 0; bm_unmap(p_addr); } else if (b->bm_bits == (unsigned long )bitnr) { i = -1; } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bitnr=%lu > bm_bits=%lu\n", bitnr, b->bm_bits); i = 0; } spin_unlock_irqrestore(& b->bm_lock, flags); return (i); } } int drbd_bm_count_bits(struct drbd_conf *mdev , unsigned long const s , unsigned long const e ) { unsigned long flags ; struct drbd_bitmap *b ; unsigned long *p_addr ; unsigned long bitnr ; unsigned int page_nr ; int c ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned int idx ; unsigned int tmp___2 ; int tmp___3 ; bool _bool___1 ; { b = mdev->bitmap; p_addr = 0; page_nr = 4294967295U; c = 0; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_count_bits"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (1); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_count_bits"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (1); } else { } tmp___1 = spinlock_check(& b->bm_lock); flags = _raw_spin_lock_irqsave(tmp___1); if (((unsigned int )b->bm_flags & 4U) != 0U) { __bm_print_lock_info(mdev, "drbd_bm_count_bits"); } else { } bitnr = s; goto ldv_51670; ldv_51669: tmp___2 = bm_bit_to_page_idx(b, (u64 )bitnr); idx = tmp___2; if (page_nr != idx) { page_nr = idx; if ((unsigned long )p_addr != (unsigned long )((unsigned long *)0)) { bm_unmap(p_addr); } else { } p_addr = bm_map_pidx(b, idx); } else { } _bool___1 = b->bm_bits > bitnr; if (! 
_bool___1) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"bitnr < b->bm_bits", "drbd_bm_count_bits"); } else { } if ((int )_bool___1) { tmp___3 = test_bit_le((int )((unsigned int )bitnr - (page_nr << 15)), (void const *)p_addr); c = (tmp___3 != 0) + c; } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bitnr=%lu bm_bits=%lu\n", bitnr, b->bm_bits); } bitnr = bitnr + 1UL; ldv_51670: ; if (bitnr <= (unsigned long )e) { goto ldv_51669; } else { } if ((unsigned long )p_addr != (unsigned long )((unsigned long *)0)) { bm_unmap(p_addr); } else { } spin_unlock_irqrestore(& b->bm_lock, flags); return (c); } } int drbd_bm_e_weight(struct drbd_conf *mdev , unsigned long enr ) { struct drbd_bitmap *b ; int count ; int s ; int e ; unsigned long flags ; unsigned long *p_addr ; unsigned long *bm ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; raw_spinlock_t *tmp___1 ; unsigned long _min1 ; size_t _min2 ; int n ; unsigned int tmp___2 ; unsigned long *tmp___3 ; unsigned long tmp___4 ; int tmp___5 ; { b = mdev->bitmap; _bool = (unsigned long )b != (unsigned long )((struct drbd_bitmap *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b", "drbd_bm_e_weight"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0); } else { } _bool___0 = (unsigned long )b->bm_pages != (unsigned long )((struct page **)0); if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"b->bm_pages", "drbd_bm_e_weight"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (0); } else { } tmp___1 = spinlock_check(& b->bm_lock); flags = _raw_spin_lock_irqsave(tmp___1); if (((unsigned int )b->bm_flags & 4U) != 0U) { __bm_print_lock_info(mdev, "drbd_bm_e_weight"); } else { } s = (int )(enr << 6); _min1 = (enr + 1UL) << 6; _min2 = b->bm_words; e = (int )(_min1 < _min2 ? 
_min1 : _min2); count = 0; if ((size_t )s < b->bm_words) { n = e - s; tmp___2 = bm_word_to_page_idx(b, (unsigned long )s); p_addr = bm_map_pidx(b, tmp___2); bm = p_addr + ((unsigned long )s & 511UL); goto ldv_51696; ldv_51695: tmp___3 = bm; bm = bm + 1; tmp___4 = hweight_long(*tmp___3); count = (int )((unsigned int )tmp___4 + (unsigned int )count); ldv_51696: tmp___5 = n; n = n - 1; if (tmp___5 != 0) { goto ldv_51695; } else { } bm_unmap(p_addr); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "start offset (%d) too large in drbd_bm_e_weight\n", s); } spin_unlock_irqrestore(& b->bm_lock, flags); return (count); } } void ldv_mutex_lock_1(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_2(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_3(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_4(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_5(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_6(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_7(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_8(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_9(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_10(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_11(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_12(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___10 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_bm_change_of_drbd_bitmap(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_lock_13(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_bm_change_of_drbd_bitmap(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_14(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_bm_change_of_drbd_bitmap(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_36(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_32(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_34(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_37(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_39(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_29(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_31(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_33(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_lock_35(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_38(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___0(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6318; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6318; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6318; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6318; default: __bad_percpu_size(); } ldv_6318: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void __rcu_read_lock___0(void) { struct thread_info *tmp ; { tmp = current_thread_info___0(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___0(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___0(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___0(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___0(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___0(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___0(); return; } } extern struct module __this_module ; extern bool try_module_get(struct module * ) ; extern void module_put(struct module * ) ; __inline static struct proc_inode *PROC_I(struct inode const *inode ) { struct inode const *__mptr ; { __mptr = inode; return ((struct proc_inode *)__mptr + 0xffffffffffffffc0UL); } } __inline static struct proc_dir_entry *PDE(struct inode const *inode ) { struct proc_inode *tmp ; { tmp = PROC_I(inode); return (tmp->pde); } } extern ssize_t seq_read(struct file * , char * , size_t , loff_t * ) ; extern loff_t seq_lseek(struct file * , loff_t , int ) ; extern int seq_printf(struct seq_file * , char const * , ...) 
; extern int single_open(struct file * , int (*)(struct seq_file * , void * ) , void * ) ; extern int single_release(struct inode * , struct file * ) ; extern void *idr_get_next(struct idr * , int * ) ; char const *drbd_buildtag(void) ; char const *drbd_conn_str(enum drbd_conns s ) ; char const *drbd_role_str(enum drbd_role s ) ; char const *drbd_disk_str(enum drbd_disk_state s ) ; __inline static int bdi_congested(struct backing_dev_info *bdi , int bdi_bits ) { int tmp ; { if ((unsigned long )bdi->congested_fn != (unsigned long )((congested_fn *)0)) { tmp = (*(bdi->congested_fn))(bdi->congested_data, bdi_bits); return (tmp); } else { } return ((int )((unsigned int )bdi->state & (unsigned int )bdi_bits)); } } __inline static int bdi_rw_congested(struct backing_dev_info *bdi ) { int tmp ; { tmp = bdi_congested(bdi, 12); return (tmp); } } extern size_t lc_seq_printf_stats(struct seq_file * , struct lru_cache * ) ; extern void lc_seq_dump_details(struct seq_file * , struct lru_cache * , char * , void (*)(struct seq_file * , struct lc_element * ) ) ; struct idr minors ; int proc_details ; struct proc_dir_entry *drbd_proc ; struct file_operations const drbd_proc_fops ; __inline static void drbd_get_syncer_progress(struct drbd_conf *mdev , unsigned long *bits_left , unsigned int *per_mil_done ) { unsigned long tmp ; char const *tmp___0 ; unsigned int shift ; unsigned long left ; unsigned long total ; unsigned long tmp___1 ; { if ((unsigned int )*((unsigned short *)mdev + 374UL) == 288U || (unsigned int )*((unsigned short *)mdev + 374UL) == 304U) { *bits_left = mdev->ov_left; } else { tmp = drbd_bm_total_weight(mdev); *bits_left = tmp - mdev->rs_failed; } if (*bits_left > mdev->rs_total) { __asm__ volatile ("": : : "memory"); tmp___0 = drbd_conn_str((enum drbd_conns )mdev->state.ldv_49522.conn); dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "cs:%s rs_left=%lu > rs_total=%lu (rs_failed %lu)\n", tmp___0, *bits_left, mdev->rs_total, mdev->rs_failed); *per_mil_done = 0U; } else { shift = mdev->rs_total > 4294967295UL ? 
16U : 10U; left = *bits_left >> (int )shift; total = (mdev->rs_total >> (int )shift) + 1UL; tmp___1 = 1000UL - (left * 1000UL) / total; *per_mil_done = (unsigned int )tmp___1; } return; } } __inline static int drbd_suspended(struct drbd_conf *mdev ) { struct drbd_tconn *tconn ; { tconn = mdev->tconn; return (((unsigned int )*((unsigned char *)tconn + 132UL) != 0U || (unsigned int )*((unsigned char *)tconn + 132UL) != 0U) || (unsigned int )*((unsigned char *)tconn + 132UL) != 0U); } } __inline static bool verify_can_do_stop_sector(struct drbd_conf *mdev ) { { return ((bool )((mdev->tconn)->agreed_pro_version > 96 && (mdev->tconn)->agreed_pro_version != 100)); } } static int drbd_proc_open(struct inode *inode , struct file *file ) ; static int drbd_proc_release(struct inode *inode , struct file *file ) ; struct file_operations const drbd_proc_fops = {& __this_module, & seq_lseek, & seq_read, 0, 0, 0, 0, 0, 0, 0, 0, & drbd_proc_open, 0, & drbd_proc_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; void seq_printf_with_thousands_grouping(struct seq_file *seq , long v ) { long tmp ; long tmp___0 ; { tmp___0 = ldv__builtin_expect(v > 999999L, 0L); if (tmp___0 != 0L) { seq_printf(seq, "%ld,", v / 1000000L); v = v % 1000000L; seq_printf(seq, "%03ld,%03ld", v / 1000L, v % 1000L); } else { tmp = ldv__builtin_expect(v > 999L, 1L); if (tmp != 0L) { seq_printf(seq, "%ld,%03ld", v / 1000L, v % 1000L); } else { seq_printf(seq, "%ld", v); } } return; } } static void drbd_syncer_progress(struct drbd_conf *mdev , struct seq_file *seq ) { unsigned long db ; unsigned long dt ; unsigned long dbdt ; unsigned long rt ; unsigned long rs_left ; unsigned int res ; int i ; int x ; int y ; int stalled ; unsigned long bm_bits ; unsigned long tmp ; unsigned long bit_pos ; unsigned long long stop_sector ; bool tmp___0 ; { stalled = 0; drbd_get_syncer_progress(mdev, & rs_left, & res); x = (int )(res / 50U); y = 20 - x; seq_printf(seq, "\t["); i = 1; goto ldv_51946; ldv_51945: seq_printf(seq, "="); i = i + 1; ldv_51946: ; if (i < x) { goto ldv_51945; } else { } seq_printf(seq, ">"); i = 0; goto ldv_51949; ldv_51948: seq_printf(seq, "."); i = i + 1; ldv_51949: ; if (i < y) { goto ldv_51948; } else { } seq_printf(seq, "] "); if ((unsigned int )*((unsigned short *)mdev + 374UL) == 288U || (unsigned int )*((unsigned short *)mdev + 374UL) == 304U) { seq_printf(seq, "verified:"); } else { seq_printf(seq, "sync\'ed:"); } seq_printf(seq, "%3u.%u%% ", res / 10U, res % 10U); if (mdev->rs_total > 1048576UL) { seq_printf(seq, "(%lu/%lu)M", (rs_left >> 10) << 2, (mdev->rs_total >> 10) << 2); } else { seq_printf(seq, "(%lu/%lu)K\n\t", rs_left << 2, mdev->rs_total << 2); } i = (mdev->rs_last_mark + 2) % 8; dt = ((unsigned long )jiffies - mdev->rs_mark_time[i]) / 250UL; if (dt > 6000UL) { stalled = 1; } else { } if (dt == 0UL) { dt = dt + 1UL; } else { } db = mdev->rs_mark_left[i] - rs_left; rt = ((rs_left / (db / 100UL + 1UL)) * dt) / 100UL; seq_printf(seq, "finish: %lu:%02lu:%02lu", rt / 3600UL, (rt % 3600UL) / 60UL, rt % 60UL); dbdt = db / dt << 2; seq_printf(seq, " speed: "); seq_printf_with_thousands_grouping(seq, (long )dbdt); seq_printf(seq, " ("); if (proc_details > 0) { i = (mdev->rs_last_mark + 7) % 8; dt = ((unsigned long )jiffies - mdev->rs_mark_time[i]) / 250UL; if (dt == 0UL) { dt = dt + 1UL; } else { } db = mdev->rs_mark_left[i] - rs_left; dbdt = db / dt << 2; seq_printf_with_thousands_grouping(seq, (long )dbdt); seq_printf(seq, " -- "); } else { } dt = (((unsigned long )jiffies - mdev->rs_start) - mdev->rs_paused) / 250UL; if 
(dt == 0UL) { dt = 1UL; } else { } db = mdev->rs_total - rs_left; dbdt = db / dt << 2; seq_printf_with_thousands_grouping(seq, (long )dbdt); seq_printf(seq, ")"); if ((unsigned int )*((unsigned short *)mdev + 374UL) == 272U || (unsigned int )*((unsigned short *)mdev + 374UL) == 288U) { seq_printf(seq, " want: "); seq_printf_with_thousands_grouping(seq, (long )mdev->c_sync_rate); } else { } seq_printf(seq, " K/sec%s\n", stalled != 0 ? (char *)" (stalled)" : (char *)""); if (proc_details > 0) { tmp = drbd_bm_bits(mdev); bm_bits = tmp; stop_sector = 0ULL; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 288U || (unsigned int )*((unsigned short *)mdev + 374UL) == 304U) { bit_pos = bm_bits - mdev->ov_left; tmp___0 = verify_can_do_stop_sector(mdev); if ((int )tmp___0) { stop_sector = (unsigned long long )mdev->ov_stop_sector; } else { } } else { bit_pos = mdev->bm_resync_fo; } seq_printf(seq, "\t%3d%% sector pos: %llu/%llu", (int )(bit_pos / (bm_bits / 100UL + 1UL)), (unsigned long long )bit_pos * 8ULL, (unsigned long long )bm_bits * 8ULL); if (stop_sector != 0ULL && stop_sector != 0xffffffffffffffffULL) { seq_printf(seq, " stop sector: %llu", stop_sector); } else { } seq_printf(seq, "\n"); } else { } return; } } static void resync_dump_detail(struct seq_file *seq , struct lc_element *e ) { struct bm_extent *bme ; struct lc_element const *__mptr ; { __mptr = (struct lc_element const *)e; bme = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; seq_printf(seq, "%5d %s %s\n", bme->rs_left, (char *)"---------", (int )bme->flags & 1 ? (char *)"LOCKED" : (char *)"------"); return; } } static int drbd_seq_show(struct seq_file *seq , void *v ) { int i ; int prev_i ; char const *sn ; struct drbd_conf *mdev ; struct net_conf *nc ; char wp ; char write_ordering_chars[3U] ; char const *tmp ; void *tmp___0 ; struct net_conf *_________p1 ; bool __warned ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; char const *tmp___10 ; char const *tmp___11 ; char const *tmp___12 ; char const *tmp___13 ; unsigned long tmp___14 ; int tmp___15 ; void *tmp___16 ; { prev_i = -1; write_ordering_chars[0] = 110; write_ordering_chars[1] = 100; write_ordering_chars[2] = 102; tmp = drbd_buildtag(); seq_printf(seq, "version: 8.4.2 (api:%d/proto:%d-%d)\n%s\n", 1, 86, 101, tmp); rcu_read_lock___0(); i = 0; tmp___0 = idr_get_next(& minors, & i); mdev = (struct drbd_conf *)tmp___0; goto ldv_51976; ldv_51975: ; if (i + -1 != prev_i) { seq_printf(seq, "\n"); } else { } prev_i = i; sn = drbd_conn_str((enum drbd_conns )mdev->state.ldv_49522.conn); if (((unsigned int )*((unsigned short *)mdev + 374UL) == 0U && (unsigned int )*((unsigned char *)mdev + 749UL) == 0U) && (unsigned int )*((unsigned char *)mdev + 748UL) == 2U) { seq_printf(seq, "%2d: cs:Unconfigured\n", i); } else { bdi_rw_congested(& (mdev->rq_queue)->backing_dev_info); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_proc.c.prepared", 342, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; wp = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? 
(char )((unsigned int )((unsigned char )nc->wire_protocol) + 64U) : 32; tmp___3 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); tmp___4 = atomic_read((atomic_t const *)(& mdev->unacked_cnt)); tmp___5 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); tmp___6 = atomic_read((atomic_t const *)(& mdev->rs_pending_cnt)); tmp___7 = atomic_read((atomic_t const *)(& mdev->local_cnt)); tmp___8 = constant_test_bit(18U, (unsigned long const volatile *)(& mdev->flags)); tmp___9 = drbd_suspended(mdev); tmp___10 = drbd_disk_str((enum drbd_disk_state )mdev->state.ldv_49522.pdsk); tmp___11 = drbd_disk_str((enum drbd_disk_state )mdev->state.ldv_49522.disk); tmp___12 = drbd_role_str((enum drbd_role )mdev->state.ldv_49522.peer); tmp___13 = drbd_role_str((enum drbd_role )mdev->state.ldv_49522.role); seq_printf(seq, "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n ns:%u nr:%u dw:%u dr:%u al:%u bm:%u lo:%d pe:%d ua:%d ap:%d ep:%d wo:%c", i, sn, tmp___13, tmp___12, tmp___11, tmp___10, (int )wp, tmp___9 != 0 ? 115 : 114, (unsigned int )*((unsigned char *)mdev + 750UL) != 0U ? 97 : 45, (unsigned int )*((unsigned char *)mdev + 750UL) != 0U ? 112 : 45, (unsigned int )*((unsigned char *)mdev + 750UL) != 0U ? 117 : 45, (int )mdev->congestion_reason != 0 ? (int )mdev->congestion_reason : 45, tmp___8 != 0 ? 115 : 45, mdev->send_cnt / 2U, mdev->recv_cnt / 2U, mdev->writ_cnt / 2U, mdev->read_cnt / 2U, mdev->al_writ_cnt, mdev->bm_writ_cnt, tmp___7, tmp___5 + tmp___6, tmp___4, tmp___3, (mdev->tconn)->epochs, (int )write_ordering_chars[(unsigned int )(mdev->tconn)->write_ordering]); tmp___14 = drbd_bm_total_weight(mdev); seq_printf(seq, " oos:%llu\n", (unsigned long long )tmp___14 << 2); } if ((((unsigned int )*((unsigned short *)mdev + 374UL) == 256U || (unsigned int )*((unsigned short *)mdev + 374UL) == 272U) || (unsigned int )*((unsigned short *)mdev + 374UL) == 288U) || (unsigned int )*((unsigned short *)mdev + 374UL) == 304U) { drbd_syncer_progress(mdev, seq); } else { } if (proc_details > 0) { tmp___15 = _get_ldev_if_state(mdev, D_FAILED); if (tmp___15 != 0) { lc_seq_printf_stats(seq, mdev->resync); lc_seq_printf_stats(seq, mdev->act_log); put_ldev(mdev); } else { } } else { } if (proc_details > 1) { if ((unsigned long )mdev->resync != (unsigned long )((struct lru_cache *)0)) { lc_seq_dump_details(seq, mdev->resync, (char *)"rs_left", & resync_dump_detail); } else { } } else { } i = i + 1; tmp___16 = idr_get_next(& minors, & i); mdev = (struct drbd_conf *)tmp___16; ldv_51976: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_51975; } else { } rcu_read_unlock___0(); return (0); } } static int drbd_proc_open(struct inode *inode , struct file *file ) { struct proc_dir_entry *tmp ; int tmp___0 ; bool tmp___1 ; { tmp___1 = try_module_get(& __this_module); if ((int )tmp___1) { tmp = PDE((struct inode const *)inode); tmp___0 = single_open(file, & drbd_seq_show, tmp->data); return (tmp___0); } else { } return (-19); } } static int drbd_proc_release(struct inode *inode , struct file *file ) { int tmp ; { module_put(& __this_module); tmp = single_release(inode, file); return (tmp); } } void ldv_check_final_state(void) ; extern void ldv_check_return_value(int ) ; void ldv_initialize(void) ; extern void ldv_handler_precall(void) ; extern int nondet_int(void) ; int LDV_IN_INTERRUPT ; int main(void) { struct inode *var_group1 ; struct file *var_group2 ; int res_drbd_proc_open_4 ; int ldv_s_drbd_proc_fops_file_operations ; int tmp ; int tmp___0 ; { ldv_s_drbd_proc_fops_file_operations = 
0; LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_52012; ldv_52011: tmp = nondet_int(); switch (tmp) { case 0: ; if (ldv_s_drbd_proc_fops_file_operations == 0) { ldv_handler_precall(); res_drbd_proc_open_4 = drbd_proc_open(var_group1, var_group2); ldv_check_return_value(res_drbd_proc_open_4); if (res_drbd_proc_open_4 != 0) { goto ldv_module_exit; } else { } ldv_s_drbd_proc_fops_file_operations = ldv_s_drbd_proc_fops_file_operations + 1; } else { } goto ldv_52008; case 1: ; if (ldv_s_drbd_proc_fops_file_operations == 1) { ldv_handler_precall(); drbd_proc_release(var_group1, var_group2); ldv_s_drbd_proc_fops_file_operations = 0; } else { } goto ldv_52008; default: ; goto ldv_52008; } ldv_52008: ; ldv_52012: tmp___0 = nondet_int(); if (tmp___0 != 0 || ldv_s_drbd_proc_fops_file_operations != 0) { goto ldv_52011; } else { } ldv_module_exit: ; ldv_check_final_state(); return 0; } } void ldv_mutex_lock_29(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_30(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_31(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_32(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_33(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_34(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_35(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_36(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___6 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_37(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_38(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_39(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void __builtin_prefetch(void const * , ...) ; extern void panic(char const * , ...) 
; __inline static void INIT_LIST_HEAD(struct list_head *list ) { { list->next = list; list->prev = list; return; } } extern void __list_add(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add(struct list_head *new , struct list_head *head ) { { __list_add(new, head, head->next); return; } } __inline static void list_add_tail(struct list_head *new , struct list_head *head ) { { __list_add(new, head->prev, head); return; } } extern void __list_del_entry(struct list_head * ) ; extern void list_del(struct list_head * ) ; __inline static void list_del_init(struct list_head *entry ) { { __list_del_entry(entry); INIT_LIST_HEAD(entry); return; } } __inline static void list_move(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add(list, head); return; } } __inline static void list_move_tail(struct list_head *list , struct list_head *head ) { { __list_del_entry(list); list_add_tail(list, head); return; } } __inline static int list_empty(struct list_head const *head ) { { return ((unsigned long )((struct list_head const *)head->next) == (unsigned long )head); } } __inline static void __list_splice(struct list_head const *list , struct list_head *prev , struct list_head *next ) { struct list_head *first ; struct list_head *last ; { first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; return; } } __inline static void list_splice_init(struct list_head *list , struct list_head *head ) { int tmp ; { tmp = list_empty((struct list_head const *)list); if (tmp == 0) { __list_splice((struct list_head const *)list, head, head->next); INIT_LIST_HEAD(list); } else { } return; } } extern int memcmp(void const * , void const * , size_t ) ; __inline static void *ERR_PTR(long error ) { { return ((void *)error); } } extern void __xchg_wrong_size(void) ; __inline static void atomic_set(atomic_t *v , int i ) { { v->counter = i; return; } } __inline static void atomic_sub(int i , atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; subl %1,%0": "+m" (v->counter): "ir" (i)); return; } } __inline static void atomic_dec(atomic_t *v ) { { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; decl %0": "+m" (v->counter)); return; } } __inline static int atomic_xchg(atomic_t *v , int new ) { int __ret ; { __ret = new; switch (4UL) { case 1UL: __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5505; case 2UL: __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5505; case 4UL: __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5505; case 8UL: __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (v->counter): : "memory", "cc"); goto ldv_5505; default: __xchg_wrong_size(); } ldv_5505: ; return (__ret); } } int ldv_mutex_trylock_54(struct mutex *ldv_func_arg1 ) ; int ldv_mutex_trylock_64(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_52(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_55(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_57(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_59(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_61(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_63(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_66(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_unlock_67(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_69(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_71(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_51(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_53(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_56(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_58(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_60(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_62(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_65(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_68(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_70(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_mutex_of_drbd_socket(struct mutex *lock ) ; void ldv_mutex_unlock_mutex_of_drbd_socket(struct mutex *lock ) ; void ldv_mutex_lock_state_mutex_of_drbd_conf(struct mutex *lock ) ; int ldv_mutex_trylock_state_mutex_of_drbd_conf(struct mutex *lock ) ; void ldv_mutex_unlock_state_mutex_of_drbd_conf(struct mutex *lock ) ; __inline static struct thread_info *current_thread_info___1(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6358; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6358; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6358; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6358; default: __bad_percpu_size(); } ldv_6358: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static int test_ti_thread_flag(struct thread_info *ti , int flag ) { int tmp ; { tmp = variable_test_bit(flag, (unsigned long const volatile *)(& ti->flags)); return (tmp); } } extern void _raw_spin_lock(raw_spinlock_t * ) ; extern void _raw_spin_unlock(raw_spinlock_t * ) ; extern void _raw_write_lock_irq(rwlock_t * ) ; extern void _raw_write_unlock_irq(rwlock_t * ) ; __inline static void spin_lock(spinlock_t *lock ) { { _raw_spin_lock(& lock->ldv_5957.rlock); return; } } __inline static void spin_unlock(spinlock_t *lock ) { { _raw_spin_unlock(& lock->ldv_5957.rlock); return; } } extern void complete(struct completion * ) ; __inline static void __rcu_read_lock___1(void) { struct thread_info *tmp ; { tmp = current_thread_info___1(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___1(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___1(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___1(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___1(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___1(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___1(); return; } } extern int mod_timer(struct timer_list * , unsigned long ) ; extern void add_timer(struct timer_list * ) ; __inline static void kref_get(struct kref *kref ) { int __ret_warn_on ; int tmp ; long tmp___0 ; { tmp = atomic_read((atomic_t const *)(& kref->refcount)); __ret_warn_on = tmp == 0; tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { warn_slowpath_null("include/linux/kref.h", 42); } else { } ldv__builtin_expect(__ret_warn_on != 0, 0L); atomic_inc(& kref->refcount); return; } } extern int dev_emerg(struct device const * , char const * , ...) ; __inline static int PageTail(struct page const *page ) { int tmp ; { tmp = constant_test_bit(15U, (unsigned long const volatile *)(& page->flags)); return (tmp); } } __inline static struct page *compound_head(struct page *page ) { int tmp ; long tmp___0 ; { tmp = PageTail((struct page const *)page); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); if (tmp___0 != 0L) { return (page->ldv_14746.first_page); } else { } return (page); } } __inline static int page_count(struct page *page ) { struct page *tmp ; int tmp___0 ; { tmp = compound_head(page); tmp___0 = atomic_read((atomic_t const *)(& tmp->ldv_14727.ldv_14726.ldv_14725._count)); return (tmp___0); } } __inline static void sg_assign_page(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (65), "i" (12UL)); ldv_19226: ; goto ldv_19226; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (67), "i" (12UL)); ldv_19227: ; goto ldv_19227; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (68), "i" (12UL)); ldv_19228: ; goto ldv_19228; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page(sg, page); sg->offset = offset; sg->length = len; return; } } extern void sg_init_table(struct scatterlist * , unsigned int ) ; __inline static loff_t i_size_read(struct inode const *inode ) { { return ((loff_t )inode->i_size); } } extern long schedule_timeout_interruptible(long ) ; extern void flush_signals(struct task_struct * ) ; extern void force_sig(int , struct task_struct * ) ; __inline static int test_tsk_thread_flag(struct task_struct *tsk , int flag ) { int tmp ; { tmp = test_ti_thread_flag((struct thread_info *)tsk->stack, flag); return (tmp); } } __inline static int signal_pending(struct task_struct *p ) { int tmp ; long tmp___0 ; { tmp = 
test_tsk_thread_flag(p, 2); tmp___0 = ldv__builtin_expect(tmp != 0, 0L); return ((int )tmp___0); } } extern void *idr_find(struct idr * , int ) ; extern int sock_setsockopt(struct socket * , int , int , char * , unsigned int ) ; __inline static struct crypto_tfm *crypto_hash_tfm(struct crypto_hash *tfm ) { { return (& tfm->base); } } __inline static struct hash_tfm *crypto_hash_crt(struct crypto_hash *tfm ) { struct crypto_tfm *tmp ; { tmp = crypto_hash_tfm(tfm); return (& tmp->crt_u.hash); } } __inline static unsigned int crypto_hash_digestsize(struct crypto_hash *tfm ) { struct hash_tfm *tmp ; { tmp = crypto_hash_crt(tfm); return (tmp->digestsize); } } __inline static int crypto_hash_init(struct hash_desc *desc ) { struct hash_tfm *tmp ; int tmp___0 ; { tmp = crypto_hash_crt(desc->tfm); tmp___0 = (*(tmp->init))(desc); return (tmp___0); } } __inline static int crypto_hash_update(struct hash_desc *desc , struct scatterlist *sg , unsigned int nbytes ) { struct hash_tfm *tmp ; int tmp___0 ; { tmp = crypto_hash_crt(desc->tfm); tmp___0 = (*(tmp->update))(desc, sg, nbytes); return (tmp___0); } } __inline static int crypto_hash_final(struct hash_desc *desc , u8 *out ) { struct hash_tfm *tmp ; int tmp___0 ; { tmp = crypto_hash_crt(desc->tfm); tmp___0 = (*(tmp->final))(desc, out); return (tmp___0); } } extern struct bio *bio_clone_bioset(struct bio * , gfp_t , struct bio_set * ) ; extern struct bio_set *fs_bio_set ; __inline static struct bio *bio_clone(struct bio *bio , gfp_t gfp_mask ) { struct bio *tmp ; { tmp = bio_clone_bioset(bio, gfp_mask, fs_bio_set); return (tmp); } } extern void generic_make_request(struct bio * ) ; __inline static unsigned int queue_max_hw_sectors(struct request_queue *q ) { { return (q->limits.max_hw_sectors); } } enum drbd_state_rv conn_request_state(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val , enum chg_state_flags flags ) ; __inline static enum drbd_thread_state get_t_state(struct drbd_thread *thi ) { { __asm__ volatile ("": : : "memory"); return (thi->t_state); } } struct fifo_buffer *fifo_alloc(int fifo_size ) ; __inline static struct drbd_conf *minor_to_mdev(unsigned int minor ) { void *tmp ; { tmp = idr_find(& minors, (int )minor); return ((struct drbd_conf *)tmp); } } void drbd_thread_current_set_cpu(struct drbd_thread *thi ) ; void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev ) ; int drbd_send_ack(struct drbd_conf *mdev , enum drbd_packet cmd , struct drbd_peer_request *peer_req ) ; int drbd_send_ack_ex(struct drbd_conf *mdev , enum drbd_packet cmd , sector_t sector , int blksize , u64 block_id ) ; int drbd_send_out_of_sync(struct drbd_conf *mdev , struct drbd_request *req ) ; int drbd_send_block(struct drbd_conf *mdev , enum drbd_packet cmd , struct drbd_peer_request *peer_req ) ; int drbd_send_dblock(struct drbd_conf *mdev , struct drbd_request *req ) ; int drbd_send_drequest(struct drbd_conf *mdev , int cmd , sector_t sector , int size , u64 block_id ) ; int drbd_send_drequest_csum(struct drbd_conf *mdev , sector_t sector , int size , void *digest , int digest_size , enum drbd_packet cmd ) ; int drbd_send_ov_request(struct drbd_conf *mdev , sector_t sector , int size ) ; void drbd_mdev_cleanup(struct drbd_conf *mdev ) ; void drbd_print_uuids(struct drbd_conf *mdev , char const *text ) ; void drbd_md_sync(struct drbd_conf *mdev ) ; void drbd_uuid_set(struct drbd_conf *mdev , int idx , u64 val ) ; void _drbd_uuid_set(struct drbd_conf *mdev , int idx , u64 val ) ; void drbd_uuid_set_bm(struct drbd_conf *mdev , u64 val ) ; 
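/*
 * The declarations below are the drbd_worker.c interface after CIL
 * flattening. Each w_*() function is a worker-thread callback installed
 * in struct drbd_work.cb; it is invoked with cancel != 0 when queued
 * work is being flushed rather than executed, and the definitions
 * further down all short-circuit on that flag. drbd_pp_wait is the wait
 * queue woken in move_to_net_ee_or_free() once peer-request pages have
 * been handed over to the network, and global_state_lock is the rwlock
 * taken write-side by resume_next_sg() and suspend_other_sg() around
 * the resync-after dependency walks (_drbd_pause_after(),
 * _drbd_resume_next()).
 */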
wait_queue_head_t drbd_pp_wait ; rwlock_t global_state_lock ; void drbd_minor_destroy(struct kref *kref ) ; int drbd_khelper(struct drbd_conf *mdev , char *cmd ) ; int drbd_worker(struct drbd_thread *thi ) ; enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev , int o_minor ) ; void drbd_resync_after_changed(struct drbd_conf *mdev ) ; void drbd_start_resync(struct drbd_conf *mdev , enum drbd_conns side ) ; void resume_next_sg(struct drbd_conf *mdev ) ; void suspend_other_sg(struct drbd_conf *mdev ) ; int drbd_resync_finished(struct drbd_conf *mdev ) ; void drbd_md_put_buffer(struct drbd_conf *mdev ) ; void drbd_ov_out_of_sync_found(struct drbd_conf *mdev , sector_t sector , int size ) ; void drbd_rs_controller_reset(struct drbd_conf *mdev ) ; __inline static void ov_out_of_sync_print(struct drbd_conf *mdev ) { { if (mdev->ov_last_oos_size != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Out of sync: start=%llu, size=%lu (sectors)\n", (unsigned long long )mdev->ov_last_oos_start, mdev->ov_last_oos_size); } else { } mdev->ov_last_oos_size = 0UL; return; } } void drbd_csum_bio(struct drbd_conf *mdev , struct crypto_hash *tfm , struct bio *bio , void *digest ) ; void drbd_csum_ee(struct drbd_conf *mdev , struct crypto_hash *tfm , struct drbd_peer_request *peer_req , void *digest ) ; int w_e_end_data_req(struct drbd_work *w , int cancel ) ; int w_e_end_rsdata_req(struct drbd_work *w , int cancel ) ; int w_e_end_csum_rs_req(struct drbd_work *w , int cancel ) ; int w_e_end_ov_reply(struct drbd_work *w , int cancel ) ; int w_e_end_ov_req(struct drbd_work *w , int cancel ) ; int w_ov_finished(struct drbd_work *w , int cancel ) ; int w_resync_timer(struct drbd_work *w , int cancel ) ; int w_send_write_hint(struct drbd_work *w , int cancel ) ; int w_make_resync_request(struct drbd_work *w , int cancel ) ; int w_send_dblock(struct drbd_work *w , int cancel ) ; int w_send_read_req(struct drbd_work *w , int cancel ) ; int w_prev_work_done(struct drbd_work *w , int cancel ) ; int w_restart_disk_io(struct drbd_work *w , int cancel ) ; int w_send_out_of_sync(struct drbd_work *w , int cancel ) ; int w_start_resync(struct drbd_work *w , int cancel ) ; void resync_timer_fn(unsigned long data ) ; void start_resync_timer_fn(unsigned long data ) ; int drbd_rs_should_slow_down(struct drbd_conf *mdev , sector_t sector ) ; int drbd_submit_peer_request(struct drbd_conf *mdev , struct drbd_peer_request *peer_req , unsigned int const rw , int const fault_type ) ; struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *mdev , u64 id , sector_t sector , unsigned int data_size , gfp_t gfp_mask ) ; void __drbd_free_peer_req(struct drbd_conf *mdev , struct drbd_peer_request *peer_req , int is_net ) ; __inline static int drbd_setsockopt(struct socket *sock , int level , int optname , char *optval , int optlen ) { mm_segment_t oldfs ; struct thread_info *tmp ; char *uoptval ; int err ; struct thread_info *tmp___0 ; mm_segment_t __constr_expr_0 ; struct thread_info *tmp___1 ; { tmp = current_thread_info___1(); oldfs = tmp->addr_limit; uoptval = optval; tmp___0 = current_thread_info___1(); __constr_expr_0.seg = 0xffffffffffffffffUL; tmp___0->addr_limit = __constr_expr_0; if (level == 1) { err = sock_setsockopt(sock, level, optname, uoptval, (unsigned int )optlen); } else { err = (*((sock->ops)->setsockopt))(sock, level, optname, uoptval, (unsigned int )optlen); } tmp___1 = current_thread_info___1(); tmp___1->addr_limit = oldfs; return (err); } } __inline static void 
drbd_tcp_cork(struct socket *sock ) { int val ; { val = 1; drbd_setsockopt(sock, 6, 3, (char *)(& val), 4); return; } } __inline static void drbd_tcp_uncork(struct socket *sock ) { int val ; { val = 0; drbd_setsockopt(sock, 6, 3, (char *)(& val), 4); return; } } void drbd_al_begin_io(struct drbd_conf *mdev , struct drbd_interval *i ) ; void drbd_al_complete_io(struct drbd_conf *mdev , struct drbd_interval *i ) ; void drbd_rs_complete_io(struct drbd_conf *mdev , sector_t sector ) ; int drbd_try_rs_begin_io(struct drbd_conf *mdev , sector_t sector ) ; int drbd_rs_del_all(struct drbd_conf *mdev ) ; void drbd_rs_failed_io(struct drbd_conf *mdev , sector_t sector , int size ) ; void drbd_advance_rs_marks(struct drbd_conf *mdev , unsigned long still_to_go ) ; void __drbd_set_in_sync(struct drbd_conf *mdev , sector_t sector , int size , char const *file , unsigned int const line ) ; int __drbd_set_out_of_sync(struct drbd_conf *mdev , sector_t sector , int size , char const *file , unsigned int const line ) ; __inline static struct page *page_chain_next(struct page *page ) { { return ((struct page *)page->ldv_14746.private); } } __inline static int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req ) { struct page *page ; int tmp ; struct page *tmp___0 ; { page = peer_req->pages; goto ldv_51606; ldv_51605: tmp = page_count(page); if (tmp > 1) { return (1); } else { } page = page_chain_next(page); ldv_51606: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp___0 = page_chain_next(page); __builtin_prefetch((void const *)tmp___0); if (1 != 0) { goto ldv_51605; } else { goto ldv_51607; } } else { } ldv_51607: ; return (0); } } __inline static void __drbd_chk_io_error____0(struct drbd_conf *mdev , enum drbd_force_detach_flags df , char const *where ) { enum drbd_io_error_p ep ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; union drbd_state __ns ; union drbd_state __ns___0 ; { rcu_read_lock___1(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1682, "suspicious rcu_dereference_check() usage"); } else { } } else { } ep = (enum drbd_io_error_p )_________p1->on_io_error; rcu_read_unlock___1(); switch ((unsigned int )ep) { case 0U: ; if ((unsigned int )df == 0U || (unsigned int )df == 1U) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "__drbd_chk_io_error_"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s.\n", where); } else { } if ((int )mdev->state.ldv_49522.disk > 4) { __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 4U; _drbd_set_state(mdev, __ns, CS_HARD, 0); } else { } goto ldv_51637; } else { } case 2U: ; case 1U: set_bit(12U, (unsigned long volatile *)(& mdev->flags)); if ((unsigned int )df == 0U) { set_bit(13U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((unsigned int )df == 3U) { set_bit(14U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((int )mdev->state.ldv_49522.disk > 2) { __ns___0 = drbd_read_state(mdev); __ns___0.ldv_40024.disk = 2U; _drbd_set_state(mdev, __ns___0, CS_HARD, 0); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s. 
Detaching...\n", where); } else { } goto ldv_51637; } ldv_51637: ; return; } } __inline static sector_t drbd_get_capacity(struct block_device *bdev ) { loff_t tmp ; sector_t tmp___0 ; { if ((unsigned long )bdev != (unsigned long )((struct block_device *)0)) { tmp = i_size_read((struct inode const *)bdev->bd_inode); tmp___0 = (sector_t )(tmp >> 9); } else { tmp___0 = 0UL; } return (tmp___0); } } __inline static void drbd_queue_work(struct drbd_work_queue *q , struct drbd_work *w ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& q->q_lock); flags = _raw_spin_lock_irqsave(tmp); list_add_tail(& w->list, & q->q); spin_unlock_irqrestore(& q->q_lock, flags); __wake_up(& q->q_wait, 3U, 1, 0); return; } } __inline static void wake_asender(struct drbd_tconn *tconn ) { int tmp ; { tmp = constant_test_bit(3U, (unsigned long const volatile *)(& tconn->flags)); if (tmp != 0) { force_sig(24, tconn->asender.task); } else { } return; } } __inline static void request_ping(struct drbd_tconn *tconn ) { { set_bit(2U, (unsigned long volatile *)(& tconn->flags)); wake_asender(tconn); return; } } void *conn_prepare_command(struct drbd_tconn *tconn , struct drbd_socket *sock ) ; void *drbd_prepare_command(struct drbd_conf *mdev , struct drbd_socket *sock ) ; int conn_send_command(struct drbd_tconn *tconn , struct drbd_socket *sock , enum drbd_packet cmd , unsigned int header_size , void *data , unsigned int size ) ; int drbd_send_command(struct drbd_conf *mdev , struct drbd_socket *sock , enum drbd_packet cmd , unsigned int header_size , void *data , unsigned int size ) ; __inline static void inc_rs_pending(struct drbd_conf *mdev ) { { atomic_inc(& mdev->rs_pending_cnt); return; } } __inline static void _dec_rs_pending(struct drbd_conf *mdev , char const *func , int line ) { int tmp ; int tmp___0 ; { atomic_dec(& mdev->rs_pending_cnt); tmp___0 = atomic_read((atomic_t const *)(& mdev->rs_pending_cnt)); if (tmp___0 < 0) { tmp = atomic_read((atomic_t const *)(& mdev->rs_pending_cnt)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "in %s:%d: rs_pending_cnt = %d < 0 !\n", func, line, tmp); } else { } return; } } __inline static void _dec_unacked(struct drbd_conf *mdev , char const *func , int line ) { int tmp ; int tmp___0 ; { atomic_dec(& mdev->unacked_cnt); tmp___0 = atomic_read((atomic_t const *)(& mdev->unacked_cnt)); if (tmp___0 < 0) { tmp = atomic_read((atomic_t const *)(& mdev->unacked_cnt)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "in %s:%d: unacked_cnt = %d < 0 !\n", func, line, tmp); } else { } return; } } void drbd_md_io_complete(struct bio *bio , int error ) ; void drbd_peer_request_endio(struct bio *bio , int error ) ; void drbd_request_endio(struct bio *bio , int error ) ; __inline static void drbd_req_make_private_bio(struct drbd_request *req , struct bio *bio_src ) { struct bio *bio ; { bio = bio_clone(bio_src, 16U); req->private_bio = bio; bio->bi_private = (void *)req; bio->bi_end_io = & drbd_request_endio; bio->bi_next = 0; return; } } int __req_mod(struct drbd_request *req , enum drbd_req_event what , struct bio_and_error *m ) ; void complete_master_bio(struct drbd_conf *mdev , struct bio_and_error *m ) ; __inline static int req_mod(struct drbd_request *req , enum drbd_req_event what ) { unsigned long flags ; struct drbd_conf *mdev ; struct bio_and_error m ; int rv ; raw_spinlock_t *tmp ; { mdev = req->w.ldv_49807.mdev; tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); rv = __req_mod(req, what, & m); 
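/*
 * __req_mod() just ran under tconn->req_lock and recorded any master
 * bio that became completable in m; the lock is dropped first, and only
 * then is complete_master_bio() called, so bio completion never happens
 * with the request lock held.
 */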
spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); if ((unsigned long )m.bio != (unsigned long )((struct bio *)0)) { complete_master_bio(mdev, & m); } else { } return (rv); } } static int w_make_ov_request(struct drbd_work *w , int cancel ) ; void drbd_md_io_complete(struct bio *bio , int error ) { struct drbd_md_io *md_io ; struct drbd_conf *mdev ; struct drbd_md_io const *__mptr ; { md_io = (struct drbd_md_io *)bio->bi_private; __mptr = (struct drbd_md_io const *)md_io; mdev = (struct drbd_conf *)__mptr + 0xfffffffffffffa38UL; md_io->error = error; drbd_md_put_buffer(mdev); md_io->done = 1U; __wake_up(& mdev->misc_wait, 3U, 1, 0); bio_put(bio); put_ldev(mdev); return; } } void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req ) { unsigned long flags ; struct drbd_conf *mdev ; raw_spinlock_t *tmp ; int tmp___0 ; int tmp___1 ; { flags = 0UL; mdev = peer_req->w.ldv_49807.mdev; tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); mdev->read_cnt = mdev->read_cnt + (peer_req->i.size >> 9); list_del(& peer_req->w.list); tmp___0 = list_empty((struct list_head const *)(& mdev->read_ee)); if (tmp___0 != 0) { __wake_up(& mdev->ee_wait, 3U, 1, 0); } else { } tmp___1 = constant_test_bit(3U, (unsigned long const volatile *)(& peer_req->flags)); if (tmp___1 != 0) { __drbd_chk_io_error____0(mdev, DRBD_READ_ERROR, "drbd_endio_read_sec_final"); } else { } spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); drbd_queue_work(& (mdev->tconn)->sender_work, & peer_req->w); put_ldev(mdev); return; } } static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req ) { unsigned long flags ; struct drbd_conf *mdev ; struct drbd_interval i ; int do_wake ; u64 block_id ; int do_al_complete_io ; raw_spinlock_t *tmp ; int tmp___0 ; { flags = 0UL; mdev = peer_req->w.ldv_49807.mdev; i = peer_req->i; do_al_complete_io = (int )peer_req->flags & 1; block_id = peer_req->ldv_50726.block_id; tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); mdev->writ_cnt = mdev->writ_cnt + (peer_req->i.size >> 9); list_move_tail(& peer_req->w.list, & mdev->done_ee); do_wake = list_empty((struct list_head const *)(block_id == 0xffffffffffffffffULL ? & mdev->sync_ee : & mdev->active_ee)); tmp___0 = constant_test_bit(3U, (unsigned long const volatile *)(& peer_req->flags)); if (tmp___0 != 0) { __drbd_chk_io_error____0(mdev, DRBD_WRITE_ERROR, "drbd_endio_write_sec_final"); } else { } spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); if (block_id == 0xffffffffffffffffULL) { drbd_rs_complete_io(mdev, i.sector); } else { } if (do_wake != 0) { __wake_up(& mdev->ee_wait, 3U, 1, 0); } else { } if (do_al_complete_io != 0) { drbd_al_complete_io(mdev, & i); } else { } wake_asender(mdev->tconn); put_ldev(mdev); return; } } void drbd_peer_request_endio(struct bio *bio , int error ) { struct drbd_peer_request *peer_req ; struct drbd_conf *mdev ; int uptodate ; int is_write ; int tmp ; int tmp___0 ; int tmp___1 ; { peer_req = (struct drbd_peer_request *)bio->bi_private; mdev = peer_req->w.ldv_49807.mdev; uptodate = (int )bio->bi_flags & 1; is_write = (int )bio->bi_rw & 1; if (error != 0) { tmp = ___ratelimit(& drbd_ratelimit_state, "drbd_peer_request_endio"); if (tmp != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s: error=%d s=%llus\n", is_write != 0 ? 
(char *)"write" : (char *)"read", error, (unsigned long long )peer_req->i.sector); } else { } } else { } if (error == 0 && uptodate == 0) { tmp___0 = ___ratelimit(& drbd_ratelimit_state, "drbd_peer_request_endio"); if (tmp___0 != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s: setting error to -EIO s=%llus\n", is_write != 0 ? (char *)"write" : (char *)"read", (unsigned long long )peer_req->i.sector); } else { } error = -5; } else { } if (error != 0) { set_bit(3U, (unsigned long volatile *)(& peer_req->flags)); } else { } bio_put(bio); tmp___1 = atomic_dec_and_test(& peer_req->pending_bios); if (tmp___1 != 0) { if (is_write != 0) { drbd_endio_write_sec_final(peer_req); } else { drbd_endio_read_sec_final(peer_req); } } else { } return; } } void drbd_request_endio(struct bio *bio , int error ) { unsigned long flags ; struct drbd_request *req ; struct drbd_conf *mdev ; struct bio_and_error m ; enum drbd_req_event what ; int uptodate ; int tmp ; long tmp___0 ; long tmp___1 ; void *tmp___2 ; raw_spinlock_t *tmp___3 ; { req = (struct drbd_request *)bio->bi_private; mdev = req->w.ldv_49807.mdev; uptodate = (int )bio->bi_flags & 1; if (error == 0 && uptodate == 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "p %s: setting error to -EIO\n", (int )bio->bi_rw & 1 ? (char *)"write" : (char *)"read"); error = -5; } else { } tmp___0 = ldv__builtin_expect(((unsigned long )req->rq_state & 8UL) != 0UL, 0L); if (tmp___0 != 0L) { tmp = ___ratelimit(& drbd_ratelimit_state, "drbd_request_endio"); if (tmp != 0) { dev_emerg((struct device const *)(& (mdev->vdisk)->part0.__dev), "delayed completion of aborted local request; disk-timeout may be too aggressive\n"); } else { } if (error == 0) { panic("possible random memory corruption caused by delayed completion of aborted local request\n"); } else { } } else { } tmp___1 = ldv__builtin_expect(error != 0, 0L); if (tmp___1 != 0L) { what = (int )bio->bi_rw & 1 ? WRITE_COMPLETED_WITH_ERROR : ((bio->bi_rw & 8193UL) == 0UL ? READ_COMPLETED_WITH_ERROR : READ_AHEAD_COMPLETED_WITH_ERROR); } else { what = COMPLETED_OK; } bio_put(req->private_bio); tmp___2 = ERR_PTR((long )error); req->private_bio = (struct bio *)tmp___2; tmp___3 = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp___3); __req_mod(req, what, & m); spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); put_ldev(mdev); if ((unsigned long )m.bio != (unsigned long )((struct bio *)0)) { complete_master_bio(mdev, & m); } else { } return; } } void drbd_csum_ee(struct drbd_conf *mdev , struct crypto_hash *tfm , struct drbd_peer_request *peer_req , void *digest ) { struct hash_desc desc ; struct scatterlist sg ; struct page *page ; struct page *tmp ; unsigned int len ; { page = peer_req->pages; desc.tfm = tfm; desc.flags = 0U; sg_init_table(& sg, 1U); crypto_hash_init(& desc); goto ldv_52122; ldv_52121: sg_set_page(& sg, page, 4096U, 0U); crypto_hash_update(& desc, & sg, sg.length); page = tmp; ldv_52122: tmp = page_chain_next(page); if ((unsigned long )tmp != (unsigned long )((struct page *)0)) { goto ldv_52121; } else { } len = peer_req->i.size & 4095U; sg_set_page(& sg, page, len != 0U ? 
len : 4096U, 0U); crypto_hash_update(& desc, & sg, sg.length); crypto_hash_final(& desc, (u8 *)digest); return; } } void drbd_csum_bio(struct drbd_conf *mdev , struct crypto_hash *tfm , struct bio *bio , void *digest ) { struct hash_desc desc ; struct scatterlist sg ; struct bio_vec *bvec ; int i ; { desc.tfm = tfm; desc.flags = 0U; sg_init_table(& sg, 1U); crypto_hash_init(& desc); bvec = bio->bi_io_vec + (unsigned long )bio->bi_idx; i = (int )bio->bi_idx; goto ldv_52135; ldv_52134: sg_set_page(& sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); crypto_hash_update(& desc, & sg, sg.length); bvec = bvec + 1; i = i + 1; ldv_52135: ; if ((int )bio->bi_vcnt > i) { goto ldv_52134; } else { } crypto_hash_final(& desc, (u8 *)digest); return; } } static int w_e_send_csum(struct drbd_work *w , int cancel ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; int digest_size ; void *digest ; int err ; long tmp ; long tmp___0 ; unsigned int tmp___1 ; sector_t sector ; unsigned int size ; long tmp___2 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; err = 0; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { goto out; } else { } tmp___0 = ldv__builtin_expect((peer_req->flags & 8UL) != 0UL, 0L); if (tmp___0 != 0L) { goto out; } else { } tmp___1 = crypto_hash_digestsize((mdev->tconn)->csums_tfm); digest_size = (int )tmp___1; digest = kmalloc((size_t )digest_size, 16U); if ((unsigned long )digest != (unsigned long )((void *)0)) { sector = peer_req->i.sector; size = peer_req->i.size; drbd_csum_ee(mdev, (mdev->tconn)->csums_tfm, peer_req, digest); __drbd_free_peer_req(mdev, peer_req, 0); peer_req = 0; inc_rs_pending(mdev); err = drbd_send_drequest_csum(mdev, sector, (int )size, digest, digest_size, P_CSUM_RS_REQUEST); kfree((void const *)digest); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "kmalloc() of digest failed.\n"); err = -12; } out: ; if ((unsigned long )peer_req != (unsigned long )((struct drbd_peer_request *)0)) { __drbd_free_peer_req(mdev, peer_req, 0); } else { } tmp___2 = ldv__builtin_expect(err != 0, 0L); if (tmp___2 != 0L) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_send_drequest(..., csum) failed\n"); } else { } return (err); } } static int read_for_csum(struct drbd_conf *mdev , sector_t sector , int size ) { struct drbd_peer_request *peer_req ; int tmp ; int tmp___0 ; int tmp___1 ; { tmp = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp == 0) { return (-5); } else { } tmp___0 = drbd_rs_should_slow_down(mdev, sector); if (tmp___0 != 0) { goto defer; } else { } peer_req = drbd_alloc_peer_req(mdev, 0xffffffffffffffffULL, sector, (unsigned int )size, 514U); if ((unsigned long )peer_req == (unsigned long )((struct drbd_peer_request *)0)) { goto defer; } else { } peer_req->w.cb = & w_e_send_csum; spin_lock_irq(& (mdev->tconn)->req_lock); list_add(& peer_req->w.list, & mdev->read_ee); spin_unlock_irq(& (mdev->tconn)->req_lock); atomic_add(size >> 9, & mdev->rs_sect_ev); tmp___1 = drbd_submit_peer_request(mdev, peer_req, 0U, 3); if (tmp___1 == 0) { return (0); } else { } spin_lock_irq(& (mdev->tconn)->req_lock); list_del(& peer_req->w.list); spin_unlock_irq(& (mdev->tconn)->req_lock); __drbd_free_peer_req(mdev, peer_req, 0); defer: put_ldev(mdev); return (-11); } } int w_resync_timer(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; { mdev = w->ldv_49807.mdev; switch ((int )mdev->state.ldv_49522.conn) { case 
18: w_make_ov_request(w, cancel); goto ldv_52164; case 17: w_make_resync_request(w, cancel); goto ldv_52164; } ldv_52164: ; return (0); } } void resync_timer_fn(unsigned long data ) { struct drbd_conf *mdev ; int tmp ; { mdev = (struct drbd_conf *)data; tmp = list_empty((struct list_head const *)(& mdev->resync_work.list)); if (tmp != 0) { drbd_queue_work(& (mdev->tconn)->sender_work, & mdev->resync_work); } else { } return; } } static void fifo_set(struct fifo_buffer *fb , int value ) { int i ; { i = 0; goto ldv_52176; ldv_52175: fb->values[i] = value; i = i + 1; ldv_52176: ; if ((unsigned int )i < fb->size) { goto ldv_52175; } else { } return; } } static int fifo_push(struct fifo_buffer *fb , int value ) { int ov ; unsigned int tmp ; { ov = fb->values[fb->head_index]; tmp = fb->head_index; fb->head_index = fb->head_index + 1U; fb->values[tmp] = value; if (fb->head_index >= fb->size) { fb->head_index = 0U; } else { } return (ov); } } static void fifo_add_val(struct fifo_buffer *fb , int value ) { int i ; { i = 0; goto ldv_52189; ldv_52188: fb->values[i] = fb->values[i] + value; i = i + 1; ldv_52189: ; if ((unsigned int )i < fb->size) { goto ldv_52188; } else { } return; } } struct fifo_buffer *fifo_alloc(int fifo_size ) { struct fifo_buffer *fb ; void *tmp ; { tmp = kzalloc(((unsigned long )fifo_size + 3UL) * 4UL, 16U); fb = (struct fifo_buffer *)tmp; if ((unsigned long )fb == (unsigned long )((struct fifo_buffer *)0)) { return (0); } else { } fb->head_index = 0U; fb->size = (unsigned int )fifo_size; fb->total = 0; return (fb); } } static int drbd_rs_controller(struct drbd_conf *mdev ) { struct disk_conf *dc ; unsigned int sect_in ; unsigned int want ; int req_sect ; int correction ; int cps ; int steps ; int curr_corr ; int max_sect ; struct fifo_buffer *plan ; int tmp ; struct disk_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; struct fifo_buffer *_________p1___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { tmp = atomic_xchg(& mdev->rs_sect_in, 0); sect_in = (unsigned int )tmp; mdev->rs_in_flight = (int )((unsigned int )mdev->rs_in_flight - sect_in); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 590, "suspicious rcu_dereference_check() usage"); } else { } } else { } dc = _________p1; _________p1___0 = *((struct fifo_buffer * volatile *)(& mdev->rs_plan_s)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 591, "suspicious rcu_dereference_check() usage"); } else { } } else { } plan = _________p1___0; steps = (int )plan->size; if ((unsigned int )mdev->rs_in_flight + sect_in == 0U) { want = ((dc->resync_rate * 50U) / 250U) * (__u32 )steps; } else { want = dc->c_fill_target != 0U ? 
dc->c_fill_target : ((dc->c_delay_target * sect_in) * 250U) / 250U; } correction = (int )((want - (unsigned int )mdev->rs_in_flight) - (unsigned int )plan->total); cps = correction / steps; fifo_add_val(plan, cps); plan->total = plan->total + cps * steps; curr_corr = fifo_push(plan, 0); plan->total = plan->total - curr_corr; req_sect = (int )(sect_in + (unsigned int )curr_corr); if (req_sect < 0) { req_sect = 0; } else { } max_sect = (int )((dc->c_max_rate * 50U) / 250U); if (req_sect > max_sect) { req_sect = max_sect; } else { } return (req_sect); } } static int drbd_rs_number_requests(struct drbd_conf *mdev ) { int number ; int tmp ; struct disk_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; struct fifo_buffer *_________p1___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; { rcu_read_lock___1(); _________p1___0 = *((struct fifo_buffer * volatile *)(& mdev->rs_plan_s)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 635, "suspicious rcu_dereference_check() usage"); } else { } } else { } if (_________p1___0->size != 0U) { tmp = drbd_rs_controller(mdev); number = tmp >> 3; mdev->c_sync_rate = (number * 1000) / 25; } else { _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 639, "suspicious rcu_dereference_check() usage"); } else { } } else { } mdev->c_sync_rate = (int )_________p1->resync_rate; number = (mdev->c_sync_rate * 25) / 1000; } rcu_read_unlock___1(); return (number); } } int w_make_resync_request(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; unsigned long bit ; sector_t sector ; sector_t capacity ; sector_t tmp ; int max_bio_size ; int number ; int rollback_i ; int size ; int align ; int queued ; int sndbuf ; int i ; long tmp___0 ; int tmp___1 ; unsigned int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; long tmp___6 ; int tmp___7 ; int tmp___8 ; int err ; unsigned long tmp___9 ; { mdev = w->ldv_49807.mdev; tmp = drbd_get_capacity(mdev->this_bdev); capacity = tmp; i = 0; tmp___0 = ldv__builtin_expect(cancel != 0, 0L); if (tmp___0 != 0L) { return (0); } else { } if (mdev->rs_total == 0UL) { drbd_resync_finished(mdev); return (0); } else { } tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Disk broke down during resync!\n"); return (0); } else { } tmp___2 = queue_max_hw_sectors(mdev->rq_queue); max_bio_size = (int )(tmp___2 << 9); number = drbd_rs_number_requests(mdev); if (number == 0) { goto requeue; } else { } i = 0; goto ldv_52253; ldv_52252: ldv_mutex_lock_62(& (mdev->tconn)->data.mutex); if ((unsigned long )(mdev->tconn)->data.socket != (unsigned long )((struct socket *)0)) { queued = (((mdev->tconn)->data.socket)->sk)->sk_wmem_queued; sndbuf = (((mdev->tconn)->data.socket)->sk)->sk_sndbuf; } else { queued = 1; sndbuf = 0; } 
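/*
 * Send-buffer throttle: queued and sndbuf were sampled under
 * data.mutex above. The check below requeues the resync work once more
 * than half of the socket send buffer is occupied; the queued = 1,
 * sndbuf = 0 fallback for a missing socket makes the same check fail
 * into requeue as well, so no further resync requests are produced
 * until the resync timer fires again.
 */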
ldv_mutex_unlock_63(& (mdev->tconn)->data.mutex); if (sndbuf / 2 < queued) { goto requeue; } else { } next_sector: size = 4096; bit = drbd_bm_find_next(mdev, mdev->bm_resync_fo); if (bit == 0xffffffffffffffffUL) { mdev->bm_resync_fo = drbd_bm_bits(mdev); put_ldev(mdev); return (0); } else { } sector = bit << 3; tmp___3 = drbd_rs_should_slow_down(mdev, sector); if (tmp___3 != 0) { mdev->bm_resync_fo = bit; goto requeue; } else { tmp___4 = drbd_try_rs_begin_io(mdev, sector); if (tmp___4 != 0) { mdev->bm_resync_fo = bit; goto requeue; } else { } } mdev->bm_resync_fo = bit + 1UL; tmp___5 = drbd_bm_test_bit(mdev, bit); tmp___6 = ldv__builtin_expect(tmp___5 == 0, 0L); if (tmp___6 != 0L) { drbd_rs_complete_io(mdev, sector); goto next_sector; } else { } align = 1; rollback_i = i; ldv_52243: ; if (size + 4096 > max_bio_size) { goto ldv_52242; } else { } if (((sector_t )((1 << (align + 3)) + -1) & sector) != 0UL) { goto ldv_52242; } else { } if (((bit + 1UL) & 4095UL) == 0UL) { goto ldv_52242; } else { } tmp___7 = drbd_bm_test_bit(mdev, bit + 1UL); if (tmp___7 != 1) { goto ldv_52242; } else { } bit = bit + 1UL; size = size + 4096; if (4096 << align <= size) { align = align + 1; } else { } i = i + 1; goto ldv_52243; ldv_52242: ; if (size > 4096) { mdev->bm_resync_fo = bit + 1UL; } else { } if ((sector_t )(size >> 9) + sector > capacity) { size = (int )((capacity - sector) << 9); } else { } if ((mdev->tconn)->agreed_pro_version > 88 && (unsigned long )(mdev->tconn)->csums_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___8 = read_for_csum(mdev, sector, size); switch (tmp___8) { case -5: put_ldev(mdev); return (-5); case -11: drbd_rs_complete_io(mdev, sector); mdev->bm_resync_fo = sector >> 3; i = rollback_i; goto requeue; case 0: ; goto ldv_52247; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared"), "i" (777), "i" (12UL)); ldv_52249: ; goto ldv_52249; } ldv_52247: ; } else { inc_rs_pending(mdev); err = drbd_send_drequest(mdev, 9, sector, size, 0xffffffffffffffffULL); if (err != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_send_drequest() failed, aborting...\n"); _dec_rs_pending(mdev, "w_make_resync_request", 787); put_ldev(mdev); return (err); } else { } } i = i + 1; ldv_52253: ; if (i < number) { goto ldv_52252; } else { } tmp___9 = drbd_bm_bits(mdev); if (mdev->bm_resync_fo >= tmp___9) { put_ldev(mdev); return (0); } else { } requeue: mdev->rs_in_flight = mdev->rs_in_flight + (i << 3); mod_timer(& mdev->resync_timer, (unsigned long )jiffies + 25UL); put_ldev(mdev); return (0); } } static int w_make_ov_request(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; int number ; int i ; int size ; sector_t sector ; sector_t capacity ; sector_t tmp ; bool stop_sector_reached ; long tmp___0 ; bool tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; { mdev = w->ldv_49807.mdev; tmp = drbd_get_capacity(mdev->this_bdev); capacity = tmp; stop_sector_reached = 0; tmp___0 = ldv__builtin_expect(cancel != 0, 0L); if (tmp___0 != 0L) { return (1); } else { } number = drbd_rs_number_requests(mdev); sector = mdev->ov_position; i = 0; goto ldv_52270; ldv_52269: ; if (sector >= capacity) { return (1); } else { } if (i > 0) { tmp___1 = 
verify_can_do_stop_sector(mdev); if ((int )tmp___1) { if (mdev->ov_stop_sector <= sector) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } } else { tmp___2 = 0; } stop_sector_reached = (bool )tmp___2; if ((int )stop_sector_reached) { goto ldv_52266; } else { } size = 4096; tmp___3 = drbd_rs_should_slow_down(mdev, sector); if (tmp___3 != 0) { mdev->ov_position = sector; goto requeue; } else { tmp___4 = drbd_try_rs_begin_io(mdev, sector); if (tmp___4 != 0) { mdev->ov_position = sector; goto requeue; } else { } } if ((sector_t )(size >> 9) + sector > capacity) { size = (int )((capacity - sector) << 9); } else { } inc_rs_pending(mdev); tmp___5 = drbd_send_ov_request(mdev, sector, size); if (tmp___5 != 0) { _dec_rs_pending(mdev, "w_make_ov_request", 852); return (0); } else { } sector = sector + 8UL; i = i + 1; ldv_52270: ; if (i < number) { goto ldv_52269; } else { } ldv_52266: mdev->ov_position = sector; requeue: mdev->rs_in_flight = mdev->rs_in_flight + (i << 3); if (i == 0 || ! stop_sector_reached) { mod_timer(& mdev->resync_timer, (unsigned long )jiffies + 25UL); } else { } return (1); } } int w_ov_finished(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; { mdev = w->ldv_49807.mdev; kfree((void const *)w); ov_out_of_sync_print(mdev); drbd_resync_finished(mdev); return (0); } } static int w_resync_finished(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; { mdev = w->ldv_49807.mdev; kfree((void const *)w); drbd_resync_finished(mdev); return (0); } } static void ping_peer(struct drbd_conf *mdev ) { struct drbd_tconn *tconn ; int tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; int tmp___1 ; { tconn = mdev->tconn; clear_bit(4, (unsigned long volatile *)(& tconn->flags)); request_ping(tconn); tmp = constant_test_bit(4U, (unsigned long const volatile *)(& tconn->flags)); if (tmp != 0 || (int )mdev->state.ldv_49522.conn <= 9) { goto ldv_52285; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_52288: prepare_to_wait(& tconn->ping_wait, & __wait, 2); tmp___1 = constant_test_bit(4U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___1 != 0 || (int )mdev->state.ldv_49522.conn <= 9) { goto ldv_52287; } else { } schedule(); goto ldv_52288; ldv_52287: finish_wait(& tconn->ping_wait, & __wait); ldv_52285: ; return; } } int drbd_resync_finished(struct drbd_conf *mdev ) { unsigned long db ; unsigned long dt ; unsigned long dbdt ; unsigned long n_oos ; union drbd_state os ; union drbd_state ns ; struct drbd_work *w ; char *khelper_cmd ; int verify_done ; void *tmp ; int tmp___0 ; int tmp___1 ; unsigned long s ; unsigned long t ; int ratio ; int i ; int i___0 ; { khelper_cmd = 0; verify_done = 0; tmp___0 = drbd_rs_del_all(mdev); if (tmp___0 != 0) { schedule_timeout_interruptible(25L); tmp = kmalloc(32UL, 32U); w = (struct drbd_work *)tmp; if ((unsigned long )w != (unsigned long )((struct drbd_work *)0)) { w->cb = & w_resync_finished; w->ldv_49807.mdev = mdev; drbd_queue_work(& (mdev->tconn)->sender_work, w); return (1); } else { } dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); } else { } dt = (((unsigned long )jiffies - mdev->rs_start) - mdev->rs_paused) / 250UL; if (dt == 0UL) { dt = 1UL; } else { } db = mdev->rs_total; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 288U || (unsigned int 
)*((unsigned short *)mdev + 374UL) == 304U) { db = db - mdev->ov_left; } else { } dbdt = db / dt << 2; mdev->rs_paused = mdev->rs_paused / 250UL; tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 == 0) { goto out; } else { } ping_peer(mdev); spin_lock_irq(& (mdev->tconn)->req_lock); os = drbd_read_state(mdev); verify_done = (unsigned int )*((unsigned short *)(& os) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 304U; if ((int )os.ldv_40024.conn <= 10) { goto out_unlock; } else { } ns = os; ns.ldv_40024.conn = 10U; _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n", verify_done != 0 ? (char *)"Online verify" : (char *)"Resync", mdev->rs_paused + dt, mdev->rs_paused, dbdt); n_oos = drbd_bm_total_weight(mdev); if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 304U) { if (n_oos != 0UL) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "Online verify found %lu %dk block out of sync!\n", n_oos, 4); khelper_cmd = (char *)"out-of-sync"; } else { if (mdev->rs_failed != n_oos) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( (n_oos - mdev->rs_failed) == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 968); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 272U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 336U) { khelper_cmd = (char *)"after-resync-target"; } else { } if ((unsigned long )(mdev->tconn)->csums_tfm != (unsigned long )((struct crypto_hash *)0) && mdev->rs_total != 0UL) { s = mdev->rs_same_csum; t = mdev->rs_total; ratio = t != 0UL ? (t <= 99999UL ? (int const )((s * 100UL) / t) : (int const )(s / (t / 100UL))) : 0; _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%u %% had equal checksums, eliminated: %luK; transferred %luK total %luK\n", ratio, mdev->rs_same_csum << 2, (mdev->rs_total - mdev->rs_same_csum) << 2, mdev->rs_total << 2); } else { } } } else { } if (mdev->rs_failed != 0UL) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), " %lu failed blocks\n", mdev->rs_failed); if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 272U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 336U) { ns.ldv_40024.disk = 4U; ns.ldv_40024.pdsk = 8U; } else { ns.ldv_40024.disk = 8U; ns.ldv_40024.pdsk = 4U; } } else { ns.ldv_40024.disk = 8U; ns.ldv_40024.pdsk = 8U; if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 272U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 336U) { if ((unsigned long )mdev->p_uuid != (unsigned long )((u64 *)0)) { i = 1; goto ldv_52308; ldv_52307: _drbd_uuid_set(mdev, i, *(mdev->p_uuid + (unsigned long )i)); i = i + 1; ldv_52308: ; if (i <= 3) { goto ldv_52307; } else { } drbd_uuid_set(mdev, 1, (mdev->ldev)->md.uuid[0]); _drbd_uuid_set(mdev, 0, *(mdev->p_uuid)); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "mdev->p_uuid is NULL! 
BUG\n"); } } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) != 288U && (unsigned int )*((unsigned short *)(& os) + 0UL) != 304U) { drbd_uuid_set_bm(mdev, 0ULL); drbd_print_uuids(mdev, "updated UUIDs"); if ((unsigned long )mdev->p_uuid != (unsigned long )((u64 *)0)) { i___0 = 0; goto ldv_52312; ldv_52311: *(mdev->p_uuid + (unsigned long )i___0) = (mdev->ldev)->md.uuid[i___0]; i___0 = i___0 + 1; ldv_52312: ; if (i___0 <= 3) { goto ldv_52311; } else { } } else { } } else { } } _drbd_set_state(mdev, ns, CS_VERBOSE, 0); out_unlock: spin_unlock_irq(& (mdev->tconn)->req_lock); put_ldev(mdev); out: mdev->rs_total = 0UL; mdev->rs_failed = 0UL; mdev->rs_paused = 0UL; if (verify_done != 0 && mdev->ov_left == 0UL) { mdev->ov_start_sector = 0UL; } else { } drbd_md_sync(mdev); if ((unsigned long )khelper_cmd != (unsigned long )((char *)0)) { drbd_khelper(mdev, khelper_cmd); } else { } return (1); } } static void move_to_net_ee_or_free(struct drbd_conf *mdev , struct drbd_peer_request *peer_req ) { int i ; int tmp ; { tmp = drbd_peer_req_has_active_page(peer_req); if (tmp != 0) { i = (int )(((unsigned long )peer_req->i.size + 4095UL) >> 12); atomic_add(i, & mdev->pp_in_use_by_net); atomic_sub(i, & mdev->pp_in_use); spin_lock_irq(& (mdev->tconn)->req_lock); list_add_tail(& peer_req->w.list, & mdev->net_ee); spin_unlock_irq(& (mdev->tconn)->req_lock); __wake_up(& drbd_pp_wait, 3U, 1, 0); } else { __drbd_free_peer_req(mdev, peer_req, 0); } return; } } int w_e_end_data_req(struct drbd_work *w , int cancel ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; int err ; long tmp ; int tmp___0 ; long tmp___1 ; long tmp___2 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { __drbd_free_peer_req(mdev, peer_req, 0); _dec_unacked(mdev, "w_e_end_data_req", 1080); return (0); } else { } tmp___1 = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp___1 != 0L) { err = drbd_send_block(mdev, P_DATA_REPLY, peer_req); } else { tmp___0 = ___ratelimit(& drbd_ratelimit_state, "w_e_end_data_req"); if (tmp___0 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Sending NegDReply. 
sector=%llus.\n", (unsigned long long )peer_req->i.sector); } else { } err = drbd_send_ack(mdev, P_NEG_DREPLY, peer_req); } _dec_unacked(mdev, "w_e_end_data_req", 1094); move_to_net_ee_or_free(mdev, peer_req); tmp___2 = ldv__builtin_expect(err != 0, 0L); if (tmp___2 != 0L) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_send_block() failed\n"); } else { } return (err); } } int w_e_end_rsdata_req(struct drbd_work *w , int cancel ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; int err ; long tmp ; int tmp___0 ; int tmp___1 ; long tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { __drbd_free_peer_req(mdev, peer_req, 0); _dec_unacked(mdev, "w_e_end_rsdata_req", 1117); return (0); } else { } tmp___0 = _get_ldev_if_state(mdev, D_FAILED); if (tmp___0 != 0) { drbd_rs_complete_io(mdev, peer_req->i.sector); put_ldev(mdev); } else { } if ((unsigned int )*((unsigned short *)mdev + 374UL) == 352U) { err = drbd_send_ack(mdev, P_RS_CANCEL, peer_req); } else { tmp___4 = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp___4 != 0L) { tmp___2 = ldv__builtin_expect((int )mdev->state.ldv_49522.pdsk > 3, 1L); if (tmp___2 != 0L) { inc_rs_pending(mdev); err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req); } else { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "w_e_end_rsdata_req"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Not sending RSDataReply, partner DISKLESS!\n"); } else { } err = 0; } } else { tmp___3 = ___ratelimit(& drbd_ratelimit_state, "w_e_end_rsdata_req"); if (tmp___3 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Sending NegRSDReply. 
sector %llus.\n", (unsigned long long )peer_req->i.sector); } else { } err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req); drbd_rs_failed_io(mdev, peer_req->i.sector, (int )peer_req->i.size); } } _dec_unacked(mdev, "w_e_end_rsdata_req", 1149); move_to_net_ee_or_free(mdev, peer_req); tmp___5 = ldv__builtin_expect(err != 0, 0L); if (tmp___5 != 0L) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_send_block() failed\n"); } else { } return (err); } } int w_e_end_csum_rs_req(struct drbd_work *w , int cancel ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; struct digest_info *di ; int digest_size ; void *digest ; int err ; int eq ; long tmp ; int tmp___0 ; unsigned int tmp___1 ; int tmp___2 ; int tmp___3 ; long tmp___4 ; long tmp___5 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; digest = 0; eq = 0; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { __drbd_free_peer_req(mdev, peer_req, 0); _dec_unacked(mdev, "w_e_end_csum_rs_req", 1169); return (0); } else { } tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 != 0) { drbd_rs_complete_io(mdev, peer_req->i.sector); put_ldev(mdev); } else { } di = peer_req->ldv_50726.digest; tmp___4 = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp___4 != 0L) { if ((unsigned long )(mdev->tconn)->csums_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___1 = crypto_hash_digestsize((mdev->tconn)->csums_tfm); digest_size = (int )tmp___1; if (di->digest_size != digest_size) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( digest_size == di->digest_size ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1186); } else { } digest = kmalloc((size_t )digest_size, 16U); } else { } if ((unsigned long )digest != (unsigned long )((void *)0)) { drbd_csum_ee(mdev, (mdev->tconn)->csums_tfm, peer_req, digest); tmp___2 = memcmp((void const *)digest, (void const *)di->digest, (size_t )digest_size); eq = tmp___2 == 0; kfree((void const *)digest); } else { } if (eq != 0) { __drbd_set_in_sync(mdev, peer_req->i.sector, (int )peer_req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1196U); mdev->rs_same_csum = mdev->rs_same_csum + (unsigned long )(peer_req->i.size >> 12); err = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, peer_req); } else { inc_rs_pending(mdev); peer_req->ldv_50726.block_id = 0xffffffffffffffffULL; peer_req->flags = peer_req->flags & 0xffffffffffffffefUL; kfree((void const *)di); err = drbd_send_block(mdev, P_RS_DATA_REPLY, peer_req); } } else { err = drbd_send_ack(mdev, P_NEG_RS_DREPLY, peer_req); tmp___3 = ___ratelimit(& drbd_ratelimit_state, "w_e_end_csum_rs_req"); if (tmp___3 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Sending NegDReply. 
I guess it gets messy.\n"); } else { } } _dec_unacked(mdev, "w_e_end_csum_rs_req", 1213); move_to_net_ee_or_free(mdev, peer_req); tmp___5 = ldv__builtin_expect(err != 0, 0L); if (tmp___5 != 0L) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_send_block/ack() failed\n"); } else { } return (err); } } int w_e_end_ov_req(struct drbd_work *w , int cancel ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; sector_t sector ; unsigned int size ; int digest_size ; void *digest ; int err ; long tmp ; unsigned int tmp___0 ; long tmp___1 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; sector = peer_req->i.sector; size = peer_req->i.size; err = 0; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { goto out; } else { } tmp___0 = crypto_hash_digestsize((mdev->tconn)->verify_tfm); digest_size = (int )tmp___0; digest = kmalloc((size_t )digest_size, 16U); if ((unsigned long )digest == (unsigned long )((void *)0)) { err = 1; goto out; } else { } tmp___1 = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp___1 != 0L) { drbd_csum_ee(mdev, (mdev->tconn)->verify_tfm, peer_req, digest); } else { memset(digest, 0, (size_t )digest_size); } __drbd_free_peer_req(mdev, peer_req, 0); peer_req = 0; inc_rs_pending(mdev); err = drbd_send_drequest_csum(mdev, sector, (int )size, digest, digest_size, P_OV_REPLY); if (err != 0) { _dec_rs_pending(mdev, "w_e_end_ov_req", 1256); } else { } kfree((void const *)digest); out: ; if ((unsigned long )peer_req != (unsigned long )((struct drbd_peer_request *)0)) { __drbd_free_peer_req(mdev, peer_req, 0); } else { } _dec_unacked(mdev, "w_e_end_ov_req", 1262); return (err); } } void drbd_ov_out_of_sync_found(struct drbd_conf *mdev , sector_t sector , int size ) { { if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) { mdev->ov_last_oos_size = mdev->ov_last_oos_size + (sector_t )(size >> 9); } else { mdev->ov_last_oos_start = sector; mdev->ov_last_oos_size = (sector_t )(size >> 9); } __drbd_set_out_of_sync(mdev, sector, size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1274U); return; } } int w_e_end_ov_reply(struct drbd_work *w , int cancel ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; struct digest_info *di ; void *digest ; sector_t sector ; unsigned int size ; int digest_size ; int err ; int eq ; bool stop_sector_reached ; long tmp ; int tmp___0 ; unsigned int tmp___1 ; int tmp___2 ; long tmp___3 ; bool tmp___4 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; sector = peer_req->i.sector; size = peer_req->i.size; eq = 0; stop_sector_reached = 0; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { __drbd_free_peer_req(mdev, peer_req, 0); _dec_unacked(mdev, "w_e_end_ov_reply", 1291); return (0); } else { } tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 != 0) { drbd_rs_complete_io(mdev, peer_req->i.sector); put_ldev(mdev); } else { } di = peer_req->ldv_50726.digest; tmp___3 = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp___3 != 0L) { tmp___1 = crypto_hash_digestsize((mdev->tconn)->verify_tfm); digest_size = (int )tmp___1; digest = kmalloc((size_t )digest_size, 16U); if ((unsigned long )digest != (unsigned 
long )((void *)0)) { drbd_csum_ee(mdev, (mdev->tconn)->verify_tfm, peer_req, digest); if (di->digest_size != digest_size) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( digest_size == di->digest_size ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1310); } else { } tmp___2 = memcmp((void const *)digest, (void const *)di->digest, (size_t )digest_size); eq = tmp___2 == 0; kfree((void const *)digest); } else { } } else { } __drbd_free_peer_req(mdev, peer_req, 0); if (eq == 0) { drbd_ov_out_of_sync_found(mdev, sector, (int )size); } else { ov_out_of_sync_print(mdev); } err = drbd_send_ack_ex(mdev, P_OV_RESULT, sector, (int )size, eq != 0 ? 4711ULL : 4712ULL); _dec_unacked(mdev, "w_e_end_ov_reply", 1330); mdev->ov_left = mdev->ov_left - 1UL; if ((mdev->ov_left & 512UL) != 0UL) { drbd_advance_rs_marks(mdev, mdev->ov_left); } else { } tmp___4 = verify_can_do_stop_sector(mdev); stop_sector_reached = (bool )((int )tmp___4 && (sector_t )(size >> 9) + sector >= mdev->ov_stop_sector); if (mdev->ov_left == 0UL || (int )stop_sector_reached) { ov_out_of_sync_print(mdev); drbd_resync_finished(mdev); } else { } return (err); } } int w_prev_work_done(struct drbd_work *w , int cancel ) { struct drbd_wq_barrier *b ; struct drbd_work const *__mptr ; { __mptr = (struct drbd_work const *)w; b = (struct drbd_wq_barrier *)__mptr; complete(& b->done); return (0); } } int drbd_send_barrier(struct drbd_tconn *tconn ) { struct p_barrier *p ; struct drbd_socket *sock ; void *tmp ; int tmp___0 ; { sock = & tconn->data; tmp = conn_prepare_command(tconn, sock); p = (struct p_barrier *)tmp; if ((unsigned long )p == (unsigned long )((struct p_barrier *)0)) { return (-5); } else { } p->barrier = (u32 )tconn->send.current_epoch_nr; p->pad = 0U; tconn->send.current_epoch_writes = 0U; tmp___0 = conn_send_command(tconn, sock, P_BARRIER, 8U, 0, 0U); return (tmp___0); } } int w_send_write_hint(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; struct drbd_socket *sock ; void *tmp ; int tmp___0 ; { mdev = w->ldv_49807.mdev; if (cancel != 0) { return (0); } else { } sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); if ((unsigned long )tmp == (unsigned long )((void *)0)) { return (-5); } else { } tmp___0 = drbd_send_command(mdev, sock, P_UNPLUG_REMOTE, 0U, 0, 0U); return (tmp___0); } } static void re_init_if_first_write(struct drbd_tconn *tconn , unsigned int epoch ) { { if (! tconn->send.seen_any_write_yet) { tconn->send.seen_any_write_yet = 1; tconn->send.current_epoch_nr = (int )epoch; tconn->send.current_epoch_writes = 0U; } else { } return; } } static void maybe_send_barrier(struct drbd_tconn *tconn , unsigned int epoch ) { { if (! 
tconn->send.seen_any_write_yet) { return; } else { } if ((unsigned int )tconn->send.current_epoch_nr != epoch) { if (tconn->send.current_epoch_writes != 0U) { drbd_send_barrier(tconn); } else { } tconn->send.current_epoch_nr = (int )epoch; } else { } return; } } int w_send_out_of_sync(struct drbd_work *w , int cancel ) { struct drbd_request *req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; struct drbd_tconn *tconn ; int err ; long tmp ; { __mptr = (struct drbd_work const *)w; req = (struct drbd_request *)__mptr; mdev = w->ldv_49807.mdev; tconn = mdev->tconn; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { req_mod(req, SEND_CANCELED); return (0); } else { } maybe_send_barrier(tconn, req->epoch); err = drbd_send_out_of_sync(mdev, req); req_mod(req, OOS_HANDED_TO_NETWORK); return (err); } } int w_send_dblock(struct drbd_work *w , int cancel ) { struct drbd_request *req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; struct drbd_tconn *tconn ; int err ; long tmp ; { __mptr = (struct drbd_work const *)w; req = (struct drbd_request *)__mptr; mdev = w->ldv_49807.mdev; tconn = mdev->tconn; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { req_mod(req, SEND_CANCELED); return (0); } else { } re_init_if_first_write(tconn, req->epoch); maybe_send_barrier(tconn, req->epoch); tconn->send.current_epoch_writes = tconn->send.current_epoch_writes + 1U; err = drbd_send_dblock(mdev, req); req_mod(req, err != 0 ? SEND_FAILED : HANDED_OVER_TO_NETWORK); return (err); } } int w_send_read_req(struct drbd_work *w , int cancel ) { struct drbd_request *req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; struct drbd_tconn *tconn ; int err ; long tmp ; { __mptr = (struct drbd_work const *)w; req = (struct drbd_request *)__mptr; mdev = w->ldv_49807.mdev; tconn = mdev->tconn; tmp = ldv__builtin_expect(cancel != 0, 0L); if (tmp != 0L) { req_mod(req, SEND_CANCELED); return (0); } else { } maybe_send_barrier(tconn, req->epoch); err = drbd_send_drequest(mdev, 8, req->i.sector, (int )req->i.size, (u64 )req); req_mod(req, err != 0 ? SEND_FAILED : HANDED_OVER_TO_NETWORK); return (err); } } int w_restart_disk_io(struct drbd_work *w , int cancel ) { struct drbd_request *req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; { __mptr = (struct drbd_work const *)w; req = (struct drbd_request *)__mptr; mdev = w->ldv_49807.mdev; if ((int )(req->master_bio)->bi_rw & 1 && ((unsigned long )req->rq_state & 4096UL) != 0UL) { drbd_al_begin_io(mdev, & req->i); } else { } drbd_req_make_private_bio(req, req->master_bio); (req->private_bio)->bi_bdev = (mdev->ldev)->backing_bdev; generic_make_request(req->private_bio); return (0); } } static int _drbd_may_sync_now(struct drbd_conf *mdev ) { struct drbd_conf *odev ; int resync_after ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; bool _bool ; int tmp___1 ; { odev = mdev; ldv_52465: ; if ((unsigned long )odev->ldev == (unsigned long )((struct drbd_backing_dev *)0)) { return (1); } else { } rcu_read_lock___1(); _________p1 = *((struct disk_conf * volatile *)(& (odev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1518, "suspicious rcu_dereference_check() usage"); } else { } } else { } resync_after = _________p1->resync_after; rcu_read_unlock___1(); if (resync_after == -1) { return (1); } else { } odev = minor_to_mdev((unsigned int )resync_after); _bool = (unsigned long )odev != (unsigned long )((struct drbd_conf *)0); if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"odev", "_drbd_may_sync_now"); } else { } if (_bool) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (1); } else { } if (((((int )odev->state.ldv_49522.conn > 15 && (int )odev->state.ldv_49522.conn <= 21) || (unsigned int )*((unsigned char *)odev + 750UL) != 0U) || (unsigned int )*((unsigned char *)odev + 750UL) != 0U) || (unsigned int )*((unsigned char *)odev + 750UL) != 0U) { return (0); } else { } goto ldv_52465; } } static int _drbd_pause_after(struct drbd_conf *mdev ) { struct drbd_conf *odev ; int i ; int rv ; void *tmp ; union drbd_state __ns ; enum drbd_state_rv tmp___0 ; int tmp___1 ; void *tmp___2 ; { rv = 0; rcu_read_lock___1(); i = 0; tmp = idr_get_next(& minors, & i); odev = (struct drbd_conf *)tmp; goto ldv_52476; ldv_52475: ; if ((unsigned int )*((unsigned short *)odev + 374UL) == 0U && (unsigned int )*((unsigned char *)odev + 749UL) == 0U) { goto ldv_52472; } else { } tmp___1 = _drbd_may_sync_now(odev); if (tmp___1 == 0) { __ns = drbd_read_state(odev); __ns.ldv_40024.aftr_isp = 1U; tmp___0 = __drbd_set_state(odev, __ns, CS_HARD, 0); rv = ((int )tmp___0 != 2) | rv; } else { } ldv_52472: i = i + 1; tmp___2 = idr_get_next(& minors, & i); odev = (struct drbd_conf *)tmp___2; ldv_52476: ; if ((unsigned long )odev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52475; } else { } rcu_read_unlock___1(); return (rv); } } static int _drbd_resume_next(struct drbd_conf *mdev ) { struct drbd_conf *odev ; int i ; int rv ; void *tmp ; union drbd_state __ns ; enum drbd_state_rv tmp___0 ; int tmp___1 ; void *tmp___2 ; { rv = 0; rcu_read_lock___1(); i = 0; tmp = idr_get_next(& minors, & i); odev = (struct drbd_conf *)tmp; goto ldv_52488; ldv_52487: ; if ((unsigned int )*((unsigned short *)odev + 374UL) == 0U && (unsigned int )*((unsigned char *)odev + 749UL) == 0U) { goto ldv_52484; } else { } if ((unsigned int )*((unsigned char *)odev + 750UL) != 0U) { tmp___1 = _drbd_may_sync_now(odev); if (tmp___1 != 0) { __ns = drbd_read_state(odev); __ns.ldv_40024.aftr_isp = 0U; tmp___0 = __drbd_set_state(odev, __ns, CS_HARD, 0); rv = ((int )tmp___0 != 2) | rv; } else { } } else { } ldv_52484: i = i + 1; tmp___2 = idr_get_next(& minors, & i); odev = (struct drbd_conf *)tmp___2; ldv_52488: ; if ((unsigned long )odev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52487; } else { } rcu_read_unlock___1(); return (rv); } } void resume_next_sg(struct drbd_conf *mdev ) { { _raw_write_lock_irq(& global_state_lock); _drbd_resume_next(mdev); _raw_write_unlock_irq(& global_state_lock); return; } } void suspend_other_sg(struct drbd_conf *mdev ) { { _raw_write_lock_irq(& global_state_lock); _drbd_pause_after(mdev); _raw_write_unlock_irq(& global_state_lock); return; } } enum drbd_ret_code drbd_resync_after_valid(struct drbd_conf *mdev , int o_minor ) { struct 
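/* drbd_resync_after_valid(): sanity-checks a proposed resync-after
   minor before it is committed.  o_minor == -1 disables the
   dependency, negative or nonexistent minors yield ERR_RESYNC_AFTER,
   and the chain of resync_after links is walked to make sure it
   never leads back to mdev itself, which yields
   ERR_RESYNC_AFTER_CYCLE. */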
drbd_conf *odev ; int resync_after ; struct drbd_conf *tmp ; struct disk_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; { if (o_minor == -1) { return (NO_ERROR); } else { } if (o_minor < -1) { return (ERR_RESYNC_AFTER); } else { tmp = minor_to_mdev((unsigned int )o_minor); if ((unsigned long )tmp == (unsigned long )((struct drbd_conf *)0)) { return (ERR_RESYNC_AFTER); } else { } } odev = minor_to_mdev((unsigned int )o_minor); ldv_52505: ; if ((unsigned long )odev == (unsigned long )mdev) { return (ERR_RESYNC_AFTER_CYCLE); } else { } rcu_read_lock___1(); _________p1 = *((struct disk_conf * volatile *)(& (odev->ldev)->disk_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1615, "suspicious rcu_dereference_check() usage"); } else { } } else { } resync_after = _________p1->resync_after; rcu_read_unlock___1(); if (resync_after == -1) { return (NO_ERROR); } else { } odev = minor_to_mdev((unsigned int )resync_after); goto ldv_52505; } } void drbd_resync_after_changed(struct drbd_conf *mdev ) { int changes ; int tmp ; { ldv_52510: changes = _drbd_pause_after(mdev); tmp = _drbd_resume_next(mdev); changes = tmp | changes; if (changes != 0) { goto ldv_52510; } else { } return; } } void drbd_rs_controller_reset(struct drbd_conf *mdev ) { struct fifo_buffer *plan ; struct fifo_buffer *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; { atomic_set(& mdev->rs_sect_in, 0); atomic_set(& mdev->rs_sect_ev, 0); mdev->rs_in_flight = 0; rcu_read_lock___1(); _________p1 = *((struct fifo_buffer * volatile *)(& mdev->rs_plan_s)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
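/* drbd_resync_after_changed() above iterates _drbd_pause_after() and
   _drbd_resume_next() to a fixed point, looping until neither call
   changes any device state.  drbd_rs_controller_reset() zeroes the
   resync controller's inputs (rs_sect_in, rs_sect_ev, rs_in_flight)
   and clears the dynamic resync plan (the rs_plan_s fifo) under
   rcu_read_lock. */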
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1650, "suspicious rcu_dereference_check() usage"); } else { } } else { } plan = _________p1; plan->total = 0; fifo_set(plan, 0); rcu_read_unlock___1(); return; } } void start_resync_timer_fn(unsigned long data ) { struct drbd_conf *mdev ; { mdev = (struct drbd_conf *)data; drbd_queue_work(& (mdev->tconn)->sender_work, & mdev->start_resync_work); return; } } int w_start_resync(struct drbd_work *w , int cancel ) { struct drbd_conf *mdev ; int tmp ; int tmp___0 ; { mdev = w->ldv_49807.mdev; tmp = atomic_read((atomic_t const *)(& mdev->unacked_cnt)); if (tmp != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "w_start_resync later...\n"); mdev->start_resync_timer.expires = (unsigned long )jiffies + 25UL; add_timer(& mdev->start_resync_timer); return (0); } else { tmp___0 = atomic_read((atomic_t const *)(& mdev->rs_pending_cnt)); if (tmp___0 != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "w_start_resync later...\n"); mdev->start_resync_timer.expires = (unsigned long )jiffies + 25UL; add_timer(& mdev->start_resync_timer); return (0); } else { } } drbd_start_resync(mdev, C_SYNC_SOURCE); clear_bit(19, (unsigned long volatile *)(& mdev->flags)); return (0); } } void drbd_start_resync(struct drbd_conf *mdev , enum drbd_conns side ) { union drbd_state ns ; int r ; union drbd_state val ; union drbd_state mask ; union drbd_state val___0 ; union drbd_state mask___0 ; int tmp ; int tmp___0 ; struct task_struct *tmp___1 ; int tmp___2 ; int tmp___3 ; enum drbd_state_rv tmp___4 ; unsigned long tw ; unsigned long tmp___5 ; unsigned long now ; int i ; char const *tmp___6 ; struct net_conf *nc ; int timeo ; struct net_conf *_________p1 ; bool __warned ; int tmp___7 ; int tmp___8 ; { if ((int )mdev->state.ldv_49522.conn > 15 && (int )mdev->state.ldv_49522.conn <= 21) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Resync already running!\n"); return; } else { } tmp = constant_test_bit(20U, (unsigned long const volatile *)(& mdev->flags)); if (tmp == 0) { if ((unsigned int )side == 17U) { r = drbd_khelper(mdev, (char *)"before-resync-target"); r = (r >> 8) & 255; if (r > 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "before-resync-target handler returned %d, dropping connection.\n", r); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask, val, CS_HARD); return; } else { } } else { r = drbd_khelper(mdev, (char *)"before-resync-source"); r = (r >> 8) & 255; if (r > 0) { if (r == 3) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "before-resync-source handler returned %d, ignoring. 
Old userland tools?", r); } else { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "before-resync-source handler returned %d, dropping connection.\n", r); val___0.i = 0U; val___0.ldv_40024.conn = 1U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask___0, val___0, CS_HARD); return; } } else { } } } else { } tmp___1 = get_current(); if ((unsigned long )tmp___1 == (unsigned long )(mdev->tconn)->worker.task) { tmp___0 = ldv_mutex_trylock_64(mdev->state_mutex); if (tmp___0 == 0) { set_bit(20U, (unsigned long volatile *)(& mdev->flags)); mdev->start_resync_timer.expires = (unsigned long )jiffies + 50UL; add_timer(& mdev->start_resync_timer); return; } else { ldv_mutex_lock_65(mdev->state_mutex); } } else { } clear_bit(20, (unsigned long volatile *)(& mdev->flags)); _raw_write_lock_irq(& global_state_lock); tmp___2 = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp___2 == 0) { _raw_write_unlock_irq(& global_state_lock); ldv_mutex_unlock_66(mdev->state_mutex); return; } else { } ns = drbd_read_state(mdev); tmp___3 = _drbd_may_sync_now(mdev); ns.ldv_40024.aftr_isp = tmp___3 == 0; ns.ldv_40024.conn = (unsigned char )side; if ((unsigned int )side == 17U) { ns.ldv_40024.disk = 4U; } else { ns.ldv_40024.pdsk = 4U; } tmp___4 = __drbd_set_state(mdev, ns, CS_VERBOSE, 0); r = (int )tmp___4; ns = drbd_read_state(mdev); if ((int )ns.ldv_40024.conn <= 9) { r = 0; } else { } if (r == 1) { tmp___5 = drbd_bm_total_weight(mdev); tw = tmp___5; now = jiffies; mdev->rs_failed = 0UL; mdev->rs_paused = 0UL; mdev->rs_same_csum = 0UL; mdev->rs_last_events = 0; mdev->rs_last_sect_ev = 0; mdev->rs_total = tw; mdev->rs_start = now; i = 0; goto ldv_52546; ldv_52545: mdev->rs_mark_left[i] = tw; mdev->rs_mark_time[i] = now; i = i + 1; ldv_52546: ; if (i <= 7) { goto ldv_52545; } else { } _drbd_pause_after(mdev); } else { } _raw_write_unlock_irq(& global_state_lock); if (r == 1) { mdev->rs_last_bcast = (unsigned long )jiffies - 250UL; tmp___6 = drbd_conn_str((enum drbd_conns )ns.ldv_40024.conn); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Began resync as %s (will sync %lu KB [%lu bits set]).\n", tmp___6, mdev->rs_total << 2, mdev->rs_total); if ((unsigned int )side == 17U) { mdev->bm_resync_fo = 0UL; } else { } if ((unsigned int )side == 16U && (mdev->tconn)->agreed_pro_version <= 95) { drbd_gen_and_send_sync_uuid(mdev); } else { } if ((mdev->tconn)->agreed_pro_version <= 94 && mdev->rs_total == 0UL) { if ((unsigned int )side == 16U) { rcu_read_lock___1(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___7 = debug_lockdep_rcu_enabled(); if (tmp___7 != 0 && ! 
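/* drbd_start_resync() so far: run the before-resync-target/source
   khelper (exit status > 0 drops the connection, except status 3
   from the source-side handler, which is only logged); when called
   from the worker, take state_mutex by trylock and re-arm
   start_resync_timer on contention; then, under global_state_lock,
   move conn to the requested side (17U apparently C_SYNC_TARGET,
   16U C_SYNC_SOURCE) and seed the rs_* bookkeeping from the bitmap
   weight.  The timeout computed below, ping_int * 250 +
   ping_timeo * 250 / 9, looks like jiffies with HZ == 250. */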
__warned) { tmp___8 = rcu_read_lock_held(); if (tmp___8 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1823, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; timeo = (int )(nc->ping_int * 250U + (nc->ping_timeo * 250U) / 9U); rcu_read_unlock___1(); schedule_timeout_interruptible((long )timeo); } else { } drbd_resync_finished(mdev); } else { } drbd_rs_controller_reset(mdev); if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 272U) { mod_timer(& mdev->resync_timer, jiffies); } else { } drbd_md_sync(mdev); } else { } put_ldev(mdev); ldv_mutex_unlock_67(mdev->state_mutex); return; } } bool need_to_send_barrier(struct drbd_tconn *connection ) { int tmp ; { if (! connection->send.seen_any_write_yet) { return (0); } else { } if (connection->send.current_epoch_writes == 0U) { return (0); } else { } tmp = atomic_read((atomic_t const *)(& connection->current_tle_nr)); if (tmp != connection->send.current_epoch_nr + 1) { return (0); } else { } return (1); } } bool dequeue_work_batch(struct drbd_work_queue *queue , struct list_head *work_list ) { int tmp ; { spin_lock_irq(& queue->q_lock); list_splice_init(& queue->q, work_list); spin_unlock_irq(& queue->q_lock); tmp = list_empty((struct list_head const *)work_list); return (tmp == 0); } } bool dequeue_work_item(struct drbd_work_queue *queue , struct list_head *work_list ) { int tmp ; int tmp___0 ; { spin_lock_irq(& queue->q_lock); tmp = list_empty((struct list_head const *)(& queue->q)); if (tmp == 0) { list_move(queue->q.next, work_list); } else { } spin_unlock_irq(& queue->q_lock); tmp___0 = list_empty((struct list_head const *)work_list); return (tmp___0 == 0); } } void wait_for_work(struct drbd_tconn *connection , struct list_head *work_list ) { wait_queue_t wait ; struct task_struct *tmp ; struct net_conf *nc ; int uncork ; int cork ; int tmp___0 ; struct net_conf *_________p1 ; bool __warned ; int tmp___1 ; int tmp___2 ; int send_barrier ; int tmp___3 ; int tmp___4 ; struct task_struct *tmp___5 ; int tmp___6 ; bool tmp___7 ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___8 ; int tmp___9 ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; dequeue_work_item(& connection->sender_work, work_list); tmp___0 = list_empty((struct list_head const *)work_list); if (tmp___0 == 0) { return; } else { } rcu_read_lock___1(); _________p1 = *((struct net_conf * volatile *)(& connection->net_conf)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1907, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; uncork = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? 
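/* wait_for_work(): when no work item is immediately available, the
   data socket is uncorked (if tcp_cork is configured) so anything
   already queued goes out before the sender blocks; a NULL net_conf
   is treated as "no corking" in the read below. */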
(int )nc->tcp_cork : 0; rcu_read_unlock___1(); if (uncork != 0) { ldv_mutex_lock_68(& connection->data.mutex); if ((unsigned long )connection->data.socket != (unsigned long )((struct socket *)0)) { drbd_tcp_uncork(connection->data.socket); } else { } ldv_mutex_unlock_69(& connection->data.mutex); } else { } ldv_52577: prepare_to_wait(& connection->sender_work.q_wait, & wait, 1); spin_lock_irq(& connection->req_lock); spin_lock(& connection->sender_work.q_lock); tmp___3 = list_empty((struct list_head const *)(& connection->sender_work.q)); if (tmp___3 == 0) { list_move(connection->sender_work.q.next, work_list); } else { } spin_unlock(& connection->sender_work.q_lock); tmp___4 = list_empty((struct list_head const *)work_list); if (tmp___4 == 0) { spin_unlock_irq(& connection->req_lock); goto ldv_52576; } else { tmp___5 = get_current(); tmp___6 = signal_pending(tmp___5); if (tmp___6 != 0) { spin_unlock_irq(& connection->req_lock); goto ldv_52576; } else { } } tmp___7 = need_to_send_barrier(connection); send_barrier = (int )tmp___7; spin_unlock_irq(& connection->req_lock); if (send_barrier != 0) { drbd_send_barrier(connection); connection->send.current_epoch_nr = connection->send.current_epoch_nr + 1; } else { } schedule(); goto ldv_52577; ldv_52576: finish_wait(& connection->sender_work.q_wait, & wait); rcu_read_lock___1(); _________p1___0 = *((struct net_conf * volatile *)(& connection->net_conf)); tmp___8 = debug_lockdep_rcu_enabled(); if (tmp___8 != 0 && ! __warned___0) { tmp___9 = rcu_read_lock_held(); if (tmp___9 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 1946, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1___0; cork = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? 
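/* Sender idle loop: under req_lock + q_lock it grabs at most one
   work item per wakeup; while the queue stays empty and the transfer
   log has advanced exactly one epoch past the last one sent
   (need_to_send_barrier()), the P_BARRIER for the finished epoch is
   sent from here and current_epoch_nr is bumped.  After the wait the
   cork state is restored below: re-cork if tcp_cork is still set;
   otherwise, if the earlier uncork never ran, uncork now, since the
   option may have been switched off while waiting. */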
(int )nc->tcp_cork : 0; rcu_read_unlock___1(); ldv_mutex_lock_70(& connection->data.mutex); if ((unsigned long )connection->data.socket != (unsigned long )((struct socket *)0)) { if (cork != 0) { drbd_tcp_cork(connection->data.socket); } else if (uncork == 0) { drbd_tcp_uncork(connection->data.socket); } else { } } else { } ldv_mutex_unlock_71(& connection->data.mutex); return; } } int drbd_worker(struct drbd_thread *thi ) { struct drbd_tconn *tconn ; struct drbd_work *w ; struct drbd_conf *mdev ; struct list_head work_list ; int vnr ; int tmp ; struct task_struct *tmp___0 ; enum drbd_thread_state tmp___1 ; struct task_struct *tmp___2 ; int tmp___3 ; enum drbd_thread_state tmp___4 ; struct list_head const *__mptr ; int tmp___5 ; union drbd_state val ; union drbd_state mask ; int tmp___6 ; enum drbd_thread_state tmp___7 ; struct list_head const *__mptr___0 ; int tmp___8 ; int tmp___9 ; void *tmp___10 ; void *tmp___11 ; { tconn = thi->tconn; w = 0; work_list.next = & work_list; work_list.prev = & work_list; goto ldv_52589; ldv_52600: drbd_thread_current_set_cpu(thi); tmp = list_empty((struct list_head const *)(& work_list)); if (tmp != 0) { wait_for_work(tconn, & work_list); } else { } tmp___2 = get_current(); tmp___3 = signal_pending(tmp___2); if (tmp___3 != 0) { tmp___0 = get_current(); flush_signals(tmp___0); tmp___1 = get_t_state(thi); if ((unsigned int )tmp___1 == 1U) { printk("\fd-con %s: Worker got an unexpected signal\n", tconn->name); goto ldv_52589; } else { } goto ldv_52590; } else { } tmp___4 = get_t_state(thi); if ((unsigned int )tmp___4 != 1U) { goto ldv_52590; } else { } goto ldv_52593; ldv_52598: __mptr = (struct list_head const *)work_list.next; w = (struct drbd_work *)__mptr; list_del_init(& w->list); tmp___5 = (*(w->cb))(w, (unsigned int )tconn->cstate <= 8U); if (tmp___5 == 0) { goto ldv_52593; } else { } if ((unsigned int )tconn->cstate > 8U) { val.i = 0U; val.ldv_40024.conn = 5U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { } ldv_52593: tmp___6 = list_empty((struct list_head const *)(& work_list)); if (tmp___6 == 0) { goto ldv_52598; } else { } ldv_52589: tmp___7 = get_t_state(thi); if ((unsigned int )tmp___7 == 1U) { goto ldv_52600; } else { } ldv_52590: ; ldv_52606: ; goto ldv_52604; ldv_52603: __mptr___0 = (struct list_head const *)work_list.next; w = (struct drbd_work *)__mptr___0; list_del_init(& w->list); (*(w->cb))(w, 1); ldv_52604: tmp___8 = list_empty((struct list_head const *)(& work_list)); if (tmp___8 == 0) { goto ldv_52603; } else { } dequeue_work_batch(& tconn->sender_work, & work_list); tmp___9 = list_empty((struct list_head const *)(& work_list)); if (tmp___9 == 0) { goto ldv_52606; } else { } rcu_read_lock___1(); vnr = 0; tmp___10 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___10; goto ldv_52609; ldv_52608: ; if ((unsigned int )*((unsigned char *)mdev + 749UL) != 0U || (unsigned int )*((unsigned short *)mdev + 374UL) != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_worker.c.prepared", 2008); } else { } kref_get(& mdev->kref); rcu_read_unlock___1(); drbd_mdev_cleanup(mdev); kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___1(); vnr = vnr + 1; tmp___11 = idr_get_next(& 
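/* drbd_worker() main loop: dequeue work, blocking in wait_for_work()
   when idle; flush unexpected signals while the thread is RUNNING;
   invoke each callback with its cancel argument set once cstate has
   dropped to 8U or below (no longer fully connected), and force the
   connection to conn state 5U (apparently C_NETWORK_FAILURE) if a
   callback fails while still connected.  On exit, remaining work is
   run cancelled and the idr walk over tconn->volumes asserts each
   device is already Diskless/StandAlone, takes a kref, drops the rcu
   lock for drbd_mdev_cleanup(), and puts the ref again. */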
tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___11; ldv_52609: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52608; } else { } rcu_read_unlock___1(); return (0); } } void ldv_mutex_lock_51(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_52(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_53(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_54(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_55(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_56(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_57(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_58(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_59(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_60(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_61(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_62(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_63(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_64(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___12 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_state_mutex_of_drbd_conf(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_lock_65(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_66(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_67(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_68(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_69(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_70(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_71(struct 
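/* ldv_mutex_*_NN(): per-call-site wrappers woven in by the LDV
   instrumentation.  Each one first drives the verifier's lock model
   (ldv_mutex_lock_<field>_of_<struct> and friends) and then performs
   the real mutex operation.  In the trylock wrappers the model's
   verdict is returned directly; the trailing
   "return (ldv_func_res);" after it is unreachable and merely
   preserves the original call's result variable. */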
mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static __u32 __arch_swab32(__u32 val ) { { __asm__ ("bswapl %0": "=r" (val): "0" (val)); return (val); } } __inline static __u64 __arch_swab64(__u64 val ) { { __asm__ ("bswapq %0": "=r" (val): "0" (val)); return (val); } } __inline static __u16 __fswab16(__u16 val ) { { return ((__u16 )((int )((short )((int )val << 8)) | (int )((short )((int )val >> 8)))); } } __inline static __u32 __fswab32(__u32 val ) { __u32 tmp ; { tmp = __arch_swab32(val); return (tmp); } } __inline static __u64 __fswab64(__u64 val ) { __u64 tmp ; { tmp = __arch_swab64(val); return (tmp); } } extern void dump_stack(void) ; extern unsigned long __phys_addr(unsigned long ) ; extern unsigned long __per_cpu_offset[4096U] ; extern size_t strlen(char const * ) ; extern char *strcpy(char * , char const * ) ; extern int strcmp(char const * , char const * ) ; extern int nr_cpu_ids ; extern struct cpumask const * const cpu_possible_mask ; __inline static unsigned int cpumask_check(unsigned int cpu ) { bool __warned ; int __ret_warn_once ; int __ret_warn_on ; long tmp ; long tmp___0 ; long tmp___1 ; { __ret_warn_once = (unsigned int )nr_cpu_ids <= cpu; tmp___1 = ldv__builtin_expect(__ret_warn_once != 0, 0L); if (tmp___1 != 0L) { __ret_warn_on = ! __warned; tmp = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp != 0L) { warn_slowpath_null("include/linux/cpumask.h", 108); } else { } tmp___0 = ldv__builtin_expect(__ret_warn_on != 0, 0L); if (tmp___0 != 0L) { __warned = 1; } else { } } else { } ldv__builtin_expect(__ret_warn_once != 0, 0L); return (cpu); } } __inline static unsigned int cpumask_next(int n , struct cpumask const *srcp ) { unsigned long tmp ; { if (n != -1) { cpumask_check((unsigned int )n); } else { } tmp = find_next_bit((unsigned long const *)(& srcp->bits), (unsigned long )nr_cpu_ids, (unsigned long )(n + 1)); return ((unsigned int )tmp); } } __inline static long PTR_ERR(void const *ptr ) { { return ((long )ptr); } } __inline static long IS_ERR(void const *ptr ) { long tmp ; { tmp = ldv__builtin_expect((unsigned long )ptr > 0xfffffffffffff000UL, 0L); return (tmp); } } __inline static int mutex_is_locked(struct mutex *lock ) { int tmp ; { tmp = atomic_read((atomic_t const *)(& lock->count)); return (tmp != 1); } } __inline static int ldv_mutex_is_locked_119(struct mutex *lock ) ; __inline static int ldv_mutex_is_locked_120(struct mutex *lock ) ; int ldv_mutex_trylock_96(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_94(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_97(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_99(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_101(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_103(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_105(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_108(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_109(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_111(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_112(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_113(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_114(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_116(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_118(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_93(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_95(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_98(struct mutex *ldv_func_arg1 ) ; void 
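/* From here on the file continues with the next merged translation
   unit (drbd_receiver.c.prepared): CIL re-emits the inline helpers
   it needs with fresh ___N suffixes (current_thread_info___2,
   rcu_read_lock___2, the __arch_swab* primitives) plus forward
   declarations for the next batch of numbered lock wrappers.  As a
   quick check on the byte-swap helpers above: __fswab16(0x1234)
   evaluates to 0x3412 from the two shift-and-or terms. */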
ldv_mutex_lock_100(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_102(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_104(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_106(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_107(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_110(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_115(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_117(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_conf_update_of_drbd_tconn(struct mutex *lock ) ; void ldv_mutex_unlock_conf_update_of_drbd_tconn(struct mutex *lock ) ; int ldv_mutex_is_locked_cstate_mutex_of_drbd_tconn(struct mutex *lock ) ; int ldv_mutex_is_locked_state_mutex_of_drbd_conf(struct mutex *lock ) ; __inline static struct thread_info *current_thread_info___2(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6388; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6388; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6388; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6388; default: __bad_percpu_size(); } ldv_6388: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern void _raw_write_lock_bh(rwlock_t * ) ; extern void _raw_write_unlock_bh(rwlock_t * ) ; __inline static void init_completion(struct completion *x ) { struct lock_class_key __key ; { x->done = 0U; __init_waitqueue_head(& x->wait, "&x->wait", & __key); return; } } extern void wait_for_completion(struct completion * ) ; extern long wait_for_completion_interruptible_timeout(struct completion * , unsigned long ) ; extern void synchronize_sched(void) ; __inline static void __rcu_read_lock___2(void) { struct thread_info *tmp ; { tmp = current_thread_info___2(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___2(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___2(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void synchronize_rcu(void) { { synchronize_sched(); return; } } __inline static void rcu_read_lock___2(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___2(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___2(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
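/* rcu_read_lock___2()/rcu_read_unlock___2(): in this configuration
   the RCU read side is simply preempt_disable(), i.e. an increment
   and a matching decrement of the current thread's preempt_count,
   with lockdep warning once if either is used while the CPU is
   idle. */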
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___2(); return; } } extern int del_timer_sync(struct timer_list * ) ; extern void put_page(struct page * ) ; __inline static void sg_assign_page___0(struct scatterlist *sg , struct page *page ) { unsigned long page_link ; long tmp ; long tmp___0 ; long tmp___1 ; { page_link = sg->page_link & 3UL; tmp = ldv__builtin_expect(((unsigned long )page & 3UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (65), "i" (12UL)); ldv_19256: ; goto ldv_19256; } else { } tmp___0 = ldv__builtin_expect(sg->sg_magic != 2271560481UL, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (67), "i" (12UL)); ldv_19257: ; goto ldv_19257; } else { } tmp___1 = ldv__builtin_expect((long )((int )sg->page_link) & 1L, 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"include/linux/scatterlist.h"), "i" (68), "i" (12UL)); ldv_19258: ; goto ldv_19258; } else { } sg->page_link = page_link | (unsigned long )page; return; } } __inline static void sg_set_page___0(struct scatterlist *sg , struct page *page , unsigned int len , unsigned int offset ) { { sg_assign_page___0(sg, page); sg->offset = offset; sg->length = len; return; } } __inline static void sg_set_buf(struct scatterlist *sg , void const *buf , unsigned int buflen ) { unsigned long tmp ; { tmp = __phys_addr((unsigned long )buf); sg_set_page___0(sg, 0xffffea0000000000UL + (tmp >> 12), buflen, (unsigned int )((long )buf) & 4095U); return; } } extern void io_schedule(void) ; extern long schedule_timeout(long ) ; extern long schedule_timeout_uninterruptible(long ) ; extern void get_random_bytes(void * , int ) ; extern u32 random32(void) ; extern int sock_create_kern(int , int , int , struct socket ** ) ; extern void sock_release(struct socket * ) ; extern int sock_recvmsg(struct socket * , struct msghdr * , size_t , int ) ; extern int kernel_accept(struct socket * , struct socket ** , int ) ; char const *drbd_set_st_err_str(enum drbd_state_rv err ) ; extern struct crypto_tfm *crypto_alloc_base(char const * , u32 , u32 ) ; __inline static void crypto_free_tfm(struct crypto_tfm *tfm ) { { return; } } __inline static struct crypto_hash *__crypto_hash_cast(struct crypto_tfm *tfm ) { { return ((struct crypto_hash *)tfm); } } __inline static struct crypto_hash *crypto_alloc_hash(char const *alg_name , u32 type , u32 mask ) { struct crypto_tfm *tmp ; struct crypto_hash *tmp___0 ; { type = type & 4294967280U; mask = mask & 4294967280U; type = type | 8U; mask = mask | 14U; tmp = crypto_alloc_base(alg_name, type, mask); tmp___0 = __crypto_hash_cast(tmp); return (tmp___0); } } __inline static void crypto_free_hash(struct crypto_hash *tfm ) { struct crypto_tfm *tmp ; { tmp = crypto_hash_tfm(tfm); crypto_free_tfm(tmp); return; } } __inline static int crypto_hash_digest(struct hash_desc *desc , struct scatterlist *sg , unsigned int nbytes , u8 *out ) { struct hash_tfm *tmp ; int tmp___0 ; { tmp 
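/* The crypto_hash_* shims model the legacy synchronous hash API: the
   tfm comes from crypto_alloc_base() with type/mask bits adjusted
   for a hash algorithm, and digest/setkey dispatch through the
   hash_tfm function table obtained via crypto_hash_crt(), as in this
   call.  sg_set_buf() above turns a kernel virtual address into a
   page pointer plus offset ((long)buf & 4095U);
   sg_assign_page___0() BUGs if the page pointer has its two low bits
   set, since those bits of page_link carry the chain/end markers. */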
= crypto_hash_crt(desc->tfm); tmp___0 = (*(tmp->digest))(desc, sg, nbytes, out); return (tmp___0); } } __inline static int crypto_hash_setkey(struct crypto_hash *hash , u8 const *key , unsigned int keylen ) { struct hash_tfm *tmp ; int tmp___0 ; { tmp = crypto_hash_crt(hash); tmp___0 = (*(tmp->setkey))(hash, key, keylen); return (tmp___0); } } __inline static void set_capacity(struct gendisk *disk , sector_t size ) { { disk->part0.nr_sects = size; return; } } __inline static void *kmap(struct page *page ) { void *tmp ; { __might_sleep("include/linux/highmem.h", 58, 0); tmp = lowmem_page_address((struct page const *)page); return (tmp); } } __inline static void kunmap(struct page *page ) { { return; } } extern struct bio *bio_alloc_bioset(gfp_t , int , struct bio_set * ) ; __inline static struct bio *bio_alloc(gfp_t gfp_mask , unsigned int nr_iovecs ) { struct bio *tmp ; { tmp = bio_alloc_bioset(gfp_mask, (int )nr_iovecs, fs_bio_set); return (tmp); } } extern struct lc_element *lc_find(struct lru_cache * , unsigned int ) ; enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev , enum chg_state_flags f , union drbd_state mask , union drbd_state val ) ; void drbd_force_state(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val ) ; enum drbd_state_rv _drbd_request_state(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val , enum chg_state_flags f ) ; enum drbd_state_rv _conn_request_state(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val , enum chg_state_flags flags ) ; enum drbd_role conn_highest_role(struct drbd_tconn *tconn ) ; enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn ) ; unsigned int minor_count ; char const *cmdname(enum drbd_packet cmd ) ; void INFO_bm_xfer_stats(struct drbd_conf *mdev , char const *direction , struct bm_xfer_ctx *c ) ; __inline static void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c ) { { c->word_offset = c->bit_offset >> 6; return; } } unsigned int drbd_header_size(struct drbd_tconn *tconn ) ; __inline static void drbd_clear_interval(struct drbd_interval *i ) { { i->rb.__rb_parent_color = (unsigned long )(& i->rb); return; } } __inline static bool drbd_interval_empty(struct drbd_interval *i ) { { return (i->rb.__rb_parent_color == (unsigned long )(& i->rb)); } } bool drbd_insert_interval(struct rb_root *root , struct drbd_interval *this ) ; bool drbd_contains_interval(struct rb_root *root , sector_t sector , struct drbd_interval *interval ) ; void drbd_remove_interval(struct rb_root *root , struct drbd_interval *this ) ; struct drbd_interval *drbd_find_overlap(struct rb_root *root , sector_t sector , unsigned int size ) ; struct drbd_interval *drbd_next_overlap(struct drbd_interval *i , sector_t sector , unsigned int size ) ; int drbd_wait_misc(struct drbd_conf *mdev , struct drbd_interval *i ) ; __inline static unsigned int mdev_to_minor(struct drbd_conf *mdev ) { { return (mdev->minor); } } __inline static struct drbd_conf *vnr_to_mdev(struct drbd_tconn *tconn , int vnr ) { void *tmp ; { tmp = idr_find(& tconn->volumes, vnr); return ((struct drbd_conf *)tmp); } } int drbd_thread_start(struct drbd_thread *thi ) ; void _drbd_thread_stop(struct drbd_thread *thi , int restart , int wait ) ; void tl_release(struct drbd_tconn *tconn , unsigned int barrier_nr , unsigned int set_size ) ; void tl_clear(struct drbd_tconn *tconn ) ; void drbd_free_sock(struct drbd_tconn *tconn ) ; int drbd_send_protocol(struct drbd_tconn *tconn ) ; int drbd_send_uuids(struct drbd_conf *mdev ) ; int 
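/* drbd_clear_interval()/drbd_interval_empty() above use a small
   rb-tree trick: a node that is in no tree has rb.__rb_parent_color
   pointing at the node itself, so emptiness is a single pointer
   comparison instead of a separate flag. */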
drbd_send_sizes(struct drbd_conf *mdev , int trigger_reply , enum dds_flags flags ) ; int drbd_send_current_state(struct drbd_conf *mdev ) ; int drbd_send_sync_param(struct drbd_conf *mdev ) ; void drbd_send_b_ack(struct drbd_tconn *tconn , u32 barrier_nr , u32 set_size ) ; void drbd_send_ack_rp(struct drbd_conf *mdev , enum drbd_packet cmd , struct p_block_req *rp ) ; void drbd_send_ack_dp(struct drbd_conf *mdev , enum drbd_packet cmd , struct p_data *dp , int data_size ) ; int drbd_send_bitmap(struct drbd_conf *mdev ) ; void drbd_send_sr_reply(struct drbd_conf *mdev , enum drbd_state_rv retcode ) ; void conn_send_sr_reply(struct drbd_tconn *tconn , enum drbd_state_rv retcode ) ; void conn_md_sync(struct drbd_tconn *tconn ) ; void drbd_uuid_new_current(struct drbd_conf *mdev ) ; void drbd_uuid_move_history(struct drbd_conf *mdev ) ; void __drbd_uuid_set(struct drbd_conf *mdev , int idx , u64 val ) ; int drbd_bitmap_io(struct drbd_conf *mdev , int (*io_fn)(struct drbd_conf * ) , char *why , enum bm_flag flags ) ; int drbd_bmio_set_n_write(struct drbd_conf *mdev ) ; int drbd_bmio_clear_n_write(struct drbd_conf *mdev ) ; mempool_t *drbd_ee_mempool ; struct page *drbd_pp_pool ; spinlock_t drbd_pp_lock ; int drbd_pp_vacant ; sector_t drbd_new_dev_size(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , sector_t u_size , int assume_peer_has_space ) ; enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev , enum dds_flags flags ) ; void resync_after_online_grow(struct drbd_conf *mdev ) ; void drbd_reconsider_max_bio_size(struct drbd_conf *mdev ) ; enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev , enum drbd_role new_role , int force ) ; void conn_try_outdate_peer_async(struct drbd_tconn *tconn ) ; int drbd_free_peer_reqs(struct drbd_conf *mdev , struct list_head *list ) ; struct page *drbd_alloc_pages(struct drbd_conf *mdev , unsigned int number , bool retry___0 ) ; void conn_flush_workqueue(struct drbd_tconn *tconn ) ; int drbd_connected(struct drbd_conf *mdev ) ; __inline static void drbd_flush_workqueue(struct drbd_conf *mdev ) { { conn_flush_workqueue(mdev->tconn); return; } } __inline static int drbd_setsockopt___0(struct socket *sock , int level , int optname , char *optval , int optlen ) { mm_segment_t oldfs ; struct thread_info *tmp ; char *uoptval ; int err ; struct thread_info *tmp___0 ; mm_segment_t __constr_expr_0 ; struct thread_info *tmp___1 ; { tmp = current_thread_info___2(); oldfs = tmp->addr_limit; uoptval = optval; tmp___0 = current_thread_info___2(); __constr_expr_0.seg = 0xffffffffffffffffUL; tmp___0->addr_limit = __constr_expr_0; if (level == 1) { err = sock_setsockopt(sock, level, optname, uoptval, (unsigned int )optlen); } else { err = (*((sock->ops)->setsockopt))(sock, level, optname, uoptval, (unsigned int )optlen); } tmp___1 = current_thread_info___2(); tmp___1->addr_limit = oldfs; return (err); } } __inline static void drbd_tcp_cork___0(struct socket *sock ) { int val ; { val = 1; drbd_setsockopt___0(sock, 6, 3, (char *)(& val), 4); return; } } __inline static void drbd_tcp_uncork___0(struct socket *sock ) { int val ; { val = 0; drbd_setsockopt___0(sock, 6, 3, (char *)(& val), 4); return; } } __inline static void drbd_tcp_nodelay(struct socket *sock ) { int val ; { val = 1; drbd_setsockopt___0(sock, 6, 1, (char *)(& val), 4); return; } } __inline static void drbd_tcp_quickack(struct socket *sock ) { int val ; { val = 2; drbd_setsockopt___0(sock, 6, 12, (char *)(& val), 4); return; } } void drbd_bump_write_ordering(struct drbd_tconn *tconn , 
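/* drbd_setsockopt___0() temporarily raises the thread's addr_limit
   to the whole address space (the mm_segment_t dance, i.e. the
   classic set_fs(KERNEL_DS) idiom) so kernel buffers pass the
   user-pointer checks inside sock_setsockopt().  The tcp helpers
   above decode as: level 6 = SOL_TCP, optname 3 = TCP_CORK (val 1
   corks, 0 uncorks), 1 = TCP_NODELAY, 12 = TCP_QUICKACK (val 2). */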
enum write_ordering_e wo ) ; int drbd_rs_begin_io(struct drbd_conf *mdev , sector_t sector ) ; void drbd_rs_cancel_all(struct drbd_conf *mdev ) ; __inline static int drbd_peer_req_has_active_page___0(struct drbd_peer_request *peer_req ) { struct page *page ; int tmp ; struct page *tmp___0 ; { page = peer_req->pages; goto ldv_52098; ldv_52097: tmp = page_count(page); if (tmp > 1) { return (1); } else { } page = page_chain_next(page); ldv_52098: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp___0 = page_chain_next(page); __builtin_prefetch((void const *)tmp___0); if (1 != 0) { goto ldv_52097; } else { goto ldv_52099; } } else { } ldv_52099: ; return (0); } } __inline static sector_t _drbd_md_first_sector(int meta_dev_idx , struct drbd_backing_dev *bdev ) { { switch (meta_dev_idx) { case -1: ; case -3: ; return ((sector_t )(bdev->md.md_offset + (u64 )bdev->md.bm_offset)); case -2: ; default: ; return ((sector_t )bdev->md.md_offset); } } } __inline static sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev ) { sector_t s ; int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; sector_t __min1 ; sector_t __min2 ; sector_t tmp___2 ; sector_t tmp___3 ; sector_t __min1___0 ; sector_t __min2___0 ; sector_t tmp___4 ; sector_t __min1___1 ; sector_t __min2___1 ; sector_t __min1___2 ; sector_t __min2___2 ; sector_t tmp___5 ; { rcu_read_lock___2(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1824, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___2(); switch (meta_dev_idx) { case -1: ; case -3: tmp___3 = drbd_get_capacity(bdev->backing_bdev); if (tmp___3 != 0UL) { __min1 = 2251799813685248UL; tmp___2 = _drbd_md_first_sector(meta_dev_idx, bdev); __min2 = tmp___2; s = __min1 < __min2 ? __min1 : __min2; } else { s = 0UL; } goto ldv_52186; case -2: __min1___0 = 2251799813685248UL; tmp___4 = drbd_get_capacity(bdev->backing_bdev); __min2___0 = tmp___4; s = __min1___0 < __min2___0 ? __min1___0 : __min2___0; __min1___1 = s; __min2___1 = (unsigned long )(bdev->md.md_size_sect - (u32 )bdev->md.bm_offset) << 15; s = __min1___1 < __min2___1 ? __min1___1 : __min2___1; goto ldv_52186; default: __min1___2 = 8587575296UL; tmp___5 = drbd_get_capacity(bdev->backing_bdev); __min2___2 = tmp___5; s = __min1___2 < __min2___2 ? 
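/* drbd_get_max_capacity() clamps the usable size per meta-data
   layout: internal/flex-internal meta data (index -1/-3) is capped
   at the first meta-data sector and at 2251799813685248 sectors
   (2^51, 1 EiB); flex-external (-2) is additionally limited by the
   bitmap capacity, (md_size_sect - bm_offset) << 15 sectors; indexed
   external meta data (the default case here) caps at 8587575296
   sectors, slightly under 4 TiB. */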
__min1___2 : __min2___2; } ldv_52186: ; return (s); } } int drbd_send_ping(struct drbd_tconn *tconn ) ; int drbd_send_ping_ack(struct drbd_tconn *tconn ) ; __inline static void drbd_thread_stop(struct drbd_thread *thi ) { { _drbd_thread_stop(thi, 0, 1); return; } } __inline static void inc_unacked(struct drbd_conf *mdev ) { { atomic_inc(& mdev->unacked_cnt); return; } } __inline static int drbd_set_ed_uuid(struct drbd_conf *mdev , u64 val ) { int changed ; { changed = mdev->ed_uuid != val; mdev->ed_uuid = val; return (changed); } } __inline static void drbd_set_my_capacity(struct drbd_conf *mdev , sector_t size ) { { set_capacity(mdev->vdisk, size); ((mdev->this_bdev)->bd_inode)->i_size = (long long )size << 9; return; } } __inline static void drbd_generic_make_request(struct drbd_conf *mdev , int fault_type , struct bio *bio ) { unsigned int tmp ; int tmp___0 ; { if ((unsigned long )bio->bi_bdev == (unsigned long )((struct block_device *)0)) { tmp = mdev_to_minor(mdev); printk("\vdrbd%d: drbd_generic_make_request: bio->bi_bdev == NULL\n", tmp); dump_stack(); bio_endio(bio, -19); return; } else { } tmp___0 = drbd_insert_fault(mdev, (unsigned int )fault_type); if (tmp___0 != 0) { bio_endio(bio, -5); } else { generic_make_request(bio); } return; } } __inline static int vli_decode_bits(u64 *out , u64 const in ) { u64 adj ; { adj = 1ULL; if (((unsigned long long )in & 1ULL) == 0ULL) { *out = (((unsigned long long )in & 3ULL) >> 1) + adj; return (2); } else { } adj = adj + 2ULL; if (((unsigned long long )in & 3ULL) == 1ULL) { *out = (((unsigned long long )in & 7ULL) >> 2) + adj; return (3); } else { } adj = adj + 2ULL; if (((unsigned long long )in & 7ULL) == 3ULL) { *out = (((unsigned long long )in & 31ULL) >> 3) + adj; return (5); } else { } adj = adj + 4ULL; if (((unsigned long long )in & 15ULL) == 7ULL) { *out = (((unsigned long long )in & 127ULL) >> 4) + adj; return (7); } else { } adj = adj + 8ULL; if (((unsigned long long )in & 31ULL) == 15ULL) { *out = (((unsigned long long )in & 1023ULL) >> 5) + adj; return (10); } else { } adj = adj + 32ULL; if (((unsigned long long )in & 63ULL) == 31ULL) { *out = (((unsigned long long )in & 16383ULL) >> 6) + adj; return (14); } else { } adj = adj + 256ULL; if (((unsigned long long )in & 255ULL) == 63ULL) { *out = (((unsigned long long )in & 2097151ULL) >> 8) + adj; return (21); } else { } adj = adj + 8192ULL; if (((unsigned long long )in & 255ULL) == 127ULL) { *out = (((unsigned long long )in & 536870911ULL) >> 8) + adj; return (29); } else { } adj = adj + 2097152ULL; if (((unsigned long long )in & 255ULL) == 191ULL) { *out = (((unsigned long long )in & 4398046511103ULL) >> 8) + adj; return (42); } else { } adj = adj + 17179869184ULL; if (((unsigned long long )in & 255ULL) == 255ULL) { *out = ((unsigned long long )in >> 8) + adj; return (64); } else { } adj = adj + 72057594037927936ULL; __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_vli.h"), "i" (162), "i" (12UL)); ldv_52550: ; goto ldv_52550; } } __inline static void bitstream_cursor_reset(struct bitstream_cursor *cur , void *s ) { { cur->b = (u8 *)s; cur->bit = 0U; return; } } __inline static void bitstream_cursor_advance(struct bitstream_cursor *cur , unsigned int bits ) { { bits = cur->bit + bits; cur->b = cur->b + (unsigned long )(bits >> 3); cur->bit = bits & 7U; return; } } __inline static void bitstream_init(struct 
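/* vli_decode_bits() above implements DRBD's variable-length integer
   code (apparently used for run-length encoded bitmap transfer): the
   low bits of "in" select one of ten prefix codes, 2, 3, 5, 7, 10,
   14, 21, 29, 42 or 64 bits long, and the running "adj" offsets each
   code's payload so the representable ranges stack without overlap.
   Example: low bits 0b10 form the 2-bit code and decode to
   ((2 & 3) >> 1) + 1 == 2. */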
bitstream *bs , void *s , size_t len , unsigned int pad_bits ) { { bs->buf = (unsigned char *)s; bs->buf_len = len; bs->pad_bits = pad_bits; bitstream_cursor_reset(& bs->cur, (void *)bs->buf); return; } } __inline static int bitstream_get_bits(struct bitstream *bs , u64 *out , int bits ) { u64 val ; unsigned int n ; size_t __len ; void *__ret ; { if (bits > 64) { return (-22); } else { } if ((unsigned long )((long )(bs->cur.b + (unsigned long )((((bs->cur.bit + bs->pad_bits) + (unsigned int )bits) - 1U) >> 3)) - (long )bs->buf) >= bs->buf_len) { bits = (int )(((((unsigned int )bs->buf_len + ((unsigned int )((long )bs->buf) - (unsigned int )((long )bs->cur.b))) << 3U) - bs->cur.bit) - bs->pad_bits); } else { } if (bits == 0) { *out = 0ULL; return (0); } else { } val = 0ULL; n = ((bs->cur.bit + (unsigned int )bits) + 7U) >> 3; if (n != 0U) { __len = (size_t )(n - 1U); __ret = memcpy((void *)(& val), (void const *)bs->cur.b + 1U, __len); val = val << (int )(8U - bs->cur.bit); } else { } val = (u64 )((int )*(bs->cur.b) >> (int )bs->cur.bit) | val; val = (0xffffffffffffffffULL >> (64 - bits)) & val; bitstream_cursor_advance(& bs->cur, (unsigned int )bits); *out = val; return (bits); } } static int drbd_do_features(struct drbd_tconn *tconn ) ; static int drbd_do_auth(struct drbd_tconn *tconn ) ; static int drbd_disconnected(struct drbd_conf *mdev ) ; static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn , struct drbd_epoch *epoch , enum epoch_event ev ) ; static int e_end_block(struct drbd_work *w , int cancel ) ; static struct page *page_chain_del(struct page **head , int n ) { struct page *page ; struct page *tmp ; long tmp___0 ; long tmp___1 ; { tmp___0 = ldv__builtin_expect(n == 0, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (176), "i" (12UL)); ldv_52636: ; goto ldv_52636; } else { } tmp___1 = ldv__builtin_expect((unsigned long )head == (unsigned long )((struct page **)0), 0L); if (tmp___1 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (177), "i" (12UL)); ldv_52637: ; goto ldv_52637; } else { } page = *head; if ((unsigned long )page == (unsigned long )((struct page *)0)) { return (0); } else { } goto ldv_52640; ldv_52639: tmp = page_chain_next(page); n = n - 1; if (n == 0) { goto ldv_52638; } else { } if ((unsigned long )tmp == (unsigned long )((struct page *)0)) { return (0); } else { } page = tmp; ldv_52640: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { goto ldv_52639; } else { } ldv_52638: page->ldv_14746.private = 0UL; page = *head; *head = tmp; return (page); } } static struct page *page_chain_tail(struct page *page , int *len ) { struct page *tmp ; int i ; { i = 1; goto ldv_52648; ldv_52647: i = i + 1; page = tmp; ldv_52648: tmp = page_chain_next(page); if ((unsigned long )tmp != (unsigned long )((struct page *)0)) { goto ldv_52647; } else { } if ((unsigned long )len != (unsigned long )((int *)0)) { 
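/* Pages travel through the receiver in intrusive chains linked via
   page->private (ldv_14746.private after CIL's union flattening):
   page_chain_del() unlinks the first n pages of *head,
   page_chain_tail() walks to the last page and can report the chain
   length through *len, and page_chain_free()/page_chain_add() below
   drop or splice whole chains. */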
*len = i; } else { } return (page); } } static int page_chain_free(struct page *page ) { struct page *tmp ; int i ; { i = 0; goto ldv_52657; ldv_52656: put_page(page); i = i + 1; page = tmp; ldv_52657: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp = page_chain_next(page); if (1 != 0) { goto ldv_52656; } else { goto ldv_52658; } } else { } ldv_52658: ; return (i); } } static void page_chain_add(struct page **head , struct page *chain_first , struct page *chain_last ) { struct page *tmp ; long tmp___0 ; { tmp = page_chain_tail(chain_first, 0); tmp___0 = ldv__builtin_expect((unsigned long )tmp != (unsigned long )chain_last, 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (233), "i" (12UL)); ldv_52665: ; goto ldv_52665; } else { } chain_last->ldv_14746.private = (unsigned long )*head; *head = chain_first; return; } } static struct page *__drbd_alloc_pages(struct drbd_conf *mdev , unsigned int number ) { struct page *page ; struct page *tmp ; unsigned int i ; { page = 0; tmp = 0; i = 0U; if ((unsigned int )drbd_pp_vacant >= number) { spin_lock(& drbd_pp_lock); page = page_chain_del(& drbd_pp_pool, (int )number); if ((unsigned long )page != (unsigned long )((struct page *)0)) { drbd_pp_vacant = (int )((unsigned int )drbd_pp_vacant - number); } else { } spin_unlock(& drbd_pp_lock); if ((unsigned long )page != (unsigned long )((struct page *)0)) { return (page); } else { } } else { } i = 0U; goto ldv_52675; ldv_52674: tmp = alloc_pages(514U, 0U); if ((unsigned long )tmp == (unsigned long )((struct page *)0)) { goto ldv_52673; } else { } tmp->ldv_14746.private = (unsigned long )page; page = tmp; i = i + 1U; ldv_52675: ; if (i < number) { goto ldv_52674; } else { } ldv_52673: ; if (i == number) { return (page); } else { } if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp = page_chain_tail(page, 0); spin_lock(& drbd_pp_lock); page_chain_add(& drbd_pp_pool, page, tmp); drbd_pp_vacant = (int )((unsigned int )drbd_pp_vacant + i); spin_unlock(& drbd_pp_lock); } else { } return (0); } } static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev , struct list_head *to_be_freed ) { struct drbd_peer_request *peer_req ; struct list_head *le ; struct list_head *tle ; struct list_head const *__mptr ; int tmp ; { le = mdev->net_ee.next; tle = le->next; goto ldv_52687; ldv_52686: __mptr = (struct list_head const *)le; peer_req = (struct drbd_peer_request *)__mptr; tmp = drbd_peer_req_has_active_page___0(peer_req); if (tmp != 0) { goto ldv_52685; } else { } list_move(le, to_be_freed); le = tle; tle = le->next; ldv_52687: ; if ((unsigned long )(& mdev->net_ee) != (unsigned long )le) { goto ldv_52686; } else { } ldv_52685: ; return; } } static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev ) { struct list_head reclaimed ; struct drbd_peer_request *peer_req ; struct drbd_peer_request *t ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { reclaimed.next = & reclaimed; reclaimed.prev = & reclaimed; spin_lock_irq(& (mdev->tconn)->req_lock); reclaim_finished_net_peer_reqs(mdev, & reclaimed); spin_unlock_irq(& (mdev->tconn)->req_lock); __mptr = (struct list_head const 
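/* __drbd_alloc_pages() prefers the preallocated drbd_pp_pool (under
   drbd_pp_lock) and falls back to alloc_pages() with flags 514U,
   presumably __GFP_HIGHMEM | __GFP_NOWARN; a partially built chain
   is returned to the pool on failure instead of being freed.
   reclaim_finished_net_peer_reqs() collects net_ee entries whose
   pages are no longer referenced elsewhere (page_count() <= 1) so
   their buffers can be recycled here. */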
*)reclaimed.next; peer_req = (struct drbd_peer_request *)__mptr; __mptr___0 = (struct list_head const *)peer_req->w.list.next; t = (struct drbd_peer_request *)__mptr___0; goto ldv_52701; ldv_52700: __drbd_free_peer_req(mdev, peer_req, 1); peer_req = t; __mptr___1 = (struct list_head const *)t->w.list.next; t = (struct drbd_peer_request *)__mptr___1; ldv_52701: ; if ((unsigned long )(& peer_req->w.list) != (unsigned long )(& reclaimed)) { goto ldv_52700; } else { } return; } } struct page *drbd_alloc_pages(struct drbd_conf *mdev , unsigned int number , bool retry___0 ) { struct page *page ; struct net_conf *nc ; wait_queue_t wait ; struct task_struct *tmp ; int mxb ; struct net_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct task_struct *tmp___4 ; int tmp___5 ; { page = 0; tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 342, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; mxb = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? (int )nc->max_buffers : 1000000; rcu_read_unlock___2(); tmp___2 = atomic_read((atomic_t const *)(& mdev->pp_in_use)); if (tmp___2 < mxb) { page = __drbd_alloc_pages(mdev, number); } else { } goto ldv_52717; ldv_52716: prepare_to_wait(& drbd_pp_wait, & wait, 1); drbd_kick_lo_and_reclaim_net(mdev); tmp___3 = atomic_read((atomic_t const *)(& mdev->pp_in_use)); if (tmp___3 < mxb) { page = __drbd_alloc_pages(mdev, number); if ((unsigned long )page != (unsigned long )((struct page *)0)) { goto ldv_52715; } else { } } else { } if (! retry___0) { goto ldv_52715; } else { } tmp___4 = get_current(); tmp___5 = signal_pending(tmp___4); if (tmp___5 != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_alloc_pages interrupted!\n"); goto ldv_52715; } else { } schedule(); ldv_52717: ; if ((unsigned long )page == (unsigned long )((struct page *)0)) { goto ldv_52716; } else { } ldv_52715: finish_wait(& drbd_pp_wait, & wait); if ((unsigned long )page != (unsigned long )((struct page *)0)) { atomic_add((int )number, & mdev->pp_in_use); } else { } return (page); } } static void drbd_free_pages(struct drbd_conf *mdev , struct page *page , int is_net ) { atomic_t *a ; int i ; struct page *tmp ; { a = is_net != 0 ? & mdev->pp_in_use_by_net : & mdev->pp_in_use; if ((unsigned long )page == (unsigned long )((struct page *)0)) { return; } else { } if ((unsigned long )drbd_pp_vacant > (unsigned long )minor_count * 256UL) { i = page_chain_free(page); } else { tmp = page_chain_tail(page, & i); spin_lock(& drbd_pp_lock); page_chain_add(& drbd_pp_pool, page, tmp); drbd_pp_vacant = drbd_pp_vacant + i; spin_unlock(& drbd_pp_lock); } i = atomic_sub_return(i, a); if (i < 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION FAILED: %s: %d < 0\n", is_net != 0 ? 
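/* drbd_alloc_pages() throttles the receiver against nc->max_buffers:
   while pp_in_use is at the limit it reclaims finished net requests
   and sleeps on drbd_pp_wait, giving up on a signal or when retry is
   false.  drbd_free_pages() returns chains to the global pool only
   until minor_count * 256 pages are vacant, frees the rest, and the
   assertion here fires if the pp_in_use / pp_in_use_by_net
   accounting ever goes negative. */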
(char *)"pp_in_use_by_net" : (char *)"pp_in_use", i); } else { } __wake_up(& drbd_pp_wait, 3U, 1, 0); return; } } struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_conf *mdev , u64 id , sector_t sector , unsigned int data_size , gfp_t gfp_mask ) { struct drbd_peer_request *peer_req ; struct page *page ; unsigned int nr_pages ; int tmp ; void *tmp___0 ; { page = 0; nr_pages = (unsigned int )(((unsigned long )data_size + 4095UL) >> 12); tmp = drbd_insert_fault(mdev, 8U); if (tmp != 0) { return (0); } else { } tmp___0 = mempool_alloc(drbd_ee_mempool, gfp_mask & 4294967293U); peer_req = (struct drbd_peer_request *)tmp___0; if ((unsigned long )peer_req == (unsigned long )((struct drbd_peer_request *)0)) { if ((gfp_mask & 512U) == 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s: allocation failed\n", "drbd_alloc_peer_req"); } else { } return (0); } else { } if (data_size != 0U) { page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & 16U) != 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { goto fail; } else { } } else { } drbd_clear_interval(& peer_req->i); peer_req->i.size = data_size; peer_req->i.sector = sector; peer_req->i.local = 0; peer_req->i.waiting = 0; peer_req->epoch = 0; peer_req->w.ldv_49807.mdev = mdev; peer_req->pages = page; atomic_set(& peer_req->pending_bios, 0); peer_req->flags = 0UL; peer_req->ldv_50726.block_id = id; return (peer_req); fail: mempool_free((void *)peer_req, drbd_ee_mempool); return (0); } } void __drbd_free_peer_req(struct drbd_conf *mdev , struct drbd_peer_request *peer_req , int is_net ) { int tmp ; bool tmp___0 ; int tmp___1 ; { if ((peer_req->flags & 16UL) != 0UL) { kfree((void const *)peer_req->ldv_50726.digest); } else { } drbd_free_pages(mdev, peer_req->pages, is_net); tmp = atomic_read((atomic_t const *)(& peer_req->pending_bios)); if (tmp != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( atomic_read(&peer_req->pending_bios) == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 474); } else { } tmp___0 = drbd_interval_empty(& peer_req->i); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( drbd_interval_empty(&peer_req->i) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 475); } else { } mempool_free((void *)peer_req, drbd_ee_mempool); return; } } int drbd_free_peer_reqs(struct drbd_conf *mdev , struct list_head *list ) { struct list_head work_list ; struct drbd_peer_request *peer_req ; struct drbd_peer_request *t ; int count ; int is_net ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { work_list.next = & work_list; work_list.prev = & work_list; count = 0; is_net = (unsigned long )(& mdev->net_ee) == (unsigned long )list; spin_lock_irq(& (mdev->tconn)->req_lock); list_splice_init(list, & work_list); spin_unlock_irq(& (mdev->tconn)->req_lock); __mptr = (struct list_head const *)work_list.next; peer_req = (struct drbd_peer_request *)__mptr; __mptr___0 = (struct list_head const *)peer_req->w.list.next; t = (struct drbd_peer_request *)__mptr___0; goto ldv_52759; ldv_52758: 
__drbd_free_peer_req(mdev, peer_req, is_net); count = count + 1; peer_req = t; __mptr___1 = (struct list_head const *)t->w.list.next; t = (struct drbd_peer_request *)__mptr___1; ldv_52759: ; if ((unsigned long )(& peer_req->w.list) != (unsigned long )(& work_list)) { goto ldv_52758; } else { } return (count); } } static int drbd_finish_peer_reqs(struct drbd_conf *mdev ) { struct list_head work_list ; struct list_head reclaimed ; struct drbd_peer_request *peer_req ; struct drbd_peer_request *t ; int err ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; int err2 ; struct list_head const *__mptr___4 ; { work_list.next = & work_list; work_list.prev = & work_list; reclaimed.next = & reclaimed; reclaimed.prev = & reclaimed; err = 0; spin_lock_irq(& (mdev->tconn)->req_lock); reclaim_finished_net_peer_reqs(mdev, & reclaimed); list_splice_init(& mdev->done_ee, & work_list); spin_unlock_irq(& (mdev->tconn)->req_lock); __mptr = (struct list_head const *)reclaimed.next; peer_req = (struct drbd_peer_request *)__mptr; __mptr___0 = (struct list_head const *)peer_req->w.list.next; t = (struct drbd_peer_request *)__mptr___0; goto ldv_52776; ldv_52775: __drbd_free_peer_req(mdev, peer_req, 1); peer_req = t; __mptr___1 = (struct list_head const *)t->w.list.next; t = (struct drbd_peer_request *)__mptr___1; ldv_52776: ; if ((unsigned long )(& peer_req->w.list) != (unsigned long )(& reclaimed)) { goto ldv_52775; } else { } __mptr___2 = (struct list_head const *)work_list.next; peer_req = (struct drbd_peer_request *)__mptr___2; __mptr___3 = (struct list_head const *)peer_req->w.list.next; t = (struct drbd_peer_request *)__mptr___3; goto ldv_52786; ldv_52785: err2 = (*(peer_req->w.cb))(& peer_req->w, err != 0); if (err == 0) { err = err2; } else { } __drbd_free_peer_req(mdev, peer_req, 0); peer_req = t; __mptr___4 = (struct list_head const *)t->w.list.next; t = (struct drbd_peer_request *)__mptr___4; ldv_52786: ; if ((unsigned long )(& peer_req->w.list) != (unsigned long )(& work_list)) { goto ldv_52785; } else { } __wake_up(& mdev->ee_wait, 3U, 1, 0); return (err); } } static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev , struct list_head *head ) { wait_queue_t wait ; struct task_struct *tmp ; int tmp___0 ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; goto ldv_52794; ldv_52793: prepare_to_wait(& mdev->ee_wait, & wait, 2); spin_unlock_irq(& (mdev->tconn)->req_lock); io_schedule(); finish_wait(& mdev->ee_wait, & wait); spin_lock_irq(& (mdev->tconn)->req_lock); ldv_52794: tmp___0 = list_empty((struct list_head const *)head); if (tmp___0 == 0) { goto ldv_52793; } else { } return; } } static void drbd_wait_ee_list_empty(struct drbd_conf *mdev , struct list_head *head ) { { spin_lock_irq(& (mdev->tconn)->req_lock); _drbd_wait_ee_list_empty(mdev, head); spin_unlock_irq(& (mdev->tconn)->req_lock); return; } } static int drbd_recv_short(struct socket *sock , void *buf , size_t size , int flags ) { mm_segment_t oldfs ; struct kvec iov ; struct msghdr msg ; int rv ; struct thread_info *tmp ; struct thread_info *tmp___0 ; mm_segment_t __constr_expr_0 ; struct thread_info *tmp___1 ; { iov.iov_base = buf; iov.iov_len = size; msg.msg_name = 0; msg.msg_namelen = 0; msg.msg_iov = (struct iovec *)(& iov); msg.msg_iovlen = 1UL; msg.msg_control = 0; 
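/* drbd_recv_short(): the address limit is widened (the old set_fs(KERNEL_DS) pattern, spelled out here via addr_limit) so sock_recvmsg() may fill a kernel buffer, and restored before returning. */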
msg.msg_controllen = 0UL; msg.msg_flags = flags != 0 ? (unsigned int )flags : 16640U; tmp = current_thread_info___2(); oldfs = tmp->addr_limit; tmp___0 = current_thread_info___2(); __constr_expr_0.seg = 0xffffffffffffffffUL; tmp___0->addr_limit = __constr_expr_0; rv = sock_recvmsg(sock, & msg, size, (int )msg.msg_flags); tmp___1 = current_thread_info___2(); tmp___1->addr_limit = oldfs; return (rv); } } static int drbd_recv(struct drbd_tconn *tconn , void *buf , size_t size ) { int rv ; long t ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long __ret ; wait_queue_t __wait ; struct task_struct *tmp___1 ; int tmp___2 ; union drbd_state val ; union drbd_state mask ; { rv = drbd_recv_short(tconn->data.socket, buf, size, 0); if (rv < 0) { if (rv == -104) { printk("\016d-con %s: sock was reset by peer\n", tconn->name); } else if (rv != -512) { printk("\vd-con %s: sock_recvmsg returned %d\n", tconn->name, rv); } else { } } else if (rv == 0) { tmp___2 = constant_test_bit(12U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___2 != 0) { rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 594, "suspicious rcu_dereference_check() usage"); } else { } } else { } t = (long )((_________p1->ping_timeo * 250U) / 10U); rcu_read_unlock___2(); __ret = t; if ((unsigned int )tconn->cstate > 8U) { tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_52824: prepare_to_wait(& tconn->ping_wait, & __wait, 2); if ((unsigned int )tconn->cstate <= 8U) { goto ldv_52823; } else { } __ret = schedule_timeout(__ret); if (__ret == 0L) { goto ldv_52823; } else { } goto ldv_52824; ldv_52823: finish_wait(& tconn->ping_wait, & __wait); } else { } t = __ret; if (t != 0L) { goto out; } else { } } else { } printk("\016d-con %s: sock was shut down by peer\n", tconn->name); } else { } if ((size_t )rv != size) { val.i = 0U; val.ldv_40024.conn = 4U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { } out: ; return (rv); } } static int drbd_recv_all(struct drbd_tconn *tconn , void *buf , size_t size ) { int err ; { err = drbd_recv(tconn, buf, size); if ((size_t )err != size) { if (err >= 0) { err = -5; } else { } } else { err = 0; } return (err); } } static int drbd_recv_all_warn(struct drbd_tconn *tconn , void *buf , size_t size ) { int err ; struct task_struct *tmp ; int tmp___0 ; { err = drbd_recv_all(tconn, buf, size); if (err != 0) { tmp = get_current(); tmp___0 = signal_pending(tmp); if (tmp___0 == 0) { printk("\fd-con %s: short read (expected size %d)\n", tconn->name, (int )size); } else { } } else { } return (err); } } static void drbd_setbufsize(struct socket *sock , unsigned int snd , unsigned int rcv ) { { if (snd != 0U) { (sock->sk)->sk_sndbuf = (int )snd; (sock->sk)->sk_userlocks = (unsigned char )((unsigned int )(sock->sk)->sk_userlocks | 1U); } else { } if (rcv != 0U) { (sock->sk)->sk_rcvbuf = (int )rcv; (sock->sk)->sk_userlocks = (unsigned char )((unsigned int )(sock->sk)->sk_userlocks | 2U);
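/* sk_userlocks bits 1 (SOCK_SNDBUF_LOCK) and 2 (SOCK_RCVBUF_LOCK) pin the explicitly configured buffer sizes against kernel auto-tuning. */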
} else { } return; } } static struct socket *drbd_try_connect(struct drbd_tconn *tconn ) { char const *what ; struct socket *sock ; struct sockaddr_in6 src_in6 ; struct sockaddr_in6 peer_in6 ; struct net_conf *nc ; int err ; int peer_addr_len ; int my_addr_len ; int sndbuf_size ; int rcvbuf_size ; int connect_int ; int disconnect_on_error ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int __min1 ; int __min2 ; size_t __len ; void *__ret ; int __min1___0 ; int __min2___0 ; size_t __len___0 ; void *__ret___0 ; long tmp___1 ; union drbd_state val ; union drbd_state mask ; { disconnect_on_error = 1; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 666, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((unsigned long )nc == (unsigned long )((struct net_conf *)0)) { rcu_read_unlock___2(); return (0); } else { } sndbuf_size = (int )nc->sndbuf_size; rcvbuf_size = (int )nc->rcvbuf_size; connect_int = (int )nc->connect_int; rcu_read_unlock___2(); __min1 = tconn->my_addr_len; __min2 = 28; my_addr_len = __min1 < __min2 ? __min1 : __min2; __len = (size_t )my_addr_len; __ret = memcpy((void *)(& src_in6), (void const *)(& tconn->my_addr), __len); if ((unsigned int )((struct sockaddr *)(& tconn->my_addr))->sa_family == 10U) { src_in6.sin6_port = 0U; } else { ((struct sockaddr_in *)(& src_in6))->sin_port = 0U; } __min1___0 = tconn->peer_addr_len; __min2___0 = 28; peer_addr_len = __min1___0 < __min2___0 ? 
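/* min(peer_addr_len, 28): 28 is sizeof(struct sockaddr_in6), the largest address the copy below may fill. */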
__min1___0 : __min2___0; __len___0 = (size_t )peer_addr_len; __ret___0 = memcpy((void *)(& peer_in6), (void const *)(& tconn->peer_addr), __len___0); what = "sock_create_kern"; err = sock_create_kern((int )((struct sockaddr *)(& src_in6))->sa_family, 1, 6, & sock); if (err < 0) { sock = 0; goto out; } else { } tmp___1 = (long )(connect_int * 250); (sock->sk)->sk_sndtimeo = tmp___1; (sock->sk)->sk_rcvtimeo = tmp___1; drbd_setbufsize(sock, (unsigned int )sndbuf_size, (unsigned int )rcvbuf_size); what = "bind before connect"; err = (*((sock->ops)->bind))(sock, (struct sockaddr *)(& src_in6), my_addr_len); if (err < 0) { goto out; } else { } disconnect_on_error = 0; what = "connect"; err = (*((sock->ops)->connect))(sock, (struct sockaddr *)(& peer_in6), peer_addr_len, 0); out: ; if (err < 0) { if ((unsigned long )sock != (unsigned long )((struct socket *)0)) { sock_release(sock); sock = 0; } else { } switch (- err) { case 110: ; case 11: ; case 115: ; case 4: ; case 512: ; case 111: ; case 101: ; case 112: ; case 113: disconnect_on_error = 0; goto ldv_52888; default: printk("\vd-con %s: %s failed, err = %d\n", tconn->name, what, err); } ldv_52888: ; if (disconnect_on_error != 0) { val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { } } else { } return (sock); } } static void drbd_incoming_connection(struct sock *sk ) { struct accept_wait_data *ad ; void (*state_change)(struct sock * ) ; { ad = (struct accept_wait_data *)sk->sk_user_data; state_change = ad->original_sk_state_change; if ((unsigned int )((unsigned char )sk->__sk_common.skc_state) == 1U) { complete(& ad->door_bell); } else { } (*state_change)(sk); return; } } static int prepare_listen_socket(struct drbd_tconn *tconn , struct accept_wait_data *ad ) { int err ; int sndbuf_size ; int rcvbuf_size ; int my_addr_len ; struct sockaddr_in6 my_addr ; struct socket *s_listen ; struct net_conf *nc ; char const *what ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int __min1 ; int __min2 ; size_t __len ; void *__ret ; union drbd_state val ; union drbd_state mask ; { rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 770, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((unsigned long )nc == (unsigned long )((struct net_conf *)0)) { rcu_read_unlock___2(); return (-5); } else { } sndbuf_size = (int )nc->sndbuf_size; rcvbuf_size = (int )nc->rcvbuf_size; rcu_read_unlock___2(); __min1 = tconn->my_addr_len; __min2 = 28; my_addr_len = __min1 < __min2 ? 
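/* Same clamp as in drbd_try_connect(): never copy more than sizeof(struct sockaddr_in6) bytes of the configured address. */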
__min1 : __min2; __len = (size_t )my_addr_len; __ret = memcpy((void *)(& my_addr), (void const *)(& tconn->my_addr), __len); what = "sock_create_kern"; err = sock_create_kern((int )((struct sockaddr *)(& my_addr))->sa_family, 1, 6, & s_listen); if (err != 0) { s_listen = 0; goto out; } else { } (s_listen->sk)->__sk_common.skc_reuse = 1U; drbd_setbufsize(s_listen, (unsigned int )sndbuf_size, (unsigned int )rcvbuf_size); what = "bind before listen"; err = (*((s_listen->ops)->bind))(s_listen, (struct sockaddr *)(& my_addr), my_addr_len); if (err < 0) { goto out; } else { } ad->s_listen = s_listen; _raw_write_lock_bh(& (s_listen->sk)->sk_callback_lock); ad->original_sk_state_change = (s_listen->sk)->sk_state_change; (s_listen->sk)->sk_state_change = & drbd_incoming_connection; (s_listen->sk)->sk_user_data = (void *)ad; _raw_write_unlock_bh(& (s_listen->sk)->sk_callback_lock); what = "listen"; err = (*((s_listen->ops)->listen))(s_listen, 5); if (err < 0) { goto out; } else { } return (0); out: ; if ((unsigned long )s_listen != (unsigned long )((struct socket *)0)) { sock_release(s_listen); } else { } if (err < 0) { if ((err != -11 && err != -4) && err != -512) { printk("\vd-con %s: %s failed, err = %d\n", tconn->name, what, err); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { } } else { } return (-5); } } static void unregister_state_change(struct sock *sk , struct accept_wait_data *ad ) { { _raw_write_lock_bh(& sk->sk_callback_lock); sk->sk_state_change = ad->original_sk_state_change; sk->sk_user_data = 0; _raw_write_unlock_bh(& sk->sk_callback_lock); return; } } static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn , struct accept_wait_data *ad ) { int timeo ; int connect_int ; int err ; struct socket *s_estab ; struct net_conf *nc ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; u32 tmp___1 ; long tmp___2 ; union drbd_state val ; union drbd_state mask ; { err = 0; s_estab = 0; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 839, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((unsigned long )nc == (unsigned long )((struct net_conf *)0)) { rcu_read_unlock___2(); return (0); } else { } connect_int = (int )nc->connect_int; rcu_read_unlock___2(); timeo = connect_int * 250; tmp___1 = random32(); timeo = ((int )tmp___1 & 1 ? 
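/* Jitter the connect timeout by +/- one seventh, decided by a random bit, presumably so the two peers do not keep retrying in lockstep. */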
timeo / 7 : - timeo / 7) + timeo; tmp___2 = wait_for_completion_interruptible_timeout(& ad->door_bell, (unsigned long )timeo); err = (int )tmp___2; if (err <= 0) { return (0); } else { } err = kernel_accept(ad->s_listen, & s_estab, 0); if (err < 0) { if ((err != -11 && err != -4) && err != -512) { printk("\vd-con %s: accept failed, err = %d\n", tconn->name, err); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { } } else { } if ((unsigned long )s_estab != (unsigned long )((struct socket *)0)) { unregister_state_change(s_estab->sk, ad); } else { } return (s_estab); } } static int decode_header(struct drbd_tconn *tconn , void *header , struct packet_info *pi ) ; static int send_first_packet(struct drbd_tconn *tconn , struct drbd_socket *sock , enum drbd_packet cmd ) { void *tmp ; int tmp___0 ; { tmp = conn_prepare_command(tconn, sock); if ((unsigned long )tmp == (unsigned long )((void *)0)) { return (-5); } else { } tmp___0 = conn_send_command(tconn, sock, cmd, 0U, 0, 0U); return (tmp___0); } } static int receive_first_packet(struct drbd_tconn *tconn , struct socket *sock ) { unsigned int header_size ; unsigned int tmp ; struct packet_info pi ; int err ; { tmp = drbd_header_size(tconn); header_size = tmp; err = drbd_recv_short(sock, tconn->data.rbuf, (size_t )header_size, 0); if ((unsigned int )err != header_size) { if (err >= 0) { err = -5; } else { } return (err); } else { } err = decode_header(tconn, tconn->data.rbuf, & pi); if (err != 0) { return (err); } else { } return ((int )pi.cmd); } } static int drbd_socket_okay(struct socket **sock ) { int rr ; char tb[4U] ; { if ((unsigned long )*sock == (unsigned long )((struct socket *)0)) { return (0); } else { } rr = drbd_recv_short(*sock, (void *)(& tb), 4UL, 66); if (rr > 0 || rr == -11) { return (1); } else { sock_release(*sock); *sock = 0; return (0); } } } int drbd_connected(struct drbd_conf *mdev ) { int err ; { atomic_set(& mdev->packet_seq, 0); mdev->peer_seq = 0U; mdev->state_mutex = (mdev->tconn)->agreed_pro_version <= 99 ? 
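/* Peers speaking protocol < 100 cannot express per-volume state changes, so every volume serializes on the connection-wide cstate_mutex; newer peers use the per-device own_state_mutex. */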
& (mdev->tconn)->cstate_mutex : & mdev->own_state_mutex; err = drbd_send_sync_param(mdev); if (err == 0) { err = drbd_send_sizes(mdev, 0, 0); } else { } if (err == 0) { err = drbd_send_uuids(mdev); } else { } if (err == 0) { err = drbd_send_current_state(mdev); } else { } clear_bit(2, (unsigned long volatile *)(& mdev->flags)); clear_bit(16, (unsigned long volatile *)(& mdev->flags)); mod_timer(& mdev->request_timer, (unsigned long )jiffies + 250UL); return (err); } } static int conn_connect(struct drbd_tconn *tconn ) { struct drbd_socket sock ; struct drbd_socket msock ; struct drbd_conf *mdev ; struct net_conf *nc ; int vnr ; int timeout ; int h ; int ok ; bool discard_my_data ; enum drbd_state_rv rv ; struct accept_wait_data ad ; union drbd_state val ; union drbd_state mask ; enum drbd_state_rv tmp ; struct lock_class_key __key ; struct lock_class_key __key___0 ; int tmp___0 ; struct socket *s ; struct net_conf *_________p1 ; bool __warned ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int fp ; int tmp___4 ; u32 tmp___5 ; struct task_struct *tmp___6 ; enum drbd_thread_state tmp___7 ; struct task_struct *tmp___8 ; int tmp___9 ; int tmp___10 ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___11 ; int tmp___12 ; long tmp___13 ; int tmp___14 ; int tmp___15 ; void *tmp___16 ; void *tmp___17 ; union drbd_state val___0 ; union drbd_state mask___0 ; { init_completion(& ad.door_bell); ad.tconn = tconn; ad.s_listen = 0; ad.door_bell = ad.door_bell; ad.original_sk_state_change = 0; clear_bit(12, (unsigned long volatile *)(& tconn->flags)); val.i = 0U; val.ldv_40024.conn = 8U; mask.i = 0U; mask.ldv_40024.conn = 31U; tmp = conn_request_state(tconn, mask, val, CS_VERBOSE); if ((int )tmp <= 0) { return (-2); } else { } __mutex_init(& sock.mutex, "&sock.mutex", & __key); sock.sbuf = tconn->data.sbuf; sock.rbuf = tconn->data.rbuf; sock.socket = 0; __mutex_init(& msock.mutex, "&msock.mutex", & __key___0); msock.sbuf = tconn->meta.sbuf; msock.rbuf = tconn->meta.rbuf; msock.socket = 0; tconn->agreed_pro_version = 80; tmp___0 = prepare_listen_socket(tconn, & ad); if (tmp___0 != 0) { return (0); } else { } ldv_53011: s = drbd_try_connect(tconn); if ((unsigned long )s != (unsigned long )((struct socket *)0)) { if ((unsigned long )sock.socket == (unsigned long )((struct socket *)0)) { sock.socket = s; send_first_packet(tconn, & sock, P_INITIAL_DATA); } else if ((unsigned long )msock.socket == (unsigned long )((struct socket *)0)) { clear_bit(1, (unsigned long volatile *)(& tconn->flags)); msock.socket = s; send_first_packet(tconn, & msock, P_INITIAL_META); } else { printk("\vd-con %s: Logic error in conn_connect()\n", tconn->name); goto out_release_sockets; } } else { } if ((unsigned long )sock.socket != (unsigned long )((struct socket *)0) && (unsigned long )msock.socket != (unsigned long )((struct socket *)0)) { rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! 
__warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1004, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; timeout = (int )((nc->ping_timeo * 250U) / 10U); rcu_read_unlock___2(); schedule_timeout_interruptible((long )timeout); ok = drbd_socket_okay(& sock.socket); tmp___3 = drbd_socket_okay(& msock.socket); ok = tmp___3 != 0 && ok != 0; if (ok != 0) { goto ldv_53003; } else { } } else { } retry: s = drbd_wait_for_connect(tconn, & ad); if ((unsigned long )s != (unsigned long )((struct socket *)0)) { tmp___4 = receive_first_packet(tconn, s); fp = tmp___4; drbd_socket_okay(& sock.socket); drbd_socket_okay(& msock.socket); switch (fp) { case 65522: ; if ((unsigned long )sock.socket != (unsigned long )((struct socket *)0)) { printk("\fd-con %s: initial packet S crossed\n", tconn->name); sock_release(sock.socket); sock.socket = s; goto randomize; } else { } sock.socket = s; goto ldv_53008; case 65521: set_bit(1U, (unsigned long volatile *)(& tconn->flags)); if ((unsigned long )msock.socket != (unsigned long )((struct socket *)0)) { printk("\fd-con %s: initial packet M crossed\n", tconn->name); sock_release(msock.socket); msock.socket = s; goto randomize; } else { } msock.socket = s; goto ldv_53008; default: printk("\fd-con %s: Error receiving initial packet\n", tconn->name); sock_release(s); randomize: tmp___5 = random32(); if ((int )tmp___5 & 1) { goto retry; } else { } } ldv_53008: ; } else { } if ((unsigned int )tconn->cstate <= 1U) { goto out_release_sockets; } else { } tmp___8 = get_current(); tmp___9 = signal_pending(tmp___8); if (tmp___9 != 0) { tmp___6 = get_current(); flush_signals(tmp___6); __asm__ volatile ("": : : "memory"); tmp___7 = get_t_state(& tconn->receiver); if ((unsigned int )tmp___7 == 2U) { goto out_release_sockets; } else { } } else { } ok = drbd_socket_okay(& sock.socket); tmp___10 = drbd_socket_okay(& msock.socket); ok = tmp___10 != 0 && ok != 0; if (ok == 0) { goto ldv_53011; } else { } ldv_53003: ; if ((unsigned long )ad.s_listen != (unsigned long )((struct socket *)0)) { sock_release(ad.s_listen); } else { } ((sock.socket)->sk)->__sk_common.skc_reuse = 1U; ((msock.socket)->sk)->__sk_common.skc_reuse = 1U; ((sock.socket)->sk)->sk_allocation = 16U; ((msock.socket)->sk)->sk_allocation = 16U; ((sock.socket)->sk)->sk_priority = 4U; ((msock.socket)->sk)->sk_priority = 6U; rcu_read_lock___2(); _________p1___0 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___11 = debug_lockdep_rcu_enabled(); if (tmp___11 != 0 && ! 
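/* Second checked dereference of net_conf: timeouts and discard_my_data are sampled under RCU before the sockets are configured. */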
__warned___0) { tmp___12 = rcu_read_lock_held(); if (tmp___12 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1080, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1___0; tmp___13 = (long )((nc->ping_timeo * 1000U) / 10U); ((sock.socket)->sk)->sk_rcvtimeo = tmp___13; ((sock.socket)->sk)->sk_sndtimeo = tmp___13; ((msock.socket)->sk)->sk_rcvtimeo = (long )(nc->ping_int * 250U); timeout = (int )((nc->timeout * 250U) / 10U); discard_my_data = (int )((signed char )nc->discard_my_data) != 0; rcu_read_unlock___2(); ((msock.socket)->sk)->sk_sndtimeo = (long )timeout; drbd_tcp_nodelay(sock.socket); drbd_tcp_nodelay(msock.socket); tconn->data.socket = sock.socket; tconn->meta.socket = msock.socket; tconn->last_received = jiffies; h = drbd_do_features(tconn); if (h <= 0) { return (h); } else { } if ((unsigned long )tconn->cram_hmac_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___14 = drbd_do_auth(tconn); switch (tmp___14) { case -1: printk("\vd-con %s: Authentication of peer failed\n", tconn->name); return (-1); case 0: printk("\vd-con %s: Authentication of peer failed, trying again.\n", tconn->name); return (0); } } else { } ((tconn->data.socket)->sk)->sk_sndtimeo = (long )timeout; ((tconn->data.socket)->sk)->sk_rcvtimeo = 9223372036854775807L; tmp___15 = drbd_send_protocol(tconn); if (tmp___15 == -95) { return (-1); } else { } set_bit(10U, (unsigned long volatile *)(& tconn->flags)); rcu_read_lock___2(); vnr = 0; tmp___16 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___16; goto ldv_53018; ldv_53017: kref_get(& mdev->kref); rcu_read_unlock___2(); if ((int )discard_my_data) { set_bit(21U, (unsigned long volatile *)(& mdev->flags)); } else { clear_bit(21, (unsigned long volatile *)(& mdev->flags)); } drbd_connected(mdev); kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___2(); vnr = vnr + 1; tmp___17 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___17; ldv_53018: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53017; } else { } rcu_read_unlock___2(); val___0.i = 0U; val___0.ldv_40024.conn = 9U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; rv = conn_request_state(tconn, mask___0, val___0, CS_VERBOSE); if ((int )rv <= 0 || (unsigned int )tconn->cstate != 9U) { clear_bit(10, (unsigned long volatile *)(& tconn->flags)); return (0); } else { } drbd_thread_start(& tconn->asender); ldv_mutex_lock_104(& tconn->conf_update); (tconn->net_conf)->discard_my_data = 0; ldv_mutex_unlock_105(& tconn->conf_update); return (h); out_release_sockets: ; if ((unsigned long )ad.s_listen != (unsigned long )((struct socket *)0)) { sock_release(ad.s_listen); } else { } if ((unsigned long )sock.socket != (unsigned long )((struct socket *)0)) { sock_release(sock.socket); } else { } if ((unsigned long )msock.socket != (unsigned long )((struct socket *)0)) { sock_release(msock.socket); } else { } return (-1); } } static int decode_header(struct drbd_tconn *tconn , void *header , struct packet_info *pi ) { unsigned int header_size ; unsigned int tmp ; struct p_header100 *h ; __u16 tmp___0 ; __u16 tmp___1 ; __u32 tmp___2 ; struct p_header95 *h___0 ; __u16 tmp___3 ; __u32 tmp___4 ; struct p_header80 *h___1 ; __u16 tmp___5 ; __u16 tmp___6 ; __u32 tmp___7 ; { tmp = drbd_header_size(tconn); 
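/* decode_header() recognizes three on-wire layouts by size and magic: the 16-byte protocol-100 header (carrying a volume number), and the 8-byte protocol-95 and protocol-80 headers (volume implicitly 0). All fields arrive big-endian, hence the __fswab16/__fswab32 conversions. */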
header_size = tmp; if (header_size == 16U && *((__be32 *)header) == 552345734U) { h = (struct p_header100 *)header; if (h->pad != 0U) { printk("\vd-con %s: Header padding is not zero\n", tconn->name); return (-22); } else { } tmp___0 = __fswab16((int )h->volume); pi->vnr = (unsigned int )tmp___0; tmp___1 = __fswab16((int )h->command); pi->cmd = (enum drbd_packet )tmp___1; tmp___2 = __fswab32(h->length); pi->size = tmp___2; } else if (header_size == 8U && (unsigned int )*((__be16 *)header) == 23171U) { h___0 = (struct p_header95 *)header; tmp___3 = __fswab16((int )h___0->command); pi->cmd = (enum drbd_packet )tmp___3; tmp___4 = __fswab32(h___0->length); pi->size = tmp___4; pi->vnr = 0U; } else if (header_size == 8U && *((__be32 *)header) == 1728214147U) { h___1 = (struct p_header80 *)header; tmp___5 = __fswab16((int )h___1->command); pi->cmd = (enum drbd_packet )tmp___5; tmp___6 = __fswab16((int )h___1->length); pi->size = (unsigned int )tmp___6; pi->vnr = 0U; } else { tmp___7 = __fswab32(*((__be32 *)header)); printk("\vd-con %s: Wrong magic value 0x%08x in protocol version %d\n", tconn->name, tmp___7, tconn->agreed_pro_version); return (-22); } pi->data = header + (unsigned long )header_size; return (0); } } static int drbd_recv_header(struct drbd_tconn *tconn , struct packet_info *pi ) { void *buffer ; int err ; unsigned int tmp ; { buffer = tconn->data.rbuf; tmp = drbd_header_size(tconn); err = drbd_recv_all_warn(tconn, buffer, (size_t )tmp); if (err != 0) { return (err); } else { } err = decode_header(tconn, buffer, pi); tconn->last_received = jiffies; return (err); } } static void drbd_flush(struct drbd_tconn *tconn ) { int rv ; struct drbd_conf *mdev ; int vnr ; void *tmp ; int tmp___0 ; void *tmp___1 ; { if ((unsigned int )tconn->write_ordering > 1U) { rcu_read_lock___2(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53048; ldv_53047: tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 == 0) { goto ldv_53045; } else { } kref_get(& mdev->kref); rcu_read_unlock___2(); rv = blkdev_issue_flush((mdev->ldev)->backing_bdev, 16U, 0); if (rv != 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "local disk flush failed with status %d\n", rv); drbd_bump_write_ordering(tconn, WO_drain_io); } else { } put_ldev(mdev); kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___2(); if (rv != 0) { goto ldv_53046; } else { } ldv_53045: vnr = vnr + 1; tmp___1 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___1; ldv_53048: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53047; } else { } ldv_53046: rcu_read_unlock___2(); } else { } return; } } static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn , struct drbd_epoch *epoch , enum epoch_event ev ) { int epoch_size ; struct drbd_epoch *next_epoch ; enum finish_epoch rv ; struct list_head const *__mptr ; int tmp ; int tmp___0 ; { rv = FE_STILL_LIVE; spin_lock(& tconn->epoch_lock); ldv_53064: next_epoch = 0; epoch_size = atomic_read((atomic_t const *)(& epoch->epoch_size)); switch ((unsigned int )ev & 4294967263U) { case 0U: atomic_dec(& epoch->active); goto ldv_53058; case 1U: set_bit(0U, (unsigned long volatile *)(& epoch->flags)); goto ldv_53058; case 2U: ; goto ldv_53058; } ldv_53058: ; if (epoch_size != 0) { tmp = atomic_read((atomic_t const *)(& epoch->active)); if (tmp == 0) { tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& epoch->flags)); if (tmp___0 != 0 || ((unsigned 
int )ev & 32U) != 0U) { if (((unsigned int )ev & 32U) == 0U) { spin_unlock(& tconn->epoch_lock); drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, (u32 )epoch_size); spin_lock(& tconn->epoch_lock); } else { } if ((unsigned long )tconn->current_epoch != (unsigned long )epoch) { __mptr = (struct list_head const *)epoch->list.next; next_epoch = (struct drbd_epoch *)__mptr + 0xfffffffffffffff8UL; list_del(& epoch->list); ev = (enum epoch_event )(((unsigned int )ev & 32U) | 2U); tconn->epochs = tconn->epochs - 1U; kfree((void const *)epoch); if ((unsigned int )rv == 0U) { rv = FE_DESTROYED; } else { } } else { epoch->flags = 0UL; atomic_set(& epoch->epoch_size, 0); if ((unsigned int )rv == 0U) { rv = FE_RECYCLED; } else { } } } else { } } else { } } else { } if ((unsigned long )next_epoch == (unsigned long )((struct drbd_epoch *)0)) { goto ldv_53063; } else { } epoch = next_epoch; goto ldv_53064; ldv_53063: spin_unlock(& tconn->epoch_lock); return (rv); } } void drbd_bump_write_ordering(struct drbd_tconn *tconn , enum write_ordering_e wo ) { struct disk_conf *dc ; struct drbd_conf *mdev ; enum write_ordering_e pwo ; int vnr ; char *write_ordering_str[3U] ; enum write_ordering_e _min1 ; enum write_ordering_e _min2 ; void *tmp ; int tmp___0 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; { write_ordering_str[0] = (char *)"none"; write_ordering_str[1] = (char *)"drain"; write_ordering_str[2] = (char *)"flush"; pwo = tconn->write_ordering; _min1 = pwo; _min2 = wo; wo = (enum write_ordering_e )((unsigned int )_min1 < (unsigned int )_min2 ? (unsigned int )_min1 : (unsigned int )_min2); rcu_read_lock___2(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53082; ldv_53081: tmp___0 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___0 == 0) { goto ldv_53077; } else { } _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! 
__warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1353, "suspicious rcu_dereference_check() usage"); } else { } } else { } dc = _________p1; if ((unsigned int )wo == 2U && (int )((signed char )dc->disk_flushes) == 0) { wo = WO_drain_io; } else { } if ((unsigned int )wo == 1U && (int )((signed char )dc->disk_drain) == 0) { wo = WO_none; } else { } put_ldev(mdev); ldv_53077: vnr = vnr + 1; tmp___3 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___3; ldv_53082: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53081; } else { } rcu_read_unlock___2(); tconn->write_ordering = wo; if ((unsigned int )tconn->write_ordering != (unsigned int )pwo || (unsigned int )wo == 2U) { printk("\016d-con %s: Method to ensure write ordering: %s\n", tconn->name, write_ordering_str[(unsigned int )tconn->write_ordering]); } else { } return; } } int drbd_submit_peer_request(struct drbd_conf *mdev , struct drbd_peer_request *peer_req , unsigned int const rw , int const fault_type ) { struct bio *bios ; struct bio *bio ; struct page *page ; sector_t sector ; unsigned int ds ; unsigned int n_bios ; unsigned int nr_pages ; int err ; unsigned int len ; unsigned int __min1 ; unsigned int __min2 ; int tmp ; struct page *tmp___0 ; { bios = 0; page = peer_req->pages; sector = peer_req->i.sector; ds = peer_req->i.size; n_bios = 0U; nr_pages = (unsigned int )(((unsigned long )ds + 4095UL) >> 12); err = -12; next_bio: bio = bio_alloc(16U, nr_pages); if ((unsigned long )bio == (unsigned long )((struct bio *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "submit_ee: Allocation of a bio failed\n"); goto fail; } else { } bio->bi_sector = sector; bio->bi_bdev = (mdev->ldev)->backing_bdev; bio->bi_rw = (unsigned long )rw; bio->bi_private = (void *)peer_req; bio->bi_end_io = & drbd_peer_request_endio; bio->bi_next = bios; bios = bio; n_bios = n_bios + 1U; goto ldv_53106; ldv_53105: __min1 = ds; __min2 = 4096U; len = __min1 < __min2 ? 
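/* Fill the bio page by page, at most PAGE_SIZE (4096) bytes per element; if bio_add_page() refuses a page, a fresh bio is allocated at next_bio and chained in front. */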
__min1 : __min2; tmp = bio_add_page(bio, page, len, 0U); if (tmp == 0) { if ((unsigned int )bio->bi_vcnt == 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bio_add_page failed for len=%u, bi_vcnt=0 (bi_sector=%llu)\n", len, (unsigned long long )bio->bi_sector); err = -28; goto fail; } else { } goto next_bio; } else { } ds = ds - len; sector = (sector_t )(len >> 9) + sector; nr_pages = nr_pages - 1U; page = page_chain_next(page); ldv_53106: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp___0 = page_chain_next(page); __builtin_prefetch((void const *)tmp___0); if (1 != 0) { goto ldv_53105; } else { goto ldv_53107; } } else { } ldv_53107: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( page == NULL ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1442); } else { } if (ds != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( ds == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1443); } else { } atomic_set(& peer_req->pending_bios, (int )n_bios); ldv_53108: bio = bios; bios = bios->bi_next; bio->bi_next = 0; drbd_generic_make_request(mdev, fault_type, bio); if ((unsigned long )bios != (unsigned long )((struct bio *)0)) { goto ldv_53108; } else { } return (0); fail: ; goto ldv_53111; ldv_53110: bio = bios; bios = bios->bi_next; bio_put(bio); ldv_53111: ; if ((unsigned long )bios != (unsigned long )((struct bio *)0)) { goto ldv_53110; } else { } return (err); } } static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev , struct drbd_peer_request *peer_req ) { struct drbd_interval *i ; { i = & peer_req->i; drbd_remove_interval(& mdev->write_requests, i); drbd_clear_interval(i); if ((unsigned int )*((unsigned char *)i + 48UL) != 0U) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } return; } } void conn_wait_active_ee_empty(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; int vnr ; void *tmp ; void *tmp___0 ; { rcu_read_lock___2(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53124; ldv_53123: kref_get(& mdev->kref); rcu_read_unlock___2(); drbd_wait_ee_list_empty(mdev, & mdev->active_ee); kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___2(); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_53124: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53123; } else { } rcu_read_unlock___2(); return; } } static int receive_Barrier(struct drbd_tconn *tconn , struct packet_info *pi ) { int rv ; struct p_barrier *p ; struct drbd_epoch *epoch ; enum finish_epoch tmp ; void *tmp___0 ; void *tmp___1 ; int tmp___2 ; int tmp___3 ; { p = (struct p_barrier *)pi->data; (tconn->current_epoch)->barrier_nr = p->barrier; (tconn->current_epoch)->tconn = tconn; tmp = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR); rv = (int )tmp; switch ((unsigned int )tconn->write_ordering) { case 0U: ; if (rv == 2) { return (0); } else { } tmp___0 = kmalloc(48UL, 16U); epoch = (struct drbd_epoch *)tmp___0; if ((unsigned long )epoch != 
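/* Non-NULL: jump to ldv_53134 to initialize the fresh epoch; a failed allocation only warns and falls through to the drain/flush cases below. */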
(unsigned long )((struct drbd_epoch *)0)) { goto ldv_53134; } else { printk("\fd-con %s: Allocation of an epoch failed, slowing down\n", tconn->name); } case 2U: ; case 1U: conn_wait_active_ee_empty(tconn); drbd_flush(tconn); tmp___2 = atomic_read((atomic_t const *)(& (tconn->current_epoch)->epoch_size)); if (tmp___2 != 0) { tmp___1 = kmalloc(48UL, 16U); epoch = (struct drbd_epoch *)tmp___1; if ((unsigned long )epoch != (unsigned long )((struct drbd_epoch *)0)) { goto ldv_53134; } else { } } else { } return (0); default: printk("\vd-con %s: Strangeness in tconn->write_ordering %d\n", tconn->name, (unsigned int )tconn->write_ordering); return (-5); } ldv_53134: epoch->flags = 0UL; atomic_set(& epoch->epoch_size, 0); atomic_set(& epoch->active, 0); spin_lock(& tconn->epoch_lock); tmp___3 = atomic_read((atomic_t const *)(& (tconn->current_epoch)->epoch_size)); if (tmp___3 != 0) { list_add(& epoch->list, & (tconn->current_epoch)->list); tconn->current_epoch = epoch; tconn->epochs = tconn->epochs + 1U; } else { kfree((void const *)epoch); } spin_unlock(& tconn->epoch_lock); return (0); } } static struct drbd_peer_request *read_in_block(struct drbd_conf *mdev , u64 id , sector_t sector , int data_size ) { sector_t capacity ; sector_t tmp ; struct drbd_peer_request *peer_req ; struct page *page ; int dgs ; int ds ; int err ; void *dig_in ; void *dig_vv ; unsigned long *data ; unsigned int tmp___0 ; bool _bool ; int tmp___1 ; bool _bool___0 ; int tmp___2 ; unsigned int len ; int __min1 ; int __min2 ; void *tmp___3 ; int tmp___4 ; struct page *tmp___5 ; int tmp___6 ; { tmp = drbd_get_capacity(mdev->this_bdev); capacity = tmp; dig_in = (mdev->tconn)->int_dig_in; dig_vv = (mdev->tconn)->int_dig_vv; dgs = 0; if ((unsigned long )(mdev->tconn)->peer_integrity_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___0 = crypto_hash_digestsize((mdev->tconn)->peer_integrity_tfm); dgs = (int )tmp___0; err = drbd_recv_all_warn(mdev->tconn, dig_in, (size_t )dgs); if (err != 0) { return (0); } else { } data_size = data_size - dgs; } else { } _bool = (data_size & 511) == 0; if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"IS_ALIGNED(data_size, 512)", "read_in_block"); } else { } if (_bool) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } _bool___0 = (unsigned int )data_size <= 1048576U; if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"data_size <= DRBD_MAX_BIO_SIZE", "read_in_block"); } else { } if (_bool___0) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { return (0); } else { } if ((sector_t )(data_size >> 9) + sector > capacity) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "request from peer beyond end of local disk: capacity: %llus < sector: %llus + size: %u\n", (unsigned long long )capacity, (unsigned long long )sector, data_size); return (0); } else { } peer_req = drbd_alloc_peer_req(mdev, id, sector, (unsigned int )data_size, 16U); if ((unsigned long )peer_req == (unsigned long )((struct drbd_peer_request *)0)) { return (0); } else { } if (data_size == 0) { return (peer_req); } else { } ds = data_size; page = peer_req->pages; goto ldv_53164; ldv_53163: __min1 = ds; __min2 = 4096; len = (unsigned int )(__min1 < __min2 ? 
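/* Receive loop: copy at most one page per iteration from the socket into the just-allocated page chain, with optional fault injection corrupting the data. */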
__min1 : __min2); tmp___3 = kmap(page); data = (unsigned long *)tmp___3; err = drbd_recv_all_warn(mdev->tconn, (void *)data, (size_t )len); tmp___4 = drbd_insert_fault(mdev, 9U); if (tmp___4 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Fault injection: Corrupting data on receive\n"); *data = ~ *data; } else { } kunmap(page); if (err != 0) { __drbd_free_peer_req(mdev, peer_req, 0); return (0); } else { } ds = (int )((unsigned int )ds - len); page = page_chain_next(page); ldv_53164: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp___5 = page_chain_next(page); __builtin_prefetch((void const *)tmp___5); if (1 != 0) { goto ldv_53163; } else { goto ldv_53165; } } else { } ldv_53165: ; if (dgs != 0) { drbd_csum_ee(mdev, (mdev->tconn)->peer_integrity_tfm, peer_req, dig_vv); tmp___6 = memcmp((void const *)dig_in, (void const *)dig_vv, (size_t )dgs); if (tmp___6 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Digest integrity check FAILED: %llus +%u\n", (unsigned long long )sector, data_size); __drbd_free_peer_req(mdev, peer_req, 0); return (0); } else { } } else { } mdev->recv_cnt = mdev->recv_cnt + (unsigned int )(data_size >> 9); return (peer_req); } } static int drbd_drain_block(struct drbd_conf *mdev , int data_size ) { struct page *page ; int err ; void *data ; unsigned int len ; int __min1 ; int __min2 ; { err = 0; if (data_size == 0) { return (0); } else { } page = drbd_alloc_pages(mdev, 1U, 1); data = kmap(page); goto ldv_53179; ldv_53178: __min1 = data_size; __min2 = 4096; len = (unsigned int )(__min1 < __min2 ? __min1 : __min2); err = drbd_recv_all_warn(mdev->tconn, data, (size_t )len); if (err != 0) { goto ldv_53177; } else { } data_size = (int )((unsigned int )data_size - len); ldv_53179: ; if (data_size != 0) { goto ldv_53178; } else { } ldv_53177: kunmap(page); drbd_free_pages(mdev, page, 0); return (err); } } static int recv_dless_read(struct drbd_conf *mdev , struct drbd_request *req , sector_t sector , int data_size ) { struct bio_vec *bvec ; struct bio *bio ; int dgs ; int err ; int i ; int expect ; void *dig_in ; void *dig_vv ; unsigned int tmp ; void *mapped ; void *tmp___0 ; int __min1 ; int __min2 ; int tmp___1 ; { dig_in = (mdev->tconn)->int_dig_in; dig_vv = (mdev->tconn)->int_dig_vv; dgs = 0; if ((unsigned long )(mdev->tconn)->peer_integrity_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp = crypto_hash_digestsize((mdev->tconn)->peer_integrity_tfm); dgs = (int )tmp; err = drbd_recv_all_warn(mdev->tconn, dig_in, (size_t )dgs); if (err != 0) { return (err); } else { } data_size = data_size - dgs; } else { } mdev->recv_cnt = mdev->recv_cnt + (unsigned int )(data_size >> 9); bio = req->master_bio; if (bio->bi_sector != sector) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( sector == bio->bi_sector ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1694); } else { } bvec = bio->bi_io_vec + (unsigned long )bio->bi_idx; i = (int )bio->bi_idx; goto ldv_53199; ldv_53198: tmp___0 = kmap(bvec->bv_page); mapped = tmp___0 + (unsigned long )bvec->bv_offset; __min1 = data_size; __min2 = (int )bvec->bv_len; expect = __min1 < __min2 ? 
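/* Disk-less read: the payload goes straight into the request's original bio, at most bv_len bytes per bio_vec. */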
__min1 : __min2; err = drbd_recv_all_warn(mdev->tconn, mapped, (size_t )expect); kunmap(bvec->bv_page); if (err != 0) { return (err); } else { } data_size = data_size - expect; bvec = bvec + 1; i = i + 1; ldv_53199: ; if ((int )bio->bi_vcnt > i) { goto ldv_53198; } else { } if (dgs != 0) { drbd_csum_bio(mdev, (mdev->tconn)->peer_integrity_tfm, bio, dig_vv); tmp___1 = memcmp((void const *)dig_in, (void const *)dig_vv, (size_t )dgs); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Digest integrity check FAILED. Broken NICs?\n"); return (-22); } else { } } else { } if (data_size != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( data_size == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1714); } else { } return (0); } } static int e_end_resync_block(struct drbd_work *w , int unused ) { struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; sector_t sector ; int err ; bool tmp ; int tmp___0 ; long tmp___1 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; sector = peer_req->i.sector; tmp = drbd_interval_empty(& peer_req->i); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( drbd_interval_empty(&peer_req->i) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1730); } else { } tmp___1 = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp___1 != 0L) { __drbd_set_in_sync(mdev, sector, (int )peer_req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1733U); err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req); } else { drbd_rs_failed_io(mdev, sector, (int )peer_req->i.size); err = drbd_send_ack(mdev, P_NEG_ACK, peer_req); } _dec_unacked(mdev, "e_end_resync_block", 1741); return (err); } } static int recv_resync_read(struct drbd_conf *mdev , sector_t sector , int data_size ) { struct drbd_peer_request *peer_req ; int tmp ; { peer_req = read_in_block(mdev, 0xffffffffffffffffULL, sector, data_size); if ((unsigned long )peer_req == (unsigned long )((struct drbd_peer_request *)0)) { goto fail; } else { } _dec_rs_pending(mdev, "recv_resync_read", 1754); inc_unacked(mdev); peer_req->w.cb = & e_end_resync_block; spin_lock_irq(& (mdev->tconn)->req_lock); list_add(& peer_req->w.list, & mdev->sync_ee); spin_unlock_irq(& (mdev->tconn)->req_lock); atomic_add(data_size >> 9, & mdev->rs_sect_ev); tmp = drbd_submit_peer_request(mdev, peer_req, 1U, 2); if (tmp == 0) { return (0); } else { } dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "submit failed, triggering re-connect\n"); spin_lock_irq(& (mdev->tconn)->req_lock); list_del(& peer_req->w.list); spin_unlock_irq(& (mdev->tconn)->req_lock); __drbd_free_peer_req(mdev, peer_req, 0); fail: put_ldev(mdev); return (-5); } } static struct drbd_request *find_request(struct drbd_conf *mdev , struct rb_root *root , u64 id , sector_t sector , bool missing_ok , char const *func ) { 
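/* The peer echoes our request pointer back as block_id, so the id can be cast to the request; drbd_contains_interval() validates it against the interval tree before it is trusted. */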
struct drbd_request *req ; bool tmp ; { req = (struct drbd_request *)id; tmp = drbd_contains_interval(root, sector, & req->i); if ((int )tmp && (unsigned int )*((unsigned char *)req + 88UL) != 0U) { return (req); } else { } if (! missing_ok) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s: failed to find request 0x%lx, sector %llus\n", func, (unsigned long )id, (unsigned long long )sector); } else { } return (0); } } static int receive_DataReply(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct drbd_request *req ; sector_t sector ; int err ; struct p_data *p ; __u64 tmp ; long tmp___0 ; { p = (struct p_data *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp = __fswab64(p->sector); sector = (sector_t )tmp; spin_lock_irq(& (mdev->tconn)->req_lock); req = find_request(mdev, & mdev->read_requests, p->block_id, sector, 0, "receive_DataReply"); spin_unlock_irq(& (mdev->tconn)->req_lock); tmp___0 = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drbd_request *)0), 0L); if (tmp___0 != 0L) { return (-5); } else { } err = recv_dless_read(mdev, req, sector, (int )pi->size); if (err == 0) { req_mod(req, DATA_RECEIVED); } else { } return (err); } } static int receive_RSDataReply(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; sector_t sector ; int err ; struct p_data *p ; __u64 tmp ; int tmp___0 ; int tmp___1 ; { p = (struct p_data *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp = __fswab64(p->sector); sector = (sector_t )tmp; if (p->block_id != 0xffffffffffffffffULL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( p->block_id == ID_SYNCER ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1844); } else { } tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 != 0) { err = recv_resync_read(mdev, sector, (int )pi->size); } else { tmp___0 = ___ratelimit(& drbd_ratelimit_state, "receive_RSDataReply"); if (tmp___0 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Can not write resync data to local disk.\n"); } else { } err = drbd_drain_block(mdev, (int )pi->size); drbd_send_ack_dp(mdev, P_NEG_ACK, p, (int )pi->size); } atomic_add((int )(pi->size >> 9), & mdev->rs_sect_in); return (err); } } static void restart_conflicting_writes(struct drbd_conf *mdev , sector_t sector , int size ) { struct drbd_interval *i ; struct drbd_request *req ; struct drbd_interval const *__mptr ; { i = drbd_find_overlap(& mdev->write_requests, sector, (unsigned int )size); goto ldv_53259; ldv_53258: ; if ((unsigned int )*((unsigned char *)i + 48UL) == 0U) { goto ldv_53255; } else { } __mptr = (struct drbd_interval const *)i; req = (struct drbd_request *)__mptr + 0xffffffffffffffd8UL; if ((int )req->rq_state & 1 || ((unsigned long )req->rq_state & 8192UL) == 0UL) { goto ldv_53255; } else { } __req_mod(req, CONFLICT_RESOLVED, 0); ldv_53255: i = drbd_next_overlap(i, sector, (unsigned int )size); ldv_53259: ; if ((unsigned long )i != (unsigned long )((struct drbd_interval *)0)) { goto ldv_53258; } else { } return; } } static int e_end_block(struct drbd_work *w , int cancel ) { struct 
drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; sector_t sector ; int err ; int pcmd ; long tmp ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; { __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; mdev = w->ldv_49807.mdev; sector = peer_req->i.sector; err = 0; if ((peer_req->flags & 64UL) != 0UL) { tmp = ldv__builtin_expect((peer_req->flags & 8UL) == 0UL, 1L); if (tmp != 0L) { pcmd = ((int )mdev->state.ldv_49522.conn > 15 && (int )mdev->state.ldv_49522.conn <= 21) && (peer_req->flags & 2UL) != 0UL ? 23 : 22; err = drbd_send_ack(mdev, (enum drbd_packet )pcmd, peer_req); if (pcmd == 23) { __drbd_set_in_sync(mdev, sector, (int )peer_req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1903U); } else { } } else { err = drbd_send_ack(mdev, P_NEG_ACK, peer_req); } _dec_unacked(mdev, "e_end_block", 1909); } else { } if ((peer_req->flags & 128UL) != 0UL) { spin_lock_irq(& (mdev->tconn)->req_lock); tmp___0 = drbd_interval_empty(& peer_req->i); if ((int )tmp___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !drbd_interval_empty(&peer_req->i) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1915); } else { } drbd_remove_epoch_entry_interval(mdev, peer_req); if ((peer_req->flags & 32UL) != 0UL) { restart_conflicting_writes(mdev, sector, (int )peer_req->i.size); } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); } else { tmp___1 = drbd_interval_empty(& peer_req->i); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( drbd_interval_empty(&peer_req->i) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1921); } else { } } drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, cancel != 0 ? EV_CLEANUP : EV_PUT); return (err); } } static int e_send_ack(struct drbd_work *w , enum drbd_packet ack ) { struct drbd_conf *mdev ; struct drbd_peer_request *peer_req ; struct drbd_work const *__mptr ; int err ; { mdev = w->ldv_49807.mdev; __mptr = (struct drbd_work const *)w; peer_req = (struct drbd_peer_request *)__mptr; err = drbd_send_ack(mdev, ack, peer_req); _dec_unacked(mdev, "e_send_ack", 1936); return (err); } } static int e_send_superseded(struct drbd_work *w , int unused ) { int tmp ; { tmp = e_send_ack(w, P_SUPERSEDED); return (tmp); } } static int e_send_retry_write(struct drbd_work *w , int unused ) { struct drbd_tconn *tconn ; int tmp ; { tconn = (w->ldv_49807.mdev)->tconn; tmp = e_send_ack(w, tconn->agreed_pro_version > 99 ? P_RETRY_WRITE : P_SUPERSEDED); return (tmp); } } static bool seq_greater(u32 a , u32 b ) { { return ((int )a - (int )b > 0); } } static u32 seq_max(u32 a , u32 b ) { bool tmp ; { tmp = seq_greater(a, b); return ((int )tmp ? 
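/* seq_greater()/seq_max() do serial-number arithmetic on the 32-bit peer sequence counters, so ordering stays correct across wraparound. */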
a : b); } } static bool need_peer_seq(struct drbd_conf *mdev ) { struct drbd_tconn *tconn ; int tp ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tconn = mdev->tconn; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 1981, "suspicious rcu_dereference_check() usage"); } else { } } else { } tp = (int )_________p1->two_primaries; rcu_read_unlock___2(); if (tp != 0) { tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___1 != 0) { tmp___2 = 1; } else { tmp___2 = 0; } } else { tmp___2 = 0; } return ((bool )tmp___2); } } static void update_peer_seq(struct drbd_conf *mdev , unsigned int peer_seq ) { unsigned int newest_peer_seq ; bool tmp ; { tmp = need_peer_seq(mdev); if ((int )tmp) { spin_lock(& mdev->peer_seq_lock); newest_peer_seq = seq_max(mdev->peer_seq, peer_seq); mdev->peer_seq = newest_peer_seq; spin_unlock(& mdev->peer_seq_lock); if (peer_seq == newest_peer_seq) { __wake_up(& mdev->seq_wait, 3U, 1, 0); } else { } } else { } return; } } __inline static int overlaps(sector_t s1 , int l1 , sector_t s2 , int l2 ) { { return ((sector_t )(l1 >> 9) + s1 > s2 && (sector_t )(l2 >> 9) + s2 > s1); } } static bool overlapping_resync_write(struct drbd_conf *mdev , struct drbd_peer_request *peer_req ) { struct drbd_peer_request *rs_req ; bool rv ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { rv = 0; spin_lock_irq(& (mdev->tconn)->req_lock); __mptr = (struct list_head const *)mdev->sync_ee.next; rs_req = (struct drbd_peer_request *)__mptr; goto ldv_53331; ldv_53330: tmp = overlaps(peer_req->i.sector, (int )peer_req->i.size, rs_req->i.sector, (int )rs_req->i.size); if (tmp != 0) { rv = 1; goto ldv_53329; } else { } __mptr___0 = (struct list_head const *)rs_req->w.list.next; rs_req = (struct drbd_peer_request *)__mptr___0; ldv_53331: ; if ((unsigned long )(& rs_req->w.list) != (unsigned long )(& mdev->sync_ee)) { goto ldv_53330; } else { } ldv_53329: spin_unlock_irq(& (mdev->tconn)->req_lock); return (rv); } } static int wait_for_and_update_peer_seq(struct drbd_conf *mdev , u32 const peer_seq ) { wait_queue_t wait ; struct task_struct *tmp ; long timeout ; int ret ; bool tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; struct task_struct *tmp___4 ; int tmp___5 ; struct net_conf *_________p1 ; bool __warned ; int tmp___6 ; int tmp___7 ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; tmp___0 = need_peer_seq(mdev); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { return (0); } else { } spin_lock(& mdev->peer_seq_lock); ldv_53343: tmp___2 = seq_greater((unsigned int )peer_seq - 1U, mdev->peer_seq); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq); ret = 0; goto ldv_53339; } else { } tmp___4 = get_current(); tmp___5 = signal_pending(tmp___4); if (tmp___5 != 0) { ret = -512; goto ldv_53339; } else { } prepare_to_wait(& mdev->seq_wait, & wait, 1); 
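/*
 * Classic wait-queue sleep pattern in wait_for_and_update_peer_seq():
 * after prepare_to_wait() the peer_seq_lock is dropped, the ping
 * timeout is re-read under RCU (ping_timeo is in deciseconds, and
 * * 250 / 10 converts it to jiffies at HZ == 250), and the thread
 * sleeps in schedule_timeout() before re-taking the lock.  A pending
 * signal yields -512 (-ERESTARTSYS); a timeout yields -110
 * (-ETIMEDOUT) and the connection is dropped.
 */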
spin_unlock(& mdev->peer_seq_lock); rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___6 = debug_lockdep_rcu_enabled(); if (tmp___6 != 0 && ! __warned) { tmp___7 = rcu_read_lock_held(); if (tmp___7 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2070, "suspicious rcu_dereference_check() usage"); } else { } } else { } timeout = (long )((_________p1->ping_timeo * 250U) / 10U); rcu_read_unlock___2(); timeout = schedule_timeout(timeout); spin_lock(& mdev->peer_seq_lock); if (timeout == 0L) { ret = -110; dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Timed out waiting for missing ack packets; disconnecting\n"); goto ldv_53339; } else { } goto ldv_53343; ldv_53339: spin_unlock(& mdev->peer_seq_lock); finish_wait(& mdev->seq_wait, & wait); return (ret); } } static unsigned long wire_flags_to_bio(struct drbd_conf *mdev , u32 dpf ) { { return ((unsigned long )(((((dpf & 2U) != 0U ? 16 : 0) | ((dpf & 16U) != 0U ? 2048 : 0)) | ((dpf & 32U) != 0U ? 4096 : 0)) | ((dpf & 64U) != 0U ? 128 : 0))); } } static void fail_postponed_requests(struct drbd_conf *mdev , sector_t sector , unsigned int size ) { struct drbd_interval *i ; struct drbd_request *req ; struct bio_and_error m ; struct drbd_interval const *__mptr ; { repeat: i = drbd_find_overlap(& mdev->write_requests, sector, size); goto ldv_53361; ldv_53360: ; if ((unsigned int )*((unsigned char *)i + 48UL) == 0U) { goto ldv_53357; } else { } __mptr = (struct drbd_interval const *)i; req = (struct drbd_request *)__mptr + 0xffffffffffffffd8UL; if (((unsigned long )req->rq_state & 8192UL) == 0UL) { goto ldv_53357; } else { } req->rq_state = req->rq_state & 4294959103U; __req_mod(req, NEG_ACKED, & m); spin_unlock_irq(& (mdev->tconn)->req_lock); if ((unsigned long )m.bio != (unsigned long )((struct bio *)0)) { complete_master_bio(mdev, & m); } else { } spin_lock_irq(& (mdev->tconn)->req_lock); goto repeat; ldv_53357: i = drbd_next_overlap(i, sector, size); ldv_53361: ; if ((unsigned long )i != (unsigned long )((struct drbd_interval *)0)) { goto ldv_53360; } else { } return; } } static int handle_write_conflicts(struct drbd_conf *mdev , struct drbd_peer_request *peer_req ) { struct drbd_tconn *tconn ; bool resolve_conflicts ; int tmp ; sector_t sector ; unsigned int size ; struct drbd_interval *i ; bool equal ; int err ; bool superseded ; struct drbd_request *req ; struct drbd_interval const *__mptr ; union drbd_state val ; union drbd_state mask ; { tconn = mdev->tconn; tmp = constant_test_bit(1U, (unsigned long const volatile *)(& tconn->flags)); resolve_conflicts = tmp != 0; sector = peer_req->i.sector; size = peer_req->i.size; drbd_insert_interval(& mdev->write_requests, & peer_req->i); repeat: i = drbd_find_overlap(& mdev->write_requests, sector, size); goto ldv_53386; ldv_53385: ; if ((unsigned long )(& peer_req->i) == (unsigned long )i) { goto ldv_53375; } else { } if ((unsigned int )*((unsigned char *)i + 48UL) == 0U) { err = drbd_wait_misc(mdev, i); if (err != 0) { goto out; } else { } goto repeat; } else { } equal = (bool )(i->sector == sector && i->size == size); if ((int )resolve_conflicts) { superseded = (bool )(i->sector <= sector && i->sector + (sector_t )(i->size >> 9) >= (sector_t )(size >> 9) + sector); if (! 
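/*
 * handle_write_conflicts(): called with req_lock held in two-primaries
 * setups.  It walks every interval overlapping the incoming peer write
 * in the write_requests tree.  On the side that resolves conflicts
 * (bit 1 of tconn->flags, presumably RESOLVE_CONFLICTS in the original
 * source) a local write that fully covers the peer write wins and the
 * peer write is answered with P_SUPERSEDED, otherwise the peer is asked
 * to retry; without conflict resolution the receiver waits for the
 * local request to finish, or flags the peer request (32UL) so that
 * conflicting local writes are restarted once it completes.
 */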
equal) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "Concurrent writes detected: local=%llus +%u, remote=%llus +%u, assuming %s came first\n", (unsigned long long )i->sector, i->size, (unsigned long long )sector, size, (int )superseded ? (char *)"local" : (char *)"remote"); } else { } inc_unacked(mdev); peer_req->w.cb = (int )superseded ? & e_send_superseded : & e_send_retry_write; list_add_tail(& peer_req->w.list, & mdev->done_ee); wake_asender(mdev->tconn); err = -2; goto out; } else { __mptr = (struct drbd_interval const *)i; req = (struct drbd_request *)__mptr + 0xffffffffffffffd8UL; if (! equal) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "Concurrent writes detected: local=%llus +%u, remote=%llus +%u\n", (unsigned long long )i->sector, i->size, (unsigned long long )sector, size); } else { } if ((int )req->rq_state & 1 || ((unsigned long )req->rq_state & 8192UL) == 0UL) { err = drbd_wait_misc(mdev, & req->i); if (err != 0) { val.i = 0U; val.ldv_40024.conn = 3U; mask.i = 0U; mask.ldv_40024.conn = 31U; _conn_request_state(mdev->tconn, mask, val, CS_HARD); fail_postponed_requests(mdev, sector, size); goto out; } else { } goto repeat; } else { } peer_req->flags = peer_req->flags | 32UL; } ldv_53375: i = drbd_next_overlap(i, sector, size); ldv_53386: ; if ((unsigned long )i != (unsigned long )((struct drbd_interval *)0)) { goto ldv_53385; } else { } err = 0; out: ; if (err != 0) { drbd_remove_epoch_entry_interval(mdev, peer_req); } else { } return (err); } } static int receive_Data(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; sector_t sector ; struct drbd_peer_request *peer_req ; struct p_data *p ; u32 peer_seq ; __u32 tmp ; int rw ; u32 dp_flags ; int err ; int tp ; int err2 ; int tmp___0 ; __u64 tmp___1 ; __u32 tmp___2 ; unsigned long tmp___3 ; struct net_conf *_________p1 ; bool __warned ; int tmp___4 ; int tmp___5 ; bool tmp___6 ; int tmp___7 ; wait_queue_t __wait ; struct task_struct *tmp___8 ; bool tmp___9 ; int tmp___10 ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___11 ; int tmp___12 ; { p = (struct p_data *)pi->data; tmp = __fswab32(p->seq_num); peer_seq = tmp; rw = 1; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 == 0) { err = wait_for_and_update_peer_seq(mdev, peer_seq); drbd_send_ack_dp(mdev, P_NEG_ACK, p, (int )pi->size); atomic_inc(& (tconn->current_epoch)->epoch_size); err2 = drbd_drain_block(mdev, (int )pi->size); if (err == 0) { err = err2; } else { } return (err); } else { } tmp___1 = __fswab64(p->sector); sector = (sector_t )tmp___1; peer_req = read_in_block(mdev, p->block_id, sector, (int )pi->size); if ((unsigned long )peer_req == (unsigned long )((struct drbd_peer_request *)0)) { put_ldev(mdev); return (-5); } else { } peer_req->w.cb = & e_end_block; tmp___2 = __fswab32(p->dp_flags); dp_flags = tmp___2; tmp___3 = wire_flags_to_bio(mdev, dp_flags); rw = (int )((unsigned int )tmp___3 | (unsigned int )rw); if ((unsigned long )peer_req->pages == (unsigned long )((struct page *)0)) { if (peer_req->i.size != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( peer_req->i.size == 0 ) in %s:%d\n", (char 
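/*
 * receive_Data(): handler for a replicated application write (P_DATA).
 * The asserts around this point cover the zero-length case: a peer
 * request without pages must have size 0 and must carry DP_FLUSH
 * (dp_flags bit 32U, per the assert text).  The write is then accounted
 * into the current epoch before any ordering or conflict checks run.
 */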
*)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2276); } else { } if ((dp_flags & 32U) == 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( dp_flags & DP_FLUSH ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2277); } else { } } else { } if ((dp_flags & 4U) != 0U) { peer_req->flags = peer_req->flags | 2UL; } else { } spin_lock(& tconn->epoch_lock); peer_req->epoch = tconn->current_epoch; atomic_inc(& (peer_req->epoch)->epoch_size); atomic_inc(& (peer_req->epoch)->active); spin_unlock(& tconn->epoch_lock); rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___4 = debug_lockdep_rcu_enabled(); if (tmp___4 != 0 && ! __warned) { tmp___5 = rcu_read_lock_held(); if (tmp___5 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2290, "suspicious rcu_dereference_check() usage"); } else { } } else { } tp = (int )_________p1->two_primaries; rcu_read_unlock___2(); if (tp != 0) { peer_req->flags = peer_req->flags | 128UL; err = wait_for_and_update_peer_seq(mdev, peer_seq); if (err != 0) { goto out_interrupted; } else { } spin_lock_irq(& (mdev->tconn)->req_lock); err = handle_write_conflicts(mdev, peer_req); if (err != 0) { spin_unlock_irq(& (mdev->tconn)->req_lock); if (err == -2) { put_ldev(mdev); return (0); } else { } goto out_interrupted; } else { } } else { spin_lock_irq(& (mdev->tconn)->req_lock); } list_add(& peer_req->w.list, & mdev->active_ee); spin_unlock_irq(& (mdev->tconn)->req_lock); if ((unsigned int )*((unsigned short *)mdev + 374UL) == 272U) { tmp___6 = overlapping_resync_write(mdev, peer_req); if (tmp___6) { tmp___7 = 0; } else { tmp___7 = 1; } if (tmp___7) { goto ldv_53406; } else { } tmp___8 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___8; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53409: prepare_to_wait(& mdev->ee_wait, & __wait, 2); tmp___9 = overlapping_resync_write(mdev, peer_req); if (tmp___9) { tmp___10 = 0; } else { tmp___10 = 1; } if (tmp___10) { goto ldv_53408; } else { } schedule(); goto ldv_53409; ldv_53408: finish_wait(& mdev->ee_wait, & __wait); ldv_53406: ; } else { } if ((mdev->tconn)->agreed_pro_version <= 99) { rcu_read_lock___2(); _________p1___0 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___11 = debug_lockdep_rcu_enabled(); if (tmp___11 != 0 && ! 
__warned___0) { tmp___12 = rcu_read_lock_held(); if (tmp___12 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2317, "suspicious rcu_dereference_check() usage"); } else { } } else { } switch (_________p1___0->wire_protocol) { case (__u32 )3: dp_flags = dp_flags | 256U; goto ldv_53414; case (__u32 )2: dp_flags = dp_flags | 128U; goto ldv_53414; } ldv_53414: rcu_read_unlock___2(); } else { } if ((dp_flags & 256U) != 0U) { peer_req->flags = peer_req->flags | 64UL; inc_unacked(mdev); } else { } if ((dp_flags & 128U) != 0U) { drbd_send_ack(mdev, P_RECV_ACK, peer_req); } else { } if ((int )mdev->state.ldv_49522.pdsk <= 3) { __drbd_set_out_of_sync(mdev, peer_req->i.sector, (int )peer_req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2343U); peer_req->flags = peer_req->flags | 1UL; peer_req->flags = peer_req->flags & 0xfffffffffffffffdUL; drbd_al_begin_io(mdev, & peer_req->i); } else { } err = drbd_submit_peer_request(mdev, peer_req, (unsigned int const )rw, 4); if (err == 0) { return (0); } else { } dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "submit failed, triggering re-connect\n"); spin_lock_irq(& (mdev->tconn)->req_lock); list_del(& peer_req->w.list); drbd_remove_epoch_entry_interval(mdev, peer_req); spin_unlock_irq(& (mdev->tconn)->req_lock); if ((int )peer_req->flags & 1) { drbd_al_complete_io(mdev, & peer_req->i); } else { } out_interrupted: drbd_may_finish_epoch(tconn, peer_req->epoch, EV_CLEANUP); put_ldev(mdev); __drbd_free_peer_req(mdev, peer_req, 0); return (err); } } int drbd_rs_should_slow_down(struct drbd_conf *mdev , sector_t sector ) { struct gendisk *disk ; unsigned long db ; unsigned long dt ; unsigned long dbdt ; struct lc_element *tmp ; int curr_events ; int throttle ; unsigned int c_min_rate ; struct disk_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; struct bm_extent *bm_ext ; struct lc_element const *__mptr ; int tmp___2 ; unsigned long res ; unsigned int _cpu ; void const *__vpp_verify ; unsigned long __ptr ; unsigned long res___0 ; unsigned int _cpu___0 ; void const *__vpp_verify___0 ; unsigned long __ptr___0 ; int tmp___3 ; unsigned long rs_left ; int i ; unsigned long tmp___4 ; { disk = (((mdev->ldev)->backing_bdev)->bd_contains)->bd_disk; throttle = 0; rcu_read_lock___2(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
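/*
 * drbd_rs_should_slow_down(): dynamic resync throttling; c_min_rate of
 * 0 disables it, and a resync extent flagged as priority (lc_find on
 * sector >> 15, i.e. 16 MiB extents, reconstructed) is never throttled.
 * The two per-CPU loops below sum the backing disk's read and write
 * sector counters from disk_stats; subtracting rs_sect_ev isolates
 * application IO from resync IO.  When application IO advanced, the
 * recent resync rate is estimated as dbdt = db / dt << 2 (bitmap bits
 * cover 4 KiB, so bits/s * 4 gives KiB/s, with dt derived from jiffies
 * at HZ == 250) and the resync is throttled once it exceeds c_min_rate.
 */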
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2390, "suspicious rcu_dereference_check() usage"); } else { } } else { } c_min_rate = _________p1->c_min_rate; rcu_read_unlock___2(); if (c_min_rate == 0U) { return (0); } else { } spin_lock_irq(& mdev->al_lock); tmp = lc_find(mdev->resync, (unsigned int )(sector >> 15)); if ((unsigned long )tmp != (unsigned long )((struct lc_element *)0)) { __mptr = (struct lc_element const *)tmp; bm_ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; tmp___2 = constant_test_bit(2U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___2 != 0) { spin_unlock_irq(& mdev->al_lock); return (0); } else { } } else { } spin_unlock_irq(& mdev->al_lock); res = 0UL; _cpu = 4294967295U; goto ldv_53441; ldv_53440: __vpp_verify = 0; __asm__ ("": "=r" (__ptr): "0" (disk->part0.dkstats)); res = ((struct disk_stats *)(__per_cpu_offset[_cpu] + __ptr))->sectors[0] + res; ldv_53441: _cpu = cpumask_next((int )_cpu, cpu_possible_mask); if ((unsigned int )nr_cpu_ids > _cpu) { goto ldv_53440; } else { } res___0 = 0UL; _cpu___0 = 4294967295U; goto ldv_53451; ldv_53450: __vpp_verify___0 = 0; __asm__ ("": "=r" (__ptr___0): "0" (disk->part0.dkstats)); res___0 = ((struct disk_stats *)(__per_cpu_offset[_cpu___0] + __ptr___0))->sectors[1] + res___0; ldv_53451: _cpu___0 = cpumask_next((int )_cpu___0, cpu_possible_mask); if ((unsigned int )nr_cpu_ids > _cpu___0) { goto ldv_53450; } else { } tmp___3 = atomic_read((atomic_t const *)(& mdev->rs_sect_ev)); curr_events = ((int )res + (int )res___0) - tmp___3; if (mdev->rs_last_events == 0 || curr_events - mdev->rs_last_events > 64) { mdev->rs_last_events = curr_events; i = (mdev->rs_last_mark + 7) % 8; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 288U || (unsigned int )*((unsigned short *)mdev + 374UL) == 304U) { rs_left = mdev->ov_left; } else { tmp___4 = drbd_bm_total_weight(mdev); rs_left = tmp___4 - mdev->rs_failed; } dt = (unsigned long )(((long )jiffies - (long )mdev->rs_mark_time[i]) / 250L); if (dt == 0UL) { dt = dt + 1UL; } else { } db = mdev->rs_mark_left[i] - rs_left; dbdt = db / dt << 2; if ((unsigned long )c_min_rate < dbdt) { throttle = 1; } else { } } else { } return (throttle); } } static int receive_DataRequest(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; sector_t sector ; sector_t capacity ; struct drbd_peer_request *peer_req ; struct digest_info *di ; int size ; int verb ; unsigned int fault_type ; struct p_block_req *p ; __u64 tmp ; __u32 tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; void *tmp___4 ; int tmp___5 ; unsigned long now ; int i ; unsigned long tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { di = 0; p = (struct p_block_req *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } capacity = drbd_get_capacity(mdev->this_bdev); tmp = __fswab64(p->sector); sector = (sector_t )tmp; tmp___0 = __fswab32(p->blksize); size = (int )tmp___0; if ((size <= 0 || (size & 511) != 0) || (unsigned int )size > 1048576U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s:%d: sector: %llus, size: %u\n", (char 
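/*
 * receive_DataRequest(): serves read-style requests from the peer,
 * application reads, resync reads, checksum-based resync and online
 * verify.  The sanity check here requires a positive size that is a
 * multiple of 512 bytes and at most 1 MiB, and the second check below
 * rejects ranges that run past the device capacity; both answer with
 * -22 (-EINVAL).
 */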
*)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2461, (unsigned long long )sector, size); return (-22); } else { } if ((sector_t )(size >> 9) + sector > capacity) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s:%d: sector: %llus, size: %u\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2466, (unsigned long long )sector, size); return (-22); } else { } tmp___3 = _get_ldev_if_state(mdev, D_UP_TO_DATE); if (tmp___3 == 0) { verb = 1; switch ((unsigned int )pi->cmd) { case 8U: drbd_send_ack_rp(mdev, P_NEG_DREPLY, p); goto ldv_53470; case 9U: ; case 33U: ; case 30U: drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p); goto ldv_53470; case 31U: verb = 0; _dec_rs_pending(mdev, "receive_DataRequest", 2484); drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, 4711ULL); goto ldv_53470; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (2488), "i" (12UL)); ldv_53477: ; goto ldv_53477; } ldv_53470: ; if (verb != 0) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "receive_DataRequest"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Can not satisfy peer\'s read request, no local data.\n"); } else { } } else { } tmp___2 = drbd_drain_block(mdev, (int )pi->size); return (tmp___2); } else { } peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, (unsigned int )size, 16U); if ((unsigned long )peer_req == (unsigned long )((struct drbd_peer_request *)0)) { put_ldev(mdev); return (-12); } else { } switch ((unsigned int )pi->cmd) { case 8U: peer_req->w.cb = & w_e_end_data_req; fault_type = 5U; goto submit; case 9U: peer_req->w.cb = & w_e_end_rsdata_req; fault_type = 3U; mdev->bm_resync_fo = sector >> 3; goto ldv_53481; case 31U: ; case 33U: fault_type = 3U; tmp___4 = kmalloc((unsigned long )pi->size + 16UL, 16U); di = (struct digest_info *)tmp___4; if ((unsigned long )di == (unsigned long )((struct digest_info *)0)) { goto out_free_e; } else { } di->digest_size = (int )pi->size; di->digest = (void *)di + 16U; peer_req->ldv_50726.digest = di; peer_req->flags = peer_req->flags | 16UL; tmp___5 = drbd_recv_all(mdev->tconn, di->digest, (size_t )pi->size); if (tmp___5 != 0) { goto out_free_e; } else { } if ((unsigned int )pi->cmd == 33U) { if ((mdev->tconn)->agreed_pro_version <= 88) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->tconn->agreed_pro_version >= 89 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2538); } else { } peer_req->w.cb = & w_e_end_csum_rs_req; mdev->bm_resync_fo = sector >> 3; } else if ((unsigned int )pi->cmd == 31U) { atomic_add(size >> 9, & mdev->rs_sect_in); peer_req->w.cb = & w_e_end_ov_reply; _dec_rs_pending(mdev, "receive_DataRequest", 2546); goto submit_for_resync; } else { } goto ldv_53481; case 
30U: ; if (mdev->ov_start_sector == 0xffffffffffffffffUL && (mdev->tconn)->agreed_pro_version > 89) { now = jiffies; mdev->ov_start_sector = sector; mdev->ov_position = sector; tmp___6 = drbd_bm_bits(mdev); mdev->ov_left = tmp___6 - (sector >> 3); mdev->rs_total = mdev->ov_left; i = 0; goto ldv_53490; ldv_53489: mdev->rs_mark_left[i] = mdev->ov_left; mdev->rs_mark_time[i] = now; i = i + 1; ldv_53490: ; if (i <= 7) { goto ldv_53489; } else { } _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Online Verify start sector: %llu\n", (unsigned long long )sector); } else { } peer_req->w.cb = & w_e_end_ov_req; fault_type = 3U; goto ldv_53481; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (2574), "i" (12UL)); ldv_53493: ; goto ldv_53493; } ldv_53481: ; if ((unsigned int )*((unsigned char *)mdev + 748UL) != 4U) { tmp___7 = drbd_rs_should_slow_down(mdev, sector); if (tmp___7 != 0) { schedule_timeout_uninterruptible(25L); } else { } } else { } tmp___8 = drbd_rs_begin_io(mdev, sector); if (tmp___8 != 0) { goto out_free_e; } else { } submit_for_resync: atomic_add(size >> 9, & mdev->rs_sect_ev); submit: inc_unacked(mdev); spin_lock_irq(& (mdev->tconn)->req_lock); list_add_tail(& peer_req->w.list, & mdev->read_ee); spin_unlock_irq(& (mdev->tconn)->req_lock); tmp___9 = drbd_submit_peer_request(mdev, peer_req, 0U, (int const )fault_type); if (tmp___9 == 0) { return (0); } else { } dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "submit failed, triggering re-connect\n"); spin_lock_irq(& (mdev->tconn)->req_lock); list_del(& peer_req->w.list); spin_unlock_irq(& (mdev->tconn)->req_lock); out_free_e: put_ldev(mdev); __drbd_free_peer_req(mdev, peer_req, 0); return (-5); } } static int drbd_asb_recover_0p(struct drbd_conf *mdev ) { int self ; int peer ; int rv ; unsigned long ch_self ; unsigned long ch_peer ; enum drbd_after_sb_p after_sb_0p ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { rv = -100; self = (int )(mdev->ldev)->md.uuid[1] & 1; peer = (int )*(mdev->p_uuid + 1UL) & 1; ch_peer = (unsigned long )*(mdev->p_uuid + 4UL); ch_self = mdev->comm_bm_set; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
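/*
 * drbd_asb_recover_0p(): after-split-brain recovery when neither node
 * is primary.  self/peer are the low bits of the bitmap UUIDs (the
 * "was primary" markers), ch_self/ch_peer the number of changed
 * blocks.  Return convention: negative means sync from the peer,
 * positive means sync from this node, -100 means the policy could not
 * auto-resolve.  The case values follow enum drbd_after_sb_p, e.g.
 * 1 discard-younger-primary, 2 discard-older-primary,
 * 3 discard-zero-changes, 4 discard-least-changes, 5 discard-local,
 * 6 discard-remote (reconstructed names).
 */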
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2642, "suspicious rcu_dereference_check() usage"); } else { } } else { } after_sb_0p = (enum drbd_after_sb_p )_________p1->after_sb_0p; rcu_read_unlock___2(); switch ((unsigned int )after_sb_0p) { case 7U: ; case 8U: ; case 9U: ; case 10U: dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Configuration error.\n"); goto ldv_53510; case 0U: ; goto ldv_53510; case 1U: ; if (self == 0 && peer == 1) { rv = -1; goto ldv_53510; } else { } if (self == 1 && peer == 0) { rv = 1; goto ldv_53510; } else { } case 2U: ; if (self == 0 && peer == 1) { rv = 1; goto ldv_53510; } else { } if (self == 1 && peer == 0) { rv = -1; goto ldv_53510; } else { } dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Discard younger/older primary did not find a decision\nUsing discard-least-changes instead\n"); case 3U: ; if (ch_peer == 0UL && ch_self == 0UL) { tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); rv = tmp___1 != 0 ? -1 : 1; goto ldv_53510; } else { if (ch_peer == 0UL) { rv = 1; goto ldv_53510; } else { } if (ch_self == 0UL) { rv = -1; goto ldv_53510; } else { } } if ((unsigned int )after_sb_0p == 3U) { goto ldv_53510; } else { } case 4U: ; if (ch_self < ch_peer) { rv = -1; } else if (ch_self > ch_peer) { rv = 1; } else { tmp___2 = constant_test_bit(1U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); rv = tmp___2 != 0 ? -1 : 1; } goto ldv_53510; case 5U: rv = -1; goto ldv_53510; case 6U: rv = 1; } ldv_53510: ; return (rv); } } static int drbd_asb_recover_1p(struct drbd_conf *mdev ) { int hg ; int rv ; enum drbd_after_sb_p after_sb_1p ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; enum drbd_state_rv rv2 ; union drbd_state val ; union drbd_state mask ; { rv = -100; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2712, "suspicious rcu_dereference_check() usage"); } else { } } else { } after_sb_1p = (enum drbd_after_sb_p )_________p1->after_sb_1p; rcu_read_unlock___2(); switch ((unsigned int )after_sb_1p) { case 1U: ; case 2U: ; case 4U: ; case 5U: ; case 6U: ; case 3U: dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Configuration error.\n"); goto ldv_53533; case 0U: ; goto ldv_53533; case 7U: hg = drbd_asb_recover_0p(mdev); if (hg == -1 && (unsigned int )*((unsigned char *)mdev + 748UL) == 2U) { rv = hg; } else { } if (hg == 1 && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) { rv = hg; } else { } goto ldv_53533; case 10U: rv = drbd_asb_recover_0p(mdev); goto ldv_53533; case 8U: ; return ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U ? 
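/*
 * Case 8 (discard-secondary): the flattened test on byte 748 of struct
 * drbd_conf appears to read the packed state field and compare the
 * role against R_PRIMARY, so the secondary side becomes the sync
 * target.  Case 9 below (call-pri-lost-after-sb) additionally demotes
 * a primary that lost, or invokes the pri-lost-after-sb helper when
 * the demotion fails.
 */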
1 : -1); case 9U: hg = drbd_asb_recover_0p(mdev); if (hg == -1 && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) { drbd_set_role(mdev, R_SECONDARY, 0); val.i = 0U; val.ldv_40024.role = 2U; mask.i = 0U; mask.ldv_40024.role = 3U; rv2 = drbd_change_state(mdev, CS_VERBOSE, mask, val); if ((int )rv2 != 1) { drbd_khelper(mdev, (char *)"pri-lost-after-sb"); } else { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Successfully gave up primary role.\n"); rv = hg; } } else { rv = hg; } } ldv_53533: ; return (rv); } } static int drbd_asb_recover_2p(struct drbd_conf *mdev ) { int hg ; int rv ; enum drbd_after_sb_p after_sb_2p ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; enum drbd_state_rv rv2 ; union drbd_state val ; union drbd_state mask ; { rv = -100; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 2766, "suspicious rcu_dereference_check() usage"); } else { } } else { } after_sb_2p = (enum drbd_after_sb_p )_________p1->after_sb_2p; rcu_read_unlock___2(); switch ((unsigned int )after_sb_2p) { case 1U: ; case 2U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: ; case 8U: ; case 3U: dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Configuration error.\n"); goto ldv_53561; case 10U: rv = drbd_asb_recover_0p(mdev); goto ldv_53561; case 0U: ; goto ldv_53561; case 9U: hg = drbd_asb_recover_0p(mdev); if (hg == -1) { val.i = 0U; val.ldv_40024.role = 2U; mask.i = 0U; mask.ldv_40024.role = 3U; rv2 = drbd_change_state(mdev, CS_VERBOSE, mask, val); if ((int )rv2 != 1) { drbd_khelper(mdev, (char *)"pri-lost-after-sb"); } else { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Successfully gave up primary role.\n"); rv = hg; } } else { rv = hg; } } ldv_53561: ; return (rv); } } static void drbd_uuid_dump(struct drbd_conf *mdev , char *text , u64 *uuid , u64 bits , u64 flags ) { { if ((unsigned long )uuid == (unsigned long )((u64 *)0)) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s uuid info vanished while I was looking!\n", text); return; } else { } _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n", text, *uuid, *(uuid + 1UL), *(uuid + 2UL), *(uuid + 3UL), bits, flags); return; } } static int drbd_uuid_compare(struct drbd_conf *mdev , int *rule_nr ) { u64 self ; u64 peer ; int i ; int j ; int rct ; int dc ; unsigned long tmp ; u64 tmp___0 ; int tmp___1 ; unsigned long tmp___2 ; u64 tmp___3 ; { self = (mdev->ldev)->md.uuid[0] & 0xfffffffffffffffeULL; peer = *(mdev->p_uuid) & 0xfffffffffffffffeULL; *rule_nr = 10; if (self == 4ULL && peer == 4ULL) { return (0); } else { } *rule_nr = 20; if ((self == 4ULL || self == 0ULL) && peer != 4ULL) { return (-2); } else { } *rule_nr = 30; if (self != 4ULL && (peer == 4ULL || peer == 0ULL)) { return (2); } else { } if (self == peer) { if (*(mdev->p_uuid + 1UL) == 0ULL && (mdev->ldev)->md.uuid[1] != 0ULL) { if ((mdev->tconn)->agreed_pro_version <= 90) { return (-1091); } else { } if ((((mdev->ldev)->md.uuid[1] ^ *(mdev->p_uuid + 2UL)) & 0xfffffffffffffffeULL) == 0ULL && (((mdev->ldev)->md.uuid[2] ^ 
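/*
 * drbd_uuid_compare(): classifies the two UUID sets (current, bitmap,
 * and two history slots), always masking bit 0, which carries the
 * was-primary flag.  Results, as consumed by drbd_sync_handshake():
 * 0 in sync, 1/-1 become sync source/target, 2/-2 full-sync
 * source/target, 100/-100 split brain, -1000 unrelated data, and
 * values below -1000 encode a protocol requirement (-1091 means "need
 * at least protocol 91", recovered as -1000 - hg by the caller).  The
 * rule_nr out-parameter (10, 20, ..., 100) is only used for logging.
 * This particular branch repairs a missed "resync finished" event by
 * comparing bitmap and history UUIDs crosswise.
 */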
*(mdev->p_uuid + 3UL)) & 0xfffffffffffffffeULL) == 0ULL) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "was SyncSource, missed the resync finished event, corrected myself:\n"); drbd_uuid_move_history(mdev); (mdev->ldev)->md.uuid[2] = (mdev->ldev)->md.uuid[1]; (mdev->ldev)->md.uuid[1] = 0ULL; if ((int )mdev->state.ldv_49522.disk > 2) { tmp = drbd_bm_total_weight(mdev); tmp___0 = (u64 )tmp; } else { tmp___0 = 0ULL; } drbd_uuid_dump(mdev, (char *)"self", (u64 *)(& (mdev->ldev)->md.uuid), tmp___0, 0ULL); *rule_nr = 34; } else { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "was SyncSource (peer failed to write sync_uuid)\n"); *rule_nr = 36; } return (1); } else { } if ((mdev->ldev)->md.uuid[1] == 0ULL && *(mdev->p_uuid + 1UL) != 0ULL) { if ((mdev->tconn)->agreed_pro_version <= 90) { return (-1091); } else { } if ((((mdev->ldev)->md.uuid[2] ^ *(mdev->p_uuid + 1UL)) & 0xfffffffffffffffeULL) == 0ULL && (((mdev->ldev)->md.uuid[3] ^ *(mdev->p_uuid + 2UL)) & 0xfffffffffffffffeULL) == 0ULL) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "was SyncTarget, peer missed the resync finished event, corrected peer:\n"); *(mdev->p_uuid + 3UL) = *(mdev->p_uuid + 2UL); *(mdev->p_uuid + 2UL) = *(mdev->p_uuid + 1UL); *(mdev->p_uuid + 1UL) = 0ULL; drbd_uuid_dump(mdev, (char *)"peer", mdev->p_uuid, *(mdev->p_uuid + 4UL), *(mdev->p_uuid + 5UL)); *rule_nr = 35; } else { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "was SyncTarget (failed to write sync_uuid)\n"); *rule_nr = 37; } return (-1); } else { } tmp___1 = constant_test_bit(5U, (unsigned long const volatile *)(& mdev->flags)); rct = (int )((tmp___1 != 0 ? 1U : 0U) + ((unsigned int )*(mdev->p_uuid + 5UL) & 2U)); *rule_nr = 40; switch (rct) { case 0: ; return (0); case 1: ; return (1); case 2: ; return (-1); case 3: dc = constant_test_bit(1U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); return (dc != 0 ? -1 : 1); } } else { } *rule_nr = 50; peer = *(mdev->p_uuid + 1UL) & 0xfffffffffffffffeULL; if (self == peer) { return (-1); } else { } *rule_nr = 51; peer = *(mdev->p_uuid + 2UL) & 0xfffffffffffffffeULL; if (self == peer) { if ((mdev->tconn)->agreed_pro_version <= 95 ? (((mdev->ldev)->md.uuid[2] ^ *(mdev->p_uuid + 3UL)) & 0xfffffffffffffffeULL) == 0ULL : peer + 281474976710656ULL == (*(mdev->p_uuid + 1UL) & 0xfffffffffffffffeULL)) { if ((mdev->tconn)->agreed_pro_version <= 90) { return (-1091); } else { } *(mdev->p_uuid + 1UL) = *(mdev->p_uuid + 2UL); *(mdev->p_uuid + 2UL) = *(mdev->p_uuid + 3UL); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Lost last syncUUID packet, corrected:\n"); drbd_uuid_dump(mdev, (char *)"peer", mdev->p_uuid, *(mdev->p_uuid + 4UL), *(mdev->p_uuid + 5UL)); return (-1); } else { } } else { } *rule_nr = 60; self = (mdev->ldev)->md.uuid[0] & 0xfffffffffffffffeULL; i = 2; goto ldv_53592; ldv_53591: peer = *(mdev->p_uuid + (unsigned long )i) & 0xfffffffffffffffeULL; if (self == peer) { return (-2); } else { } i = i + 1; ldv_53592: ; if (i <= 3) { goto ldv_53591; } else { } *rule_nr = 70; self = (mdev->ldev)->md.uuid[1] & 0xfffffffffffffffeULL; peer = *(mdev->p_uuid) & 0xfffffffffffffffeULL; if (self == peer) { return (1); } else { } *rule_nr = 71; self = (mdev->ldev)->md.uuid[2] & 0xfffffffffffffffeULL; if (self == peer) { if ((mdev->tconn)->agreed_pro_version <= 95 ? 
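/*
 * Rules 51 and 71 (this check) detect a lost P_SYNC_UUID packet:
 * before protocol 96 the crosswise history comparison is used; from
 * protocol 96 on, the bitmap UUID is expected to equal the older value
 * plus 281474976710656ULL, i.e. 1ULL << 48, the UUID_NEW_BM_OFFSET
 * bump of the original driver (name reconstructed).  When it matches,
 * the UUID history is rotated and a bitmap-based resync still works.
 */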
(((mdev->ldev)->md.uuid[3] ^ *(mdev->p_uuid + 2UL)) & 0xfffffffffffffffeULL) == 0ULL : self + 281474976710656ULL == ((mdev->ldev)->md.uuid[1] & 0xfffffffffffffffeULL)) { if ((mdev->tconn)->agreed_pro_version <= 90) { return (-1091); } else { } __drbd_uuid_set(mdev, 1, (mdev->ldev)->md.uuid[2]); __drbd_uuid_set(mdev, 2, (mdev->ldev)->md.uuid[3]); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Last syncUUID did not get through, corrected:\n"); if ((int )mdev->state.ldv_49522.disk > 2) { tmp___2 = drbd_bm_total_weight(mdev); tmp___3 = (u64 )tmp___2; } else { tmp___3 = 0ULL; } drbd_uuid_dump(mdev, (char *)"self", (u64 *)(& (mdev->ldev)->md.uuid), tmp___3, 0ULL); return (1); } else { } } else { } *rule_nr = 80; peer = *(mdev->p_uuid) & 0xfffffffffffffffeULL; i = 2; goto ldv_53595; ldv_53594: self = (mdev->ldev)->md.uuid[i] & 0xfffffffffffffffeULL; if (self == peer) { return (2); } else { } i = i + 1; ldv_53595: ; if (i <= 3) { goto ldv_53594; } else { } *rule_nr = 90; self = (mdev->ldev)->md.uuid[1] & 0xfffffffffffffffeULL; peer = *(mdev->p_uuid + 1UL) & 0xfffffffffffffffeULL; if (self == peer && self != 0ULL) { return (100); } else { } *rule_nr = 100; i = 2; goto ldv_53601; ldv_53600: self = (mdev->ldev)->md.uuid[i] & 0xfffffffffffffffeULL; j = 2; goto ldv_53598; ldv_53597: peer = *(mdev->p_uuid + (unsigned long )j) & 0xfffffffffffffffeULL; if (self == peer) { return (-100); } else { } j = j + 1; ldv_53598: ; if (j <= 3) { goto ldv_53597; } else { } i = i + 1; ldv_53601: ; if (i <= 3) { goto ldv_53600; } else { } return (-1000); } } static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev , enum drbd_role peer_role , enum drbd_disk_state peer_disk ) { enum drbd_conns rv ; enum drbd_disk_state mydisk ; struct net_conf *nc ; int hg ; int rule_nr ; int rr_conflict ; int tentative ; int f ; long ret ; int __x___0 ; int tmp ; long ret___0 ; int __x___2 ; struct net_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; int pcount ; int forced ; long ret___1 ; int __x___4 ; int tmp___2 ; int tmp___3 ; long ret___2 ; int __x___6 ; long ret___3 ; int __x___8 ; char const *tmp___4 ; int tmp___5 ; int tmp___6 ; long ret___4 ; int __x___10 ; unsigned long tmp___7 ; unsigned long tmp___8 ; { rv = C_MASK; mydisk = (enum drbd_disk_state )mdev->state.ldv_49522.disk; if ((unsigned int )mydisk == 3U) { mydisk = (enum drbd_disk_state )mdev->new_state_tmp.ldv_40024.disk; } else { } _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_sync_handshake:\n"); spin_lock_irq(& (mdev->ldev)->md.uuid_lock); drbd_uuid_dump(mdev, (char *)"self", (u64 *)(& (mdev->ldev)->md.uuid), (u64 )mdev->comm_bm_set, 0ULL); drbd_uuid_dump(mdev, (char *)"peer", mdev->p_uuid, *(mdev->p_uuid + 4UL), *(mdev->p_uuid + 5UL)); hg = drbd_uuid_compare(mdev, & rule_nr); spin_unlock_irq(& (mdev->ldev)->md.uuid_lock); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "uuid_compare()=%d by rule %d\n", hg, rule_nr); if (hg == -1000) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "Unrelated data, aborting!\n"); return (C_MASK); } else { } if (hg < -1000) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "To resolve this both sides have to support at least protocol %d\n", -1000 - hg); return (C_MASK); } else { } if (((unsigned int )mydisk == 4U && (unsigned int )peer_disk > 4U) || ((unsigned int )peer_disk == 4U && (unsigned int )mydisk > 4U)) { if (hg == -100) { tmp = 1; } else { __x___0 = hg; ret = (long )(__x___0 < 0 ? 
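/*
 * drbd_sync_handshake(): combines drbd_uuid_compare() with the roles
 * and disk states negotiated with the peer to choose the next
 * connection state: C_WF_BITMAP_S or C_WF_BITMAP_T to start a resync,
 * C_CONNECTED when already in sync, C_MASK to drop the connection.
 * The __x___N/ret___N pairs throughout are CIL expansions of abs(hg);
 * |hg| == 2 forces a full sync and |hg| == 100 marks a split brain.
 */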
- __x___0 : __x___0); if (ret == 2L) { tmp = 1; } else { tmp = 0; } } f = tmp; hg = (unsigned int )mydisk > 4U ? 1 : -1; if (f != 0) { hg = hg * 2; } else { } _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Becoming sync %s due to disk states.\n", hg > 0 ? (char *)"source" : (char *)"target"); } else { } __x___2 = hg; ret___0 = (long )(__x___2 < 0 ? - __x___2 : __x___2); if (ret___0 == 100L) { drbd_khelper(mdev, (char *)"initial-split-brain"); } else { } rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3067, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if (hg == 100 || (hg == -100 && (int )((signed char )nc->always_asbp) != 0)) { pcount = ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U) + ((unsigned int )peer_role == 1U); forced = hg == -100; switch (pcount) { case 0: hg = drbd_asb_recover_0p(mdev); goto ldv_53630; case 1: hg = drbd_asb_recover_1p(mdev); goto ldv_53630; case 2: hg = drbd_asb_recover_2p(mdev); goto ldv_53630; } ldv_53630: __x___4 = hg; ret___1 = (long )(__x___4 < 0 ? - __x___4 : __x___4); if (ret___1 <= 99L) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Split-Brain detected, %d primaries, automatically solved. Sync from %s node\n", pcount, hg < 0 ? (char *)"peer" : (char *)"this"); if (forced != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Doing a full sync, since UUIDs where ambiguous.\n"); hg = hg * 2; } else { } } else { } } else { } if (hg == -100) { tmp___2 = constant_test_bit(21U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 != 0 && (*(mdev->p_uuid + 5UL) & 1ULL) == 0ULL) { hg = -1; } else { } tmp___3 = constant_test_bit(21U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___3 == 0 && (int )*(mdev->p_uuid + 5UL) & 1) { hg = 1; } else { } __x___6 = hg; ret___2 = (long )(__x___6 < 0 ? - __x___6 : __x___6); if (ret___2 <= 99L) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Split-Brain detected, manually solved. Sync from %s node\n", hg < 0 ? 
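/*
 * Manual split-brain resolution: when the automatic policies returned
 * -100, the connect-time discard-my-data setting breaks the tie (bit
 * 21 of mdev->flags on this side, bit 0 of the peer's UUID flags word;
 * names reconstructed).  A resulting |hg| <= 99 means a sync direction
 * was found and only a warning is logged.
 */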
(char *)"peer" : (char *)"this"); } else { } } else { } rr_conflict = (int )nc->rr_conflict; tentative = (int )nc->tentative; rcu_read_unlock___2(); if (hg == -100) { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "Split-Brain detected but unresolved, dropping connection!\n"); drbd_khelper(mdev, (char *)"split-brain"); return (C_MASK); } else { } if (hg > 0 && (unsigned int )mydisk <= 4U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "I shall become SyncSource, but I am inconsistent!\n"); return (C_MASK); } else { } if ((hg < 0 && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) && (int )mdev->state.ldv_49522.disk > 6) { switch (rr_conflict) { case 9: drbd_khelper(mdev, (char *)"pri-lost"); case 0: dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "I shall become SyncTarget, but I am primary!\n"); return (C_MASK); case 10: dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Becoming SyncTarget, violating the stable-dataassumption\n"); } } else { } if (tentative != 0) { goto _L; } else { tmp___5 = constant_test_bit(8U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); if (tmp___5 != 0) { _L: /* CIL Label */ if (hg == 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "dry-run connect: No resync, would become Connected immediately.\n"); } else { __x___8 = hg; ret___3 = (long )(__x___8 < 0 ? - __x___8 : __x___8); tmp___4 = drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "dry-run connect: Would become %s, doing a %s resync.", tmp___4, ret___3 > 1L ? (char *)"full" : (char *)"bit-map based"); } return (C_MASK); } else { } } __x___10 = hg; ret___4 = (long )(__x___10 < 0 ? - __x___10 : __x___10); if (ret___4 > 1L) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n"); tmp___6 = drbd_bitmap_io(mdev, & drbd_bmio_set_n_write, (char *)"set_n_write from sync_handshake", BM_LOCKED_SET_ALLOWED); if (tmp___6 != 0) { return (C_MASK); } else { } } else { } if (hg > 0) { rv = C_WF_BITMAP_S; } else if (hg < 0) { rv = C_WF_BITMAP_T; } else { rv = C_CONNECTED; tmp___8 = drbd_bm_total_weight(mdev); if (tmp___8 != 0UL) { tmp___7 = drbd_bm_total_weight(mdev); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "No resync, but %lu bits in bitmap!\n", tmp___7); } else { } } return (rv); } } static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer ) { { if ((unsigned int )peer == 6U) { return (ASB_DISCARD_LOCAL); } else { } if ((unsigned int )peer == 5U) { return (ASB_DISCARD_REMOTE); } else { } return (peer); } } static int receive_protocol(struct drbd_tconn *tconn , struct packet_info *pi ) { struct p_protocol *p ; enum drbd_after_sb_p p_after_sb_0p ; enum drbd_after_sb_p p_after_sb_1p ; enum drbd_after_sb_p p_after_sb_2p ; int p_proto ; int p_discard_my_data ; int p_two_primaries ; int cf ; struct net_conf *nc ; struct net_conf *old_net_conf ; struct net_conf *new_net_conf ; char integrity_alg[64U] ; unsigned int tmp ; struct crypto_hash *peer_integrity_tfm ; void *int_dig_in ; void *int_dig_vv ; __u32 tmp___0 ; __u32 tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; int err ; struct net_conf *_________p1 ; bool __warned ; int tmp___6 ; int tmp___7 ; enum drbd_after_sb_p tmp___8 ; enum drbd_after_sb_p tmp___9 ; enum drbd_after_sb_p tmp___10 ; int tmp___11 ; int hash_size ; unsigned int tmp___12 ; void 
*tmp___13 ; enum drbd_after_sb_p tmp___14 ; enum drbd_after_sb_p tmp___15 ; enum drbd_after_sb_p tmp___16 ; int tmp___17 ; union drbd_state val ; union drbd_state mask ; { p = (struct p_protocol *)pi->data; new_net_conf = 0; integrity_alg[0] = '\000'; tmp = 1U; while (1) { if (tmp >= 64U) { break; } else { } integrity_alg[tmp] = (char)0; tmp = tmp + 1U; } peer_integrity_tfm = 0; int_dig_in = 0; int_dig_vv = 0; tmp___0 = __fswab32(p->protocol); p_proto = (int )tmp___0; tmp___1 = __fswab32(p->after_sb_0p); p_after_sb_0p = (enum drbd_after_sb_p )tmp___1; tmp___2 = __fswab32(p->after_sb_1p); p_after_sb_1p = (enum drbd_after_sb_p )tmp___2; tmp___3 = __fswab32(p->after_sb_2p); p_after_sb_2p = (enum drbd_after_sb_p )tmp___3; tmp___4 = __fswab32(p->two_primaries); p_two_primaries = (int )tmp___4; tmp___5 = __fswab32(p->conn_flags); cf = (int )tmp___5; p_discard_my_data = cf & 1; if (tconn->agreed_pro_version > 86) { if (pi->size > 64U) { return (-5); } else { } err = drbd_recv_all(tconn, (void *)(& integrity_alg), (size_t )pi->size); if (err != 0) { return (err); } else { } integrity_alg[63] = 0; } else { } if ((unsigned int )pi->cmd != 45U) { clear_bit(8, (unsigned long volatile *)(& tconn->flags)); if ((cf & 2) != 0) { set_bit(8U, (unsigned long volatile *)(& tconn->flags)); } else { } rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___6 = debug_lockdep_rcu_enabled(); if (tmp___6 != 0 && ! __warned) { tmp___7 = rcu_read_lock_held(); if (tmp___7 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3224, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((__u32 )p_proto != nc->wire_protocol) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"protocol"); goto disconnect_rcu_unlock; } else { } tmp___8 = convert_after_sb(p_after_sb_0p); if ((unsigned int )tmp___8 != nc->after_sb_0p) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"after-sb-0pri"); goto disconnect_rcu_unlock; } else { } tmp___9 = convert_after_sb(p_after_sb_1p); if ((unsigned int )tmp___9 != nc->after_sb_1p) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"after-sb-1pri"); goto disconnect_rcu_unlock; } else { } tmp___10 = convert_after_sb(p_after_sb_2p); if ((unsigned int )tmp___10 != nc->after_sb_2p) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"after-sb-2pri"); goto disconnect_rcu_unlock; } else { } if (p_discard_my_data != 0 && (int )((signed char )nc->discard_my_data) != 0) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"discard-my-data"); goto disconnect_rcu_unlock; } else { } if ((int )nc->two_primaries != p_two_primaries) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"allow-two-primaries"); goto disconnect_rcu_unlock; } else { } tmp___11 = strcmp((char const *)(& integrity_alg), (char const *)(& nc->integrity_alg)); if (tmp___11 != 0) { printk("\vd-con %s: incompatible %s settings\n", tconn->name, (char *)"data-integrity-alg"); goto disconnect_rcu_unlock; } else { } rcu_read_unlock___2(); } else { } if ((int )((signed char )integrity_alg[0]) != 0) { peer_integrity_tfm = crypto_alloc_hash((char const *)(& integrity_alg), 0U, 128U); if ((unsigned long )peer_integrity_tfm == (unsigned long )((struct 
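/*
 * A non-empty integrity algorithm name is instantiated with
 * crypto_alloc_hash(), and two digest-sized buffers are allocated for
 * checking incoming data (int_dig_in) and for local verification
 * (int_dig_vv).  The 208U allocation flag is the numeric value of
 * GFP_KERNEL on this kernel.
 */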
crypto_hash *)0)) { printk("\vd-con %s: peer data-integrity-alg %s not supported\n", tconn->name, (char *)(& integrity_alg)); goto disconnect; } else { } tmp___12 = crypto_hash_digestsize(peer_integrity_tfm); hash_size = (int )tmp___12; int_dig_in = kmalloc((size_t )hash_size, 208U); int_dig_vv = kmalloc((size_t )hash_size, 208U); if ((unsigned long )int_dig_in == (unsigned long )((void *)0) || (unsigned long )int_dig_vv == (unsigned long )((void *)0)) { printk("\vd-con %s: Allocation of buffers for data integrity checking failed\n", tconn->name); goto disconnect; } else { } } else { } tmp___13 = kmalloc(420UL, 208U); new_net_conf = (struct net_conf *)tmp___13; if ((unsigned long )new_net_conf == (unsigned long )((struct net_conf *)0)) { printk("\vd-con %s: Allocation of new net_conf failed\n", tconn->name); goto disconnect; } else { } ldv_mutex_lock_106(& tconn->data.mutex); ldv_mutex_lock_107(& tconn->conf_update); old_net_conf = tconn->net_conf; *new_net_conf = *old_net_conf; new_net_conf->wire_protocol = (__u32 )p_proto; tmp___14 = convert_after_sb(p_after_sb_0p); new_net_conf->after_sb_0p = (__u32 )tmp___14; tmp___15 = convert_after_sb(p_after_sb_1p); new_net_conf->after_sb_1p = (__u32 )tmp___15; tmp___16 = convert_after_sb(p_after_sb_2p); new_net_conf->after_sb_2p = (__u32 )tmp___16; new_net_conf->two_primaries = (char )p_two_primaries; __asm__ volatile ("": : : "memory"); tconn->net_conf = new_net_conf; ldv_mutex_unlock_108(& tconn->conf_update); ldv_mutex_unlock_109(& tconn->data.mutex); crypto_free_hash(tconn->peer_integrity_tfm); kfree((void const *)tconn->int_dig_in); kfree((void const *)tconn->int_dig_vv); tconn->peer_integrity_tfm = peer_integrity_tfm; tconn->int_dig_in = int_dig_in; tconn->int_dig_vv = int_dig_vv; tmp___17 = strcmp((char const *)(& old_net_conf->integrity_alg), (char const *)(& integrity_alg)); if (tmp___17 != 0) { printk("\016d-con %s: peer data-integrity-alg: %s\n", tconn->name, (int )((signed char )integrity_alg[0]) != 0 ? (char *)(& integrity_alg) : (char *)"(none)"); } else { } synchronize_rcu(); kfree((void const *)old_net_conf); return (0); disconnect_rcu_unlock: rcu_read_unlock___2(); disconnect: crypto_free_hash(peer_integrity_tfm); kfree((void const *)int_dig_in); kfree((void const *)int_dig_vv); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); return (-5); } } struct crypto_hash *drbd_crypto_alloc_digest_safe(struct drbd_conf const *mdev , char const *alg , char const *name ) { struct crypto_hash *tfm ; long tmp ; long tmp___0 ; { if ((int )((signed char )*alg) == 0) { return (0); } else { } tfm = crypto_alloc_hash(alg, 0U, 128U); tmp___0 = IS_ERR((void const *)tfm); if (tmp___0 != 0L) { tmp = PTR_ERR((void const *)tfm); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Can not allocate \"%s\" as %s (reason: %ld)\n", alg, name, tmp); return (tfm); } else { } return (tfm); } } static int ignore_remaining_packet(struct drbd_tconn *tconn , struct packet_info *pi ) { void *buffer ; int size ; int s ; int __min1 ; int __min2 ; { buffer = tconn->data.rbuf; size = (int )pi->size; goto ldv_53703; ldv_53702: __min1 = size; __min2 = 4096; s = __min1 < __min2 ? 
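/*
 * ignore_remaining_packet(): drains the payload of a packet that will
 * not be processed, receiving it into the connection's scratch buffer
 * in chunks of at most 4096 bytes; the __min1/__min2 pair is the CIL
 * expansion of min().  Used by config_unknown_volume() below when a
 * packet names a volume that is not configured locally.
 */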
__min1 : __min2; s = drbd_recv(tconn, buffer, (size_t )s); if (s <= 0) { if (s < 0) { return (s); } else { } goto ldv_53701; } else { } size = size - s; ldv_53703: ; if (size != 0) { goto ldv_53702; } else { } ldv_53701: ; if (size != 0) { return (-5); } else { } return (0); } } static int config_unknown_volume(struct drbd_tconn *tconn , struct packet_info *pi ) { char const *tmp ; int tmp___0 ; { tmp = cmdname(pi->cmd); printk("\fd-con %s: %s packet received for volume %u, which is not configured locally\n", tconn->name, tmp, pi->vnr); tmp___0 = ignore_remaining_packet(tconn, pi); return (tmp___0); } } static int receive_SyncParam(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_rs_param_95 *p ; unsigned int header_size ; unsigned int data_size ; unsigned int exp_max_sz ; struct crypto_hash *verify_tfm ; struct crypto_hash *csums_tfm ; struct net_conf *old_net_conf ; struct net_conf *new_net_conf ; struct disk_conf *old_disk_conf ; struct disk_conf *new_disk_conf ; int apv ; struct fifo_buffer *old_plan ; struct fifo_buffer *new_plan ; int fifo_size ; int err ; int tmp ; void *tmp___0 ; __u32 tmp___1 ; int tmp___2 ; long tmp___3 ; int tmp___4 ; long tmp___5 ; int tmp___6 ; __u32 tmp___7 ; __u32 tmp___8 ; __u32 tmp___9 ; __u32 tmp___10 ; void *tmp___11 ; size_t tmp___12 ; size_t tmp___13 ; union drbd_state val ; union drbd_state mask ; { verify_tfm = 0; csums_tfm = 0; new_net_conf = 0; old_disk_conf = 0; new_disk_conf = 0; apv = tconn->agreed_pro_version; old_plan = 0; new_plan = 0; fifo_size = 0; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { tmp = config_unknown_volume(tconn, pi); return (tmp); } else { } exp_max_sz = apv > 87 ? (apv != 88 ? (apv <= 94 ? 
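/*
 * receive_SyncParam(): the accepted packet size grows with the agreed
 * protocol version apv: up to 87 only the resync rate (4 bytes), 88
 * adds verify-alg (68), 89..94 adds csums-alg (132), and 95 and later
 * add the dynamic resync-controller parameters c_plan_ahead,
 * c_delay_target, c_fill_target and c_max_rate (148).
 */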
132U : 148U) : 68U) : 4U; if (pi->size > exp_max_sz) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "SyncParam packet too long: received %u, expected <= %u bytes\n", pi->size, exp_max_sz); return (-5); } else { } if (apv <= 88) { header_size = 4U; data_size = pi->size - header_size; } else if (apv <= 94) { header_size = 132U; data_size = pi->size - header_size; if (data_size != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( data_size == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3434); } else { } } else { header_size = 148U; data_size = pi->size - header_size; if (data_size != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( data_size == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3438); } else { } } p = (struct p_rs_param_95 *)pi->data; memset((void *)(& p->verify_alg), 0, 128UL); err = drbd_recv_all(mdev->tconn, (void *)p, (size_t )header_size); if (err != 0) { return (err); } else { } ldv_mutex_lock_110(& (mdev->tconn)->conf_update); old_net_conf = (mdev->tconn)->net_conf; tmp___2 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___2 != 0) { tmp___0 = kzalloc(344UL, 208U); new_disk_conf = (struct disk_conf *)tmp___0; if ((unsigned long )new_disk_conf == (unsigned long )((struct disk_conf *)0)) { put_ldev(mdev); ldv_mutex_unlock_111(& (mdev->tconn)->conf_update); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Allocation of new disk_conf failed\n"); return (-12); } else { } old_disk_conf = (mdev->ldev)->disk_conf; *new_disk_conf = *old_disk_conf; tmp___1 = __fswab32(p->resync_rate); new_disk_conf->resync_rate = tmp___1; } else { } if (apv > 87) { if (apv == 88) { if (data_size > 64U || data_size == 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "verify-alg of wrong size, peer wants %u, accepting only up to %u byte\n", data_size, 64); err = -5; goto reconnect; } else { } err = drbd_recv_all(mdev->tconn, (void *)(& p->verify_alg), (size_t )data_size); if (err != 0) { goto reconnect; } else { } if ((int )((signed char )p->verify_alg[data_size - 1U]) != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( p->verify_alg[data_size-1] == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3481); } else { } p->verify_alg[data_size - 1U] = 0; } else { if ((int )((signed char )p->verify_alg[63]) != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( p->verify_alg[SHARED_SECRET_MAX-1] == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3487); } else { } if ((int )((signed char )p->csums_alg[63]) != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( p->csums_alg[SHARED_SECRET_MAX-1] == 0 ) in %s:%d\n", (char 
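/*
 * Algorithm names travel as NUL-padded 64-byte fields; the asserts
 * around this point check that the last byte is already NUL before
 * forcing termination.  A verify-alg or csums-alg differing from the
 * local net_conf is fatal during the initial parameter exchange (the
 * flattened 144U state test, presumably C_WF_REPORT_PARAMS in the
 * original source), but a change arriving on an established connection
 * is adopted via drbd_crypto_alloc_digest_safe().
 */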
*)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3488); } else { } p->verify_alg[63] = 0; p->csums_alg[63] = 0; } tmp___4 = strcmp((char const *)(& old_net_conf->verify_alg), (char const *)(& p->verify_alg)); if (tmp___4 != 0) { if ((unsigned int )*((unsigned short *)mdev + 374UL) == 144U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n", (char *)(& old_net_conf->verify_alg), (char *)(& p->verify_alg)); goto disconnect; } else { } verify_tfm = drbd_crypto_alloc_digest_safe((struct drbd_conf const *)mdev, (char const *)(& p->verify_alg), "verify-alg"); tmp___3 = IS_ERR((void const *)verify_tfm); if (tmp___3 != 0L) { verify_tfm = 0; goto disconnect; } else { } } else { } if (apv > 88) { tmp___6 = strcmp((char const *)(& old_net_conf->csums_alg), (char const *)(& p->csums_alg)); if (tmp___6 != 0) { if ((unsigned int )*((unsigned short *)mdev + 374UL) == 144U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n", (char *)(& old_net_conf->csums_alg), (char *)(& p->csums_alg)); goto disconnect; } else { } csums_tfm = drbd_crypto_alloc_digest_safe((struct drbd_conf const *)mdev, (char const *)(& p->csums_alg), "csums-alg"); tmp___5 = IS_ERR((void const *)csums_tfm); if (tmp___5 != 0L) { csums_tfm = 0; goto disconnect; } else { } } else { } } else { } if (apv > 94 && (unsigned long )new_disk_conf != (unsigned long )((struct disk_conf *)0)) { tmp___7 = __fswab32(p->c_plan_ahead); new_disk_conf->c_plan_ahead = tmp___7; tmp___8 = __fswab32(p->c_delay_target); new_disk_conf->c_delay_target = tmp___8; tmp___9 = __fswab32(p->c_fill_target); new_disk_conf->c_fill_target = tmp___9; tmp___10 = __fswab32(p->c_max_rate); new_disk_conf->c_max_rate = tmp___10; fifo_size = (int )((new_disk_conf->c_plan_ahead * 250U) / 250U); if ((unsigned int )fifo_size != (mdev->rs_plan_s)->size) { new_plan = fifo_alloc(fifo_size); if ((unsigned long )new_plan == (unsigned long )((struct fifo_buffer *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "kmalloc of fifo_buffer failed"); put_ldev(mdev); goto disconnect; } else { } } else { } } else { } if ((unsigned long )verify_tfm != (unsigned long )((struct crypto_hash *)0) || (unsigned long )csums_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___11 = kzalloc(420UL, 208U); new_net_conf = (struct net_conf *)tmp___11; if ((unsigned long )new_net_conf == (unsigned long )((struct net_conf *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Allocation of new net_conf failed\n"); goto disconnect; } else { } *new_net_conf = *old_net_conf; if ((unsigned long )verify_tfm != (unsigned long )((struct crypto_hash *)0)) { strcpy((char *)(& new_net_conf->verify_alg), (char const *)(& p->verify_alg)); tmp___12 = strlen((char const *)(& p->verify_alg)); new_net_conf->verify_alg_len = (__u32 )tmp___12 + 1U; crypto_free_hash((mdev->tconn)->verify_tfm); (mdev->tconn)->verify_tfm = verify_tfm; _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "using verify-alg: \"%s\"\n", (char *)(& p->verify_alg)); } else { } if ((unsigned long )csums_tfm != (unsigned long )((struct crypto_hash *)0)) { strcpy((char *)(& new_net_conf->csums_alg), (char const *)(& p->csums_alg)); tmp___13 = strlen((char const *)(& p->csums_alg)); new_net_conf->csums_alg_len = 
(__u32 )tmp___13 + 1U; crypto_free_hash((mdev->tconn)->csums_tfm); (mdev->tconn)->csums_tfm = csums_tfm; _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "using csums-alg: \"%s\"\n", (char *)(& p->csums_alg)); } else { } __asm__ volatile ("": : : "memory"); tconn->net_conf = new_net_conf; } else { } } else { } if ((unsigned long )new_disk_conf != (unsigned long )((struct disk_conf *)0)) { __asm__ volatile ("": : : "memory"); (mdev->ldev)->disk_conf = new_disk_conf; put_ldev(mdev); } else { } if ((unsigned long )new_plan != (unsigned long )((struct fifo_buffer *)0)) { old_plan = mdev->rs_plan_s; __asm__ volatile ("": : : "memory"); mdev->rs_plan_s = new_plan; } else { } ldv_mutex_unlock_112(& (mdev->tconn)->conf_update); synchronize_rcu(); if ((unsigned long )new_net_conf != (unsigned long )((struct net_conf *)0)) { kfree((void const *)old_net_conf); } else { } kfree((void const *)old_disk_conf); kfree((void const *)old_plan); return (0); reconnect: ; if ((unsigned long )new_disk_conf != (unsigned long )((struct disk_conf *)0)) { put_ldev(mdev); kfree((void const *)new_disk_conf); } else { } ldv_mutex_unlock_113(& (mdev->tconn)->conf_update); return (-5); disconnect: kfree((void const *)new_plan); if ((unsigned long )new_disk_conf != (unsigned long )((struct disk_conf *)0)) { put_ldev(mdev); kfree((void const *)new_disk_conf); } else { } ldv_mutex_unlock_114(& (mdev->tconn)->conf_update); crypto_free_hash(csums_tfm); crypto_free_hash(verify_tfm); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask, val, CS_HARD); return (-5); } } static void warn_if_differ_considerably(struct drbd_conf *mdev , char const *s , sector_t a , sector_t b ) { sector_t d ; { if (a == 0UL || b == 0UL) { return; } else { } d = a > b ? a - b : b - a; if (a >> 3 < d || b >> 3 < d) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Considerable difference in %s: %llus vs. %llus\n", s, (unsigned long long )a, (unsigned long long )b); } else { } return; } } static int receive_sizes(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_sizes *p ; enum determine_dev_size dd ; sector_t p_size ; sector_t p_usize ; sector_t my_usize ; int ldsc ; enum dds_flags ddsf ; int tmp ; __u64 tmp___0 ; __u64 tmp___1 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; sector_t tmp___4 ; sector_t __x ; sector_t __y ; sector_t _min1 ; sector_t _min2 ; sector_t tmp___5 ; sector_t tmp___6 ; union drbd_state val ; union drbd_state mask ; sector_t tmp___7 ; sector_t tmp___8 ; struct disk_conf *old_disk_conf ; struct disk_conf *new_disk_conf ; void *tmp___9 ; int tmp___10 ; __u16 tmp___11 ; int tmp___12 ; __u32 tmp___13 ; sector_t tmp___14 ; int tmp___15 ; __u64 tmp___16 ; sector_t tmp___17 ; int tmp___18 ; { p = (struct p_sizes *)pi->data; dd = 0; ldsc = 0; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { tmp = config_unknown_volume(tconn, pi); return (tmp); } else { } tmp___0 = __fswab64(p->d_size); p_size = (sector_t )tmp___0; tmp___1 = __fswab64(p->u_size); p_usize = (sector_t )tmp___1; mdev->p_size = p_size; tmp___10 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___10 != 0) { rcu_read_lock___2(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
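/* The _________p1 / __warned / lockdep_rcu_suspicious() cluster that
 * resumes after this comment is what rcu_dereference()-style accessors
 * expand to after preprocessing: load the pointer once through a
 * volatile access and, with lockdep enabled, warn a single time if the
 * caller does not actually hold rcu_read_lock().  receive_sizes() uses
 * it to read (mdev->ldev)->disk_conf->disk_size under
 * rcu_read_lock___2(). */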
__warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 3643, "suspicious rcu_dereference_check() usage"); } else { } } else { } my_usize = (sector_t )_________p1->disk_size; rcu_read_unlock___2(); tmp___4 = drbd_get_max_capacity(mdev->ldev); warn_if_differ_considerably(mdev, "lower level device sizes", p_size, tmp___4); warn_if_differ_considerably(mdev, "user requested size", p_usize, my_usize); if ((unsigned int )*((unsigned short *)mdev + 374UL) == 144U) { __x = my_usize; __y = p_usize; if (__x != 0UL) { if (__y != 0UL) { _min1 = __x; _min2 = __y; tmp___5 = _min1 < _min2 ? _min1 : _min2; } else { tmp___5 = __x; } tmp___6 = tmp___5; } else { tmp___6 = __y; } p_usize = tmp___6; } else { } tmp___7 = drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0); tmp___8 = drbd_get_capacity(mdev->this_bdev); if ((tmp___7 < tmp___8 && (int )mdev->state.ldv_49522.disk > 4) && (int )mdev->state.ldv_49522.conn <= 9) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "The peer\'s disk size is too small!\n"); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask, val, CS_HARD); put_ldev(mdev); return (-5); } else { } if (my_usize != p_usize) { new_disk_conf = 0; tmp___9 = kzalloc(344UL, 208U); new_disk_conf = (struct disk_conf *)tmp___9; if ((unsigned long )new_disk_conf == (unsigned long )((struct disk_conf *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Allocation of new disk_conf failed\n"); put_ldev(mdev); return (-12); } else { } ldv_mutex_lock_115(& (mdev->tconn)->conf_update); old_disk_conf = (mdev->ldev)->disk_conf; *new_disk_conf = *old_disk_conf; new_disk_conf->disk_size = (__u64 )p_usize; __asm__ volatile ("": : : "memory"); (mdev->ldev)->disk_conf = new_disk_conf; ldv_mutex_unlock_116(& (mdev->tconn)->conf_update); synchronize_rcu(); kfree((void const *)old_disk_conf); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Peer sets u_size to %lu sectors\n", my_usize); } else { } put_ldev(mdev); } else { } tmp___11 = __fswab16((int )p->dds_flags); ddsf = (enum dds_flags )tmp___11; tmp___12 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___12 != 0) { dd = drbd_determine_dev_size(mdev, ddsf); put_ldev(mdev); if ((int )dd == -1) { return (-5); } else { } drbd_md_sync(mdev); } else { drbd_set_my_capacity(mdev, p_size); } tmp___13 = __fswab32(p->max_bio_size); mdev->peer_max_bio_size = tmp___13; drbd_reconsider_max_bio_size(mdev); tmp___15 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___15 != 0) { tmp___14 = drbd_get_capacity((mdev->ldev)->backing_bdev); if ((mdev->ldev)->known_size != tmp___14) { (mdev->ldev)->known_size = drbd_get_capacity((mdev->ldev)->backing_bdev); ldsc = 1; } else { } put_ldev(mdev); } else { } if ((int )mdev->state.ldv_49522.conn > 9) { tmp___16 = __fswab64(p->c_size); tmp___17 = drbd_get_capacity(mdev->this_bdev); if (tmp___16 != (unsigned long long )tmp___17 || ldsc != 0) { drbd_send_sizes(mdev, 0, ddsf); } else { } tmp___18 = test_and_clear_bit(16, (unsigned long volatile *)(& mdev->flags)); if (tmp___18 != 0 || ((int )dd == 2 && (unsigned int )*((unsigned short *)mdev + 374UL) == 160U)) { if ((int )mdev->state.ldv_49522.pdsk > 3 && (int )mdev->state.ldv_49522.disk > 3) { if (((unsigned int )ddsf & 2U) != 0U) { 
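/* Tail of receive_sizes(): after an online grow (dd == 2 while in the
 * encoded connected state) or when flag bit 16 was pending, a resync is
 * either suppressed when the dds_flags bit tested just above is set
 * (ddsf & 2U, announced to the admin as --assume-clean per the message
 * that follows) or started via resync_after_online_grow(); if the disk
 * states do not yet allow it, flag bit 15 defers the decision (the
 * symbolic flag names are not recoverable from this file). */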
_dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Resync of new storage suppressed with --assume-clean\n"); } else { resync_after_online_grow(mdev); } } else { set_bit(15U, (unsigned long volatile *)(& mdev->flags)); } } else { } } else { } return (0); } } static int receive_uuids(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_uuids *p ; u64 *p_uuid ; int i ; int updated_uuids ; int tmp ; void *tmp___0 ; __u64 tmp___1 ; union drbd_state val ; union drbd_state mask ; int skip_initial_sync ; union drbd_state __ns ; int tmp___2 ; int tmp___3 ; { p = (struct p_uuids *)pi->data; updated_uuids = 0; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { tmp = config_unknown_volume(tconn, pi); return (tmp); } else { } tmp___0 = kmalloc(48UL, 16U); p_uuid = (u64 *)tmp___0; if ((unsigned long )p_uuid == (unsigned long )((u64 *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "kmalloc of p_uuid failed\n"); return (0); } else { } i = 0; goto ldv_53778; ldv_53777: tmp___1 = __fswab64(p->uuid[i]); *(p_uuid + (unsigned long )i) = tmp___1; i = i + 1; ldv_53778: ; if (i <= 5) { goto ldv_53777; } else { } kfree((void const *)mdev->p_uuid); mdev->p_uuid = p_uuid; if ((((int )mdev->state.ldv_49522.conn <= 9 && (int )mdev->state.ldv_49522.disk <= 3) && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) && ((mdev->ed_uuid ^ *p_uuid) & 0xfffffffffffffffeULL) != 0ULL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Can only connect to data with current UUID=%016llX\n", mdev->ed_uuid); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask, val, CS_HARD); return (-5); } else { } tmp___2 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___2 != 0) { skip_initial_sync = (((unsigned int )*((unsigned short *)mdev + 374UL) == 160U && (mdev->tconn)->agreed_pro_version > 89) && (mdev->ldev)->md.uuid[0] == 4ULL) && (*(p_uuid + 5UL) & 8ULL) != 0ULL; if (skip_initial_sync != 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Accepted new current UUID, preparing to skip initial sync\n"); drbd_bitmap_io(mdev, & drbd_bmio_clear_n_write, (char *)"clear_n_write from receive_uuids", BM_LOCKED_TEST_ALLOWED); _drbd_uuid_set(mdev, 0, *p_uuid); _drbd_uuid_set(mdev, 1, 0ULL); __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 8U; __ns.ldv_40024.pdsk = 8U; _drbd_set_state(mdev, __ns, CS_VERBOSE, 0); drbd_md_sync(mdev); updated_uuids = 1; } else { } put_ldev(mdev); } else if ((int )mdev->state.ldv_49522.disk <= 3 && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) { updated_uuids = drbd_set_ed_uuid(mdev, *p_uuid); } else { } ldv_mutex_lock_117(mdev->state_mutex); ldv_mutex_unlock_118(mdev->state_mutex); if ((int )mdev->state.ldv_49522.conn > 9 && (int )mdev->state.ldv_49522.disk <= 3) { tmp___3 = drbd_set_ed_uuid(mdev, *p_uuid); updated_uuids = tmp___3 | updated_uuids; } else { } if (updated_uuids != 0) { drbd_print_uuids(mdev, "receiver updated UUIDs to"); } else { } return (0); } } static union drbd_state convert_state(union drbd_state ps ) { union drbd_state ms ; enum drbd_conns c_tab[32U] ; { c_tab[0] = 0; c_tab[1] = C_TEAR_DOWN; c_tab[2] = 0; c_tab[3] = 0; c_tab[4] = 0; c_tab[5] = 0; c_tab[6] = 0; c_tab[7] = 0; c_tab[8] = 0; c_tab[9] = C_WF_REPORT_PARAMS; c_tab[10] = C_CONNECTED; c_tab[11] = C_STARTING_SYNC_T; c_tab[12] = C_STARTING_SYNC_S; c_tab[13] = 0; c_tab[14] = 0; c_tab[15] = 0; 
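/* convert_state(): translates a state announced by the peer into the
 * local point of view.  Role/peer and disk/pdsk are simply swapped at
 * the end of the function; connection states go through this c_tab
 * lookup, where only a few peer states map to meaningful local ones
 * (index 10 stays C_CONNECTED, indices 11/12 apparently swap the
 * StartingSync direction, 18 becomes C_VERIFY_T, 31 maps to C_MASK) and
 * every unlisted entry collapses to 0. */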
c_tab[16] = 0; c_tab[17] = 0; c_tab[18] = C_VERIFY_T; c_tab[19] = 0; c_tab[20] = 0; c_tab[21] = 0; c_tab[22] = 0; c_tab[23] = 0; c_tab[24] = 0; c_tab[25] = 0; c_tab[26] = 0; c_tab[27] = 0; c_tab[28] = 0; c_tab[29] = 0; c_tab[30] = 0; c_tab[31] = C_MASK; ms.i = ps.i; ms.ldv_40024.conn = (unsigned char )c_tab[(int )ps.ldv_40024.conn]; ms.ldv_40024.peer = ps.ldv_40024.role; ms.ldv_40024.role = ps.ldv_40024.peer; ms.ldv_40024.pdsk = ps.ldv_40024.disk; ms.ldv_40024.disk = ps.ldv_40024.pdsk; ms.ldv_40024.peer_isp = (unsigned char )((int )ps.ldv_40024.aftr_isp | (int )ps.ldv_40024.user_isp); return (ms); } } static int receive_req_state(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_req_state *p ; union drbd_state mask ; union drbd_state val ; enum drbd_state_rv rv ; __u32 tmp ; __u32 tmp___0 ; int tmp___1 ; int tmp___2 ; { p = (struct p_req_state *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp = __fswab32(p->mask); mask.i = tmp; tmp___0 = __fswab32(p->val); val.i = tmp___0; tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); if (tmp___1 != 0) { tmp___2 = ldv_mutex_is_locked_119(mdev->state_mutex); if (tmp___2 != 0) { drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG); return (0); } else { } } else { } mask = convert_state(mask); val = convert_state(val); rv = drbd_change_state(mdev, CS_VERBOSE, mask, val); drbd_send_sr_reply(mdev, rv); drbd_md_sync(mdev); return (0); } } static int receive_req_conn_state(struct drbd_tconn *tconn , struct packet_info *pi ) { struct p_req_state *p ; union drbd_state mask ; union drbd_state val ; enum drbd_state_rv rv ; __u32 tmp ; __u32 tmp___0 ; int tmp___1 ; int tmp___2 ; { p = (struct p_req_state *)pi->data; tmp = __fswab32(p->mask); mask.i = tmp; tmp___0 = __fswab32(p->val); val.i = tmp___0; tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___1 != 0) { tmp___2 = ldv_mutex_is_locked_120(& tconn->cstate_mutex); if (tmp___2 != 0) { conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG); return (0); } else { } } else { } mask = convert_state(mask); val = convert_state(val); rv = conn_request_state(tconn, mask, val, 2066); conn_send_sr_reply(tconn, rv); return (0); } } static int receive_state(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_state *p ; union drbd_state os ; union drbd_state ns ; union drbd_state peer_state ; enum drbd_disk_state real_peer_disk ; enum chg_state_flags cs_flags ; int rv ; int tmp ; __u32 tmp___0 ; char const *tmp___1 ; unsigned long tmp___2 ; int cr ; int tmp___3 ; enum drbd_conns tmp___4 ; union drbd_state val ; union drbd_state mask ; int tmp___5 ; union drbd_state val___0 ; union drbd_state mask___0 ; int tmp___6 ; union drbd_state tmp___7 ; union drbd_state val___1 ; union drbd_state mask___1 ; int tmp___8 ; int tmp___9 ; enum drbd_state_rv tmp___10 ; union drbd_state val___2 ; union drbd_state mask___2 ; { p = (struct p_state *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { tmp = config_unknown_volume(tconn, pi); return (tmp); } else { } tmp___0 = __fswab32(p->state); peer_state.i = tmp___0; real_peer_disk = (enum drbd_disk_state )peer_state.ldv_40024.disk; if ((unsigned int )*((unsigned char *)(& peer_state) + 1UL) == 6U) { real_peer_disk = (*(mdev->p_uuid + 5UL) & 4ULL) != 0ULL ? 
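/* receive_state(): a peer disk state of "negotiating" (the byte test
 * (unsigned char *)(& peer_state) + 1UL == 6U just above) carries no
 * usable disk state, so the real one is derived from the peer's UUID
 * flags in the conditional resuming after this comment: bit 4ULL in
 * p_uuid[5] selects D_INCONSISTENT, otherwise D_CONSISTENT, and the
 * chosen value is logged via drbd_disk_str(). */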
D_INCONSISTENT : D_CONSISTENT; tmp___1 = drbd_disk_str(real_peer_disk); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "real peer disk state = %s\n", tmp___1); } else { } spin_lock_irq(& (mdev->tconn)->req_lock); retry: ns = drbd_read_state(mdev); os = ns; spin_unlock_irq(& (mdev->tconn)->req_lock); if ((int )os.ldv_40024.conn <= 7) { return (-104); } else { } if ((((*((unsigned int *)(& os) + 0UL) == 32768U || *((unsigned int *)(& os) + 0UL) == 57344U) && (unsigned int )real_peer_disk == 8U) && (int )os.ldv_40024.conn > 10) && (unsigned int )*((unsigned char *)(& os) + 1UL) == 16U) { if ((int )peer_state.ldv_40024.conn > 10 && (int )peer_state.ldv_40024.conn <= 15) { real_peer_disk = D_INCONSISTENT; } else if ((int )os.ldv_40024.conn > 15 && (unsigned int )*((unsigned short *)(& peer_state) + 0UL) == 160U) { tmp___2 = drbd_bm_total_weight(mdev); if (tmp___2 <= mdev->rs_failed) { drbd_resync_finished(mdev); } else { } return (0); } else { } } else { } if ((((unsigned int )*((unsigned short *)(& os) + 0UL) == 304U && (unsigned int )*((unsigned char *)(& os) + 1UL) == 16U) && (unsigned int )*((unsigned short *)(& peer_state) + 0UL) == 160U) && (unsigned int )real_peer_disk == 8U) { ov_out_of_sync_print(mdev); drbd_resync_finished(mdev); return (0); } else { } if (((*((unsigned int *)(& os) + 0UL) == 65536U && (unsigned int )real_peer_disk == 4U) && (unsigned int )*((unsigned short *)(& os) + 0UL) == 160U) && (int )peer_state.ldv_40024.conn > 16) { real_peer_disk = D_UP_TO_DATE; } else { } if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 144U) { ns.ldv_40024.conn = 10U; } else { } if ((unsigned int )*((unsigned short *)(& peer_state) + 0UL) == 352U) { ns.ldv_40024.conn = 23U; } else { } if ((unsigned long )mdev->p_uuid != (unsigned long )((u64 *)0) && (int )peer_state.ldv_40024.disk > 2) { tmp___6 = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp___6 != 0) { cr = (int )os.ldv_40024.conn <= 9; cr = ((unsigned int )*((unsigned short *)(& os) + 0UL) == 160U && ((unsigned int )*((unsigned char *)(& peer_state) + 1UL) == 6U || (unsigned int )*((unsigned char *)(& os) + 1UL) == 6U)) | cr; tmp___3 = constant_test_bit(6U, (unsigned long const volatile *)(& mdev->flags)); cr = tmp___3 | cr; cr = ((unsigned int )*((unsigned short *)(& os) + 0UL) == 160U && ((int )peer_state.ldv_40024.conn > 10 && (int )peer_state.ldv_40024.conn <= 14)) | cr; if (cr != 0) { tmp___4 = drbd_sync_handshake(mdev, (enum drbd_role )peer_state.ldv_40024.role, real_peer_disk); ns.ldv_40024.conn = (unsigned char )tmp___4; } else { } put_ldev(mdev); if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 496U) { ns.ldv_40024.conn = 10U; if ((unsigned int )*((unsigned char *)mdev + 749UL) == 6U) { val.i = 0U; val.ldv_40024.disk = 2U; mask.i = 0U; mask.ldv_40024.disk = 15U; drbd_force_state(mdev, mask, val); } else if ((unsigned int )*((unsigned char *)(& peer_state) + 1UL) == 6U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Disk attach process on the peer node was aborted.\n"); peer_state.ldv_40024.disk = 0U; real_peer_disk = D_DISKLESS; } else { tmp___5 = test_and_clear_bit(8, (unsigned long volatile *)(& (mdev->tconn)->flags)); if (tmp___5 != 0) { return (-5); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) != 144U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( os.conn == C_WF_REPORT_PARAMS ) in %s:%d\n", (char 
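/* This error path asserts os.conn == C_WF_REPORT_PARAMS (the path
 * string resumes after this comment) and tears the connection down with
 * CS_HARD.  More generally, receive_state() samples os/ns under
 * req_lock, may run drbd_sync_handshake() when this is the first state
 * packet after connect (the cr accumulation above), and re-takes the
 * lock afterwards; if drbd_read_state() no longer matches the sampled
 * os it jumps back to the retry: label and starts over. */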
*)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4022); } else { } val___0.i = 0U; val___0.ldv_40024.conn = 1U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask___0, val___0, CS_HARD); return (-5); } } else { } } else { } } else { } spin_lock_irq(& (mdev->tconn)->req_lock); tmp___7 = drbd_read_state(mdev); if (os.i != tmp___7.i) { goto retry; } else { } clear_bit(6, (unsigned long volatile *)(& mdev->flags)); ns.ldv_40024.peer = peer_state.ldv_40024.role; ns.ldv_40024.pdsk = (unsigned char )real_peer_disk; ns.ldv_40024.peer_isp = (unsigned char )((int )peer_state.ldv_40024.aftr_isp | (int )peer_state.ldv_40024.user_isp); if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 208U) && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 6U) { ns.ldv_40024.disk = mdev->new_state_tmp.ldv_40024.disk; } else { } cs_flags = (enum chg_state_flags )(((int )os.ldv_40024.conn > 9 || (int )ns.ldv_40024.conn <= 9) + 2); if (*((unsigned int *)(& ns) + 0UL) == 57344U) { tmp___8 = drbd_suspended(mdev); if (tmp___8 != 0) { if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U) { if ((int )os.ldv_40024.conn <= 9) { tmp___9 = constant_test_bit(17U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___9 != 0) { spin_unlock_irq(& (mdev->tconn)->req_lock); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Aborting Connect, can not thaw IO with an only Consistent peer\n"); tl_clear(mdev->tconn); drbd_uuid_new_current(mdev); clear_bit(17, (unsigned long volatile *)(& mdev->flags)); val___1.i = 0U; val___1.ldv_40024.conn = 6U; val___1.ldv_40024.susp = 0U; mask___1.i = 0U; mask___1.ldv_40024.conn = 31U; mask___1.ldv_40024.susp = 1U; conn_request_state(mdev->tconn, mask___1, val___1, CS_HARD); return (-5); } else { } } else { } } else { } } else { } } else { } tmp___10 = _drbd_set_state(mdev, ns, cs_flags, 0); rv = (int )tmp___10; ns = drbd_read_state(mdev); spin_unlock_irq(& (mdev->tconn)->req_lock); if (rv <= 0) { val___2.i = 0U; val___2.ldv_40024.conn = 1U; mask___2.i = 0U; mask___2.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask___2, val___2, CS_HARD); return (-5); } else { } if ((int )os.ldv_40024.conn > 9) { if (((int )ns.ldv_40024.conn > 10 && (int )peer_state.ldv_40024.conn <= 10) && (unsigned int )*((unsigned char *)(& peer_state) + 1UL) != 6U) { drbd_send_uuids(mdev); drbd_send_current_state(mdev); } else { } } else { } clear_bit(21, (unsigned long volatile *)(& mdev->flags)); drbd_md_sync(mdev); return (0); } } static int receive_sync_uuid(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_rs_uuid *p ; wait_queue_t __wait ; struct task_struct *tmp ; __u64 tmp___0 ; int tmp___1 ; { p = (struct p_rs_uuid *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } if ((((unsigned int )*((unsigned short *)mdev + 374UL) == 240U || (unsigned int )*((unsigned short *)mdev + 374UL) == 368U) || (int )mdev->state.ldv_49522.conn <= 9) || (int )mdev->state.ldv_49522.disk <= 2) { goto ldv_53845; } else { } tmp = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; 
ldv_53848: prepare_to_wait(& mdev->misc_wait, & __wait, 2); if ((((unsigned int )*((unsigned short *)mdev + 374UL) == 240U || (unsigned int )*((unsigned short *)mdev + 374UL) == 368U) || (int )mdev->state.ldv_49522.conn <= 9) || (int )mdev->state.ldv_49522.disk <= 2) { goto ldv_53847; } else { } schedule(); goto ldv_53848; ldv_53847: finish_wait(& mdev->misc_wait, & __wait); ldv_53845: tmp___1 = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp___1 != 0) { tmp___0 = __fswab64(p->uuid); _drbd_uuid_set(mdev, 0, tmp___0); _drbd_uuid_set(mdev, 1, 0ULL); drbd_print_uuids(mdev, "updated sync uuid"); drbd_start_resync(mdev, C_SYNC_TARGET); put_ldev(mdev); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Ignoring SyncUUID packet!\n"); } return (0); } } static int receive_bitmap_plain(struct drbd_conf *mdev , unsigned int size , unsigned long *p , struct bm_xfer_ctx *c ) { unsigned int data_size ; unsigned int tmp ; unsigned int num_words ; size_t __min1 ; size_t __min2 ; unsigned int want ; int err ; { tmp = drbd_header_size(mdev->tconn); data_size = 4096U - tmp; __min1 = (unsigned long )(data_size / 8U); __min2 = c->bm_words - c->word_offset; num_words = (unsigned int )(__min1 < __min2 ? __min1 : __min2); want = num_words * 8U; if (want != size) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s:want (%u) != size (%u)\n", "receive_bitmap_plain", want, size); return (-5); } else { } if (want == 0U) { return (0); } else { } err = drbd_recv_all(mdev->tconn, (void *)p, (size_t )want); if (err != 0) { return (err); } else { } drbd_bm_merge_lel(mdev, c->word_offset, (size_t )num_words, p); c->word_offset = c->word_offset + (unsigned long )num_words; c->bit_offset = c->word_offset * 64UL; if (c->bit_offset > c->bm_bits) { c->bit_offset = c->bm_bits; } else { } return (1); } } static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p ) { { return ((enum drbd_bitmap_code )((int )p->encoding & 15)); } } static int dcbp_get_start(struct p_compressed_bm *p ) { { return ((int )((signed char )p->encoding) < 0); } } static int dcbp_get_pad_bits(struct p_compressed_bm *p ) { { return (((int )p->encoding >> 4) & 7); } } static int recv_bm_rle_bits(struct drbd_conf *mdev , struct p_compressed_bm *p , struct bm_xfer_ctx *c , unsigned int len ) { struct bitstream bs ; u64 look_ahead ; u64 rl ; u64 tmp ; unsigned long s ; unsigned long e ; int toggle ; int tmp___0 ; int have ; int bits ; int tmp___1 ; { s = c->bit_offset; tmp___0 = dcbp_get_start(p); toggle = tmp___0; tmp___1 = dcbp_get_pad_bits(p); bitstream_init(& bs, (void *)(& p->code), (size_t )len, (unsigned int )tmp___1); bits = bitstream_get_bits(& bs, & look_ahead, 64); if (bits < 0) { return (-5); } else { } have = bits; goto ldv_53888; ldv_53887: bits = vli_decode_bits(& rl, look_ahead); if (bits <= 0) { return (-5); } else { } if (toggle != 0) { e = (unsigned long )(((unsigned long long )s + rl) - 1ULL); if (c->bm_bits <= e) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e); return (-5); } else { } _drbd_bm_set_bits(mdev, s, e); } else { } if (have < bits) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n", have, bits, look_ahead, (unsigned int )((long )bs.cur.b) - (unsigned int )((long )(& p->code)), (unsigned int )bs.buf_len); return (-5); } else { } look_ahead = look_ahead >> bits; have = have - bits; bits = bitstream_get_bits(& bs, & tmp, 64 - have); if 
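/* recv_bm_rle_bits(): the decode loop around this point maintains a
 * 64-bit look_ahead window over the VLI bitstream.  Each iteration
 * decodes one run length (vli_decode_bits); "toggle" alternates between
 * runs of clear and set bits, and set runs are applied to the bitmap
 * with _drbd_bm_set_bits() after an overflow check against c->bm_bits.
 * Consumed bits are shifted out, "have" counts the valid bits still in
 * the window, and the bitstream_get_bits(&bs, &tmp, 64 - have) call
 * whose result is tested right after this comment refills it. */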
(bits < 0) { return (-5); } else { } look_ahead = (tmp << have) | look_ahead; have = have + bits; s = (unsigned long )((unsigned long long )s + rl); toggle = toggle == 0; ldv_53888: ; if (have > 0) { goto ldv_53887; } else { } c->bit_offset = s; bm_xfer_ctx_bit_to_word_offset(c); return (c->bm_bits != s); } } static int decode_bitmap_c(struct drbd_conf *mdev , struct p_compressed_bm *p , struct bm_xfer_ctx *c , unsigned int len ) { int tmp ; enum drbd_bitmap_code tmp___0 ; union drbd_state val ; union drbd_state mask ; { tmp___0 = dcbp_get_code(p); if ((unsigned int )tmp___0 == 2U) { tmp = recv_bm_rle_bits(mdev, p, c, len - 1U); return (tmp); } else { } dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "receive_bitmap_c: unknown encoding %u\n", (int )p->encoding); val.i = 0U; val.ldv_40024.conn = 6U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(mdev->tconn, mask, val, CS_HARD); return (-5); } } void INFO_bm_xfer_stats(struct drbd_conf *mdev , char const *direction , struct bm_xfer_ctx *c ) { unsigned int header_size ; unsigned int tmp ; unsigned int data_size ; unsigned int plain ; unsigned int total ; unsigned int r ; { tmp = drbd_header_size(mdev->tconn); header_size = tmp; data_size = 4096U - header_size; plain = (unsigned int )(((c->bm_words + (unsigned long )data_size) - 1UL) / (unsigned long )data_size + 1UL) * header_size + (unsigned int )c->bm_words * 8U; total = c->bytes[0] + c->bytes[1]; if (total == 0U) { return; } else { } if (total >= plain) { return; } else { } r = total > 4294967U ? total / (plain / 1000U) : (total * 1000U) / plain; if (r > 1000U) { r = 1000U; } else { } r = 1000U - r; _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), total %u; compression: %u.%u%%\n", direction, c->bytes[1], c->packets[1], c->bytes[0], c->packets[0], total, r / 10U, r % 10U); return; } } static int receive_bitmap(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct bm_xfer_ctx c ; int err ; struct bm_xfer_ctx __constr_expr_0 ; unsigned long tmp ; size_t tmp___0 ; struct p_compressed_bm *p ; unsigned int tmp___1 ; unsigned int tmp___2 ; enum drbd_state_rv rv ; union drbd_state val ; union drbd_state mask ; char const *tmp___3 ; { mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } drbd_bm_lock(mdev, (char *)"receive bitmap", BM_LOCKED_SET_ALLOWED); tmp = drbd_bm_bits(mdev); tmp___0 = drbd_bm_words(mdev); __constr_expr_0.bm_bits = tmp; __constr_expr_0.bm_words = tmp___0; __constr_expr_0.bit_offset = 0UL; __constr_expr_0.word_offset = 0UL; __constr_expr_0.packets[0] = 0U; __constr_expr_0.packets[1] = 0U; __constr_expr_0.bytes[0] = 0U; __constr_expr_0.bytes[1] = 0U; c = __constr_expr_0; ldv_53921: ; if ((unsigned int )pi->cmd == 4U) { err = receive_bitmap_plain(mdev, pi->size, (unsigned long *)pi->data, & c); } else if ((unsigned int )pi->cmd == 36U) { p = (struct p_compressed_bm *)pi->data; tmp___1 = drbd_header_size(tconn); if (pi->size > 4096U - tmp___1) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ReportCBitmap packet too large\n"); err = -5; goto out; } else { } if (pi->size <= 1U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ReportCBitmap packet too small (l:%u)\n", pi->size); err = -5; goto out; } else { } err = drbd_recv_all(mdev->tconn, (void *)p, (size_t )pi->size); if (err != 0) { goto out; } else { } err = 
decode_bitmap_c(mdev, p, & c, pi->size); } else { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", (unsigned int )pi->cmd); err = -5; goto out; } c.packets[(unsigned int )pi->cmd == 4U] = c.packets[(unsigned int )pi->cmd == 4U] + 1U; tmp___2 = drbd_header_size(tconn); c.bytes[(unsigned int )pi->cmd == 4U] = c.bytes[(unsigned int )pi->cmd == 4U] + (tmp___2 + pi->size); if (err <= 0) { if (err < 0) { goto out; } else { } goto ldv_53920; } else { } err = drbd_recv_header(mdev->tconn, pi); if (err != 0) { goto out; } else { } goto ldv_53921; ldv_53920: INFO_bm_xfer_stats(mdev, "receive", & c); if ((unsigned int )*((unsigned short *)mdev + 374UL) == 224U) { err = drbd_send_bitmap(mdev); if (err != 0) { goto out; } else { } val.i = 0U; val.ldv_40024.conn = 15U; mask.i = 0U; mask.ldv_40024.conn = 31U; rv = _drbd_request_state(mdev, mask, val, CS_VERBOSE); if ((int )rv != 1) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( rv == SS_SUCCESS ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4366); } else { } } else if ((unsigned int )*((unsigned short *)mdev + 374UL) != 208U) { tmp___3 = drbd_conn_str((enum drbd_conns )mdev->state.ldv_49522.conn); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "unexpected cstate (%s) in receive_bitmap\n", tmp___3); } else { } err = 0; out: drbd_bm_unlock(mdev); if (err == 0 && (unsigned int )*((unsigned short *)mdev + 374UL) == 208U) { drbd_start_resync(mdev, C_SYNC_SOURCE); } else { } return (err); } } static int receive_skip(struct drbd_tconn *tconn , struct packet_info *pi ) { int tmp ; { printk("\fd-con %s: skipping unknown optional packet type %d, l: %d!\n", tconn->name, (unsigned int )pi->cmd, pi->size); tmp = ignore_remaining_packet(tconn, pi); return (tmp); } } static int receive_UnplugRemote(struct drbd_tconn *tconn , struct packet_info *pi ) { { drbd_tcp_quickack(tconn->data.socket); return (0); } } static int receive_out_of_sync(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_block_desc *p ; char const *tmp ; __u32 tmp___0 ; __u64 tmp___1 ; { p = (struct p_block_desc *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } switch ((int )mdev->state.ldv_49522.conn) { case 15: ; case 14: ; case 23: ; goto ldv_53944; default: tmp = drbd_conn_str((enum drbd_conns )mdev->state.ldv_49522.conn); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n", tmp); } ldv_53944: tmp___0 = __fswab32(p->blksize); tmp___1 = __fswab64(p->sector); __drbd_set_out_of_sync(mdev, (sector_t )tmp___1, (int )tmp___0, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4418U); return (0); } } static struct data_cmd drbd_cmd_handler[46U] = { {1, 24UL, & receive_Data}, {1, 24UL, & receive_DataReply}, {1, 24UL, & receive_RSDataReply}, {0, 8UL, & receive_Barrier}, {1, 0UL, & receive_bitmap}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, & receive_UnplugRemote}, {0, 24UL, & receive_DataRequest}, {0, 24UL, & receive_DataRequest}, {1, 0UL, & 
receive_SyncParam}, {1, 24UL, & receive_protocol}, {0, 48UL, & receive_uuids}, {0, 32UL, & receive_sizes}, {0, 4UL, & receive_state}, {0, 8UL, & receive_sync_uuid}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 8UL, & receive_req_state}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 24UL, & receive_DataRequest}, {1, 24UL, & receive_DataRequest}, {0, 0UL, 0}, {1, 24UL, & receive_DataRequest}, {0, 0UL, 0}, {1, 0UL, & receive_SyncParam}, {1, 0UL, & receive_bitmap}, {0, 0UL, 0}, {0, 0UL, 0}, {0, 8UL, & receive_skip}, {0, 16UL, & receive_out_of_sync}, {0, 0UL, 0}, {0, 8UL, & receive_req_conn_state}, {0, 0UL, 0}, {0, 0UL, 0}, {1, 24UL, & receive_protocol}}; static void drbdd(struct drbd_tconn *tconn ) { struct packet_info pi ; size_t shs ; int err ; struct data_cmd *cmd ; int tmp ; char const *tmp___0 ; long tmp___1 ; long tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; enum drbd_thread_state tmp___5 ; union drbd_state val ; union drbd_state mask ; { goto ldv_53964; ldv_53963: drbd_thread_current_set_cpu(& tconn->receiver); tmp = drbd_recv_header(tconn, & pi); if (tmp != 0) { goto err_out; } else { } cmd = (struct data_cmd *)(& drbd_cmd_handler) + (unsigned long )pi.cmd; tmp___1 = ldv__builtin_expect((unsigned int )pi.cmd > (unsigned int )P_PROTOCOL_UPDATE, 0L); if (tmp___1 != 0L) { tmp___0 = cmdname(pi.cmd); printk("\vd-con %s: Unexpected data packet %s (0x%04x)", tconn->name, tmp___0, (unsigned int )pi.cmd); goto err_out; } else { tmp___2 = ldv__builtin_expect((unsigned long )cmd->fn == (unsigned long )((int (*)(struct drbd_tconn * , struct packet_info * ))0), 0L); if (tmp___2 != 0L) { tmp___0 = cmdname(pi.cmd); printk("\vd-con %s: Unexpected data packet %s (0x%04x)", tconn->name, tmp___0, (unsigned int )pi.cmd); goto err_out; } else { } } shs = cmd->pkt_size; if ((size_t )pi.size > shs && cmd->expect_payload == 0) { tmp___3 = cmdname(pi.cmd); printk("\vd-con %s: No payload expected %s l:%d\n", tconn->name, tmp___3, pi.size); goto err_out; } else { } if (shs != 0UL) { err = drbd_recv_all_warn(tconn, pi.data, shs); if (err != 0) { goto err_out; } else { } pi.size = pi.size - (unsigned int )shs; } else { } err = (*(cmd->fn))(tconn, & pi); if (err != 0) { tmp___4 = cmdname(pi.cmd); printk("\vd-con %s: error receiving %s, e: %d l: %d!\n", tconn->name, tmp___4, err, pi.size); goto err_out; } else { } ldv_53964: tmp___5 = get_t_state(& tconn->receiver); if ((unsigned int )tmp___5 == 1U) { goto ldv_53963; } else { } return; err_out: val.i = 0U; val.ldv_40024.conn = 6U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); return; } } void conn_flush_workqueue(struct drbd_tconn *tconn ) { struct drbd_wq_barrier barr ; { barr.w.cb = & w_prev_work_done; barr.w.ldv_49807.tconn = tconn; init_completion(& barr.done); drbd_queue_work(& tconn->sender_work, & barr.w); wait_for_completion(& barr.done); return; } } static void conn_disconnect(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; enum drbd_conns oc ; int vnr ; union drbd_state val ; union drbd_state mask ; void *tmp ; void *tmp___0 ; int tmp___1 ; enum drbd_role tmp___2 ; enum drbd_disk_state tmp___3 ; union drbd_state val___0 ; union drbd_state mask___0 ; union drbd_state val___1 ; union drbd_state mask___1 ; { if ((unsigned int )tconn->cstate == 0U) { return; } else { } val.i = 0U; val.ldv_40024.conn = 5U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); drbd_thread_stop(& 
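/* conn_disconnect(): teardown runs in a fixed order; first force the
 * connection state down with CS_HARD (val.conn = 5U above), then stop
 * the asender thread (the call resumes after this comment), free the
 * sockets, and run drbd_disconnected() for every volume.  The
 * kref_get/kref_put pair around each per-volume call keeps the mdev
 * alive across the RCU unlock while iterating tconn->volumes with
 * idr_get_next(). */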
tconn->asender); drbd_free_sock(tconn); rcu_read_lock___2(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53985; ldv_53984: kref_get(& mdev->kref); rcu_read_unlock___2(); drbd_disconnected(mdev); kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___2(); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_53985: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53984; } else { } rcu_read_unlock___2(); tmp___1 = list_empty((struct list_head const *)(& (tconn->current_epoch)->list)); if (tmp___1 == 0) { printk("\vd-con %s: ASSERTION FAILED: tconn->current_epoch->list not empty\n", tconn->name); } else { } atomic_set(& (tconn->current_epoch)->epoch_size, 0); tconn->send.seen_any_write_yet = 0; printk("\016d-con %s: Connection closed\n", tconn->name); tmp___2 = conn_highest_role(tconn); if ((unsigned int )tmp___2 == 1U) { tmp___3 = conn_highest_pdsk(tconn); if ((unsigned int )tmp___3 > 5U) { conn_try_outdate_peer_async(tconn); } else { } } else { } spin_lock_irq(& tconn->req_lock); oc = tconn->cstate; if ((unsigned int )oc > 1U) { val___0.i = 0U; val___0.ldv_40024.conn = 2U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; _conn_request_state(tconn, mask___0, val___0, CS_VERBOSE); } else { } spin_unlock_irq(& tconn->req_lock); if ((unsigned int )oc == 1U) { val___1.i = 0U; val___1.ldv_40024.conn = 0U; mask___1.i = 0U; mask___1.ldv_40024.conn = 31U; conn_request_state(tconn, mask___1, val___1, 3); } else { } return; } } static int drbd_disconnected(struct drbd_conf *mdev ) { unsigned int i ; int tmp ; int tmp___0 ; wait_queue_t __wait ; struct task_struct *tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { spin_lock_irq(& (mdev->tconn)->req_lock); _drbd_wait_ee_list_empty(mdev, & mdev->active_ee); _drbd_wait_ee_list_empty(mdev, & mdev->sync_ee); _drbd_wait_ee_list_empty(mdev, & mdev->read_ee); spin_unlock_irq(& (mdev->tconn)->req_lock); drbd_rs_cancel_all(mdev); mdev->rs_total = 0UL; mdev->rs_failed = 0UL; atomic_set(& mdev->rs_pending_cnt, 0); __wake_up(& mdev->misc_wait, 3U, 1, 0); del_timer_sync(& mdev->resync_timer); resync_timer_fn((unsigned long )mdev); drbd_flush_workqueue(mdev); drbd_finish_peer_reqs(mdev); drbd_flush_workqueue(mdev); drbd_rs_cancel_all(mdev); kfree((void const *)mdev->p_uuid); mdev->p_uuid = 0; tmp = drbd_suspended(mdev); if (tmp == 0) { tl_clear(mdev->tconn); } else { } drbd_md_sync(mdev); tmp___0 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___0 == 0) { goto ldv_53999; } else { } tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_54002: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___2 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 == 0) { goto ldv_54001; } else { } schedule(); goto ldv_54002; ldv_54001: finish_wait(& mdev->misc_wait, & __wait); ldv_53999: tmp___3 = drbd_free_peer_reqs(mdev, & mdev->net_ee); i = (unsigned int )tmp___3; if (i != 0U) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "net_ee not empty, killed %u entries\n", i); } else { } tmp___4 = atomic_read((atomic_t const *)(& mdev->pp_in_use_by_net)); i = (unsigned int )tmp___4; if (i != 0U) { _dev_info((struct device const *)(& 
(mdev->vdisk)->part0.__dev), "pp_in_use_by_net = %d, expected 0\n", i); } else { } tmp___5 = atomic_read((atomic_t const *)(& mdev->pp_in_use)); i = (unsigned int )tmp___5; if (i != 0U) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "pp_in_use = %d, expected 0\n", i); } else { } tmp___6 = list_empty((struct list_head const *)(& mdev->read_ee)); if (tmp___6 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->read_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4641); } else { } tmp___7 = list_empty((struct list_head const *)(& mdev->active_ee)); if (tmp___7 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->active_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4642); } else { } tmp___8 = list_empty((struct list_head const *)(& mdev->sync_ee)); if (tmp___8 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->sync_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4643); } else { } tmp___9 = list_empty((struct list_head const *)(& mdev->done_ee)); if (tmp___9 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->done_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4644); } else { } return (0); } } static int drbd_send_features(struct drbd_tconn *tconn ) { struct drbd_socket *sock ; struct p_connection_features *p ; void *tmp ; int tmp___0 ; { sock = & tconn->data; tmp = conn_prepare_command(tconn, sock); p = (struct p_connection_features *)tmp; if ((unsigned long )p == (unsigned long )((struct p_connection_features *)0)) { return (-5); } else { } memset((void *)p, 0, 72UL); p->protocol_min = 1442840576U; p->protocol_max = 1694498816U; tmp___0 = conn_send_command(tconn, sock, P_CONNECTION_FEATURES, 72U, 0, 0U); return (tmp___0); } } static int drbd_do_features(struct drbd_tconn *tconn ) { struct p_connection_features *p ; int expect ; struct packet_info pi ; int err ; char const *tmp ; __u32 tmp___0 ; __u32 tmp___1 ; int __min1 ; int __min2 ; { expect = 72; err = drbd_send_features(tconn); if (err != 0) { return (0); } else { } err = drbd_recv_header(tconn, & pi); if (err != 0) { return (0); } else { } if ((unsigned int )pi.cmd != 65534U) { tmp = cmdname(pi.cmd); printk("\vd-con %s: expected ConnectionFeatures packet, received: %s (0x%04x)\n", tconn->name, tmp, (unsigned int )pi.cmd); return (-1); } else { } if (pi.size != (unsigned int )expect) { printk("\vd-con %s: expected ConnectionFeatures length: %u, received: %u\n", tconn->name, expect, pi.size); return (-1); } else { } p = (struct p_connection_features *)pi.data; err = drbd_recv_all_warn(tconn, (void *)p, (size_t )expect); if (err != 0) { return (0); } else { } tmp___0 = __fswab32(p->protocol_min); p->protocol_min = 
tmp___0; tmp___1 = __fswab32(p->protocol_max); p->protocol_max = tmp___1; if (p->protocol_max == 0U) { p->protocol_max = p->protocol_min; } else { } if (p->protocol_min > 101U || p->protocol_max <= 85U) { goto incompat; } else { } __min1 = 101; __min2 = (int )p->protocol_max; tconn->agreed_pro_version = __min1 < __min2 ? __min1 : __min2; printk("\016d-con %s: Handshake successful: Agreed network protocol version %d\n", tconn->name, tconn->agreed_pro_version); return (1); incompat: printk("\vd-con %s: incompatible DRBD dialects: I support %d-%d, peer supports %d-%d\n", tconn->name, 86, 101, p->protocol_min, p->protocol_max); return (-1); } } static int drbd_do_auth(struct drbd_tconn *tconn ) { struct drbd_socket *sock ; char my_challenge[64U] ; struct scatterlist sg ; char *response ; char *right_response ; char *peers_ch ; unsigned int key_len ; char secret[64U] ; unsigned int resp_size ; struct hash_desc desc ; struct packet_info pi ; struct net_conf *nc ; int err ; int rv ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; size_t tmp___1 ; size_t __len ; void *__ret ; void *tmp___2 ; int tmp___3 ; char const *tmp___4 ; void *tmp___5 ; void *tmp___6 ; void *tmp___7 ; int tmp___8 ; char const *tmp___9 ; void *tmp___10 ; int tmp___11 ; { response = 0; right_response = 0; peers_ch = 0; rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4772, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; tmp___1 = strlen((char const *)(& nc->shared_secret)); key_len = (unsigned int )tmp___1; __len = (size_t )key_len; __ret = memcpy((void *)(& secret), (void const *)(& nc->shared_secret), __len); rcu_read_unlock___2(); desc.tfm = tconn->cram_hmac_tfm; desc.flags = 0U; rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 const *)(& secret), key_len); if (rv != 0) { printk("\vd-con %s: crypto_hash_setkey() failed with %d\n", tconn->name, rv); rv = -1; goto fail; } else { } get_random_bytes((void *)(& my_challenge), 64); sock = & tconn->data; tmp___2 = conn_prepare_command(tconn, sock); if ((unsigned long )tmp___2 == (unsigned long )((void *)0)) { rv = 0; goto fail; } else { } tmp___3 = conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0U, (void *)(& my_challenge), 64U); rv = tmp___3 == 0; if (rv == 0) { goto fail; } else { } err = drbd_recv_header(tconn, & pi); if (err != 0) { rv = 0; goto fail; } else { } if ((unsigned int )pi.cmd != 16U) { tmp___4 = cmdname(pi.cmd); printk("\vd-con %s: expected AuthChallenge packet, received: %s (0x%04x)\n", tconn->name, tmp___4, (unsigned int )pi.cmd); rv = 0; goto fail; } else { } if (pi.size > 128U) { printk("\vd-con %s: expected AuthChallenge payload too big.\n", tconn->name); rv = -1; goto fail; } else { } tmp___5 = kmalloc((size_t )pi.size, 16U); peers_ch = (char *)tmp___5; if ((unsigned long )peers_ch == (unsigned long )((char *)0)) { printk("\vd-con %s: kmalloc of peers_ch failed\n", tconn->name); rv = -1; goto fail; } else { } err = drbd_recv_all_warn(tconn, (void *)peers_ch, (size_t )pi.size); if (err != 0) { rv = 0; goto fail; } else { } resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm); tmp___6 = 
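/* drbd_do_auth(): CRAM-style challenge-response over the data socket,
 * keyed with the shared secret (crypto_hash_setkey above).  Flow: send
 * a 64-byte random challenge (P_AUTH_CHALLENGE), receive the peer's
 * challenge, HMAC it and send the digest back (P_AUTH_RESPONSE), then
 * receive the peer's digest of our own challenge and compare it against
 * a locally computed right_response with memcmp().  Return convention
 * as used here: 1 on success, 0 on receive failure, -1 on fatal
 * mismatch or allocation error. */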
kmalloc((size_t )resp_size, 16U); response = (char *)tmp___6; if ((unsigned long )response == (unsigned long )((char *)0)) { printk("\vd-con %s: kmalloc of response failed\n", tconn->name); rv = -1; goto fail; } else { } sg_init_table(& sg, 1U); sg_set_buf(& sg, (void const *)peers_ch, pi.size); rv = crypto_hash_digest(& desc, & sg, sg.length, (u8 *)response); if (rv != 0) { printk("\vd-con %s: crypto_hash_digest() failed with %d\n", tconn->name, rv); rv = -1; goto fail; } else { } tmp___7 = conn_prepare_command(tconn, sock); if ((unsigned long )tmp___7 == (unsigned long )((void *)0)) { rv = 0; goto fail; } else { } tmp___8 = conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0U, (void *)response, resp_size); rv = tmp___8 == 0; if (rv == 0) { goto fail; } else { } err = drbd_recv_header(tconn, & pi); if (err != 0) { rv = 0; goto fail; } else { } if ((unsigned int )pi.cmd != 17U) { tmp___9 = cmdname(pi.cmd); printk("\vd-con %s: expected AuthResponse packet, received: %s (0x%04x)\n", tconn->name, tmp___9, (unsigned int )pi.cmd); rv = 0; goto fail; } else { } if (pi.size != resp_size) { printk("\vd-con %s: expected AuthResponse payload of wrong size\n", tconn->name); rv = 0; goto fail; } else { } err = drbd_recv_all_warn(tconn, (void *)response, (size_t )resp_size); if (err != 0) { rv = 0; goto fail; } else { } tmp___10 = kmalloc((size_t )resp_size, 16U); right_response = (char *)tmp___10; if ((unsigned long )right_response == (unsigned long )((char *)0)) { printk("\vd-con %s: kmalloc of right_response failed\n", tconn->name); rv = -1; goto fail; } else { } sg_set_buf(& sg, (void const *)(& my_challenge), 64U); rv = crypto_hash_digest(& desc, & sg, sg.length, (u8 *)right_response); if (rv != 0) { printk("\vd-con %s: crypto_hash_digest() failed with %d\n", tconn->name, rv); rv = -1; goto fail; } else { } tmp___11 = memcmp((void const *)response, (void const *)right_response, (size_t )resp_size); rv = tmp___11 == 0; if (rv != 0) { printk("\016d-con %s: Peer authenticated using %d bytes HMAC\n", tconn->name, resp_size); } else { rv = -1; } fail: kfree((void const *)peers_ch); kfree((void const *)response); kfree((void const *)right_response); return (rv); } } int drbdd_init(struct drbd_thread *thi ) { struct drbd_tconn *tconn ; int h ; union drbd_state val ; union drbd_state mask ; { tconn = thi->tconn; printk("\016d-con %s: receiver (re)started\n", tconn->name); ldv_54052: h = conn_connect(tconn); if (h == 0) { conn_disconnect(tconn); schedule_timeout_interruptible(250L); } else { } if (h == -1) { printk("\fd-con %s: Discarding network configuration.\n", tconn->name); val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { } if (h == 0) { goto ldv_54052; } else { } if (h > 0) { drbdd(tconn); } else { } conn_disconnect(tconn); printk("\016d-con %s: receiver terminated\n", tconn->name); return (0); } } static int got_conn_RqSReply(struct drbd_tconn *tconn , struct packet_info *pi ) { struct p_req_state_reply *p ; int retcode ; __u32 tmp ; char const *tmp___0 ; { p = (struct p_req_state_reply *)pi->data; tmp = __fswab32(p->retcode); retcode = (int )tmp; if (retcode > 0) { set_bit(6U, (unsigned long volatile *)(& tconn->flags)); } else { set_bit(7U, (unsigned long volatile *)(& tconn->flags)); tmp___0 = drbd_set_st_err_str((enum drbd_state_rv )retcode); printk("\vd-con %s: Requested state change failed by peer: %s (%d)\n", tconn->name, tmp___0, retcode); } __wake_up(& tconn->ping_wait, 3U, 1, 0); return (0); } } static int 
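/* Asender reply handlers.  got_conn_RqSReply() above records the peer's
 * verdict on a connection-wide state-change request in tconn->flags
 * (bit 6 on success, bit 7 on failure, with the reason decoded through
 * drbd_set_st_err_str()) and wakes ping_wait.  The per-volume variant
 * whose definition resumes after this comment checks flag bit 5 of
 * tconn->flags, asserts the pre-100 protocol (agreed_pro_version < 100)
 * and defers to the connection-level handler in that case. */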
got_RqSReply(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_req_state_reply *p ; int retcode ; __u32 tmp ; int tmp___0 ; int tmp___1 ; char const *tmp___2 ; { p = (struct p_req_state_reply *)pi->data; tmp = __fswab32(p->retcode); retcode = (int )tmp; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp___1 = constant_test_bit(5U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___1 != 0) { if (tconn->agreed_pro_version > 99) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( tconn->agreed_pro_version < 100 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 4974); } else { } tmp___0 = got_conn_RqSReply(tconn, pi); return (tmp___0); } else { } if (retcode > 0) { set_bit(3U, (unsigned long volatile *)(& mdev->flags)); } else { set_bit(4U, (unsigned long volatile *)(& mdev->flags)); tmp___2 = drbd_set_st_err_str((enum drbd_state_rv )retcode); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Requested state change failed by peer: %s (%d)\n", tmp___2, retcode); } __wake_up(& mdev->state_wait, 3U, 1, 0); return (0); } } static int got_Ping(struct drbd_tconn *tconn , struct packet_info *pi ) { int tmp ; { tmp = drbd_send_ping_ack(tconn); return (tmp); } } static int got_PingAck(struct drbd_tconn *tconn , struct packet_info *pi ) { int tmp ; { ((tconn->meta.socket)->sk)->sk_rcvtimeo = (long )((tconn->net_conf)->ping_int * 250U); tmp = test_and_set_bit(4, (unsigned long volatile *)(& tconn->flags)); if (tmp == 0) { __wake_up(& tconn->ping_wait, 3U, 1, 0); } else { } return (0); } } static int got_IsInSync(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_block_ack *p ; sector_t sector ; __u64 tmp ; int blksize ; __u32 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { p = (struct p_block_ack *)pi->data; tmp = __fswab64(p->sector); sector = (sector_t )tmp; tmp___0 = __fswab32(p->blksize); blksize = (int )tmp___0; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } if ((mdev->tconn)->agreed_pro_version <= 88) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->tconn->agreed_pro_version >= 89 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 5017); } else { } tmp___1 = __fswab32(p->seq_num); update_peer_seq(mdev, tmp___1); tmp___2 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___2 != 0) { drbd_rs_complete_io(mdev, sector); __drbd_set_in_sync(mdev, sector, blksize, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 5023U); mdev->rs_same_csum = mdev->rs_same_csum + (unsigned long )(blksize >> 12); put_ldev(mdev); } else { } _dec_rs_pending(mdev, "got_IsInSync", 5028); atomic_add(blksize >> 9, & mdev->rs_sect_in); return (0); } } static int validate_req_change_req_state(struct drbd_conf *mdev , u64 id , sector_t sector , struct rb_root *root , char const *func , enum drbd_req_event what , 
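/* validate_req_change_req_state(): shared tail for the ACK handlers.
 * It looks the request up by block_id and sector in the given rb_root
 * under req_lock, applies "what" through __req_mod(), and completes the
 * master bio outside the lock when the transition produced one.  With
 * the missing_ok parameter (resuming after this comment) the caller
 * tolerates a request that has already vanished: find_request()
 * returning NULL still yields -5 here, and got_NegAck reacts to that by
 * marking the region out of sync instead of failing. */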
bool missing_ok ) { struct drbd_request *req ; struct bio_and_error m ; long tmp ; { spin_lock_irq(& (mdev->tconn)->req_lock); req = find_request(mdev, root, id, sector, (int )missing_ok, func); tmp = ldv__builtin_expect((unsigned long )req == (unsigned long )((struct drbd_request *)0), 0L); if (tmp != 0L) { spin_unlock_irq(& (mdev->tconn)->req_lock); return (-5); } else { } __req_mod(req, what, & m); spin_unlock_irq(& (mdev->tconn)->req_lock); if ((unsigned long )m.bio != (unsigned long )((struct bio *)0)) { complete_master_bio(mdev, & m); } else { } return (0); } } static int got_BlockAck(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_block_ack *p ; sector_t sector ; __u64 tmp ; int blksize ; __u32 tmp___0 ; enum drbd_req_event what ; __u32 tmp___1 ; int tmp___2 ; { p = (struct p_block_ack *)pi->data; tmp = __fswab64(p->sector); sector = (sector_t )tmp; tmp___0 = __fswab32(p->blksize); blksize = (int )tmp___0; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp___1 = __fswab32(p->seq_num); update_peer_seq(mdev, tmp___1); if (p->block_id == 0xffffffffffffffffULL) { __drbd_set_in_sync(mdev, sector, blksize, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 5071U); _dec_rs_pending(mdev, "got_BlockAck", 5072); return (0); } else { } switch ((unsigned int )pi->cmd) { case 23U: what = WRITE_ACKED_BY_PEER_AND_SIS; goto ldv_54106; case 22U: what = WRITE_ACKED_BY_PEER; goto ldv_54106; case 21U: what = RECV_ACKED_BY_PEER; goto ldv_54106; case 24U: what = CONFLICT_RESOLVED; goto ldv_54106; case 44U: what = POSTPONE_WRITE; goto ldv_54106; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (5092), "i" (12UL)); ldv_54112: ; goto ldv_54112; } ldv_54106: tmp___2 = validate_req_change_req_state(mdev, p->block_id, sector, & mdev->write_requests, "got_BlockAck", what, 0); return (tmp___2); } } static int got_NegAck(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_block_ack *p ; sector_t sector ; __u64 tmp ; int size ; __u32 tmp___0 ; int err ; __u32 tmp___1 ; { p = (struct p_block_ack *)pi->data; tmp = __fswab64(p->sector); sector = (sector_t )tmp; tmp___0 = __fswab32(p->blksize); size = (int )tmp___0; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp___1 = __fswab32(p->seq_num); update_peer_seq(mdev, tmp___1); if (p->block_id == 0xffffffffffffffffULL) { _dec_rs_pending(mdev, "got_NegAck", 5115); drbd_rs_failed_io(mdev, sector, size); return (0); } else { } err = validate_req_change_req_state(mdev, p->block_id, sector, & mdev->write_requests, "got_NegAck", NEG_ACKED, 1); if (err != 0) { __drbd_set_out_of_sync(mdev, sector, size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 5129U); } else { } return (0); } } static int 
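/* got_NegAck() above: a negative ACK carrying the sentinel block_id
 * 0xffffffffffffffff refers to a resync request and is accounted via
 * drbd_rs_failed_io(); ordinary writes are NEG_ACKED with
 * missing_ok = 1, and if the request is already gone the affected
 * region is simply marked out of sync.  The handler whose definition
 * resumes after this comment is the read counterpart on
 * mdev->read_requests, with missing_ok = 0 and an error line logging
 * sector and length. */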
got_NegDReply(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_block_ack *p ; sector_t sector ; __u64 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { p = (struct p_block_ack *)pi->data; tmp = __fswab64(p->sector); sector = (sector_t )tmp; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp___0 = __fswab32(p->seq_num); update_peer_seq(mdev, tmp___0); tmp___1 = __fswab32(p->blksize); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Got NegDReply; Sector %llus, len %u.\n", (unsigned long long )sector, tmp___1); tmp___2 = validate_req_change_req_state(mdev, p->block_id, sector, & mdev->read_requests, "got_NegDReply", NEG_ACKED, 0); return (tmp___2); } } static int got_NegRSDReply(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; sector_t sector ; int size ; struct p_block_ack *p ; __u64 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { p = (struct p_block_ack *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp = __fswab64(p->sector); sector = (sector_t )tmp; tmp___0 = __fswab32(p->blksize); size = (int )tmp___0; tmp___1 = __fswab32(p->seq_num); update_peer_seq(mdev, tmp___1); _dec_rs_pending(mdev, "got_NegRSDReply", 5170); tmp___2 = _get_ldev_if_state(mdev, D_FAILED); if (tmp___2 != 0) { drbd_rs_complete_io(mdev, sector); switch ((unsigned int )pi->cmd) { case 27U: drbd_rs_failed_io(mdev, sector, size); case 41U: ; goto ldv_54142; default: __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared"), "i" (5180), "i" (12UL)); ldv_54144: ; goto ldv_54144; } ldv_54142: put_ldev(mdev); } else { } return (0); } } static int got_BarrierAck(struct drbd_tconn *tconn , struct packet_info *pi ) { struct p_barrier_ack *p ; struct drbd_conf *mdev ; int vnr ; __u32 tmp ; void *tmp___0 ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; { p = (struct p_barrier_ack *)pi->data; tmp = __fswab32(p->set_size); tl_release(tconn, p->barrier, tmp); rcu_read_lock___2(); vnr = 0; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; goto ldv_54153; ldv_54152: ; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 352U) { tmp___1 = atomic_read((atomic_t const *)(& mdev->ap_in_flight)); if (tmp___1 == 0) { tmp___2 = test_and_set_bit(19, (unsigned long volatile *)(& mdev->flags)); if (tmp___2 == 0) { mdev->start_resync_timer.expires = (unsigned long )jiffies + 250UL; add_timer(& mdev->start_resync_timer); } else { } } else { } } else { } vnr = vnr + 1; tmp___3 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___3; ldv_54153: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_54152; } else { } rcu_read_unlock___2(); return (0); } } static int got_OVResult(struct drbd_tconn *tconn , struct packet_info *pi ) { struct drbd_conf *mdev ; struct p_block_ack *p ; struct drbd_work *w ; sector_t sector ; int size ; __u64 tmp ; __u32 tmp___0 ; __u32 tmp___1 ; __u64 tmp___2 ; int tmp___3 ; void *tmp___4 ; { p = (struct p_block_ack *)pi->data; mdev = vnr_to_mdev(tconn, (int )pi->vnr); if 
((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (-5); } else { } tmp = __fswab64(p->sector); sector = (sector_t )tmp; tmp___0 = __fswab32(p->blksize); size = (int )tmp___0; tmp___1 = __fswab32(p->seq_num); update_peer_seq(mdev, tmp___1); tmp___2 = __fswab64(p->block_id); if (tmp___2 == 4712ULL) { drbd_ov_out_of_sync_found(mdev, sector, size); } else { ov_out_of_sync_print(mdev); } tmp___3 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___3 == 0) { return (0); } else { } drbd_rs_complete_io(mdev, sector); _dec_rs_pending(mdev, "got_OVResult", 5236); mdev->ov_left = mdev->ov_left - 1UL; if ((mdev->ov_left & 512UL) != 0UL) { drbd_advance_rs_marks(mdev, mdev->ov_left); } else { } if (mdev->ov_left == 0UL) { tmp___4 = kmalloc(32UL, 16U); w = (struct drbd_work *)tmp___4; if ((unsigned long )w != (unsigned long )((struct drbd_work *)0)) { w->cb = & w_ov_finished; w->ldv_49807.mdev = mdev; drbd_queue_work(& (mdev->tconn)->sender_work, w); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "kmalloc(w) failed."); ov_out_of_sync_print(mdev); drbd_resync_finished(mdev); } } else { } put_ldev(mdev); return (0); } } static int got_skip(struct drbd_tconn *tconn , struct packet_info *pi ) { { return (0); } } static int tconn_finish_peer_reqs(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; int vnr ; int not_empty ; struct task_struct *tmp ; void *tmp___0 ; int tmp___1 ; void *tmp___2 ; void *tmp___3 ; int tmp___4 ; void *tmp___5 ; { not_empty = 0; ldv_54181: clear_bit(3, (unsigned long volatile *)(& tconn->flags)); tmp = get_current(); flush_signals(tmp); rcu_read_lock___2(); vnr = 0; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; goto ldv_54176; ldv_54175: kref_get(& mdev->kref); rcu_read_unlock___2(); tmp___1 = drbd_finish_peer_reqs(mdev); if (tmp___1 != 0) { kref_put(& mdev->kref, & drbd_minor_destroy); return (1); } else { } kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___2(); vnr = vnr + 1; tmp___2 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___2; ldv_54176: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_54175; } else { } set_bit(3U, (unsigned long volatile *)(& tconn->flags)); spin_lock_irq(& tconn->req_lock); vnr = 0; tmp___3 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___3; goto ldv_54180; ldv_54179: tmp___4 = list_empty((struct list_head const *)(& mdev->done_ee)); not_empty = tmp___4 == 0; if (not_empty != 0) { goto ldv_54178; } else { } vnr = vnr + 1; tmp___5 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___5; ldv_54180: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_54179; } else { } ldv_54178: spin_unlock_irq(& tconn->req_lock); rcu_read_unlock___2(); if (not_empty != 0) { goto ldv_54181; } else { } return (0); } } static struct asender_cmd asender_tbl[45U] = { {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, & got_Ping}, {0UL, & got_PingAck}, {24UL, & got_BlockAck}, {24UL, & got_BlockAck}, {24UL, & got_BlockAck}, {24UL, & got_BlockAck}, {24UL, & got_NegAck}, {24UL, & got_NegDReply}, {24UL, & got_NegRSDReply}, {8UL, & got_BarrierAck}, {4UL, & got_RqSReply}, {0UL, 0}, {0UL, 0}, {24UL, & got_OVResult}, {0UL, 0}, {24UL, & got_IsInSync}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {0UL, 0}, {8UL, & got_skip}, 
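/* ... remaining dispatch slots. asender_tbl is indexed by the packet command
 * number; each entry pairs the expected payload size (pkt_size) with its
 * handler, and {0UL, 0} marks commands the asender thread never receives.
 * drbd_asender() below rejects any command whose slot has a NULL handler or
 * whose on-wire payload size disagrees with pkt_size. */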
{0UL, 0}, {24UL, & got_NegRSDReply}, {0UL, 0}, {4UL, & got_conn_RqSReply}, {24UL, & got_BlockAck}}; int drbd_asender(struct drbd_thread *thi ) { struct drbd_tconn *tconn ; struct asender_cmd *cmd ; struct packet_info pi ; int rv ; void *buf ; int received ; unsigned int header_size ; unsigned int tmp ; int expect ; bool ping_timeout_active ; struct net_conf *nc ; int ping_timeo ; int tcp_cork ; int ping_int ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; struct net_conf *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; struct task_struct *tmp___7 ; int tmp___8 ; struct task_struct *tmp___9 ; long t ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___10 ; int tmp___11 ; long __ret ; wait_queue_t __wait ; struct task_struct *tmp___12 ; int tmp___13 ; long tmp___14 ; int tmp___15 ; char const *tmp___16 ; bool err ; int tmp___17 ; enum drbd_thread_state tmp___18 ; union drbd_state val ; union drbd_state mask ; union drbd_state val___0 ; union drbd_state mask___0 ; { tconn = thi->tconn; cmd = 0; buf = tconn->meta.rbuf; received = 0; tmp = drbd_header_size(tconn); header_size = tmp; expect = (int )header_size; ping_timeout_active = 0; tmp___0 = get_current(); tmp___0->policy = 2U; tmp___1 = get_current(); tmp___1->rt_priority = 2U; goto ldv_54209; ldv_54230: drbd_thread_current_set_cpu(thi); rcu_read_lock___2(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 5346, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; ping_timeo = (int )nc->ping_timeo; tcp_cork = (int )nc->tcp_cork; ping_int = (int )nc->ping_int; rcu_read_unlock___2(); tmp___5 = test_and_clear_bit(2, (unsigned long volatile *)(& tconn->flags)); if (tmp___5 != 0) { tmp___4 = drbd_send_ping(tconn); if (tmp___4 != 0) { printk("\vd-con %s: drbd_send_ping has failed\n", tconn->name); goto reconnect; } else { } ((tconn->meta.socket)->sk)->sk_rcvtimeo = (long )((ping_timeo * 250) / 10); ping_timeout_active = 1; } else { } if (tcp_cork != 0) { drbd_tcp_cork___0(tconn->meta.socket); } else { } tmp___6 = tconn_finish_peer_reqs(tconn); if (tmp___6 != 0) { printk("\vd-con %s: tconn_finish_peer_reqs() failed\n", tconn->name); goto reconnect; } else { } if (tcp_cork != 0) { drbd_tcp_uncork___0(tconn->meta.socket); } else { } tmp___7 = get_current(); tmp___8 = signal_pending(tmp___7); if (tmp___8 != 0) { goto ldv_54209; } else { } rv = drbd_recv_short(tconn->meta.socket, buf, (size_t )(expect - received), 0); clear_bit(3, (unsigned long volatile *)(& tconn->flags)); tmp___9 = get_current(); flush_signals(tmp___9); tmp___14 = ldv__builtin_expect(rv > 0, 1L); if (tmp___14 != 0L) { received = received + rv; buf = buf + (unsigned long )rv; } else if (rv == 0) { tmp___13 = constant_test_bit(12U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___13 != 0) { rcu_read_lock___2(); _________p1___0 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___10 = debug_lockdep_rcu_enabled(); if (tmp___10 != 0 && ! 
__warned___0) { tmp___11 = rcu_read_lock_held(); if (tmp___11 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_receiver.c.prepared", 5399, "suspicious rcu_dereference_check() usage"); } else { } } else { } t = (long )((_________p1___0->ping_timeo * 250U) / 10U); rcu_read_unlock___2(); __ret = t; if ((unsigned int )tconn->cstate > 8U) { tmp___12 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___12; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_54217: prepare_to_wait(& tconn->ping_wait, & __wait, 2); if ((unsigned int )tconn->cstate <= 8U) { goto ldv_54216; } else { } __ret = schedule_timeout(__ret); if (__ret == 0L) { goto ldv_54216; } else { } goto ldv_54217; ldv_54216: finish_wait(& tconn->ping_wait, & __wait); } else { } t = __ret; if (t != 0L) { goto ldv_54219; } else { } } else { } printk("\vd-con %s: meta connection shut down by peer.\n", tconn->name); goto reconnect; } else if (rv == -11) { if ((1 != 0 && 1 != 0) && (long )((unsigned long )jiffies - (unsigned long )((tconn->meta.socket)->sk)->sk_rcvtimeo) - (long )tconn->last_received < 0L) { goto ldv_54209; } else { } if ((int )ping_timeout_active) { printk("\vd-con %s: PingAck did not arrive in time.\n", tconn->name); goto reconnect; } else { } set_bit(2U, (unsigned long volatile *)(& tconn->flags)); goto ldv_54209; } else if (rv == -4) { goto ldv_54209; } else { printk("\vd-con %s: sock_recvmsg returned %d\n", tconn->name, rv); goto reconnect; } if (received == expect && (unsigned long )cmd == (unsigned long )((struct asender_cmd *)0)) { tmp___15 = decode_header(tconn, tconn->meta.rbuf, & pi); if (tmp___15 != 0) { goto reconnect; } else { } cmd = (struct asender_cmd *)(& asender_tbl) + (unsigned long )pi.cmd; if ((unsigned int )pi.cmd > (unsigned int )P_RETRY_WRITE || (unsigned long )cmd->fn == (unsigned long )((int (*)(struct drbd_tconn * , struct packet_info * ))0)) { tmp___16 = cmdname(pi.cmd); printk("\vd-con %s: Unexpected meta packet %s (0x%04x)\n", tconn->name, tmp___16, (unsigned int )pi.cmd); goto disconnect; } else { } expect = (int )((unsigned int )cmd->pkt_size + header_size); if (pi.size != (unsigned int )expect - header_size) { printk("\vd-con %s: Wrong packet size on meta (c: %d, l: %d)\n", tconn->name, (unsigned int )pi.cmd, pi.size); goto reconnect; } else { } } else { } if (received == expect) { tmp___17 = (*(cmd->fn))(tconn, & pi); err = tmp___17 != 0; if ((int )err) { printk("\vd-con %s: %pf failed\n", tconn->name, cmd->fn); goto reconnect; } else { } tconn->last_received = jiffies; if ((unsigned long )cmd == (unsigned long )((struct asender_cmd *)(& asender_tbl) + 20UL)) { ((tconn->meta.socket)->sk)->sk_rcvtimeo = (long )(ping_int * 250); ping_timeout_active = 0; } else { } buf = tconn->meta.rbuf; received = 0; expect = (int )header_size; cmd = 0; } else { } ldv_54209: tmp___18 = get_t_state(thi); if ((unsigned int )tmp___18 == 1U) { goto ldv_54230; } else { } ldv_54219: ; if (0) { reconnect: val.i = 0U; val.ldv_40024.conn = 5U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); conn_md_sync(tconn); } else { } if (0) { disconnect: val___0.i = 0U; val___0.ldv_40024.conn = 1U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; conn_request_state(tconn, mask___0, val___0, CS_HARD); } else { } 
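/* Common exit path: the reconnect label above forces the connection to
 * C_NETWORK_FAILURE (conn = 5U) with CS_HARD and syncs the metadata via
 * conn_md_sync(), while disconnect forces C_DISCONNECTING (conn = 1U);
 * in every case the thread clears its signal flag and logs its own
 * termination below. */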
clear_bit(3, (unsigned long volatile *)(& tconn->flags)); printk("\016d-con %s: asender terminated\n", tconn->name); return (0); } } void ldv_main3_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_54258; ldv_54257: tmp = nondet_int(); switch (tmp) { default: ; goto ldv_54256; } ldv_54256: ; ldv_54258: tmp___0 = nondet_int(); if (tmp___0 != 0) { goto ldv_54257; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_93(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_94(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_95(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_96(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_97(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_98(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_99(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_100(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_101(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_102(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_103(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_104(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_105(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_106(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_107(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_108(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_109(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_110(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_111(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void 
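/* The ldv_mutex_*_NNN wrappers below belong to the LDV verification harness:
 * each one first updates the verifier's model of a specific lock class
 * (e.g. conf_update_of_drbd_tconn, state_mutex_of_drbd_conf) and then
 * delegates to the real kernel mutex primitive. The duplicated returns in
 * the trylock/is_locked variants are generated code: the model's verdict
 * (tmp___0) is returned and the saved ldv_func_res is unreachable. */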
ldv_mutex_unlock_112(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_113(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_114(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_115(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_116(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_117(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_118(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static int ldv_mutex_is_locked_119(struct mutex *lock ) { ldv_func_ret_type___25 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_is_locked(lock); ldv_func_res = tmp; tmp___0 = ldv_mutex_is_locked_state_mutex_of_drbd_conf(lock); return (tmp___0); return (ldv_func_res); } } __inline static int ldv_mutex_is_locked_120(struct mutex *lock ) { ldv_func_ret_type___26 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_is_locked(lock); ldv_func_res = tmp; tmp___0 = ldv_mutex_is_locked_cstate_mutex_of_drbd_tconn(lock); return (tmp___0); return (ldv_func_res); } } __inline static int test_and_change_bit(int nr , unsigned long volatile *addr ) { int oldbit ; { __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; btc %2,%1\n\tsbb %0,%0": "=r" (oldbit), "+m" (*((long volatile *)addr)): "Ir" (nr): "memory"); return (oldbit); } } int ldv_mutex_trylock_152(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_150(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_153(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_155(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_157(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_159(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_149(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_151(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_154(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_156(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_158(struct mutex *ldv_func_arg1 ) ; extern int cpu_number ; extern void __bad_size_call_parameter(void) ; __inline static void kref_init(struct kref *kref ) { { atomic_set(& kref->refcount, 1); return; } } extern char const *bdevname(struct block_device * , char * ) ; __inline static struct gendisk *part_to_disk(struct hd_struct *part ) { struct device const *__mptr ; struct device const *__mptr___0 ; long tmp ; { tmp = ldv__builtin_expect((unsigned long )part != (unsigned long )((struct hd_struct *)0), 1L); if (tmp != 0L) { if (part->partno != 0) { __mptr = (struct device const *)part->__dev.parent; return ((struct gendisk *)__mptr + 0xffffffffffffff90UL); } else { __mptr___0 = (struct device const *)(& part->__dev); return ((struct gendisk *)__mptr___0 + 0xffffffffffffff90UL); } } else { } return (0); } } __inline static void part_inc_in_flight(struct hd_struct *part , int rw ) { struct gendisk *tmp ; { atomic_inc((atomic_t *)(& part->in_flight) + 
(unsigned long )rw); if (part->partno != 0) { tmp = part_to_disk(part); atomic_inc((atomic_t *)(& tmp->part0.in_flight) + (unsigned long )rw); } else { } return; } } __inline static void part_dec_in_flight(struct hd_struct *part , int rw ) { struct gendisk *tmp ; { atomic_dec((atomic_t *)(& part->in_flight) + (unsigned long )rw); if (part->partno != 0) { tmp = part_to_disk(part); atomic_dec((atomic_t *)(& tmp->part0.in_flight) + (unsigned long )rw); } else { } return; } } extern void part_round_stats(int , struct hd_struct * ) ; __inline static int bdi_read_congested(struct backing_dev_info *bdi ) { int tmp ; { tmp = bdi_congested(bdi, 8); return (tmp); } } mempool_t *drbd_request_mempool ; void __drbd_make_request(struct drbd_conf *mdev , struct bio *bio , unsigned long start_time ) ; void drbd_make_request(struct request_queue *q , struct bio *bio ) ; int drbd_merge_bvec(struct request_queue *q , struct bvec_merge_data *bvm , struct bio_vec *bvec ) ;
/* __drbd_chk_io_error____1: apply the disk_conf on_io_error policy. "Pass on"
 * (case 0U) logs the failure and demotes a healthy disk to inconsistent;
 * the detach policies (cases 1U/2U) set the force-detach flags and move the
 * disk to failed, logging "Detaching". */
__inline static void __drbd_chk_io_error____1(struct drbd_conf *mdev , enum drbd_force_detach_flags df , char const *where ) { enum drbd_io_error_p ep ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; union drbd_state __ns ; union drbd_state __ns___0 ; { rcu_read_lock___0(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1682, "suspicious rcu_dereference_check() usage"); } else { } } else { } ep = (enum drbd_io_error_p )_________p1->on_io_error; rcu_read_unlock___0(); switch ((unsigned int )ep) { case 0U: ; if ((unsigned int )df == 0U || (unsigned int )df == 1U) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "__drbd_chk_io_error_"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s.\n", where); } else { } if ((int )mdev->state.ldv_49522.disk > 4) { __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 4U; _drbd_set_state(mdev, __ns, CS_HARD, 0); } else { } goto ldv_51571; } else { } case 2U: ; case 1U: set_bit(12U, (unsigned long volatile *)(& mdev->flags)); if ((unsigned int )df == 0U) { set_bit(13U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((unsigned int )df == 3U) { set_bit(14U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((int )mdev->state.ldv_49522.disk > 2) { __ns___0 = drbd_read_state(mdev); __ns___0.ldv_40024.disk = 2U; _drbd_set_state(mdev, __ns___0, CS_HARD, 0); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s. Detaching...\n", where); } else { } goto ldv_51571; } ldv_51571: ; return; } }
__inline static void inc_ap_pending(struct drbd_conf *mdev ) { { atomic_inc(& mdev->ap_pending_cnt); return; } } __inline static void _dec_ap_pending(struct drbd_conf *mdev , char const *func , int line ) { int tmp ; int tmp___0 ; int tmp___1 ; { tmp = atomic_dec_and_test(& mdev->ap_pending_cnt); if (tmp != 0) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } tmp___1 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); if (tmp___1 < 0) { tmp___0 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "in %s:%d: ap_pending_cnt = %d < 0 !\n", func, line, tmp___0); } else { } return; } }
__inline static int drbd_get_max_buffers(struct drbd_conf *mdev ) { struct net_conf *nc ; int mxb ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; { rcu_read_lock___0(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 2139, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; mxb = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? (int )nc->max_buffers : 1000000; rcu_read_unlock___0(); return (mxb); } }
__inline static int drbd_state_is_stable(struct drbd_conf *mdev ) { union drbd_dev_state s ; { s = mdev->state; switch ((unsigned int )s.ldv_49522.conn) { case 0U: ; case 8U: ; case 10U: ; case 16U: ; case 17U: ; case 18U: ; case 19U: ; case 20U: ; case 21U: ; case 22U: ; case 23U: ; case 1U: ; case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: ; case 9U: ; case 11U: ; case 12U: ; goto ldv_51800; case 13U: ; if ((mdev->tconn)->agreed_pro_version <= 95) { return (0); } else { } goto ldv_51800; case 14U: ; case 15U: ; case 31U: ; return (0); } ldv_51800: ; switch ((unsigned int )s.ldv_49522.disk) { case 0U: ; case 4U: ; case 5U: ; case 7U: ; case 8U: ; case 2U: ; goto ldv_51811; case 1U: ; case 3U: ; case 6U: ; case 15U: ; return (0); } ldv_51811: ; return (1); } }
__inline static bool may_inc_ap_bio(struct drbd_conf *mdev ) { int mxb ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = drbd_get_max_buffers(mdev); mxb = tmp; tmp___0 = drbd_suspended(mdev); if (tmp___0 != 0) { return (0); } else { } tmp___1 = constant_test_bit(8U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___1 != 0) { return (0); } else { } tmp___2 = drbd_state_is_stable(mdev); if (tmp___2 == 0) { return (0); } else { } tmp___3 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); if (tmp___3 > mxb) { return (0); } else { } tmp___4 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___4 != 0) { return (0); } else { } return (1); } } __inline static bool inc_ap_bio_cond(struct drbd_conf *mdev ) { bool rv ; { rv = 0; spin_lock_irq(& (mdev->tconn)->req_lock); rv = may_inc_ap_bio(mdev); if ((int )rv) { atomic_inc(& mdev->ap_bio_cnt); } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); return (rv); } } __inline static void inc_ap_bio(struct drbd_conf *mdev ) { bool tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; bool tmp___1 ; { tmp = inc_ap_bio_cond(mdev); if ((int )tmp) { goto ldv_51831; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private
= (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51834: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___1 = inc_ap_bio_cond(mdev); if ((int )tmp___1) { goto ldv_51833; } else { } schedule(); goto ldv_51834; ldv_51833: finish_wait(& mdev->misc_wait, & __wait); ldv_51831: ; return; } } __inline static void dec_ap_bio(struct drbd_conf *mdev ) { int mxb ; int tmp ; int ap_bio ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = drbd_get_max_buffers(mdev); mxb = tmp; tmp___0 = atomic_sub_return(1, & mdev->ap_bio_cnt); ap_bio = tmp___0; if (ap_bio < 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( ap_bio >= 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 2280); } else { } if (ap_bio == 0) { tmp___2 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 != 0) { tmp___1 = test_and_set_bit(10, (unsigned long volatile *)(& mdev->flags)); if (tmp___1 == 0) { drbd_queue_work(& (mdev->tconn)->sender_work, & mdev->bm_io_work.w); } else { } } else { } } else { } if (ap_bio < mxb) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } return; } } void drbd_req_destroy(struct kref *kref ) ; void request_timer_fn(unsigned long data ) ; void drbd_restart_request(struct drbd_request *req ) ; __inline static int _req_mod(struct drbd_request *req , enum drbd_req_event what ) { struct drbd_conf *mdev ; struct bio_and_error m ; int rv ; { mdev = req->w.ldv_49807.mdev; rv = __req_mod(req, what, & m); if ((unsigned long )m.bio != (unsigned long )((struct bio *)0)) { complete_master_bio(mdev, & m); } else { } return (rv); } } __inline static bool drbd_should_do_remote(union drbd_dev_state s ) { { return ((bool )(*((unsigned int *)(& s) + 0UL) == 65536U || (((int )s.ldv_49522.pdsk > 3 && (int )s.ldv_49522.conn > 13) && (int )s.ldv_49522.conn <= 21))); } } __inline static bool drbd_should_send_out_of_sync(union drbd_dev_state s ) { { return ((bool )((unsigned int )*((unsigned short *)(& s) + 0UL) == 352U || (unsigned int )*((unsigned short *)(& s) + 0UL) == 208U)); } } static bool drbd_may_do_local_read(struct drbd_conf *mdev , sector_t sector , int size ) ; static void _drbd_start_io_acct(struct drbd_conf *mdev , struct drbd_request *req , struct bio *bio ) { int rw ; int cpu ; struct thread_info *tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; void const *__vpp_verify___0 ; unsigned long __ptr ; void const *__vpp_verify___1 ; unsigned long __ptr___0 ; struct gendisk *tmp___0 ; void const *__vpp_verify___2 ; unsigned long __ptr___1 ; void const *__vpp_verify___3 ; unsigned long __ptr___2 ; struct gendisk *tmp___1 ; struct thread_info *tmp___2 ; { rw = (int const )bio->bi_rw & (int const )1; rcu_read_lock___0(); tmp = current_thread_info___0(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); __vpp_verify = 0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_52003; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_52003; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_52003; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_52003; default: __bad_percpu_size(); } ldv_52003: pscr_ret__ = pfo_ret__; goto 
ldv_52009; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52013; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52013; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52013; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52013; default: __bad_percpu_size(); } ldv_52013: pscr_ret__ = pfo_ret_____0; goto ldv_52009; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52022; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52022; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52022; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52022; default: __bad_percpu_size(); } ldv_52022: pscr_ret__ = pfo_ret_____1; goto ldv_52009; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52031; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52031; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52031; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52031; default: __bad_percpu_size(); } ldv_52031: pscr_ret__ = pfo_ret_____2; goto ldv_52009; default: __bad_size_call_parameter(); goto ldv_52009; } ldv_52009: cpu = pscr_ret__; part_round_stats(cpu, & (mdev->vdisk)->part0); __vpp_verify___0 = 0; __asm__ ("": "=r" (__ptr): "0" ((mdev->vdisk)->part0.dkstats)); ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr))->ios[rw] = ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr))->ios[rw] + 1UL; if ((mdev->vdisk)->part0.partno != 0) { __vpp_verify___1 = 0; tmp___0 = part_to_disk(& (mdev->vdisk)->part0); __asm__ ("": "=r" (__ptr___0): "0" (tmp___0->part0.dkstats)); ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___0))->ios[rw] = ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___0))->ios[rw] + 1UL; } else { } __vpp_verify___2 = 0; __asm__ ("": "=r" (__ptr___1): "0" ((mdev->vdisk)->part0.dkstats)); ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___1))->sectors[rw] = ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___1))->sectors[rw] + (unsigned long )(bio->bi_size >> 9); if ((mdev->vdisk)->part0.partno != 0) { __vpp_verify___3 = 0; tmp___1 = part_to_disk(& (mdev->vdisk)->part0); __asm__ ("": "=r" (__ptr___2): "0" (tmp___1->part0.dkstats)); ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___2))->sectors[rw] = ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___2))->sectors[rw] + (unsigned long )(bio->bi_size >> 9); } else { } part_inc_in_flight(& (mdev->vdisk)->part0, rw); __asm__ volatile ("": : : "memory"); tmp___2 = current_thread_info___0(); tmp___2->preempt_count = tmp___2->preempt_count + -1; __asm__ volatile ("": : : "memory"); rcu_read_unlock___0(); return; } } static void _drbd_end_io_acct(struct drbd_conf *mdev , struct drbd_request *req ) { int rw ; unsigned long duration ; int cpu ; struct thread_info *tmp ; int pscr_ret__ ; void const *__vpp_verify ; int pfo_ret__ ; int pfo_ret_____0 ; int pfo_ret_____1 ; int pfo_ret_____2 ; void const *__vpp_verify___0 ; unsigned long __ptr ; void const *__vpp_verify___1 ; unsigned long __ptr___0 ; struct gendisk *tmp___0 ; struct thread_info *tmp___1 ; { rw = (int 
)(req->master_bio)->bi_rw & 1; duration = (unsigned long )jiffies - req->start_time; rcu_read_lock___0(); tmp = current_thread_info___0(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); __vpp_verify = 0; switch (4UL) { case 1UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "m" (cpu_number)); goto ldv_52069; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_52069; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_52069; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "m" (cpu_number)); goto ldv_52069; default: __bad_percpu_size(); } ldv_52069: pscr_ret__ = pfo_ret__; goto ldv_52075; case 2UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52079; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52079; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52079; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret_____0): "m" (cpu_number)); goto ldv_52079; default: __bad_percpu_size(); } ldv_52079: pscr_ret__ = pfo_ret_____0; goto ldv_52075; case 4UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52088; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52088; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52088; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret_____1): "m" (cpu_number)); goto ldv_52088; default: __bad_percpu_size(); } ldv_52088: pscr_ret__ = pfo_ret_____1; goto ldv_52075; case 8UL: ; switch (4UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52097; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52097; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52097; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret_____2): "m" (cpu_number)); goto ldv_52097; default: __bad_percpu_size(); } ldv_52097: pscr_ret__ = pfo_ret_____2; goto ldv_52075; default: __bad_size_call_parameter(); goto ldv_52075; } ldv_52075: cpu = pscr_ret__; __vpp_verify___0 = 0; __asm__ ("": "=r" (__ptr): "0" ((mdev->vdisk)->part0.dkstats)); ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr))->ticks[rw] = ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr))->ticks[rw] + duration; if ((mdev->vdisk)->part0.partno != 0) { __vpp_verify___1 = 0; tmp___0 = part_to_disk(& (mdev->vdisk)->part0); __asm__ ("": "=r" (__ptr___0): "0" (tmp___0->part0.dkstats)); ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___0))->ticks[rw] = ((struct disk_stats *)(__per_cpu_offset[cpu] + __ptr___0))->ticks[rw] + duration; } else { } part_round_stats(cpu, & (mdev->vdisk)->part0); part_dec_in_flight(& (mdev->vdisk)->part0, rw); __asm__ volatile ("": : : "memory"); tmp___1 = current_thread_info___0(); tmp___1->preempt_count = tmp___1->preempt_count + -1; __asm__ volatile ("": : : "memory"); rcu_read_unlock___0(); return; } } static struct drbd_request *drbd_req_new(struct drbd_conf *mdev , struct bio *bio_src ) { struct drbd_request *req ; void *tmp ; { tmp = mempool_alloc(drbd_request_mempool, 16U); req = (struct drbd_request *)tmp; if ((unsigned long )req == (unsigned long )((struct drbd_request *)0)) { return (0); } else { } drbd_req_make_private_bio(req, bio_src); req->rq_state = (int 
)bio_src->bi_rw & 1 ? 2048U : 0U; req->w.ldv_49807.mdev = mdev; req->master_bio = bio_src; req->epoch = 0U; drbd_clear_interval(& req->i); req->i.sector = bio_src->bi_sector; req->i.size = bio_src->bi_size; req->i.local = -1; req->i.waiting = 0; INIT_LIST_HEAD(& req->tl_requests); INIT_LIST_HEAD(& req->w.list); atomic_set(& req->completion_ref, 1); kref_init(& req->kref); return (req); } } void drbd_req_destroy(struct kref *kref ) { struct drbd_request *req ; struct kref const *__mptr ; struct drbd_conf *mdev ; unsigned int s ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { __mptr = (struct kref const *)kref; req = (struct drbd_request *)__mptr + 0xffffffffffffff74UL; mdev = req->w.ldv_49807.mdev; s = req->rq_state; if ((unsigned long )req->master_bio != (unsigned long )((struct bio *)0) && ((unsigned long )s & 8192UL) == 0UL) { tmp = atomic_read((atomic_t const *)(& req->completion_ref)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", s, tmp); return; } else { tmp___0 = atomic_read((atomic_t const *)(& req->completion_ref)); if (tmp___0 != 0) { tmp = atomic_read((atomic_t const *)(& req->completion_ref)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", s, tmp); return; } else if ((int )s & 1) { tmp = atomic_read((atomic_t const *)(& req->completion_ref)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", s, tmp); return; } else if (((unsigned long )s & 1008UL) != 0UL && ((unsigned long )s & 128UL) == 0UL) { tmp = atomic_read((atomic_t const *)(& req->completion_ref)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", s, tmp); return; } else { } } list_del_init(& req->tl_requests); if (((unsigned long )s & 2048UL) != 0UL) { if (((unsigned long )s & 9215UL) != 8192UL) { if (((unsigned long )s & 256UL) == 0UL || ((unsigned long )s & 4UL) == 0UL) { __drbd_set_out_of_sync(mdev, req->i.sector, (int )req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 222U); } else { } if ((((unsigned long )s & 256UL) != 0UL && ((unsigned long )s & 4UL) != 0UL) && ((unsigned long )s & 512UL) != 0UL) { __drbd_set_in_sync(mdev, req->i.sector, (int )req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 225U); } else { } } else { } if (((unsigned long )s & 4096UL) != 0UL) { tmp___2 = _get_ldev_if_state(mdev, D_FAILED); if (tmp___2 != 0) { drbd_al_complete_io(mdev, & req->i); put_ldev(mdev); } else { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "drbd_req_destroy"); if (tmp___1 != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Should have called drbd_al_complete_io(, %llu, %u), but my Disk seems to have failed :(\n", (unsigned long long )req->i.sector, req->i.size); } else { } } } else { } } else { } mempool_free((void *)req, drbd_request_mempool); return; } } static void wake_all_senders(struct drbd_tconn *tconn ) { { __wake_up(& tconn->sender_work.q_wait, 3U, 1, 0); return; } } static void start_new_tl_epoch(struct drbd_tconn *tconn ) { { if 
(tconn->current_tle_writes == 0U) { return; } else { } tconn->current_tle_writes = 0U; atomic_inc(& tconn->current_tle_nr); wake_all_senders(tconn); return; } } void complete_master_bio(struct drbd_conf *mdev , struct bio_and_error *m ) { { bio_endio(m->bio, m->error); dec_ap_bio(mdev); return; } } static void drbd_remove_request_interval(struct rb_root *root , struct drbd_request *req ) { struct drbd_conf *mdev ; struct drbd_interval *i ; { mdev = req->w.ldv_49807.mdev; i = & req->i; drbd_remove_interval(root, i); if ((unsigned int )*((unsigned char *)i + 48UL) != 0U) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } return; } } static void drbd_req_complete(struct drbd_request *req , struct bio_and_error *m ) { unsigned int s ; struct drbd_conf *mdev ; int rw ; int error ; int ok ; long tmp ; struct rb_root *root ; bool tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { s = req->rq_state; mdev = req->w.ldv_49807.mdev; if (((((int )s & 1 && ((unsigned long )s & 8UL) == 0UL) || ((unsigned long )s & 32UL) != 0UL) || ((unsigned long )s & 16UL) != 0UL) || ((unsigned long )s & 16384UL) != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s); return; } else { } if ((unsigned long )req->master_bio == (unsigned long )((struct bio *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_req_complete: Logic BUG, master_bio == NULL!\n"); return; } else { } rw = (int )(req->master_bio)->bi_rw & 8193; ok = ((unsigned long )s & 4UL) != 0UL || ((unsigned long )s & 256UL) != 0UL; tmp = PTR_ERR((void const *)req->private_bio); error = (int )tmp; tmp___0 = drbd_interval_empty(& req->i); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { if (rw == 1) { root = & mdev->write_requests; } else { root = & mdev->read_requests; } drbd_remove_request_interval(root, req); } else if (((unsigned long )s & 8192UL) == 0UL) { if (((unsigned long )s & 880UL) != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( (s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 354); } else { } } else { } if (rw == 1) { tmp___2 = atomic_read((atomic_t const *)(& (mdev->tconn)->current_tle_nr)); if (req->epoch == (unsigned int )tmp___2) { start_new_tl_epoch(mdev->tconn); } else { } } else { } _drbd_end_io_acct(mdev, req); if (ok == 0 && rw == 0) { tmp___3 = list_empty((struct list_head const *)(& req->tl_requests)); if (tmp___3 == 0) { req->rq_state = req->rq_state | 8192U; } else { } } else { } if (((unsigned long )req->rq_state & 8192UL) == 0UL) { m->error = ok == 0 ? (error != 0 ? 
error : -5) : 0; m->bio = req->master_bio; req->master_bio = 0; } else { } return; } } static int drbd_req_put_completion_ref(struct drbd_request *req , struct bio_and_error *m , int put ) { struct drbd_conf *mdev ; int tmp ; { mdev = req->w.ldv_49807.mdev; if ((unsigned long )m == (unsigned long )((struct bio_and_error *)0) && ((unsigned long )req->rq_state & 8192UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( m || (req->rq_state & RQ_POSTPONED) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 397); } else { } tmp = atomic_sub_and_test(put, & req->completion_ref); if (tmp == 0) { return (0); } else { } drbd_req_complete(req, m); if (((unsigned long )req->rq_state & 8192UL) != 0UL) { drbd_restart_request(req); return (0); } else { } return (1); } } static void mod_rq_state(struct drbd_request *req , struct bio_and_error *m , int clear , int set ) { struct drbd_conf *mdev ; unsigned int s ; int c_put ; int k_put ; int tmp ; int at_least ; int refcount ; int tmp___0 ; int tmp___1 ; { mdev = req->w.ldv_49807.mdev; s = req->rq_state; c_put = 0; k_put = 0; tmp = drbd_suspended(mdev); if (tmp != 0 && ((unsigned long )(s | (unsigned int )clear) & 16384UL) == 0UL) { set = set | 16384; } else { } req->rq_state = req->rq_state & (unsigned int )(~ clear); req->rq_state = req->rq_state | (unsigned int )set; if (req->rq_state == s) { return; } else { } if (((unsigned long )s & 1UL) == 0UL && set & 1) { atomic_inc(& req->completion_ref); } else { } if (((unsigned long )s & 16UL) == 0UL && ((unsigned long )set & 16UL) != 0UL) { inc_ap_pending(mdev); atomic_inc(& req->completion_ref); } else { } if (((unsigned long )s & 32UL) == 0UL && ((unsigned long )set & 32UL) != 0UL) { atomic_inc(& req->completion_ref); } else { } if (((unsigned long )s & 131072UL) == 0UL && ((unsigned long )set & 131072UL) != 0UL) { kref_get(& req->kref); } else { } if (((unsigned long )s & 64UL) == 0UL && ((unsigned long )set & 64UL) != 0UL) { atomic_add((int )(req->i.size >> 9), & mdev->ap_in_flight); } else { } if (((unsigned long )s & 16384UL) == 0UL && ((unsigned long )set & 16384UL) != 0UL) { atomic_inc(& req->completion_ref); } else { } if (((unsigned long )s & 16384UL) != 0UL && ((unsigned long )clear & 16384UL) != 0UL) { c_put = c_put + 1; } else { } if (((unsigned long )s & 8UL) == 0UL && ((unsigned long )set & 8UL) != 0UL) { if (((unsigned long )req->rq_state & 1UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_LOCAL_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 464); } else { } kref_get(& req->kref); c_put = c_put + 1; } else { } if ((int )s & 1 && clear & 1) { if (((unsigned long )req->rq_state & 8UL) != 0UL) { k_put = k_put + 1; } else { c_put = c_put + 1; } } else { } if (((unsigned long )s & 16UL) != 0UL && ((unsigned long )clear & 16UL) != 0UL) { _dec_ap_pending(mdev, "mod_rq_state", 479); c_put = c_put + 1; } else { } if (((unsigned long )s & 32UL) != 0UL && ((unsigned long )clear & 32UL) != 0UL) { c_put = c_put + 1; } else { } if ((((unsigned long )s & 131072UL) != 0UL && ((unsigned long )s & 128UL) == 0UL) && ((unsigned long )set & 128UL) != 0UL) { if (((unsigned long 
)req->rq_state & 64UL) != 0UL) { atomic_sub((int )(req->i.size >> 9), & mdev->ap_in_flight); } else { } k_put = k_put + 1; } else { } if (k_put != 0 || c_put != 0) { at_least = (c_put != 0) + k_put; tmp___0 = atomic_read((atomic_t const *)(& req->kref.refcount)); refcount = tmp___0; if (refcount < at_least) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "mod_rq_state: Logic BUG: %x -> %x: refcount = %d, should be >= %d\n", s, req->rq_state, refcount, at_least); } else { } } else { } if ((unsigned int )*((unsigned char *)req + 88UL) != 0U) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } if (c_put != 0) { tmp___1 = drbd_req_put_completion_ref(req, m, c_put); k_put = tmp___1 + k_put; } else { } if (k_put != 0) { kref_sub(& req->kref, (unsigned int )k_put, & drbd_req_destroy); } else { } return; } } static void drbd_report_io_error(struct drbd_conf *mdev , struct drbd_request *req ) { char b[32U] ; int tmp ; char const *tmp___0 ; { tmp = ___ratelimit(& drbd_ratelimit_state, "drbd_report_io_error"); if (tmp == 0) { return; } else { } tmp___0 = bdevname((mdev->ldev)->backing_bdev, (char *)(& b)); dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "local %s IO error sector %llu+%u on %s\n", ((unsigned long )req->rq_state & 2048UL) != 0UL ? (char *)"WRITE" : (char *)"READ", (unsigned long long )req->i.sector, req->i.size >> 9, tmp___0); return; } } int __req_mod(struct drbd_request *req , enum drbd_req_event what , struct bio_and_error *m ) { struct drbd_conf *mdev ; struct net_conf *nc ; int p ; int rv ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; int tmp___4 ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___5 ; int tmp___6 ; { mdev = req->w.ldv_49807.mdev; rv = 0; if ((unsigned long )m != (unsigned long )((struct bio_and_error *)0)) { m->bio = 0; } else { } switch ((unsigned int )what) { default: dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "LOGIC BUG in %s:%u\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 553); goto ldv_52190; case 1U: ; if (((unsigned long )req->rq_state & 1008UL) != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !(req->rq_state & RQ_NET_MASK) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 565); } else { } rcu_read_lock___0(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 567, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; p = (int )nc->wire_protocol; rcu_read_unlock___0(); req->rq_state = req->rq_state | (p != 3 ? (p == 2 ? 
32768U : 0U) : 65536U); mod_rq_state(req, m, 0, 16); goto ldv_52190; case 2U: ; if (((unsigned long )req->rq_state & 15UL) != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !(req->rq_state & RQ_LOCAL_MASK) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 578); } else { } mod_rq_state(req, m, 0, 1); goto ldv_52190; case 24U: ; if (((unsigned long )req->rq_state & 2048UL) != 0UL) { mdev->writ_cnt = mdev->writ_cnt + (req->i.size >> 9); } else { mdev->read_cnt = mdev->read_cnt + (req->i.size >> 9); } mod_rq_state(req, m, 1, 6); goto ldv_52190; case 23U: mod_rq_state(req, m, 0, 8); goto ldv_52190; case 22U: drbd_report_io_error(mdev, req); __drbd_chk_io_error____1(mdev, DRBD_WRITE_ERROR, "__req_mod"); mod_rq_state(req, m, 1, 2); goto ldv_52190; case 20U: __drbd_set_out_of_sync(mdev, req->i.sector, (int )req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 603U); drbd_report_io_error(mdev, req); __drbd_chk_io_error____1(mdev, DRBD_READ_ERROR, "__req_mod"); case 21U: mod_rq_state(req, m, 1, 2); goto ldv_52190; case 4U: tmp___1 = drbd_interval_empty(& req->i); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( drbd_interval_empty(&req->i) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 623); } else { } drbd_insert_interval(& mdev->read_requests, & req->i); set_bit(0U, (unsigned long volatile *)(& mdev->flags)); if (((unsigned long )req->rq_state & 16UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_NET_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 628); } else { } if (((unsigned long )req->rq_state & 15UL) != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( (req->rq_state & RQ_LOCAL_MASK) == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 629); } else { } mod_rq_state(req, m, 0, 32); req->w.cb = & w_send_read_req; drbd_queue_work(& (mdev->tconn)->sender_work, & req->w); goto ldv_52190; case 3U: tmp___3 = drbd_interval_empty(& req->i); if (tmp___3) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( drbd_interval_empty(&req->i) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 641); } else { } drbd_insert_interval(& mdev->write_requests, & req->i); set_bit(0U, (unsigned long volatile *)(& mdev->flags)); if (((unsigned long )req->rq_state & 16UL) == 0UL) { dev_err((struct device const *)(& 
(mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_NET_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 664); } else { } mod_rq_state(req, m, 0, 131104); req->w.cb = & w_send_dblock; drbd_queue_work(& (mdev->tconn)->sender_work, & req->w); rcu_read_lock___0(); _________p1___0 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___5 = debug_lockdep_rcu_enabled(); if (tmp___5 != 0 && ! __warned___0) { tmp___6 = rcu_read_lock_held(); if (tmp___6 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 671, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1___0; p = (int )nc->max_epoch_size; rcu_read_unlock___0(); if ((mdev->tconn)->current_tle_writes >= (unsigned int )p) { start_new_tl_epoch(mdev->tconn); } else { } goto ldv_52190; case 5U: mod_rq_state(req, m, 0, 32); req->w.cb = & w_send_out_of_sync; drbd_queue_work(& (mdev->tconn)->sender_work, & req->w); goto ldv_52190; case 11U: ; case 6U: ; case 7U: mod_rq_state(req, m, 32, 0); goto ldv_52190; case 8U: ; if ((int )(req->master_bio)->bi_rw & 1 && ((unsigned long )req->rq_state & 98304UL) == 0UL) { if (((unsigned long )req->rq_state & 16UL) != 0UL) { mod_rq_state(req, m, 16, 256); } else { } } else { } mod_rq_state(req, m, 32, 64); goto ldv_52190; case 9U: mod_rq_state(req, m, 32, 128); goto ldv_52190; case 10U: mod_rq_state(req, m, 16656, 128); goto ldv_52190; case 15U: ; if (((unsigned long )req->rq_state & 16UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_NET_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 730); } else { } if (((unsigned long )req->rq_state & 65536UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_EXP_WRITE_ACK ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 731); } else { } mod_rq_state(req, m, 16, 384); goto ldv_52190; case 14U: req->rq_state = req->rq_state | 512U; case 13U: ; if (((unsigned long )req->rq_state & 65536UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_EXP_WRITE_ACK ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 738); } else { } goto ack_common; case 12U: ; if (((unsigned long )req->rq_state & 32768UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_EXP_RECEIVE_ACK ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 746); } else { } ack_common: ; if (((unsigned long 
)req->rq_state & 16UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_NET_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 751); } else { } mod_rq_state(req, m, 16, 256); goto ldv_52190; case 16U: ; if (((unsigned long )req->rq_state & 65536UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_EXP_WRITE_ACK ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 756); } else { } if (((unsigned long )req->rq_state & 16UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_NET_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 761); } else { } req->rq_state = req->rq_state | 8192U; if ((unsigned int )*((unsigned char *)req + 88UL) != 0U) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } goto ldv_52190; case 17U: mod_rq_state(req, m, 272, 0); goto ldv_52190; case 26U: ; if (((unsigned long )req->rq_state & 2UL) == 0UL) { goto ldv_52190; } else { } mod_rq_state(req, m, 16384, 0); goto ldv_52190; case 27U: ; if (((unsigned long )req->rq_state & 2UL) == 0UL) { goto ldv_52190; } else { } mod_rq_state(req, m, 16386, 1); rv = 2; if ((int )(req->master_bio)->bi_rw & 1) { rv = 1; } else { } _get_ldev_if_state(mdev, D_INCONSISTENT); req->w.cb = & w_restart_disk_io; drbd_queue_work(& (mdev->tconn)->sender_work, & req->w); goto ldv_52190; case 25U: ; if (((unsigned long )req->rq_state & 2048UL) == 0UL && (unsigned long )req->w.cb == (unsigned long )((int (*)(struct drbd_work * , int ))0)) { mod_rq_state(req, m, 16384, 0); goto ldv_52190; } else { } if (((unsigned long )req->rq_state & 256UL) == 0UL) { mod_rq_state(req, m, 16384, 48); if ((unsigned long )req->w.cb != (unsigned long )((int (*)(struct drbd_work * , int ))0)) { drbd_queue_work(& (mdev->tconn)->sender_work, & req->w); rv = ((unsigned long )req->rq_state & 2048UL) != 0UL ? 1 : 2; } else { } goto ldv_52190; } else { } case 18U: ; if (((unsigned long )req->rq_state & 2048UL) == 0UL) { goto ldv_52190; } else { } if (((unsigned long )req->rq_state & 16UL) != 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME (BARRIER_ACKED but pending)\n"); } else { } mod_rq_state(req, m, 16384, ((unsigned long )req->rq_state & 1008UL) != 0UL ? 
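/* The numeric masks in this function can be read off the adjacent ASSERT
   strings: 16UL pairs with RQ_NET_PENDING, 15UL with RQ_LOCAL_MASK, 32768UL
   with RQ_EXP_RECEIVE_ACK, 65536UL with RQ_EXP_WRITE_ACK; 2048UL evidently
   is RQ_WRITE, since it selects write vs. read accounting above. CIL folds
   the symbolic RQ_* constants to literals, so this mapping is inferred from
   those message texts rather than restated from the original headers. */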
128 : 0); goto ldv_52190; case 19U: ; if (((unsigned long )req->rq_state & 16UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->rq_state & RQ_NET_PENDING ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 842); } else { } mod_rq_state(req, m, 16, 384); goto ldv_52190; } ldv_52190: ; return (rv); } } static bool drbd_may_do_local_read(struct drbd_conf *mdev , sector_t sector , int size ) { unsigned long sbnr ; unsigned long ebnr ; sector_t esector ; sector_t nr_sectors ; int tmp ; { if ((unsigned int )*((unsigned char *)mdev + 749UL) == 16U) { return (1); } else { } if ((unsigned int )*((unsigned char *)mdev + 749UL) != 8U) { return (0); } else { } esector = ((sector_t )(size >> 9) + sector) - 1UL; nr_sectors = drbd_get_capacity(mdev->this_bdev); if (sector >= nr_sectors) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( sector < nr_sectors ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 868); } else { } if (esector >= nr_sectors) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( esector < nr_sectors ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 869); } else { } sbnr = sector >> 3; ebnr = esector >> 3; tmp = drbd_bm_count_bits(mdev, sbnr, ebnr); return (tmp == 0); } } static bool remote_due_to_read_balancing(struct drbd_conf *mdev , sector_t sector , enum drbd_read_balancing rbm ) { struct backing_dev_info *bdi ; int stripe_shift ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { switch ((unsigned int )rbm) { case 4U: bdi = & ((((mdev->ldev)->backing_bdev)->bd_disk)->queue)->backing_dev_info; tmp = bdi_read_congested(bdi); return (tmp != 0); case 3U: tmp___0 = atomic_read((atomic_t const *)(& mdev->local_cnt)); tmp___1 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); tmp___2 = atomic_read((atomic_t const *)(& mdev->rs_pending_cnt)); return (tmp___0 > tmp___1 + tmp___2); case 5U: ; case 6U: ; case 7U: ; case 8U: ; case 9U: ; case 10U: stripe_shift = (int )((unsigned int )rbm + 10U); return (((sector >> (stripe_shift + -9)) & 1UL) != 0UL); case 2U: tmp___3 = test_and_change_bit(22, (unsigned long volatile *)(& mdev->flags)); return (tmp___3 != 0); case 1U: ; return (1); case 0U: ; default: ; return (0); } } } static void complete_conflicting_writes(struct drbd_request *req ) { wait_queue_t wait ; struct task_struct *tmp ; struct drbd_conf *mdev ; struct drbd_interval *i ; sector_t sector ; int size ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; mdev = req->w.ldv_49807.mdev; sector = req->i.sector; size = (int )req->i.size; i = drbd_find_overlap(& mdev->write_requests, sector, (unsigned int )size); if ((unsigned long )i == (unsigned long )((struct drbd_interval *)0)) { return; } else { } ldv_52263: prepare_to_wait(& mdev->misc_wait, & wait, 2); i = drbd_find_overlap(& mdev->write_requests, sector, (unsigned int )size); if 
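/* complete_conflicting_writes(): the prepare_to_wait()/schedule()/
   finish_wait() loop below is CIL's expansion of a kernel wait_event-style
   construct; it sleeps until no entry in mdev->write_requests overlaps
   [sector, sector + size), dropping tconn->req_lock around schedule(). */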
((unsigned long )i == (unsigned long )((struct drbd_interval *)0)) { goto ldv_52262; } else { } i->waiting = -1; spin_unlock_irq(& (mdev->tconn)->req_lock); schedule(); spin_lock_irq(& (mdev->tconn)->req_lock); goto ldv_52263; ldv_52262: finish_wait(& mdev->misc_wait, & wait); return; } } static void maybe_pull_ahead(struct drbd_conf *mdev ) { struct drbd_tconn *tconn ; struct net_conf *nc ; bool congested ; enum drbd_on_congestion on_congestion ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; union drbd_state __ns ; union drbd_state __ns___0 ; { tconn = mdev->tconn; congested = 0; _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 951, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; on_congestion = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? (enum drbd_on_congestion )nc->on_congestion : OC_BLOCK; if ((unsigned int )on_congestion == 0U || tconn->agreed_pro_version <= 95) { return; } else { } tmp___1 = _get_ldev_if_state(mdev, D_UP_TO_DATE); if (tmp___1 == 0) { return; } else { } if (nc->cong_fill != 0U) { tmp___2 = atomic_read((atomic_t const *)(& mdev->ap_in_flight)); if ((__u32 )tmp___2 >= nc->cong_fill) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Congestion-fill threshold reached\n"); congested = 1; } else { } } else { } if ((mdev->act_log)->used >= nc->cong_extents) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Congestion-extents threshold reached\n"); congested = 1; } else { } if ((int )congested) { start_new_tl_epoch(mdev->tconn); if ((unsigned int )on_congestion == 1U) { __ns = drbd_read_state(mdev); __ns.ldv_40024.conn = 22U; _drbd_set_state(mdev, __ns, 0, 0); } else { __ns___0 = drbd_read_state(mdev); __ns___0.ldv_40024.conn = 1U; _drbd_set_state(mdev, __ns___0, 0, 0); } } else { } put_ldev(mdev); return; } } static bool do_remote_read(struct drbd_request *req ) { struct drbd_conf *mdev ; enum drbd_read_balancing rbm ; bool tmp ; int tmp___0 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___1 ; int tmp___2 ; bool tmp___3 ; { mdev = req->w.ldv_49807.mdev; if ((unsigned long )req->private_bio != (unsigned long )((struct bio *)0)) { tmp = drbd_may_do_local_read(mdev, req->i.sector, (int )req->i.size); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { bio_put(req->private_bio); req->private_bio = 0; put_ldev(mdev); } else { } } else { } if (*((unsigned int *)mdev + 187UL) != 65536U) { return (0); } else { } if ((unsigned long )req->private_bio == (unsigned long )((struct bio *)0)) { return (1); } else { } rcu_read_lock___0(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! 
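/* The recurring _________p1 / debug_lockdep_rcu_enabled() /
   lockdep_rcu_suspicious() blocks are the expansion of
   rcu_dereference_check() plus its lockdep instrumentation; the payload is
   just the RCU-protected pointer load. maybe_pull_ahead() above uses one to
   read net_conf and, once cong_fill (in-flight data) or cong_extents
   (active AL extents) is exceeded, starts a new epoch and either pulls
   ahead of or disconnects from the peer, depending on nc->on_congestion. */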
__warned) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1020, "suspicious rcu_dereference_check() usage"); } else { } } else { } rbm = (enum drbd_read_balancing )_________p1->read_balancing; rcu_read_unlock___0(); if ((unsigned int )rbm == 0U && (unsigned long )req->private_bio != (unsigned long )((struct bio *)0)) { return (0); } else { } tmp___3 = remote_due_to_read_balancing(mdev, req->i.sector, rbm); if ((int )tmp___3) { if ((unsigned long )req->private_bio != (unsigned long )((struct bio *)0)) { bio_put(req->private_bio); req->private_bio = 0; put_ldev(mdev); } else { } return (1); } else { } return (0); } } static int drbd_process_write_request(struct drbd_request *req ) { struct drbd_conf *mdev ; int remote ; int send_oos ; bool tmp ; bool tmp___0 ; bool tmp___1 ; long tmp___2 ; int tmp___3 ; { mdev = req->w.ldv_49807.mdev; rcu_read_lock___0(); tmp = drbd_should_do_remote(mdev->state); remote = (int )tmp; if (remote != 0) { maybe_pull_ahead(mdev); tmp___0 = drbd_should_do_remote(mdev->state); remote = (int )tmp___0; } else { } tmp___1 = drbd_should_send_out_of_sync(mdev->state); send_oos = (int )tmp___1; rcu_read_unlock___0(); tmp___2 = ldv__builtin_expect(req->i.size == 0U, 0L); if (tmp___2 != 0L) { if (((req->master_bio)->bi_rw & 4096UL) == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( req->master_bio->bi_rw & REQ_FLUSH ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1063); } else { } if (remote != 0) { start_new_tl_epoch(mdev->tconn); } else { } return (0); } else { } if (remote == 0 && send_oos == 0) { return (0); } else { } if (remote != 0 && send_oos != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !(remote && send_oos) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1072); } else { } if (remote != 0) { _req_mod(req, TO_BE_SENT); _req_mod(req, QUEUE_FOR_NET_WRITE); } else { tmp___3 = __drbd_set_out_of_sync(mdev, req->i.sector, (int )req->i.size, "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1077U); if (tmp___3 != 0) { _req_mod(req, QUEUE_FOR_SEND_OOS); } else { } } return (remote); } } static void drbd_submit_req_private_bio(struct drbd_request *req ) { struct drbd_conf *mdev ; struct bio *bio ; int rw ; int tmp ; int tmp___0 ; { mdev = req->w.ldv_49807.mdev; bio = req->private_bio; rw = (int const )bio->bi_rw & (int const )8193; bio->bi_bdev = (mdev->ldev)->backing_bdev; tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 != 0) { tmp = drbd_insert_fault(mdev, rw != 1 ? (rw == 0 ? 
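/* drbd_submit_req_private_bio() routes the private bio to the backing
   device; the ternary here picks the fault-injection type by request
   direction, matching the original's DRBD_FAULT_DT_WR / DT_RD / DT_RA
   selection (an inference from the folded constants). An injected fault,
   like a missing local disk, completes the bio with -5 (-EIO). */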
5U : 6U) : 4U); if (tmp != 0) { bio_endio(bio, -5); } else { generic_make_request(bio); } put_ldev(mdev); } else { bio_endio(bio, -5); } return; } } void __drbd_make_request(struct drbd_conf *mdev , struct bio *bio , unsigned long start_time ) { int rw ; struct bio_and_error m ; struct drbd_request *req ; bool no_remote ; int tmp ; int tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; long tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; { rw = (int const )bio->bi_rw & (int const )8193; m.bio = 0; m.error = 0; no_remote = 0; req = drbd_req_new(mdev, bio); if ((unsigned long )req == (unsigned long )((struct drbd_request *)0)) { dec_ap_bio(mdev); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "could not kmalloc() req\n"); bio_endio(bio, -12); return; } else { } req->start_time = start_time; tmp = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp == 0) { bio_put(req->private_bio); req->private_bio = 0; } else { } if ((rw == 1 && (unsigned long )req->private_bio != (unsigned long )((struct bio *)0)) && req->i.size != 0U) { tmp___0 = constant_test_bit(18U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___0 == 0) { req->rq_state = req->rq_state | 4096U; drbd_al_begin_io(mdev, & req->i); } else { } } else { } spin_lock_irq(& (mdev->tconn)->req_lock); if (rw == 1) { complete_conflicting_writes(req); } else { } tmp___1 = drbd_suspended(mdev); if (tmp___1 != 0) { req->rq_state = req->rq_state | 8192U; if ((unsigned long )req->private_bio != (unsigned long )((struct bio *)0)) { bio_put(req->private_bio); req->private_bio = 0; put_ldev(mdev); } else { } goto out; } else { } _drbd_start_io_acct(mdev, req, bio); if (rw != 1) { tmp___2 = do_remote_read(req); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3 && (unsigned long )req->private_bio == (unsigned long )((struct bio *)0)) { goto nodata; } else { } } else { } tmp___4 = atomic_read((atomic_t const *)(& (mdev->tconn)->current_tle_nr)); req->epoch = (unsigned int )tmp___4; tmp___5 = ldv__builtin_expect(req->i.size != 0U, 1L); if (tmp___5 != 0L) { if (rw == 1) { (mdev->tconn)->current_tle_writes = (mdev->tconn)->current_tle_writes + 1U; } else { } list_add_tail(& req->tl_requests, & (mdev->tconn)->transfer_log); } else { } if (rw == 1) { tmp___6 = drbd_process_write_request(req); if (tmp___6 == 0) { no_remote = 1; } else if ((unsigned long )req->private_bio == (unsigned long )((struct bio *)0)) { _req_mod(req, TO_BE_SENT); _req_mod(req, QUEUE_FOR_NET_READ); } else { no_remote = 1; } } else { } if ((unsigned long )req->private_bio != (unsigned long )((struct bio *)0)) { _req_mod(req, TO_BE_SUBMITTED); spin_unlock_irq(& (mdev->tconn)->req_lock); drbd_submit_req_private_bio(req); spin_lock_irq(& (mdev->tconn)->req_lock); } else if ((int )no_remote) { nodata: tmp___7 = ___ratelimit(& drbd_ratelimit_state, "__drbd_make_request"); if (tmp___7 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "IO ERROR: neither local nor remote data, sector %llu+%u\n", (unsigned long long )req->i.sector, req->i.size >> 9); } else { } } else { } out: tmp___8 = drbd_req_put_completion_ref(req, & m, 1); if (tmp___8 != 0) { kref_put(& req->kref, & drbd_req_destroy); } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); if ((unsigned long )m.bio != (unsigned long )((struct bio *)0)) { complete_master_bio(mdev, & m); } else { } return; } } void drbd_make_request(struct request_queue *q , struct bio *bio ) { struct drbd_conf *mdev ; unsigned long start_time ; { mdev = (struct drbd_conf 
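/* __drbd_make_request() above is the entry point for a new bio: allocate a
   drbd_request (failing the bio with -12/-ENOMEM on allocation failure),
   open the activity-log extents for a local write, wait out conflicting
   writes, tag the request with the current epoch, then submit locally
   and/or queue it for the peer; a request that can go nowhere is failed
   with the rate-limited "neither local nor remote data" error.
   drbd_make_request() below is the block-layer hook: it checks 512-byte
   alignment, accounts the bio, and calls the above. */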
*)q->queuedata; start_time = jiffies; if ((bio->bi_size & 511U) != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( IS_ALIGNED(bio->bi_size, 512) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1240); } else { } inc_ap_bio(mdev); __drbd_make_request(mdev, bio, start_time); return; } } int drbd_merge_bvec(struct request_queue *q , struct bvec_merge_data *bvm , struct bio_vec *bvec ) { struct drbd_conf *mdev ; unsigned int bio_size ; int limit ; int backing_limit ; struct request_queue *b ; int _min1 ; int _min2 ; int tmp ; { mdev = (struct drbd_conf *)q->queuedata; bio_size = bvm->bi_size; limit = 1048576; if (bio_size != 0U) { tmp = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp != 0) { b = (((mdev->ldev)->backing_bdev)->bd_disk)->queue; if ((unsigned long )b->merge_bvec_fn != (unsigned long )((merge_bvec_fn *)0)) { backing_limit = (*(b->merge_bvec_fn))(b, bvm, bvec); _min1 = limit; _min2 = backing_limit; limit = _min1 < _min2 ? _min1 : _min2; } else { } put_ldev(mdev); } else { } } else { } return (limit); } } struct drbd_request *find_oldest_request(struct drbd_tconn *tconn ) { struct drbd_request *r ; struct list_head const *__mptr ; int tmp ; struct list_head const *__mptr___0 ; { __mptr = (struct list_head const *)tconn->transfer_log.next; r = (struct drbd_request *)__mptr + 0xffffffffffffff98UL; goto ldv_52338; ldv_52337: tmp = atomic_read((atomic_t const *)(& r->completion_ref)); if (tmp != 0) { return (r); } else { } __mptr___0 = (struct list_head const *)r->tl_requests.next; r = (struct drbd_request *)__mptr___0 + 0xffffffffffffff98UL; ldv_52338: ; if ((unsigned long )(& r->tl_requests) != (unsigned long )(& tconn->transfer_log)) { goto ldv_52337; } else { } return (0); } } void request_timer_fn(unsigned long data ) { struct drbd_conf *mdev ; struct drbd_tconn *tconn ; struct drbd_request *req ; struct net_conf *nc ; unsigned long ent ; unsigned long dt ; unsigned long et ; unsigned long nt ; unsigned long now ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; struct disk_conf *_________p1___0 ; bool __warned___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; unsigned long __x ; unsigned long __y ; unsigned long _min1 ; unsigned long _min2 ; unsigned long tmp___4 ; unsigned long tmp___5 ; union drbd_state __ns ; { mdev = (struct drbd_conf *)data; tconn = mdev->tconn; ent = 0UL; dt = 0UL; rcu_read_lock___0(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1299, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((unsigned long )nc != (unsigned long )((struct net_conf *)0) && (int )mdev->state.ldv_49522.conn > 8) { ent = (unsigned long )(((nc->timeout * 250U) / 10U) * nc->ko_count); } else { } tmp___3 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___3 != 0) { _________p1___0 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! 
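/* request_timer_fn() computes two deadlines: ent = network timeout *
   ko-count and dt = disk_timeout, both configured in 0.1-second units; the
   (x * 250U) / 10U scaling converts to jiffies and matches HZ=250 in this
   build (an inference from the folded constants). */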
__warned___0) { tmp___2 = rcu_read_lock_held(); if (tmp___2 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_req.c.prepared", 1304, "suspicious rcu_dereference_check() usage"); } else { } } else { } dt = (unsigned long )((_________p1___0->disk_timeout * 250U) / 10U); put_ldev(mdev); } else { } rcu_read_unlock___0(); __x = dt; __y = ent; if (__x != 0UL) { if (__y != 0UL) { _min1 = __x; _min2 = __y; tmp___4 = _min1 < _min2 ? _min1 : _min2; } else { tmp___4 = __x; } tmp___5 = tmp___4; } else { tmp___5 = __y; } et = tmp___5; if (et == 0UL) { return; } else { } now = jiffies; spin_lock_irq(& tconn->req_lock); req = find_oldest_request(tconn); if ((unsigned long )req == (unsigned long )((struct drbd_request *)0)) { spin_unlock_irq(& tconn->req_lock); mod_timer(& mdev->request_timer, now + et); return; } else { } if (((ent != 0UL && ((unsigned long )req->rq_state & 16UL) != 0UL) && ((1 != 0 && 1 != 0) && (long )(req->start_time + ent) - (long )now < 0L)) && (((1 == 0 || 1 == 0) || (long )now - (long )tconn->last_reconnect_jif < 0L) || ((1 == 0 || 1 == 0) || (long )(tconn->last_reconnect_jif + ent) - (long )now < 0L))) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Remote failed to finish a request within ko-count * timeout\n"); __ns = drbd_read_state(mdev); __ns.ldv_40024.conn = 3U; _drbd_set_state(mdev, __ns, 3, 0); } else { } if ((((dt != 0UL && (int )req->rq_state & 1) && (unsigned long )req->w.ldv_49807.mdev == (unsigned long )mdev) && ((1 != 0 && 1 != 0) && (long )(req->start_time + dt) - (long )now < 0L)) && (((1 == 0 || 1 == 0) || (long )now - (long )mdev->last_reattach_jif < 0L) || ((1 == 0 || 1 == 0) || (long )(mdev->last_reattach_jif + dt) - (long )now < 0L))) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local backing device failed to meet the disk-timeout\n"); __drbd_chk_io_error____1(mdev, DRBD_FORCE_DETACH, "request_timer_fn"); } else { } nt = ((1 == 0 || 1 == 0) || (long )(req->start_time + et) - (long )now >= 0L ? 
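/* Above, et = min(dt, ent) with zero meaning "unset"; the
   (long)(a) - (long)(b) < 0L patterns are CIL-folded time_before()/
   time_after() jiffies comparisons, and the constant (1 != 0 && 1 != 0)
   guards are the remains of their compile-time type checks. The timer is
   re-armed for the oldest request's start_time + et, or now + et if that
   deadline already passed. */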
req->start_time : now) + et; spin_unlock_irq(& tconn->req_lock); mod_timer(& mdev->request_timer, nt); return; } } void ldv_mutex_lock_149(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_150(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_151(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_152(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_153(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_154(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_155(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_156(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_157(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_158(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_159(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } extern void __cmpxchg_wrong_size(void) ; __inline static int atomic_cmpxchg(atomic_t *v , int old , int new ) { int __ret ; int __old ; int __new ; u8 volatile *__ptr ; u16 volatile *__ptr___0 ; u32 volatile *__ptr___1 ; u64 volatile *__ptr___2 ; { __old = old; __new = new; switch (4UL) { case 1UL: __ptr = (u8 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgb %2,%1": "=a" (__ret), "+m" (*__ptr): "q" (__new), "0" (__old): "memory"); goto ldv_5490; case 2UL: __ptr___0 = (u16 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgw %2,%1": "=a" (__ret), "+m" (*__ptr___0): "r" (__new), "0" (__old): "memory"); goto ldv_5490; case 4UL: __ptr___1 = (u32 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgl %2,%1": "=a" (__ret), "+m" (*__ptr___1): "r" (__new), "0" (__old): "memory"); goto ldv_5490; case 8UL: __ptr___2 = (u64 volatile *)(& v->counter); __asm__ volatile (".pushsection .smp_locks,\"a\"\n.balign 4\n.long 671f - .\n.popsection\n671:\n\tlock; cmpxchgq %2,%1": "=a" (__ret), "+m" (*__ptr___2): "r" (__new), "0" (__old): "memory"); goto ldv_5490; default: __cmpxchg_wrong_size(); } ldv_5490: ; return (__ret); } } int ldv_mutex_trylock_174(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_172(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_175(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_177(struct mutex *ldv_func_arg1 ) ; void 
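/* The ldv_mutex_*_NNN wrappers pair each mutex_lock/unlock/trylock call
   site with the LDV verifier's lock-state model; in ldv_mutex_trylock_152
   the model's return value replaces the real one, leaving the second
   return unreachable by construction. In atomic_cmpxchg() the switch (4UL)
   statically selects the 32-bit "lock cmpxchgl" arm; the other size arms
   are dead code. */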
ldv_mutex_unlock_179(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_181(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_171(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_173(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_176(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_178(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_180(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___4(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6287; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6287; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6287; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6287; default: __bad_percpu_size(); } ldv_6287: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void __rcu_read_lock___4(void) { struct thread_info *tmp ; { tmp = current_thread_info___4(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___4(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___4(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___4(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___4(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___4(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
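/* Identifiers suffixed ___0, ___4, ... (current_thread_info___4,
   rcu_read_lock___4, and so on) appear to be CIL's renamings of static
   inline helpers that occur in several of the merged translation units;
   each suffix denotes one unit's copy of the same function. */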
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___4(); return; } } extern u32 crc32c(u32 , void const * , unsigned int ) ; extern void lc_reset(struct lru_cache * ) ; extern void lc_del(struct lru_cache * , struct lc_element * ) ; extern struct lc_element *lc_try_get(struct lru_cache * , unsigned int ) ; extern struct lc_element *lc_get(struct lru_cache * , unsigned int ) ; extern unsigned int lc_put(struct lru_cache * , struct lc_element * ) ; extern void lc_committed(struct lru_cache * ) ; __inline static int lc_try_lock_for_transaction(struct lru_cache *lc ) { int tmp ; { tmp = test_and_set_bit(2, (unsigned long volatile *)(& lc->flags)); return (tmp == 0); } } __inline static void lc_unlock(struct lru_cache *lc ) { { clear_bit(1, (unsigned long volatile *)(& lc->flags)); clear_bit_unlock(2U, (unsigned long volatile *)(& lc->flags)); return; } } extern bool lc_is_used(struct lru_cache * , unsigned int ) ; extern struct lc_element *lc_element_by_index(struct lru_cache * , unsigned int ) ; void *drbd_md_get_buffer(struct drbd_conf *mdev ) ; int drbd_md_sync_page_io(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , sector_t sector , int rw ) ; void drbd_al_shrink(struct drbd_conf *mdev ) ; void drbd_bcast_event(struct drbd_conf *mdev , struct sib_info const *sib ) ; __inline static void __drbd_chk_io_error____2(struct drbd_conf *mdev , enum drbd_force_detach_flags df , char const *where ) { enum drbd_io_error_p ep ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; union drbd_state __ns ; union drbd_state __ns___0 ; { rcu_read_lock___4(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1682, "suspicious rcu_dereference_check() usage"); } else { } } else { } ep = (enum drbd_io_error_p )_________p1->on_io_error; rcu_read_unlock___4(); switch ((unsigned int )ep) { case 0U: ; if ((unsigned int )df == 0U || (unsigned int )df == 1U) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "__drbd_chk_io_error_"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s.\n", where); } else { } if ((int )mdev->state.ldv_49522.disk > 4) { __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 4U; _drbd_set_state(mdev, __ns, CS_HARD, 0); } else { } goto ldv_50786; } else { } case 2U: ; case 1U: set_bit(12U, (unsigned long volatile *)(& mdev->flags)); if ((unsigned int )df == 0U) { set_bit(13U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((unsigned int )df == 3U) { set_bit(14U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((int )mdev->state.ldv_49522.disk > 2) { __ns___0 = drbd_read_state(mdev); __ns___0.ldv_40024.disk = 2U; _drbd_set_state(mdev, __ns___0, CS_HARD, 0); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s. 
Detaching...\n", where); } else { } goto ldv_50786; } ldv_50786: ; return; } } __inline static void drbd_chk_io_error____0(struct drbd_conf *mdev , int error , enum drbd_force_detach_flags forcedetach , char const *where ) { unsigned long flags ; raw_spinlock_t *tmp ; { if (error != 0) { tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); __drbd_chk_io_error____2(mdev, forcedetach, where); spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); } else { } return; } } __inline static sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev ) { int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; sector_t tmp___1 ; { rcu_read_lock___4(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1775, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___4(); tmp___1 = _drbd_md_first_sector(meta_dev_idx, bdev); return (tmp___1); } } __inline static sector_t drbd_md_last_sector___0(struct drbd_backing_dev *bdev ) { int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; { rcu_read_lock___4(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1790, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___4(); switch (meta_dev_idx) { case -1: ; case -3: ; return ((sector_t )(bdev->md.md_offset + 7ULL)); case -2: ; default: ; return ((sector_t )(bdev->md.md_offset + (u64 )bdev->md.md_size_sect)); } } } __inline static void drbd_queue_work_front(struct drbd_work_queue *q , struct drbd_work *w ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& q->q_lock); flags = _raw_spin_lock_irqsave(tmp); list_add(& w->list, & q->q); spin_unlock_irqrestore(& q->q_lock, flags); __wake_up(& q->q_wait, 3U, 1, 0); return; } } static int al_write_transaction(struct drbd_conf *mdev ) ; void *drbd_md_get_buffer(struct drbd_conf *mdev ) { int r ; wait_queue_t __wait ; struct task_struct *tmp ; void *tmp___0 ; void *tmp___1 ; { r = atomic_cmpxchg(& mdev->md_io_in_use, 0, 1); if (r == 0 || (int )mdev->state.ldv_49522.disk <= 2) { goto ldv_51127; } else { } tmp = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51130: prepare_to_wait(& mdev->misc_wait, & __wait, 2); r = atomic_cmpxchg(& mdev->md_io_in_use, 0, 1); if (r == 0 || (int )mdev->state.ldv_49522.disk <= 2) { goto ldv_51129; } else { } schedule(); goto ldv_51130; ldv_51129: finish_wait(& mdev->misc_wait, & __wait); ldv_51127: ; if (r == 0) { tmp___0 = lowmem_page_address((struct page const *)mdev->md_io_page); tmp___1 = tmp___0; } else { tmp___1 = 0; } return (tmp___1); } } void drbd_md_put_buffer(struct drbd_conf *mdev ) { int tmp ; { tmp = atomic_dec_and_test(& mdev->md_io_in_use); if (tmp != 0) { __wake_up(& mdev->misc_wait, 3U, 1, 0); 
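/* drbd_md_get_buffer()/drbd_md_put_buffer() above gate exclusive use of
   the single meta-data IO page: atomic_cmpxchg(&md_io_in_use, 0, 1) either
   claims the buffer or the caller sleeps on misc_wait, giving up (returning
   NULL) if the disk state has dropped too low; the put side releases with
   atomic_dec_and_test() and wakes any waiter. */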
} else { } return; } } void wait_until_done_or_force_detached(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , unsigned int *done ) { long dt ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; long __ret ; wait_queue_t __wait ; struct task_struct *tmp___1 ; int tmp___2 ; int tmp___3 ; { rcu_read_lock___4(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 220, "suspicious rcu_dereference_check() usage"); } else { } } else { } dt = (long )_________p1->disk_timeout; rcu_read_unlock___4(); dt = (dt * 250L) / 10L; if (dt == 0L) { dt = 9223372036854775807L; } else { } __ret = dt; if (*done == 0U) { tmp___3 = constant_test_bit(14U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___3 == 0) { tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51146: prepare_to_wait(& mdev->misc_wait, & __wait, 2); if (*done != 0U) { goto ldv_51145; } else { tmp___2 = constant_test_bit(14U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 != 0) { goto ldv_51145; } else { } } __ret = schedule_timeout(__ret); if (__ret == 0L) { goto ldv_51145; } else { } goto ldv_51146; ldv_51145: finish_wait(& mdev->misc_wait, & __wait); } else { } } else { } dt = __ret; if (dt == 0L) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "meta-data IO operation timed out\n"); drbd_chk_io_error____0(mdev, 1, DRBD_FORCE_DETACH, "wait_until_done_or_force_detached"); } else { } return; } } static int _drbd_md_sync_page_io(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , struct page *page , sector_t sector , int rw , int size ) { struct bio *bio ; int err ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { mdev->md_io.done = 0U; mdev->md_io.error = -19; if (rw & 1) { tmp = constant_test_bit(7U, (unsigned long const volatile *)(& mdev->flags)); if (tmp == 0) { rw = rw | 6144; } else { } } else { } rw = rw | 16; bio = bio_alloc_drbd(16U); bio->bi_bdev = bdev->md_bdev; bio->bi_sector = sector; err = -5; tmp___0 = bio_add_page(bio, page, (unsigned int )size, 0U); if (tmp___0 != size) { goto out; } else { } bio->bi_private = (void *)(& mdev->md_io); bio->bi_end_io = & drbd_md_io_complete; bio->bi_rw = (unsigned long )rw; tmp___1 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___1 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: get_ldev_if_state() == 1 in _drbd_md_sync_page_io()\n"); err = -19; goto out; } else { } atomic_inc(& bio->bi_cnt); atomic_inc(& mdev->md_io_in_use); tmp___2 = drbd_insert_fault(mdev, rw & 1 ? 
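/* _drbd_md_sync_page_io() above performs synchronous one-page meta-data
   IO. For writes it ORs in 6144 = 4096|2048 - 4096 is REQ_FLUSH per the
   assert string in drbd_process_write_request(), 2048 then presumably
   REQ_FUA - unless flag bit 7 of mdev->flags is set, which suppresses
   flush/FUA for meta data; rw |= 16 is presumably REQ_SYNC. The 0U/1U
   fault types chosen here are the meta-data write/read counterparts of the
   data-path types above, and completion is awaited by
   wait_until_done_or_force_detached(), which caps the wait at disk_timeout
   and force-detaches on expiry. */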
0U : 1U); if (tmp___2 != 0) { bio_endio(bio, -5); } else { submit_bio(rw, bio); } wait_until_done_or_force_detached(mdev, bdev, & mdev->md_io.done); if ((int )bio->bi_flags & 1) { err = mdev->md_io.error; } else { } out: bio_put(bio); return (err); } } int drbd_md_sync_page_io(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , sector_t sector , int rw ) { int err ; struct page *iop ; int tmp ; long tmp___0 ; struct _ddebug descriptor ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; long tmp___3 ; struct task_struct *tmp___4 ; struct task_struct *tmp___5 ; sector_t tmp___6 ; sector_t tmp___7 ; { iop = mdev->md_io_page; tmp = atomic_read((atomic_t const *)(& mdev->md_io_in_use)); if (tmp != 1) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( atomic_read(&mdev->md_io_in_use) == 1 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 286); } else { } tmp___0 = ldv__builtin_expect((unsigned long )bdev->md_bdev == (unsigned long )((struct block_device *)0), 0L); if (tmp___0 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared"), "i" (288), "i" (12UL)); ldv_51168: ; goto ldv_51168; } else { } descriptor.modname = "drbd"; descriptor.function = "drbd_md_sync_page_io"; descriptor.filename = "/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared"; descriptor.format = "meta_data io: %s [%d]:%s(,%llus,%s)\n"; descriptor.lineno = 292U; descriptor.flags = 0U; tmp___3 = ldv__builtin_expect((long )descriptor.flags & 1L, 0L); if (tmp___3 != 0L) { tmp___1 = get_current(); tmp___2 = get_current(); __dynamic_dev_dbg(& descriptor, (struct device const *)(& (mdev->vdisk)->part0.__dev), "meta_data io: %s [%d]:%s(,%llus,%s)\n", (char *)(& tmp___2->comm), tmp___1->pid, "drbd_md_sync_page_io", (unsigned long long )sector, rw & 1 ? (char *)"WRITE" : (char *)"READ"); } else { } tmp___6 = drbd_md_first_sector(bdev); if (tmp___6 > sector) { tmp___4 = get_current(); tmp___5 = get_current(); dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s [%d]:%s(,%llus,%s) out of range md access!\n", (char *)(& tmp___5->comm), tmp___4->pid, "drbd_md_sync_page_io", (unsigned long long )sector, rw & 1 ? (char *)"WRITE" : (char *)"READ"); } else { tmp___7 = drbd_md_last_sector___0(bdev); if (sector + 7UL > tmp___7) { tmp___4 = get_current(); tmp___5 = get_current(); dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s [%d]:%s(,%llus,%s) out of range md access!\n", (char *)(& tmp___5->comm), tmp___4->pid, "drbd_md_sync_page_io", (unsigned long long )sector, rw & 1 ? (char *)"WRITE" : (char *)"READ"); } else { } } err = _drbd_md_sync_page_io(mdev, bdev, iop, sector, rw, 4096); if (err != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_md_sync_page_io(,%llus,%s) failed with error %d\n", (unsigned long long )sector, rw & 1 ? 
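/* drbd_md_sync_page_io() wraps the above with ownership and range checks:
   the struct _ddebug descriptor plus __dynamic_dev_dbg() block is the
   expansion of a dynamic-debug dev_dbg(), and any access outside
   [drbd_md_first_sector, drbd_md_last_sector] of the meta-data area is
   reported as an "out of range md access". */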
(char *)"WRITE" : (char *)"READ", err); } else { } return (err); } } static struct lc_element *_al_get(struct drbd_conf *mdev , unsigned int enr ) { struct lc_element *al_ext ; struct lc_element *tmp ; int wake ; struct bm_extent *bm_ext ; struct lc_element const *__mptr ; int tmp___0 ; int tmp___1 ; long tmp___2 ; { spin_lock_irq(& mdev->al_lock); tmp = lc_find(mdev->resync, enr / 4U); tmp___2 = ldv__builtin_expect((unsigned long )tmp != (unsigned long )((struct lc_element *)0), 0L); if (tmp___2 != 0L) { __mptr = (struct lc_element const *)tmp; bm_ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; tmp___1 = constant_test_bit(0U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___1 != 0) { tmp___0 = test_and_set_bit(2, (unsigned long volatile *)(& bm_ext->flags)); wake = tmp___0 == 0; spin_unlock_irq(& mdev->al_lock); if (wake != 0) { __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } return (0); } else { } } else { } al_ext = lc_get(mdev->act_log, enr); spin_unlock_irq(& mdev->al_lock); return (al_ext); } } void drbd_al_begin_io(struct drbd_conf *mdev , struct drbd_interval *i ) { unsigned int first ; unsigned int last ; unsigned int enr ; bool locked ; int tmp ; struct lc_element *tmp___0 ; wait_queue_t __wait ; struct task_struct *tmp___1 ; struct lc_element *tmp___2 ; int tmp___3 ; wait_queue_t __wait___0 ; struct task_struct *tmp___4 ; int tmp___5 ; bool write_al_updates ; struct disk_conf *_________p1 ; bool __warned ; int tmp___6 ; int tmp___7 ; { first = (unsigned int )(i->sector >> 13); last = i->size != 0U ? (unsigned int )(((i->sector + (sector_t )(i->size >> 9)) - 1UL) >> 13) : first; locked = 0; if (first > last) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( first <= last ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 341); } else { } tmp = atomic_read((atomic_t const *)(& mdev->local_cnt)); if (tmp <= 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( atomic_read(&mdev->local_cnt) > 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 342); } else { } enr = first; goto ldv_51194; ldv_51193: tmp___0 = _al_get(mdev, enr); if ((unsigned long )tmp___0 != (unsigned long )((struct lc_element *)0)) { goto ldv_51189; } else { } tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51192: prepare_to_wait(& mdev->al_wait, & __wait, 2); tmp___2 = _al_get(mdev, enr); if ((unsigned long )tmp___2 != (unsigned long )((struct lc_element *)0)) { goto ldv_51191; } else { } schedule(); goto ldv_51192; ldv_51191: finish_wait(& mdev->al_wait, & __wait); ldv_51189: enr = enr + 1U; ldv_51194: ; if (enr <= last) { goto ldv_51193; } else { } if ((mdev->act_log)->pending_changes == 0U) { goto ldv_51196; } else { tmp___3 = lc_try_lock_for_transaction(mdev->act_log); locked = tmp___3 != 0; if ((int )locked) { goto ldv_51196; } else { } } tmp___4 = get_current(); __wait___0.flags = 0U; __wait___0.private = (void *)tmp___4; __wait___0.func = & autoremove_wake_function; __wait___0.task_list.next = & __wait___0.task_list; 
__wait___0.task_list.prev = & __wait___0.task_list; ldv_51199: prepare_to_wait(& mdev->al_wait, & __wait___0, 2); if ((mdev->act_log)->pending_changes == 0U) { goto ldv_51198; } else { tmp___5 = lc_try_lock_for_transaction(mdev->act_log); locked = tmp___5 != 0; if ((int )locked) { goto ldv_51198; } else { } } schedule(); goto ldv_51199; ldv_51198: finish_wait(& mdev->al_wait, & __wait___0); ldv_51196: ; if ((int )locked) { if ((mdev->act_log)->pending_changes != 0U) { rcu_read_lock___4(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___6 = debug_lockdep_rcu_enabled(); if (tmp___6 != 0 && ! __warned) { tmp___7 = rcu_read_lock_held(); if (tmp___7 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 368, "suspicious rcu_dereference_check() usage"); } else { } } else { } write_al_updates = (int )((signed char )_________p1->al_updates) != 0; rcu_read_unlock___4(); if ((int )write_al_updates) { al_write_transaction(mdev); mdev->al_writ_cnt = mdev->al_writ_cnt + 1U; } else { } spin_lock_irq(& mdev->al_lock); lc_committed(mdev->act_log); spin_unlock_irq(& mdev->al_lock); } else { } lc_unlock(mdev->act_log); __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } return; } } void drbd_al_complete_io(struct drbd_conf *mdev , struct drbd_interval *i ) { unsigned int first ; unsigned int last ; unsigned int enr ; struct lc_element *extent ; unsigned long flags ; raw_spinlock_t *tmp ; { first = (unsigned int )(i->sector >> 13); last = i->size != 0U ? (unsigned int )(((i->sector + (sector_t )(i->size >> 9)) - 1UL) >> 13) : first; if (first > last) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( first <= last ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 399); } else { } tmp = spinlock_check(& mdev->al_lock); flags = _raw_spin_lock_irqsave(tmp); enr = first; goto ldv_51218; ldv_51217: extent = lc_find(mdev->act_log, enr); if ((unsigned long )extent == (unsigned long )((struct lc_element *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "al_complete_io() called on inactive extent %u\n", enr); goto ldv_51216; } else { } lc_put(mdev->act_log, extent); ldv_51216: enr = enr + 1U; ldv_51218: ; if (enr <= last) { goto ldv_51217; } else { } spin_unlock_irqrestore(& mdev->al_lock, flags); __wake_up(& mdev->al_wait, 3U, 1, 0); return; } } static unsigned int al_extent_to_bm_page(unsigned int al_enr ) { { return (al_enr >> 5); } } static unsigned int rs_extent_to_bm_page(unsigned int rs_enr ) { { return (rs_enr >> 3); } } static int _al_write_transaction(struct drbd_conf *mdev ) { struct al_transaction_on_disk *buffer ; struct lc_element *e ; sector_t sector ; int i ; int mx ; unsigned int extent_nr ; unsigned int crc ; int err ; char const *tmp ; int tmp___0 ; char const *tmp___1 ; void *tmp___2 ; __u32 tmp___3 ; struct list_head const *__mptr ; __u16 tmp___4 ; __u32 tmp___5 ; unsigned int tmp___6 ; struct list_head const *__mptr___0 ; long tmp___7 ; __u16 tmp___8 ; __u16 tmp___9 ; __u16 tmp___10 ; int __min1 ; int __min2 ; unsigned int idx ; struct lc_element *tmp___11 ; __u32 tmp___12 ; __u32 tmp___13 ; int tmp___14 ; int tmp___15 ; { crc = 0U; err = 0; tmp___0 = 
_get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 == 0) { tmp = drbd_disk_str((enum drbd_disk_state )mdev->state.ldv_49522.disk); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "disk is %s, cannot start al transaction\n", tmp); return (-5); } else { } if ((int )mdev->state.ldv_49522.disk <= 3) { tmp___1 = drbd_disk_str((enum drbd_disk_state )mdev->state.ldv_49522.disk); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "disk is %s, cannot write al transaction\n", tmp___1); put_ldev(mdev); return (-5); } else { } tmp___2 = drbd_md_get_buffer(mdev); buffer = (struct al_transaction_on_disk *)tmp___2; if ((unsigned long )buffer == (unsigned long )((struct al_transaction_on_disk *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "disk failed while waiting for md_io buffer\n"); put_ldev(mdev); return (-19); } else { } memset((void *)buffer, 0, 4096UL); buffer->magic = 2724580201U; tmp___3 = __fswab32(mdev->al_tr_number); buffer->tr_number = tmp___3; i = 0; spin_lock_irq(& mdev->al_lock); __mptr = (struct list_head const *)(mdev->act_log)->to_be_changed.next; e = (struct lc_element *)__mptr + 0xfffffffffffffff0UL; goto ldv_51243; ldv_51242: ; if (i == 64) { i = i + 1; goto ldv_51241; } else { } tmp___4 = __fswab16((int )((__u16 )e->lc_index)); buffer->update_slot_nr[i] = tmp___4; tmp___5 = __fswab32(e->lc_new_number); buffer->update_extent_nr[i] = tmp___5; if (e->lc_number != 4294967295U) { tmp___6 = al_extent_to_bm_page(e->lc_number); drbd_bm_mark_for_writeout(mdev, (int )tmp___6); } else { } i = i + 1; __mptr___0 = (struct list_head const *)e->list.next; e = (struct lc_element *)__mptr___0 + 0xfffffffffffffff0UL; ldv_51243: ; if ((unsigned long )(& e->list) != (unsigned long )(& (mdev->act_log)->to_be_changed)) { goto ldv_51242; } else { } ldv_51241: spin_unlock_irq(& mdev->al_lock); tmp___7 = ldv__builtin_expect(i > 64, 0L); if (tmp___7 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared"), "i" (498), "i" (12UL)); ldv_51244: ; goto ldv_51244; } else { } tmp___8 = __fswab16((int )((__u16 )i)); buffer->n_updates = tmp___8; goto ldv_51246; ldv_51245: buffer->update_slot_nr[i] = 65535U; buffer->update_extent_nr[i] = 4294967295U; i = i + 1; ldv_51246: ; if (i <= 63) { goto ldv_51245; } else { } tmp___9 = __fswab16((int )((__u16 )(mdev->act_log)->nr_elements)); buffer->context_size = tmp___9; tmp___10 = __fswab16((int )((__u16 )mdev->al_tr_cycle)); buffer->context_start_slot_nr = tmp___10; __min1 = 919; __min2 = (int )((mdev->act_log)->nr_elements - (unsigned int )mdev->al_tr_cycle); mx = __min1 < __min2 ? 
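/* _al_write_transaction() serializes one AL transaction into a zeroed
   4 KiB buffer: a magic, the transaction number, at most 64 update slots
   (unused slots filled with 0xffff / 0xffffffff), a 919-entry window of AL
   context advanced by al_tr_cycle on each transaction, and a trailing
   crc32c over the whole page; the __fswab16/__fswab32 calls are byte-swaps
   to the big-endian on-disk format, and al_tr_pos then cycles through the
   8 on-disk transaction slots. */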
__min1 : __min2; i = 0; goto ldv_51253; ldv_51252: idx = (unsigned int )(mdev->al_tr_cycle + i); tmp___11 = lc_element_by_index(mdev->act_log, idx); extent_nr = tmp___11->lc_number; tmp___12 = __fswab32(extent_nr); buffer->context[i] = tmp___12; i = i + 1; ldv_51253: ; if (i < mx) { goto ldv_51252; } else { } goto ldv_51256; ldv_51255: buffer->context[i] = 4294967295U; i = i + 1; ldv_51256: ; if (i <= 918) { goto ldv_51255; } else { } mdev->al_tr_cycle = mdev->al_tr_cycle + 919; if ((unsigned int )mdev->al_tr_cycle >= (mdev->act_log)->nr_elements) { mdev->al_tr_cycle = 0; } else { } sector = (sector_t )(((mdev->ldev)->md.md_offset + (u64 )(mdev->ldev)->md.al_offset) + (u64 )(mdev->al_tr_pos * 8)); crc = crc32c(0U, (void const *)buffer, 4096U); tmp___13 = __fswab32(crc); buffer->crc32c = tmp___13; tmp___15 = drbd_bm_write_hinted(mdev); if (tmp___15 != 0) { err = -5; } else { tmp___14 = drbd_md_sync_page_io(mdev, mdev->ldev, sector, 1); if (tmp___14 != 0) { err = -5; drbd_chk_io_error____0(mdev, 1, DRBD_META_IO_ERROR, "_al_write_transaction"); } else { mdev->al_tr_pos = (mdev->al_tr_pos + 1) % 8; mdev->al_tr_number = mdev->al_tr_number + 1U; } } drbd_md_put_buffer(mdev); put_ldev(mdev); return (err); } } static int w_al_write_transaction(struct drbd_work *w , int unused ) { struct update_al_work *aw ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; int err ; { __mptr = (struct drbd_work const *)w; aw = (struct update_al_work *)__mptr; mdev = w->ldv_49807.mdev; err = _al_write_transaction(mdev); aw->err = err; complete(& aw->event); return (err != -5 ? err : 0); } } static int al_write_transaction(struct drbd_conf *mdev ) { struct update_al_work al_work ; int tmp ; struct task_struct *tmp___0 ; { tmp___0 = get_current(); if ((unsigned long )tmp___0 == (unsigned long )(mdev->tconn)->worker.task) { tmp = _al_write_transaction(mdev); return (tmp); } else { } init_completion(& al_work.event); al_work.w.cb = & w_al_write_transaction; al_work.w.ldv_49807.mdev = mdev; drbd_queue_work_front(& (mdev->tconn)->sender_work, & al_work.w); wait_for_completion(& al_work.event); return (al_work.err); } } static int _try_lc_del(struct drbd_conf *mdev , struct lc_element *al_ext ) { int rv ; long tmp ; { spin_lock_irq(& mdev->al_lock); rv = al_ext->refcnt == 0U; tmp = ldv__builtin_expect(rv != 0, 1L); if (tmp != 0L) { lc_del(mdev->act_log, al_ext); } else { } spin_unlock_irq(& mdev->al_lock); return (rv); } } void drbd_al_shrink(struct drbd_conf *mdev ) { struct lc_element *al_ext ; int i ; int tmp ; int tmp___0 ; wait_queue_t __wait ; struct task_struct *tmp___1 ; int tmp___2 ; { tmp = constant_test_bit(2U, (unsigned long const volatile *)(& (mdev->act_log)->flags)); if (tmp == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( test_bit(__LC_LOCKED, &mdev->act_log->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 608); } else { } i = 0; goto ldv_51288; ldv_51287: al_ext = lc_element_by_index(mdev->act_log, (unsigned int )i); if (al_ext->lc_number == 4294967295U) { goto ldv_51282; } else { } tmp___0 = _try_lc_del(mdev, al_ext); if (tmp___0 != 0) { goto ldv_51283; } else { } tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51286: 
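/* al_write_transaction() above runs the transaction directly when already
   on the worker thread, otherwise it queues w_al_write_transaction() at
   the front of sender_work and blocks on a completion. drbd_al_shrink(),
   in progress here, empties the AL: for each slot it waits via
   _try_lc_del() until the extent's refcnt reaches zero, then lc_del()s it;
   the assert above checks that the AL is locked (__LC_LOCKED) first. */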
prepare_to_wait(& mdev->al_wait, & __wait, 2); tmp___2 = _try_lc_del(mdev, al_ext); if (tmp___2 != 0) { goto ldv_51285; } else { } schedule(); goto ldv_51286; ldv_51285: finish_wait(& mdev->al_wait, & __wait); ldv_51283: ; ldv_51282: i = i + 1; ldv_51288: ; if ((unsigned int )i < (mdev->act_log)->nr_elements) { goto ldv_51287; } else { } __wake_up(& mdev->al_wait, 3U, 1, 0); return; } } static int w_update_odbm(struct drbd_work *w , int unused ) { struct update_odbm_work *udw ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; struct sib_info sib ; int tmp ; int tmp___0 ; unsigned int tmp___1 ; unsigned long tmp___2 ; { __mptr = (struct drbd_work const *)w; udw = (struct update_odbm_work *)__mptr; mdev = w->ldv_49807.mdev; sib.sib_reason = SIB_SYNC_PROGRESS; sib.ldv_50742.ldv_50737.helper_name = 0; sib.ldv_50742.ldv_50737.helper_exit_code = 0U; tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 == 0) { tmp = ___ratelimit(& drbd_ratelimit_state, "w_update_odbm"); if (tmp != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Can not update on disk bitmap, local IO disabled.\n"); } else { } kfree((void const *)udw); return (0); } else { } tmp___1 = rs_extent_to_bm_page(udw->enr); drbd_bm_write_page(mdev, tmp___1); put_ldev(mdev); kfree((void const *)udw); tmp___2 = drbd_bm_total_weight(mdev); if (tmp___2 <= mdev->rs_failed) { switch ((int )mdev->state.ldv_49522.conn) { case 16: ; case 17: ; case 20: ; case 21: drbd_resync_finished(mdev); default: ; goto ldv_51305; } ldv_51305: ; } else { } drbd_bcast_event(mdev, (struct sib_info const *)(& sib)); return (0); } } static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev , sector_t sector , int count , int success ) { struct lc_element *e ; struct update_odbm_work *udw ; unsigned int enr ; int tmp ; struct bm_extent *ext ; struct lc_element const *__mptr ; char const *tmp___0 ; int rs_left ; int tmp___1 ; void *tmp___2 ; { tmp = atomic_read((atomic_t const *)(& mdev->local_cnt)); if (tmp == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( atomic_read(&mdev->local_cnt) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 668); } else { } enr = (unsigned int )(sector >> 15); e = lc_get(mdev->resync, enr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr = (struct lc_element const *)e; ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; if (ext->lce.lc_number == enr) { if (success != 0) { ext->rs_left = ext->rs_left - count; } else { ext->rs_failed = ext->rs_failed + count; } if (ext->rs_left < ext->rs_failed) { tmp___0 = drbd_conn_str((enum drbd_conns )mdev->state.ldv_49522.conn); dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "BAD! 
sector=%llus enr=%u rs_left=%d rs_failed=%d count=%d cstate=%s\n", (unsigned long long )sector, ext->lce.lc_number, ext->rs_left, ext->rs_failed, count, tmp___0); ext->rs_left = drbd_bm_e_weight(mdev, (unsigned long )enr); } else { } } else { tmp___1 = drbd_bm_e_weight(mdev, (unsigned long )enr); rs_left = tmp___1; if (ext->flags != 0UL) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "changing resync lce: %d[%u;%02lx] -> %d[%u;00]\n", ext->lce.lc_number, ext->rs_left, ext->flags, enr, rs_left); ext->flags = 0UL; } else { } if (ext->rs_failed != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Kicking resync_lru element enr=%u out with rs_failed=%d\n", ext->lce.lc_number, ext->rs_failed); } else { } ext->rs_left = rs_left; ext->rs_failed = success == 0 ? count : 0; lc_committed(mdev->resync); } lc_put(mdev->resync, & ext->lce); if (ext->rs_left == ext->rs_failed) { ext->rs_failed = 0; tmp___2 = kmalloc(40UL, 32U); udw = (struct update_odbm_work *)tmp___2; if ((unsigned long )udw != (unsigned long )((struct update_odbm_work *)0)) { udw->enr = ext->lce.lc_number; udw->w.cb = & w_update_odbm; udw->w.ldv_49807.mdev = mdev; drbd_queue_work_front(& (mdev->tconn)->sender_work, & udw->w); } else { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Could not kmalloc an udw\n"); } } else { } } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "lc_get() failed! locked=%d/%d flags=%lu\n", mdev->resync_locked, (mdev->resync)->nr_elements, (mdev->resync)->flags); } return; } } void drbd_advance_rs_marks(struct drbd_conf *mdev , unsigned long still_to_go ) { unsigned long now ; unsigned long last ; int next ; { now = jiffies; last = mdev->rs_mark_time[mdev->rs_last_mark]; next = (mdev->rs_last_mark + 1) % 8; if ((1 != 0 && 1 != 0) && (long )now - (long )(last + 750UL) >= 0L) { if ((mdev->rs_mark_left[mdev->rs_last_mark] != still_to_go && (unsigned int )*((unsigned short *)mdev + 374UL) != 336U) && (unsigned int )*((unsigned short *)mdev + 374UL) != 320U) { mdev->rs_mark_time[next] = now; mdev->rs_mark_left[next] = still_to_go; mdev->rs_last_mark = next; } else { } } else { } return; } } void __drbd_set_in_sync(struct drbd_conf *mdev , sector_t sector , int size , char const *file , unsigned int const line ) { unsigned long sbnr ; unsigned long ebnr ; unsigned long lbnr ; unsigned long count ; sector_t esector ; sector_t nr_sectors ; int wake_up ; unsigned long flags ; int tmp ; bool _bool ; int tmp___0 ; bool _bool___0 ; int tmp___1 ; long tmp___2 ; long tmp___3 ; int tmp___4 ; unsigned long tmp___5 ; raw_spinlock_t *tmp___6 ; { count = 0UL; wake_up = 0; if ((size <= 0 || (size & 511) != 0) || (unsigned int )size > 1048576U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_set_in_sync: sector=%llus size=%d nonsense!\n", (unsigned long long )sector, size); return; } else { } tmp = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp == 0) { return; } else { } nr_sectors = drbd_get_capacity(mdev->this_bdev); esector = ((sector_t )(size >> 9) + sector) - 1UL; _bool = sector < nr_sectors; if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"sector < nr_sectors", "__drbd_set_in_sync"); } else { } if (_bool) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto out; } else { } _bool___0 = esector < nr_sectors; if (! 
_bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"esector < nr_sectors", "__drbd_set_in_sync"); } else { } if (_bool___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { esector = nr_sectors - 1UL; } else { } lbnr = (nr_sectors - 1UL) >> 3; tmp___2 = ldv__builtin_expect(esector <= 6UL, 0L); if (tmp___2 != 0L) { goto out; } else { } tmp___3 = ldv__builtin_expect(nr_sectors - 1UL == esector, 0L); if (tmp___3 != 0L) { ebnr = lbnr; } else { ebnr = (esector - 7UL) >> 3; } sbnr = (sector + 7UL) >> 3; if (sbnr > ebnr) { goto out; } else { } tmp___4 = drbd_bm_clear_bits(mdev, sbnr, ebnr); count = (unsigned long )tmp___4; if (count != 0UL) { tmp___5 = drbd_bm_total_weight(mdev); drbd_advance_rs_marks(mdev, tmp___5); tmp___6 = spinlock_check(& mdev->al_lock); flags = _raw_spin_lock_irqsave(tmp___6); drbd_try_clear_on_disk_bm(mdev, sector, (int )count, 1); spin_unlock_irqrestore(& mdev->al_lock, flags); wake_up = 1; } else { } out: put_ldev(mdev); if (wake_up != 0) { __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } return; } } int __drbd_set_out_of_sync(struct drbd_conf *mdev , sector_t sector , int size , char const *file , unsigned int const line ) { unsigned long sbnr ; unsigned long ebnr ; unsigned long flags ; sector_t esector ; sector_t nr_sectors ; unsigned int enr ; unsigned int count ; struct lc_element *e ; int tmp ; bool _bool ; int tmp___0 ; bool _bool___0 ; int tmp___1 ; raw_spinlock_t *tmp___2 ; int tmp___3 ; struct lc_element const *__mptr ; struct lc_element const *__mptr___0 ; { count = 0U; if (size == 0) { return (0); } else { } if ((size < 0 || (size & 511) != 0) || (unsigned int )size > 1048576U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "sector: %llus, size: %d\n", (unsigned long long )sector, size); return (0); } else { } tmp = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp == 0) { return (0); } else { } nr_sectors = drbd_get_capacity(mdev->this_bdev); esector = ((sector_t )(size >> 9) + sector) - 1UL; _bool = sector < nr_sectors; if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"sector < nr_sectors", "__drbd_set_out_of_sync"); } else { } if (_bool) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { goto out; } else { } _bool___0 = esector < nr_sectors; if (! 
_bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"esector < nr_sectors", "__drbd_set_out_of_sync"); } else { } if (_bool___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1) { esector = nr_sectors - 1UL; } else { } sbnr = sector >> 3; ebnr = esector >> 3; tmp___2 = spinlock_check(& mdev->al_lock); flags = _raw_spin_lock_irqsave(tmp___2); tmp___3 = drbd_bm_set_bits(mdev, sbnr, ebnr); count = (unsigned int )tmp___3; enr = (unsigned int )(sector >> 15); e = lc_find(mdev->resync, enr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr = (struct lc_element const *)e; __mptr___0 = (struct lc_element const *)e; ((struct bm_extent *)__mptr + 0xfffffffffffffff0UL)->rs_left = (int )((unsigned int )((struct bm_extent *)__mptr___0 + 0xfffffffffffffff0UL)->rs_left + count); } else { } spin_unlock_irqrestore(& mdev->al_lock, flags); out: put_ldev(mdev); return ((int )count); } } static struct bm_extent *_bme_get(struct drbd_conf *mdev , unsigned int enr ) { struct lc_element *e ; struct bm_extent *bm_ext ; int wakeup ; unsigned long rs_flags ; struct lc_element const *__mptr ; long tmp ; { wakeup = 0; spin_lock_irq(& mdev->al_lock); if (mdev->resync_locked > (mdev->resync)->nr_elements / 2U) { spin_unlock_irq(& mdev->al_lock); return (0); } else { } e = lc_get(mdev->resync, enr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr = (struct lc_element const *)e; bm_ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; } else { bm_ext = 0; } if ((unsigned long )bm_ext != (unsigned long )((struct bm_extent *)0)) { if (bm_ext->lce.lc_number != enr) { bm_ext->rs_left = drbd_bm_e_weight(mdev, (unsigned long )enr); bm_ext->rs_failed = 0; lc_committed(mdev->resync); wakeup = 1; } else { } if (bm_ext->lce.refcnt == 1U) { mdev->resync_locked = mdev->resync_locked + 1U; } else { } set_bit(0U, (unsigned long volatile *)(& bm_ext->flags)); } else { } rs_flags = (mdev->resync)->flags; spin_unlock_irq(& mdev->al_lock); if (wakeup != 0) { __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } if ((unsigned long )bm_ext == (unsigned long )((struct bm_extent *)0)) { if ((rs_flags & 8UL) != 0UL) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Have to wait for element (resync LRU too small?)\n"); } else { } tmp = ldv__builtin_expect((rs_flags & 4UL) != 0UL, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared"), "i" (929), "i" (12UL)); ldv_51392: ; goto ldv_51392; } else { } } else { } return (bm_ext); } } static int _is_in_al(struct drbd_conf *mdev , unsigned int enr ) { int rv ; bool tmp ; { spin_lock_irq(& mdev->al_lock); tmp = lc_is_used(mdev->act_log, enr); rv = (int )tmp; spin_unlock_irq(& mdev->al_lock); return (rv); } } int drbd_rs_begin_io(struct drbd_conf *mdev , sector_t sector ) { unsigned int enr ; struct bm_extent *bm_ext ; int i ; int sig ; int sa ; int __ret ; wait_queue_t __wait ; struct task_struct *tmp ; struct task_struct *tmp___0 ; int tmp___1 ; int tmp___2 ; int __ret___0 ; wait_queue_t __wait___0 ; struct task_struct *tmp___3 ; int tmp___4 ; int tmp___5 ; struct task_struct *tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; unsigned int tmp___10 ; long 
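/*
 * Annotation: drbd_rs_begin_io() below locks one resync extent for resync
 * I/O.  enr = sector >> 15, so with 512-byte sectors one resync extent
 * covers 2^15 sectors = 16 MiB.  The i = 0..3 loop further down waits
 * until none of the four overlapping activity-log extents (enr * 4 + i,
 * presumably 4 MiB each) is still hot in the AL.  Judging by the ASSERT
 * strings elsewhere in this file, flag bit 0 is BME_NO_WRITES and bit 1
 * is BME_LOCKED; bit 2 is presumably a "priority" bit that lets
 * application I/O push resync aside.  The step-aside path retries up to
 * sa = 200 times with a 25-jiffy interruptible sleep, which matches the
 * "stepped aside for 20sec" warning (200 * 25 = 5000 jiffies) at HZ = 250.
 */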
tmp___11 ; int tmp___12 ; { enr = (unsigned int )(sector >> 15); sa = 200; retry: __ret = 0; bm_ext = _bme_get(mdev, enr); if ((unsigned long )bm_ext == (unsigned long )((struct bm_extent *)0)) { tmp = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_51412: prepare_to_wait(& mdev->al_wait, & __wait, 1); bm_ext = _bme_get(mdev, enr); if ((unsigned long )bm_ext != (unsigned long )((struct bm_extent *)0)) { goto ldv_51410; } else { } tmp___0 = get_current(); tmp___1 = signal_pending(tmp___0); if (tmp___1 == 0) { schedule(); goto ldv_51411; } else { } __ret = -512; goto ldv_51410; ldv_51411: ; goto ldv_51412; ldv_51410: finish_wait(& mdev->al_wait, & __wait); } else { } sig = __ret; if (sig != 0) { return (-4); } else { } tmp___2 = constant_test_bit(1U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___2 != 0) { return (0); } else { } i = 0; goto ldv_51421; ldv_51420: __ret___0 = 0; tmp___8 = _is_in_al(mdev, enr * 4U + (unsigned int )i); if (tmp___8 != 0) { tmp___9 = constant_test_bit(2U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___9 == 0) { tmp___3 = get_current(); __wait___0.flags = 0U; __wait___0.private = (void *)tmp___3; __wait___0.func = & autoremove_wake_function; __wait___0.task_list.next = & __wait___0.task_list; __wait___0.task_list.prev = & __wait___0.task_list; ldv_51418: prepare_to_wait(& mdev->al_wait, & __wait___0, 1); tmp___4 = _is_in_al(mdev, enr * 4U + (unsigned int )i); if (tmp___4 == 0) { goto ldv_51416; } else { tmp___5 = constant_test_bit(2U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___5 != 0) { goto ldv_51416; } else { } } tmp___6 = get_current(); tmp___7 = signal_pending(tmp___6); if (tmp___7 == 0) { schedule(); goto ldv_51417; } else { } __ret___0 = -512; goto ldv_51416; ldv_51417: ; goto ldv_51418; ldv_51416: finish_wait(& mdev->al_wait, & __wait___0); } else { } } else { } sig = __ret___0; if (sig != 0) { goto _L; } else { tmp___12 = constant_test_bit(2U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___12 != 0 && sa != 0) { _L: /* CIL Label */ spin_lock_irq(& mdev->al_lock); tmp___10 = lc_put(mdev->resync, & bm_ext->lce); if (tmp___10 == 0U) { bm_ext->flags = 0UL; mdev->resync_locked = mdev->resync_locked - 1U; __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } spin_unlock_irq(& mdev->al_lock); if (sig != 0) { return (-4); } else { } tmp___11 = schedule_timeout_interruptible(25L); if (tmp___11 != 0L) { return (-4); } else { } if (sa != 0) { sa = sa - 1; if (sa == 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_rs_begin_io() stepped aside for 20sec.Resync stalled?\n"); } else { } } else { } goto retry; } else { } } i = i + 1; ldv_51421: ; if (i <= 3) { goto ldv_51420; } else { } set_bit(1U, (unsigned long volatile *)(& bm_ext->flags)); return (0); } } int drbd_try_rs_begin_io(struct drbd_conf *mdev , sector_t sector ) { unsigned int enr ; unsigned int al_enr ; struct lc_element *e ; struct bm_extent *bm_ext ; int i ; struct lc_element const *__mptr ; int tmp ; int tmp___0 ; unsigned int tmp___1 ; struct lc_element const *__mptr___0 ; int tmp___2 ; int tmp___3 ; struct lc_element const *__mptr___1 ; unsigned long rs_flags ; long tmp___4 ; int tmp___5 ; bool tmp___6 ; { enr = (unsigned int )(sector >> 15); al_enr = enr * 4U; spin_lock_irq(& mdev->al_lock); if (mdev->resync_wenr != 4294967295U && mdev->resync_wenr != enr) { e = 
lc_find(mdev->resync, mdev->resync_wenr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr = (struct lc_element const *)e; bm_ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; } else { bm_ext = 0; } if ((unsigned long )bm_ext != (unsigned long )((struct bm_extent *)0)) { tmp = constant_test_bit(1U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !test_bit(BME_LOCKED, &bm_ext->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1032); } else { } tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___0 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( test_bit(BME_NO_WRITES, &bm_ext->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1033); } else { } clear_bit(0, (unsigned long volatile *)(& bm_ext->flags)); mdev->resync_wenr = 4294967295U; tmp___1 = lc_put(mdev->resync, & bm_ext->lce); if (tmp___1 == 0U) { mdev->resync_locked = mdev->resync_locked - 1U; } else { } __wake_up(& mdev->al_wait, 3U, 1, 0); } else { dev_alert((struct device const *)(& (mdev->vdisk)->part0.__dev), "LOGIC BUG\n"); } } else { } e = lc_try_get(mdev->resync, enr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr___0 = (struct lc_element const *)e; bm_ext = (struct bm_extent *)__mptr___0 + 0xfffffffffffffff0UL; } else { bm_ext = 0; } if ((unsigned long )bm_ext != (unsigned long )((struct bm_extent *)0)) { tmp___2 = constant_test_bit(1U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___2 != 0) { goto proceed; } else { } tmp___3 = test_and_set_bit(0, (unsigned long volatile *)(& bm_ext->flags)); if (tmp___3 == 0) { mdev->resync_locked = mdev->resync_locked + 1U; } else { bm_ext->lce.refcnt = bm_ext->lce.refcnt - 1U; if (bm_ext->lce.refcnt == 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( bm_ext->lce.refcnt > 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1057); } else { } } goto check_al; } else { if (mdev->resync_locked > (mdev->resync)->nr_elements - 3U) { goto try_again; } else { } e = lc_get(mdev->resync, enr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr___1 = (struct lc_element const *)e; bm_ext = (struct bm_extent *)__mptr___1 + 0xfffffffffffffff0UL; } else { bm_ext = 0; } if ((unsigned long )bm_ext == (unsigned long )((struct bm_extent *)0)) { rs_flags = (mdev->resync)->flags; if ((rs_flags & 8UL) != 0UL) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Have to wait for element (resync LRU too small?)\n"); } else { } tmp___4 = ldv__builtin_expect((rs_flags & 4UL) != 0UL, 0L); if (tmp___4 != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char 
*)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared"), "i" (1072), "i" (12UL)); ldv_51442: ; goto ldv_51442; } else { } goto try_again; } else { } if (bm_ext->lce.lc_number != enr) { bm_ext->rs_left = drbd_bm_e_weight(mdev, (unsigned long )enr); bm_ext->rs_failed = 0; lc_committed(mdev->resync); __wake_up(& mdev->al_wait, 3U, 1, 0); tmp___5 = constant_test_bit(1U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___5 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( test_bit(BME_LOCKED, &bm_ext->flags) == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1080); } else { } } else { } set_bit(0U, (unsigned long volatile *)(& bm_ext->flags)); if (bm_ext->lce.refcnt != 1U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( bm_ext->lce.refcnt == 1 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1083); } else { } mdev->resync_locked = mdev->resync_locked + 1U; goto check_al; } check_al: i = 0; goto ldv_51444; ldv_51443: tmp___6 = lc_is_used(mdev->act_log, al_enr + (unsigned int )i); if ((int )tmp___6) { goto try_again; } else { } i = i + 1; ldv_51444: ; if (i <= 3) { goto ldv_51443; } else { } set_bit(1U, (unsigned long volatile *)(& bm_ext->flags)); proceed: mdev->resync_wenr = 4294967295U; spin_unlock_irq(& mdev->al_lock); return (0); try_again: ; if ((unsigned long )bm_ext != (unsigned long )((struct bm_extent *)0)) { mdev->resync_wenr = enr; } else { } spin_unlock_irq(& mdev->al_lock); return (-11); } } void drbd_rs_complete_io(struct drbd_conf *mdev , sector_t sector ) { unsigned int enr ; struct lc_element *e ; struct bm_extent *bm_ext ; unsigned long flags ; raw_spinlock_t *tmp ; struct lc_element const *__mptr ; int tmp___0 ; unsigned int tmp___1 ; { enr = (unsigned int )(sector >> 15); tmp = spinlock_check(& mdev->al_lock); flags = _raw_spin_lock_irqsave(tmp); e = lc_find(mdev->resync, enr); if ((unsigned long )e != (unsigned long )((struct lc_element *)0)) { __mptr = (struct lc_element const *)e; bm_ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; } else { bm_ext = 0; } if ((unsigned long )bm_ext == (unsigned long )((struct bm_extent *)0)) { spin_unlock_irqrestore(& mdev->al_lock, flags); tmp___0 = ___ratelimit(& drbd_ratelimit_state, "drbd_rs_complete_io"); if (tmp___0 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_rs_complete_io() called, but extent not found\n"); } else { } return; } else { } if (bm_ext->lce.refcnt == 0U) { spin_unlock_irqrestore(& mdev->al_lock, flags); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_rs_complete_io(,%llu [=%u]) called, but refcnt is 0!?\n", (unsigned long long )sector, enr); return; } else { } tmp___1 = lc_put(mdev->resync, & bm_ext->lce); if (tmp___1 == 0U) { bm_ext->flags = 0UL; mdev->resync_locked = mdev->resync_locked - 1U; __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } spin_unlock_irqrestore(& mdev->al_lock, flags); return; } } void drbd_rs_cancel_all(struct drbd_conf *mdev ) { int tmp ; { spin_lock_irq(& mdev->al_lock); tmp = 
_get_ldev_if_state(mdev, D_FAILED); if (tmp != 0) { lc_reset(mdev->resync); put_ldev(mdev); } else { } mdev->resync_locked = 0U; mdev->resync_wenr = 4294967295U; spin_unlock_irq(& mdev->al_lock); __wake_up(& mdev->al_wait, 3U, 1, 0); return; } } int drbd_rs_del_all(struct drbd_conf *mdev ) { struct lc_element *e ; struct bm_extent *bm_ext ; int i ; struct lc_element const *__mptr ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; { spin_lock_irq(& mdev->al_lock); tmp___3 = _get_ldev_if_state(mdev, D_FAILED); if (tmp___3 != 0) { i = 0; goto ldv_51473; ldv_51472: e = lc_element_by_index(mdev->resync, (unsigned int )i); __mptr = (struct lc_element const *)e; bm_ext = (struct bm_extent *)__mptr + 0xfffffffffffffff0UL; if (bm_ext->lce.lc_number == 4294967295U) { goto ldv_51471; } else { } if (bm_ext->lce.lc_number == mdev->resync_wenr) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "dropping %u in drbd_rs_del_all, apparently got \'synced\' by application io\n", mdev->resync_wenr); tmp = constant_test_bit(1U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !test_bit(BME_LOCKED, &bm_ext->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1183); } else { } tmp___0 = constant_test_bit(0U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___0 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( test_bit(BME_NO_WRITES, &bm_ext->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1184); } else { } clear_bit(0, (unsigned long volatile *)(& bm_ext->flags)); mdev->resync_wenr = 4294967295U; lc_put(mdev->resync, & bm_ext->lce); } else { } if (bm_ext->lce.refcnt != 0U) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Retrying drbd_rs_del_all() later. 
refcnt=%d\n", bm_ext->lce.refcnt); put_ldev(mdev); spin_unlock_irq(& mdev->al_lock); return (-11); } else { } tmp___1 = constant_test_bit(1U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !test_bit(BME_LOCKED, &bm_ext->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1196); } else { } tmp___2 = constant_test_bit(0U, (unsigned long const volatile *)(& bm_ext->flags)); if (tmp___2 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !test_bit(BME_NO_WRITES, &bm_ext->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1197); } else { } lc_del(mdev->resync, & bm_ext->lce); ldv_51471: i = i + 1; ldv_51473: ; if ((unsigned int )i < (mdev->resync)->nr_elements) { goto ldv_51472; } else { } if ((mdev->resync)->used != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->resync->used == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_actlog.c.prepared", 1200); } else { } put_ldev(mdev); } else { } spin_unlock_irq(& mdev->al_lock); __wake_up(& mdev->al_wait, 3U, 1, 0); return (0); } } void drbd_rs_failed_io(struct drbd_conf *mdev , sector_t sector , int size ) { unsigned long sbnr ; unsigned long ebnr ; unsigned long lbnr ; unsigned long count ; sector_t esector ; sector_t nr_sectors ; int wake_up ; bool _bool ; int tmp ; bool _bool___0 ; int tmp___0 ; long tmp___1 ; long tmp___2 ; int tmp___3 ; int tmp___4 ; { wake_up = 0; if ((size <= 0 || (size & 511) != 0) || (unsigned int )size > 1048576U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "drbd_rs_failed_io: sector=%llus size=%d nonsense!\n", (unsigned long long )sector, size); return; } else { } nr_sectors = drbd_get_capacity(mdev->this_bdev); esector = ((sector_t )(size >> 9) + sector) - 1UL; _bool = sector < nr_sectors; if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"sector < nr_sectors", "drbd_rs_failed_io"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return; } else { } _bool___0 = esector < nr_sectors; if (! 
_bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"esector < nr_sectors", "drbd_rs_failed_io"); } else { } if (_bool___0) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { esector = nr_sectors - 1UL; } else { } lbnr = (nr_sectors - 1UL) >> 3; tmp___1 = ldv__builtin_expect(esector <= 6UL, 0L); if (tmp___1 != 0L) { return; } else { } tmp___2 = ldv__builtin_expect(nr_sectors - 1UL == esector, 0L); if (tmp___2 != 0L) { ebnr = lbnr; } else { ebnr = (esector - 7UL) >> 3; } sbnr = (sector + 7UL) >> 3; if (sbnr > ebnr) { return; } else { } spin_lock_irq(& mdev->al_lock); tmp___3 = drbd_bm_count_bits(mdev, sbnr, ebnr); count = (unsigned long )tmp___3; if (count != 0UL) { mdev->rs_failed = mdev->rs_failed + count; tmp___4 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___4 != 0) { drbd_try_clear_on_disk_bm(mdev, sector, (int )count, 0); put_ldev(mdev); } else { } wake_up = 1; } else { } spin_unlock_irq(& mdev->al_lock); if (wake_up != 0) { __wake_up(& mdev->al_wait, 3U, 1, 0); } else { } return; } } void ldv_main5_sequence_infinite_withcheck_stateful(void) { int tmp ; int tmp___0 ; { LDV_IN_INTERRUPT = 1; ldv_initialize(); goto ldv_51511; ldv_51510: tmp = nondet_int(); switch (tmp) { default: ; goto ldv_51509; } ldv_51509: ; ldv_51511: tmp___0 = nondet_int(); if (tmp___0 != 0) { goto ldv_51510; } else { } ldv_check_final_state(); return; } } void ldv_mutex_lock_171(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_172(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_173(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_174(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_175(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_176(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_177(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_178(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_179(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_180(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_181(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static __u32 __fswahw32(__u32 val ) { { return ((val << 16) | (val >> (8UL * sizeof(val) - 16UL))); } } extern int sprintf(char * , char const * , ...) ; extern int snprintf(char * , size_t , char const * , ...) 
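/*
 * Annotation: the ldv_mutex_*_NNN functions above are LDV (Linux Driver
 * Verification) instrumentation, not driver code.  Each wrapper first
 * calls the verifier's lock-state model for a specific lock class
 * (ldv_mutex_lock_lock(), ldv_mutex_lock_mutex_of_device(),
 * ldv_mutex_lock_cred_guard_mutex_of_signal_struct(), ...) and then the
 * real mutex primitive, so the checker can track lock/unlock discipline
 * per class.  ldv_mutex_trylock_174() returns the model's verdict; the
 * second "return (ldv_func_res);" after it is dead code left by the
 * instrumentation.  ldv_main5_sequence_infinite_withcheck_stateful() is
 * the nondeterministic entry point the verifier explores in place of
 * real kernel callbacks.  __fswahw32() swaps the two 16-bit halves of a
 * 32-bit value: (val << 16) | (val >> 16), since 8 * sizeof(val) - 16
 * is 16 here.
 */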
; extern char *strncpy(char * , char const * , __kernel_size_t ) ; extern char *kstrdup(char const * , gfp_t ) ; extern int __bitmap_equal(unsigned long const * , unsigned long const * , int ) ; extern int __bitmap_weight(unsigned long const * , int ) ; extern int __bitmap_parse(char const * , unsigned int , int , unsigned long * , int ) ; __inline static void bitmap_fill(unsigned long *dst , int nbits ) { size_t nlongs ; int len ; { nlongs = ((unsigned long )nbits + 63UL) / 64UL; len = (int )(((unsigned int )nlongs + 536870911U) * 8U); memset((void *)dst, 255, (size_t )len); *(dst + (nlongs + 0xffffffffffffffffUL)) = ((unsigned int )nbits & 63U) != 0U ? (1UL << nbits % 64) - 1UL : 0xffffffffffffffffUL; return; } } __inline static void bitmap_copy(unsigned long *dst , unsigned long const *src , int nbits ) { int len ; size_t __len ; void *__ret ; { len = (int )((unsigned int )(((unsigned long )nbits + 63UL) / 64UL) * 8U); __len = (size_t )len; __ret = memcpy((void *)dst, (void const *)src, __len); return; } } __inline static int bitmap_equal(unsigned long const *src1 , unsigned long const *src2 , int nbits ) { int tmp ; { tmp = __bitmap_equal(src1, src2, nbits); return (tmp); } } __inline static int bitmap_weight(unsigned long const *src , int nbits ) { int tmp___0 ; { tmp___0 = __bitmap_weight(src, nbits); return (tmp___0); } } __inline static int bitmap_parse(char const *buf , unsigned int buflen , unsigned long *maskp , int nmaskbits ) { int tmp ; { tmp = __bitmap_parse(buf, buflen, 0, maskp, nmaskbits); return (tmp); } } extern struct cpumask const * const cpu_online_mask ; __inline static void cpumask_set_cpu(unsigned int cpu , struct cpumask *dstp ) { unsigned int tmp ; { tmp = cpumask_check(cpu); set_bit(tmp, (unsigned long volatile *)(& dstp->bits)); return; } } __inline static void cpumask_setall(struct cpumask *dstp ) { { bitmap_fill((unsigned long *)(& dstp->bits), nr_cpu_ids); return; } } __inline static bool cpumask_equal(struct cpumask const *src1p , struct cpumask const *src2p ) { int tmp ; { tmp = bitmap_equal((unsigned long const *)(& src1p->bits), (unsigned long const *)(& src2p->bits), nr_cpu_ids); return (tmp != 0); } } __inline static unsigned int cpumask_weight(struct cpumask const *srcp ) { int tmp ; { tmp = bitmap_weight((unsigned long const *)(& srcp->bits), nr_cpu_ids); return ((unsigned int )tmp); } } __inline static void cpumask_copy(struct cpumask *dstp , struct cpumask const *srcp ) { { bitmap_copy((unsigned long *)(& dstp->bits), (unsigned long const *)(& srcp->bits), nr_cpu_ids); return; } } extern bool zalloc_cpumask_var(cpumask_var_t ** , gfp_t ) ; extern void free_cpumask_var(cpumask_var_t ) ; extern void lockdep_init_map(struct lockdep_map * , char const * , struct lock_class_key * , int ) ; int ldv_mutex_trylock_196(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_194(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_197(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_199(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_201(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_203(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_205(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_206(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_207(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_208(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_210(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_212(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_213(struct mutex *ldv_func_arg1 ) ; void 
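/*
 * Annotation on bitmap_fill() above: the memset length
 * ((nlongs + 536870911U) * 8U) equals (nlongs - 1) * 8 via unsigned
 * 32-bit wrap-around (536870911 * 8 == 2^32 - 8), so every word except
 * the last is set to all-ones.  The last word, addressed as
 * dst + (nlongs + 0xffffffffffffffffUL) == dst + nlongs - 1, then gets
 * (1UL << nbits % 64) - 1 when nbits is not a multiple of 64.  Example:
 * nbits = 70 gives nlongs = 2, word 0 = ~0UL, word 1 = (1UL << 6) - 1 =
 * 0x3f.  The cpumask helpers that follow are thin wrappers over these
 * bitmap operations with nbits = nr_cpu_ids.
 */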
ldv_mutex_unlock_214(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_216(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_218(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_220(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_222(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_193(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_195(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_198(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_200(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_202(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_204(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_209(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_211(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_215(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_217(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_219(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_221(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_drbd_main_mutex(struct mutex *lock ) ; void ldv_mutex_unlock_drbd_main_mutex(struct mutex *lock ) ; __inline static struct thread_info *current_thread_info___5(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6394; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6394; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6394; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6394; default: __bad_percpu_size(); } ldv_6394: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern void __rwlock_init(rwlock_t * , char const * , struct lock_class_key * ) ; extern void complete_all(struct completion * ) ; __inline static void __rcu_read_lock___5(void) { struct thread_info *tmp ; { tmp = current_thread_info___5(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___5(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___5(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___5(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___5(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___5(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___5(); return; } } extern void init_timer_key(struct timer_list * , unsigned int , char const * , struct lock_class_key * ) ; extern int del_timer(struct timer_list * ) ; extern void __init_work(struct work_struct * , int ) ; extern struct workqueue_struct *__alloc_workqueue_key(char const * , unsigned int , int , struct lock_class_key * , char const * , ...) 
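/*
 * Annotation: current_thread_info___5() above is the x86_64 inline --
 * read the per-cpu "kernel_stack" pointer with a %gs-relative mov and
 * subtract 8152 (apparently THREAD_SIZE of 8 KiB minus the kernel_stack
 * bias on this kernel) to reach the thread_info at the stack base.
 * __rcu_read_lock___5()/__rcu_read_unlock___5() suggest a build without
 * preemptible RCU: a read-side critical section is just preempt_count++
 * and preempt_count-- around compiler barriers, while rcu_read_lock___5()
 * additionally feeds lockdep and calls lockdep_rcu_suspicious() if RCU
 * is used from the idle loop.
 */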
; extern void destroy_workqueue(struct workqueue_struct * ) ; extern bool queue_work(struct workqueue_struct * , struct work_struct * ) ; extern unsigned long __get_free_pages(gfp_t , unsigned int ) ; extern void free_pages(unsigned long , unsigned int ) ; extern void __list_add_rcu(struct list_head * , struct list_head * , struct list_head * ) ; __inline static void list_add_tail_rcu(struct list_head *new , struct list_head *head ) { { __list_add_rcu(new, head->prev, head); return; } } __inline static int PageSlab(struct page const *page ) { int tmp ; { tmp = constant_test_bit(7U, (unsigned long const volatile *)(& page->flags)); return (tmp); } } extern int register_blkdev(unsigned int , char const * ) ; extern void unregister_blkdev(unsigned int , char const * ) ; extern struct block_device *bdget(dev_t ) ; extern void bdput(struct block_device * ) ; extern int blkdev_put(struct block_device * , fmode_t ) ; extern int set_cpus_allowed_ptr(struct task_struct * , struct cpumask const * ) ; extern int wake_up_process(struct task_struct * ) ; extern int kernel_sendmsg(struct socket * , struct msghdr * , struct kvec * , size_t , size_t ) ; extern int kernel_sock_shutdown(struct socket * , enum sock_shutdown_cmd ) ; extern struct kmem_cache *kmem_cache_create(char const * , size_t , size_t , unsigned long , void (*)(void * ) ) ; extern void kmem_cache_destroy(struct kmem_cache * ) ; extern struct proc_dir_entry *proc_create_data(char const * , umode_t , struct proc_dir_entry * , struct file_operations const * , void * ) ; extern void remove_proc_entry(char const * , struct proc_dir_entry * ) ; extern int idr_pre_get(struct idr * , gfp_t ) ; extern int idr_get_new_above(struct idr * , void * , int , int * ) ; extern void idr_remove(struct idr * , int ) ; extern void idr_destroy(struct idr * ) ; extern void idr_init(struct idr * ) ; extern int register_reboot_notifier(struct notifier_block * ) ; extern int unregister_reboot_notifier(struct notifier_block * ) ; extern struct task_struct *kthread_create_on_node(int (*)(void * ) , void * , int , char const * , ...) 
; extern void add_disk(struct gendisk * ) ; extern void del_gendisk(struct gendisk * ) ; extern void set_disk_ro(struct gendisk * , int ) ; extern struct gendisk *alloc_disk(int ) ; extern void put_disk(struct gendisk * ) ; extern mempool_t *mempool_create(int , mempool_alloc_t * , mempool_free_t * , void * ) ; extern void mempool_destroy(mempool_t * ) ; extern void *mempool_alloc_slab(gfp_t , void * ) ; extern void mempool_free_slab(void * , void * ) ; extern void *mempool_alloc_pages(gfp_t , void * ) ; extern void mempool_free_pages(void * , void * ) ; __inline static mempool_t *mempool_create_page_pool(int min_nr , int order ) { mempool_t *tmp ; { tmp = mempool_create(min_nr, & mempool_alloc_pages, & mempool_free_pages, (void *)((long )order)); return (tmp); } } extern struct bio_set *bioset_create(unsigned int , unsigned int ) ; extern void bioset_free(struct bio_set * ) ; __inline static struct request_queue *bdev_get_queue(struct block_device *bdev ) { { return ((bdev->bd_disk)->queue); } } extern void blk_cleanup_queue(struct request_queue * ) ; extern void blk_queue_make_request(struct request_queue * , make_request_fn * ) ; extern void blk_queue_bounce_limit(struct request_queue * , u64 ) ; extern void blk_queue_max_hw_sectors(struct request_queue * , unsigned int ) ; extern void blk_queue_merge_bvec(struct request_queue * , merge_bvec_fn * ) ; extern void blk_queue_flush(struct request_queue * , unsigned int ) ; extern struct request_queue *blk_alloc_queue(gfp_t ) ; extern void lc_destroy(struct lru_cache * ) ; int drbd_genl_register(void) ; void drbd_genl_unregister(void) ; void drbd_resume_al(struct drbd_conf *mdev ) ; bool disable_sendpage ; bool allow_oos ; int enable_faults ; int fault_rate ; int fault_devs ; char usermode_helper[80U] ; struct idr minors ; struct list_head drbd_tconns ; void drbd_init_set_defaults(struct drbd_conf *mdev ) ; void drbd_calc_cpu_mask(struct drbd_tconn *tconn ) ; int drbd_send(struct drbd_tconn *tconn , struct socket *sock , void *buf , size_t size , unsigned int msg_flags ) ; int drbd_send_all(struct drbd_tconn *tconn , struct socket *sock , void *buffer , size_t size , unsigned int msg_flags ) ; int __drbd_send_protocol(struct drbd_tconn *tconn , enum drbd_packet cmd ) ; int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev ) ; int drbd_send_state(struct drbd_conf *mdev , union drbd_state state ) ; void drbd_free_bc(struct drbd_backing_dev *ldev ) ; int drbd_md_read(struct drbd_conf *mdev , struct drbd_backing_dev *bdev ) ; void drbd_md_set_flag(struct drbd_conf *mdev , int flag ) ; void drbd_md_clear_flag(struct drbd_conf *mdev , int flag ) ; int drbd_md_test_flag(struct drbd_backing_dev *bdev , int flag ) ; void drbd_md_mark_dirty(struct drbd_conf *mdev ) ; void drbd_queue_bitmap_io(struct drbd_conf *mdev , int (*io_fn)(struct drbd_conf * ) , void (*done)(struct drbd_conf * , int ) , char *why , enum bm_flag flags ) ; int drbd_bitmap_io_from_worker(struct drbd_conf *mdev , int (*io_fn)(struct drbd_conf * ) , char *why , enum bm_flag flags ) ; struct kmem_cache *drbd_request_cache ; struct kmem_cache *drbd_ee_cache ; struct kmem_cache *drbd_bm_ext_cache ; struct kmem_cache *drbd_al_ext_cache ; mempool_t *drbd_request_mempool ; mempool_t *drbd_ee_mempool ; struct page *drbd_pp_pool ; spinlock_t drbd_pp_lock ; int drbd_pp_vacant ; wait_queue_head_t drbd_pp_wait ; mempool_t *drbd_md_io_page_pool ; struct bio_set *drbd_md_io_bio_set ; int conn_lowest_minor(struct drbd_tconn *tconn ) ; enum drbd_ret_code conn_new_minor(struct drbd_tconn 
*tconn , unsigned int minor , int vnr ) ; int set_resource_options(struct drbd_tconn *tconn , struct res_opts *res_opts ) ; struct drbd_tconn *conn_create(char const *name , struct res_opts *res_opts ) ; void conn_destroy(struct kref *kref ) ; struct drbd_tconn *conn_get_by_name(char const *name ) ; struct drbd_tconn *conn_get_by_addrs(void *my_addr , int my_addr_len , void *peer_addr , int peer_addr_len ) ; void conn_free_crypto(struct drbd_tconn *tconn ) ; int proc_details ; int drbd_msg_put_info(char const *info ) ; void drbd_suspend_io(struct drbd_conf *mdev ) ; void drbd_resume_io(struct drbd_conf *mdev ) ; __inline static void __drbd_chk_io_error____3(struct drbd_conf *mdev , enum drbd_force_detach_flags df , char const *where ) { enum drbd_io_error_p ep ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; union drbd_state __ns ; union drbd_state __ns___0 ; { rcu_read_lock___5(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1682, "suspicious rcu_dereference_check() usage"); } else { } } else { } ep = (enum drbd_io_error_p )_________p1->on_io_error; rcu_read_unlock___5(); switch ((unsigned int )ep) { case 0U: ; if ((unsigned int )df == 0U || (unsigned int )df == 1U) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "__drbd_chk_io_error_"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s.\n", where); } else { } if ((int )mdev->state.ldv_49522.disk > 4) { __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 4U; _drbd_set_state(mdev, __ns, CS_HARD, 0); } else { } goto ldv_51849; } else { } case 2U: ; case 1U: set_bit(12U, (unsigned long volatile *)(& mdev->flags)); if ((unsigned int )df == 0U) { set_bit(13U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((unsigned int )df == 3U) { set_bit(14U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((int )mdev->state.ldv_49522.disk > 2) { __ns___0 = drbd_read_state(mdev); __ns___0.ldv_40024.disk = 2U; _drbd_set_state(mdev, __ns___0, CS_HARD, 0); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Local IO failed in %s. Detaching...\n", where); } else { } goto ldv_51849; } ldv_51849: ; return; } } __inline static void drbd_chk_io_error____1(struct drbd_conf *mdev , int error , enum drbd_force_detach_flags forcedetach , char const *where ) { unsigned long flags ; raw_spinlock_t *tmp ; { if (error != 0) { tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); __drbd_chk_io_error____3(mdev, forcedetach, where); spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); } else { } return; } } __inline static sector_t drbd_get_max_capacity___0(struct drbd_backing_dev *bdev ) { sector_t s ; int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; sector_t __min1 ; sector_t __min2 ; sector_t tmp___2 ; sector_t tmp___3 ; sector_t __min1___0 ; sector_t __min2___0 ; sector_t tmp___4 ; sector_t __min1___1 ; sector_t __min2___1 ; sector_t __min1___2 ; sector_t __min2___2 ; sector_t tmp___5 ; { rcu_read_lock___5(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1824, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___5(); switch (meta_dev_idx) { case -1: ; case -3: tmp___3 = drbd_get_capacity(bdev->backing_bdev); if (tmp___3 != 0UL) { __min1 = 2251799813685248UL; tmp___2 = _drbd_md_first_sector(meta_dev_idx, bdev); __min2 = tmp___2; s = __min1 < __min2 ? __min1 : __min2; } else { s = 0UL; } goto ldv_51906; case -2: __min1___0 = 2251799813685248UL; tmp___4 = drbd_get_capacity(bdev->backing_bdev); __min2___0 = tmp___4; s = __min1___0 < __min2___0 ? __min1___0 : __min2___0; __min1___1 = s; __min2___1 = (unsigned long )(bdev->md.md_size_sect - (u32 )bdev->md.bm_offset) << 15; s = __min1___1 < __min2___1 ? __min1___1 : __min2___1; goto ldv_51906; default: __min1___2 = 8587575296UL; tmp___5 = drbd_get_capacity(bdev->backing_bdev); __min2___2 = tmp___5; s = __min1___2 < __min2___2 ? __min1___2 : __min2___2; } ldv_51906: ; return (s); } } __inline static sector_t drbd_md_ss__(struct drbd_conf *mdev , struct drbd_backing_dev *bdev ) { int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; sector_t tmp___2 ; { rcu_read_lock___5(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1861, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___5(); switch (meta_dev_idx) { default: ; return ((unsigned long )meta_dev_idx * 262144UL); case -1: ; case -3: ; if ((unsigned long )bdev->backing_bdev == (unsigned long )((struct block_device *)0)) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "drbd_md_ss__"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bdev->backing_bdev==NULL\n"); dump_stack(); } else { } return (0UL); } else { } tmp___2 = drbd_get_capacity(bdev->backing_bdev); return ((sector_t )(((unsigned long long )tmp___2 & 0xfffffffffffffff8ULL) - 8ULL)); case -2: ; return (0UL); } } } int drbd_send_state_req(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val ) ; int conn_send_state_req(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val ) ; __inline static int drbd_get_max_buffers___0(struct drbd_conf *mdev ) { struct net_conf *nc ; int mxb ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; { rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 2139, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; mxb = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? 
(int )nc->max_buffers : 1000000; rcu_read_unlock___5(); return (mxb); } } __inline static int drbd_state_is_stable___0(struct drbd_conf *mdev ) { union drbd_dev_state s ; { s = mdev->state; switch ((unsigned int )s.ldv_49522.conn) { case 0U: ; case 8U: ; case 10U: ; case 16U: ; case 17U: ; case 18U: ; case 19U: ; case 20U: ; case 21U: ; case 22U: ; case 23U: ; case 1U: ; case 2U: ; case 3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: ; case 9U: ; case 11U: ; case 12U: ; goto ldv_52078; case 13U: ; if ((mdev->tconn)->agreed_pro_version <= 95) { return (0); } else { } goto ldv_52078; case 14U: ; case 15U: ; case 31U: ; return (0); } ldv_52078: ; switch ((unsigned int )s.ldv_49522.disk) { case 0U: ; case 4U: ; case 5U: ; case 7U: ; case 8U: ; case 2U: ; goto ldv_52089; case 1U: ; case 3U: ; case 6U: ; case 15U: ; return (0); } ldv_52089: ; return (1); } } __inline static bool may_inc_ap_bio___0(struct drbd_conf *mdev ) { int mxb ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = drbd_get_max_buffers___0(mdev); mxb = tmp; tmp___0 = drbd_suspended(mdev); if (tmp___0 != 0) { return (0); } else { } tmp___1 = constant_test_bit(8U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___1 != 0) { return (0); } else { } tmp___2 = drbd_state_is_stable___0(mdev); if (tmp___2 == 0) { return (0); } else { } tmp___3 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); if (tmp___3 > mxb) { return (0); } else { } tmp___4 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___4 != 0) { return (0); } else { } return (1); } } __inline static bool inc_ap_bio_cond___0(struct drbd_conf *mdev ) { bool rv ; { rv = 0; spin_lock_irq(& (mdev->tconn)->req_lock); rv = may_inc_ap_bio___0(mdev); if ((int )rv) { atomic_inc(& mdev->ap_bio_cnt); } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); return (rv); } } __inline static void inc_ap_bio___0(struct drbd_conf *mdev ) { bool tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; bool tmp___1 ; { tmp = inc_ap_bio_cond___0(mdev); if ((int )tmp) { goto ldv_52109; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_52112: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___1 = inc_ap_bio_cond___0(mdev); if ((int )tmp___1) { goto ldv_52111; } else { } schedule(); goto ldv_52112; ldv_52111: finish_wait(& mdev->misc_wait, & __wait); ldv_52109: ; return; } } __inline static void dec_ap_bio___0(struct drbd_conf *mdev ) { int mxb ; int tmp ; int ap_bio ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = drbd_get_max_buffers___0(mdev); mxb = tmp; tmp___0 = atomic_sub_return(1, & mdev->ap_bio_cnt); ap_bio = tmp___0; if (ap_bio < 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( ap_bio >= 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 2280); } else { } if (ap_bio == 0) { tmp___2 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 != 0) { tmp___1 = test_and_set_bit(10, (unsigned long volatile *)(& mdev->flags)); if (tmp___1 == 0) { drbd_queue_work(& (mdev->tconn)->sender_work, & mdev->bm_io_work.w); } else { } } else { } } else { } if (ap_bio < mxb) { __wake_up(& mdev->misc_wait, 3U, 1, 0); } else { } return; } } __inline static int drbd_queue_order_type(struct drbd_conf *mdev ) { { return (0); } } void 
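/*
 * Annotation: the inlines above gate application I/O.
 * may_inc_ap_bio___0() refuses new BIOs while the device is suspended,
 * while flag bits 8 or 9 of mdev->flags are set (their symbolic names
 * are not preserved in this preprocessed file, but bit 9 is evidently
 * the "bitmap I/O pending" condition, since dec_ap_bio___0() queues
 * mdev->bm_io_work when it is set), while the connection/disk state is
 * unstable per drbd_state_is_stable___0(), or once ap_bio_cnt exceeds
 * max_buffers (1000000 when no net_conf is attached).  inc_ap_bio___0()
 * is the CIL expansion of roughly
 * wait_event(mdev->misc_wait, inc_ap_bio_cond(mdev)); dec_ap_bio___0()
 * queues the deferred bitmap-I/O work exactly once (test_and_set_bit on
 * bit 10) when the count reaches zero, and wakes misc_wait waiters when
 * it drops below max_buffers.
 */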
tl_restart(struct drbd_tconn *tconn , enum drbd_req_event what ) ; void _tl_restart(struct drbd_tconn *tconn , enum drbd_req_event what ) ; __inline static int __vli_encode_bits(u64 *out , u64 const in ) { u64 max ; u64 adj ; { max = 0ULL; adj = 1ULL; if ((unsigned long long )in == 0ULL) { return (-22); } else { } max = max + 2ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = ((unsigned long long )in - adj) << 1; } else { } return (2); } else { } adj = max + 1ULL; max = max + 2ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 2) | 1ULL; } else { } return (3); } else { } adj = max + 1ULL; max = max + 4ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 3) | 3ULL; } else { } return (5); } else { } adj = max + 1ULL; max = max + 8ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 4) | 7ULL; } else { } return (7); } else { } adj = max + 1ULL; max = max + 32ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 5) | 15ULL; } else { } return (10); } else { } adj = max + 1ULL; max = max + 256ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 6) | 31ULL; } else { } return (14); } else { } adj = max + 1ULL; max = max + 8192ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 8) | 63ULL; } else { } return (21); } else { } adj = max + 1ULL; max = max + 2097152ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 8) | 127ULL; } else { } return (29); } else { } adj = max + 1ULL; max = max + 17179869184ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 8) | 191ULL; } else { } return (42); } else { } adj = max + 1ULL; max = max + 72057594037927936ULL; if ((unsigned long long )in <= max) { if ((unsigned long )out != (unsigned long )((u64 *)0)) { *out = (((unsigned long long )in - adj) << 8) | 255ULL; } else { } return (64); } else { } adj = max + 1ULL; return (-75); } } __inline static int bitstream_put_bits(struct bitstream *bs , u64 val , unsigned int const bits ) { unsigned char *b ; unsigned int tmp ; unsigned char *tmp___0 ; unsigned char *tmp___1 ; { b = bs->cur.b; if ((unsigned int )bits == 0U) { return (0); } else { } if ((unsigned long )((long )(bs->cur.b + (unsigned long )(((bs->cur.bit + (unsigned int )bits) - 1U) >> 3)) - (long )bs->buf) >= bs->buf_len) { return (-105); } else { } if ((unsigned int )bits <= 63U) { val = (0xffffffffffffffffULL >> (int )(64U - (unsigned int )bits)) & val; } else { } tmp___0 = b; b = b + 1; *tmp___0 = (int )*tmp___0 | (int )((unsigned char )((val & 255ULL) << (int )bs->cur.bit)); tmp = 8U - bs->cur.bit; goto ldv_52300; ldv_52299: tmp___1 = b; b = b + 1; *tmp___1 = (int )*tmp___1 | (int )((unsigned char )(val >> (int )tmp)); tmp = tmp + 8U; ldv_52300: ; if (tmp < (unsigned int )bits) { goto ldv_52299; } else { } bitstream_cursor_advance(& bs->cur, bits); return ((int )bits); } } __inline static 
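/*
 * Annotation: __vli_encode_bits() above implements DRBD's variable-length
 * integer code used for compressed bitmap transfer.  Each "adj = max + 1;
 * max = max + <span>" step opens the next level: the input, rebased by
 * the level's offset (adj) and tagged with a distinct low-order prefix,
 * yields total code lengths of 2, 3, 5, 7, 10, 14, 21, 29, 42 or 64 bits,
 * so small run lengths get short codes; 0 is rejected with -22 (-EINVAL)
 * and overflow with -75 (-EOVERFLOW).  bitstream_put_bits() masks val to
 * its low "bits" bits and ORs them byte-by-byte into the buffer at the
 * cursor, returning -105 (-ENOBUFS) if the write would overrun.  In
 * vli_encode_bits() just below, "code = code" is CIL's rendering of the
 * "u64 code = code;" self-initialization idiom that silences
 * uninitialized-variable warnings.
 */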
int vli_encode_bits(struct bitstream *bs , u64 in ) { u64 code ; int bits ; int tmp ; int tmp___0 ; { code = code; tmp = __vli_encode_bits(& code, in); bits = tmp; if (bits <= 0) { return (bits); } else { } tmp___0 = bitstream_put_bits(bs, code, (unsigned int const )bits); return (tmp___0); } } static struct mutex drbd_main_mutex = {{1}, {{{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "drbd_main_mutex.wait_lock", 0, 0UL}}}}, {& drbd_main_mutex.wait_list, & drbd_main_mutex.wait_list}, 0, 0, (void *)(& drbd_main_mutex), {0, {0, 0}, "drbd_main_mutex", 0, 0UL}}; int drbd_init(void) ; static int drbd_open(struct block_device *bdev , fmode_t mode ) ; static int drbd_release(struct gendisk *gd , fmode_t mode ) ; static int w_md_sync(struct drbd_work *w , int unused ) ; static void md_sync_timer_fn(unsigned long data ) ; static int w_bitmap_io(struct drbd_work *w , int unused ) ; static int w_go_diskless(struct drbd_work *w , int unused ) ; static int fault_count ; unsigned int minor_count = 32U; char usermode_helper[80U] = { '/', 's', 'b', 'i', 'n', '/', 'd', 'r', 'b', 'd', 'a', 'd', 'm', '\000'}; struct ratelimit_state drbd_ratelimit_state = {{{{0U}}, 3735899821U, 4294967295U, 0xffffffffffffffffUL, {0, {0, 0}, "drbd_ratelimit_state.lock", 0, 0UL}}, 1250, 5, 0, 0, 0UL}; static struct block_device_operations const drbd_ops = {& drbd_open, & drbd_release, 0, 0, 0, 0, 0, 0, 0, 0, 0, & __this_module}; struct bio *bio_alloc_drbd(gfp_t gfp_mask ) { struct bio *bio ; struct bio *tmp ; { if ((unsigned long )drbd_md_io_bio_set == (unsigned long )((struct bio_set *)0)) { tmp = bio_alloc(gfp_mask, 1U); return (tmp); } else { } bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set); if ((unsigned long )bio == (unsigned long )((struct bio *)0)) { return (0); } else { } return (bio); } } void tl_release(struct drbd_tconn *tconn , unsigned int barrier_nr , unsigned int set_size ) { struct drbd_request *r ; struct drbd_request *req ; int expect_epoch ; int expect_size ; struct list_head const *__mptr ; unsigned int s ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct list_head const *__mptr___2 ; struct list_head const *__mptr___3 ; struct list_head const *__mptr___4 ; union drbd_state val ; union drbd_state mask ; { req = 0; expect_epoch = 0; expect_size = 0; spin_lock_irq(& tconn->req_lock); __mptr = (struct list_head const *)tconn->transfer_log.next; r = (struct drbd_request *)__mptr + 0xffffffffffffff98UL; goto ldv_52496; ldv_52495: s = r->rq_state; if ((unsigned long )req == (unsigned long )((struct drbd_request *)0)) { if (((unsigned long )s & 2048UL) == 0UL) { goto ldv_52493; } else { } if (((unsigned long )s & 1008UL) == 0UL) { goto ldv_52493; } else { } if (((unsigned long )s & 128UL) != 0UL) { goto ldv_52493; } else { } req = r; expect_epoch = (int )req->epoch; expect_size = expect_size + 1; } else { if (r->epoch != (unsigned int )expect_epoch) { goto ldv_52494; } else { } if (((unsigned long )s & 2048UL) == 0UL) { goto ldv_52493; } else { } expect_size = expect_size + 1; } ldv_52493: __mptr___0 = (struct list_head const *)r->tl_requests.next; r = (struct drbd_request *)__mptr___0 + 0xffffffffffffff98UL; ldv_52496: ; if ((unsigned long )(& r->tl_requests) != (unsigned long )(& tconn->transfer_log)) { goto ldv_52495; } else { } ldv_52494: ; if ((unsigned long )req == (unsigned long )((struct drbd_request *)0)) { printk("\vd-con %s: BAD! 
BarrierAck #%u received, but no epoch in tl!?\n", tconn->name, barrier_nr); goto bail; } else { } if ((unsigned int )expect_epoch != barrier_nr) { printk("\vd-con %s: BAD! BarrierAck #%u received, expected #%u!\n", tconn->name, barrier_nr, expect_epoch); goto bail; } else { } if ((unsigned int )expect_size != set_size) { printk("\vd-con %s: BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n", tconn->name, barrier_nr, set_size, expect_size); goto bail; } else { } __mptr___1 = (struct list_head const *)tconn->transfer_log.next; req = (struct drbd_request *)__mptr___1 + 0xffffffffffffff98UL; goto ldv_52504; ldv_52503: ; if (req->epoch == (unsigned int )expect_epoch) { goto ldv_52502; } else { } __mptr___2 = (struct list_head const *)req->tl_requests.next; req = (struct drbd_request *)__mptr___2 + 0xffffffffffffff98UL; ldv_52504: ; if ((unsigned long )(& req->tl_requests) != (unsigned long )(& tconn->transfer_log)) { goto ldv_52503; } else { } ldv_52502: __mptr___3 = (struct list_head const *)req->tl_requests.next; r = (struct drbd_request *)__mptr___3 + 0xffffffffffffff98UL; goto ldv_52511; ldv_52510: ; if (req->epoch != (unsigned int )expect_epoch) { goto ldv_52509; } else { } _req_mod(req, BARRIER_ACKED); req = r; __mptr___4 = (struct list_head const *)r->tl_requests.next; r = (struct drbd_request *)__mptr___4 + 0xffffffffffffff98UL; ldv_52511: ; if ((unsigned long )(& req->tl_requests) != (unsigned long )(& tconn->transfer_log)) { goto ldv_52510; } else { } ldv_52509: spin_unlock_irq(& tconn->req_lock); return; bail: spin_unlock_irq(& tconn->req_lock); val.i = 0U; val.ldv_40024.conn = 6U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); return; } } void _tl_restart(struct drbd_tconn *tconn , enum drbd_req_event what ) { struct drbd_request *req ; struct drbd_request *r ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { __mptr = (struct list_head const *)tconn->transfer_log.next; req = (struct drbd_request *)__mptr + 0xffffffffffffff98UL; __mptr___0 = (struct list_head const *)req->tl_requests.next; r = (struct drbd_request *)__mptr___0 + 0xffffffffffffff98UL; goto ldv_52529; ldv_52528: _req_mod(req, what); req = r; __mptr___1 = (struct list_head const *)r->tl_requests.next; r = (struct drbd_request *)__mptr___1 + 0xffffffffffffff98UL; ldv_52529: ; if ((unsigned long )(& req->tl_requests) != (unsigned long )(& tconn->transfer_log)) { goto ldv_52528; } else { } return; } } void tl_restart(struct drbd_tconn *tconn , enum drbd_req_event what ) { { spin_lock_irq(& tconn->req_lock); _tl_restart(tconn, what); spin_unlock_irq(& tconn->req_lock); return; } } void tl_clear(struct drbd_tconn *tconn ) { { tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING); return; } } void tl_abort_disk_io(struct drbd_conf *mdev ) { struct drbd_tconn *tconn ; struct drbd_request *req ; struct drbd_request *r ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { tconn = mdev->tconn; spin_lock_irq(& tconn->req_lock); __mptr = (struct list_head const *)tconn->transfer_log.next; req = (struct drbd_request *)__mptr + 0xffffffffffffff98UL; __mptr___0 = (struct list_head const *)req->tl_requests.next; r = (struct drbd_request *)__mptr___0 + 0xffffffffffffff98UL; goto ldv_52552; ldv_52551: ; if (((unsigned long )req->rq_state & 1UL) == 0UL) { goto ldv_52550; } else { } if ((unsigned long )req->w.ldv_49807.mdev != (unsigned long )mdev) { goto ldv_52550; } 
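/* tl_abort_disk_io(): the two gotos above skip transfer-log entries whose
 * RQ_LOCAL_PENDING bit (1UL) is clear or that belong to another device;
 * every remaining request gets _req_mod(req, ABORT_DISK_IO). */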
else { } _req_mod(req, ABORT_DISK_IO); ldv_52550: req = r; __mptr___1 = (struct list_head const *)r->tl_requests.next; r = (struct drbd_request *)__mptr___1 + 0xffffffffffffff98UL; ldv_52552: ; if ((unsigned long )(& req->tl_requests) != (unsigned long )(& tconn->transfer_log)) { goto ldv_52551; } else { } spin_unlock_irq(& tconn->req_lock); return; } } static int drbd_thread_setup(void *arg ) { struct drbd_thread *thi ; struct drbd_tconn *tconn ; unsigned long flags ; int retval ; struct task_struct *tmp ; raw_spinlock_t *tmp___0 ; struct task_struct *tmp___1 ; { thi = (struct drbd_thread *)arg; tconn = thi->tconn; tmp = get_current(); snprintf((char *)(& tmp->comm), 16UL, "drbd_%c_%s", (int )thi->name[0], (thi->tconn)->name); restart: retval = (*(thi->function))(thi); tmp___0 = spinlock_check(& thi->t_lock); flags = _raw_spin_lock_irqsave(tmp___0); if ((unsigned int )thi->t_state == 3U) { printk("\016d-con %s: Restarting %s thread\n", tconn->name, (char *)(& thi->name)); thi->t_state = RUNNING; spin_unlock_irqrestore(& thi->t_lock, flags); goto restart; } else { } thi->task = 0; thi->t_state = NONE; __asm__ volatile ("mfence": : : "memory"); complete_all(& thi->stop); spin_unlock_irqrestore(& thi->t_lock, flags); tmp___1 = get_current(); printk("\016d-con %s: Terminating %s\n", tconn->name, (char *)(& tmp___1->comm)); kref_put(& tconn->kref, & conn_destroy); module_put(& __this_module); return (retval); } } static void drbd_thread_init(struct drbd_tconn *tconn , struct drbd_thread *thi , int (*func)(struct drbd_thread * ) , char *name ) { struct lock_class_key __key ; { spinlock_check(& thi->t_lock); __raw_spin_lock_init(& thi->t_lock.ldv_5957.rlock, "&(&thi->t_lock)->rlock", & __key); thi->task = 0; thi->t_state = NONE; thi->function = func; thi->tconn = tconn; strncpy((char *)(& thi->name), (char const *)name, 9UL); return; } } int drbd_thread_start(struct drbd_thread *thi ) { struct drbd_tconn *tconn ; struct task_struct *nt ; unsigned long flags ; raw_spinlock_t *tmp ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; bool tmp___2 ; int tmp___3 ; struct task_struct *tmp___4 ; long tmp___5 ; raw_spinlock_t *tmp___6 ; struct task_struct *tmp___7 ; struct task_struct *tmp___8 ; { tconn = thi->tconn; tmp = spinlock_check(& thi->t_lock); flags = _raw_spin_lock_irqsave(tmp); switch ((unsigned int )thi->t_state) { case 0U: tmp___0 = get_current(); tmp___1 = get_current(); printk("\016d-con %s: Starting %s thread (from %s [%d])\n", tconn->name, (char *)(& thi->name), (char *)(& tmp___1->comm), tmp___0->pid); tmp___2 = try_module_get(& __this_module); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { printk("\vd-con %s: Failed to get module reference in drbd_thread_start\n", tconn->name); spin_unlock_irqrestore(& thi->t_lock, flags); return (0); } else { } kref_get(& (thi->tconn)->kref); init_completion(& thi->stop); thi->reset_cpu_mask = 1; thi->t_state = RUNNING; spin_unlock_irqrestore(& thi->t_lock, flags); tmp___4 = get_current(); flush_signals(tmp___4); nt = kthread_create_on_node(& drbd_thread_setup, (void *)thi, -1, "drbd_%c_%s", (int )thi->name[0], (thi->tconn)->name); tmp___5 = IS_ERR((void const *)nt); if (tmp___5 != 0L) { printk("\vd-con %s: Couldn\'t start thread\n", tconn->name); kref_put(& tconn->kref, & conn_destroy); module_put(& __this_module); return (0); } else { } tmp___6 = spinlock_check(& thi->t_lock); flags = _raw_spin_lock_irqsave(tmp___6); thi->task = nt; thi->t_state = RUNNING; spin_unlock_irqrestore(& thi->t_lock, flags); 
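/* thi->task and t_state were published under t_lock above; only now may
 * the freshly created kthread start running. */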
wake_up_process(nt); goto ldv_52588; case 2U: thi->t_state = RESTARTING; tmp___7 = get_current(); tmp___8 = get_current(); printk("\016d-con %s: Restarting %s thread (from %s [%d])\n", tconn->name, (char *)(& thi->name), (char *)(& tmp___8->comm), tmp___7->pid); case 1U: ; case 3U: ; default: spin_unlock_irqrestore(& thi->t_lock, flags); goto ldv_52588; } ldv_52588: ; return (1); } } void _drbd_thread_stop(struct drbd_thread *thi , int restart , int wait ) { unsigned long flags ; enum drbd_thread_state ns ; raw_spinlock_t *tmp ; struct task_struct *tmp___0 ; { ns = restart != 0 ? RESTARTING : EXITING; tmp = spinlock_check(& thi->t_lock); flags = _raw_spin_lock_irqsave(tmp); if ((unsigned int )thi->t_state == 0U) { spin_unlock_irqrestore(& thi->t_lock, flags); if (restart != 0) { drbd_thread_start(thi); } else { } return; } else { } if ((unsigned int )thi->t_state != (unsigned int )ns) { if ((unsigned long )thi->task == (unsigned long )((struct task_struct *)0)) { spin_unlock_irqrestore(& thi->t_lock, flags); return; } else { } thi->t_state = ns; __asm__ volatile ("mfence": : : "memory"); init_completion(& thi->stop); tmp___0 = get_current(); if ((unsigned long )thi->task != (unsigned long )tmp___0) { force_sig(1, thi->task); } else { } } else { } spin_unlock_irqrestore(& thi->t_lock, flags); if (wait != 0) { wait_for_completion(& thi->stop); } else { } return; } } static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn , struct task_struct *task ) { struct drbd_thread *thi ; { thi = (unsigned long )tconn->receiver.task == (unsigned long )task ? & tconn->receiver : ((unsigned long )tconn->asender.task == (unsigned long )task ? & tconn->asender : ((unsigned long )tconn->worker.task == (unsigned long )task ? & tconn->worker : 0)); return (thi); } } char *drbd_task_to_thread_name(struct drbd_tconn *tconn , struct task_struct *task ) { struct drbd_thread *thi ; struct drbd_thread *tmp ; { tmp = drbd_task_to_thread(tconn, task); thi = tmp; return ((unsigned long )thi != (unsigned long )((struct drbd_thread *)0) ? 
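/* one of the three known DRBD threads: return its short name, otherwise
 * fall back to the task's comm field below */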
(char *)(& thi->name) : (char *)(& task->comm)); } } int conn_lowest_minor(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; int vnr ; int m ; void *tmp ; unsigned int tmp___0 ; { vnr = 0; rcu_read_lock___5(); tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { tmp___0 = mdev_to_minor(mdev); m = (int )tmp___0; } else { m = -1; } rcu_read_unlock___5(); return (m); } } void drbd_calc_cpu_mask(struct drbd_tconn *tconn ) { int ord ; int cpu ; unsigned int tmp ; int tmp___0 ; unsigned int tmp___1 ; int tmp___2 ; unsigned int tmp___3 ; { tmp = cpumask_weight((struct cpumask const *)tconn->cpu_mask); if (tmp != 0U) { return; } else { } tmp___0 = conn_lowest_minor(tconn); tmp___1 = cpumask_weight(cpu_online_mask); ord = (int )((unsigned int )tmp___0 % tmp___1); cpu = -1; goto ldv_52625; ldv_52624: tmp___2 = ord; ord = ord - 1; if (tmp___2 == 0) { cpumask_set_cpu((unsigned int )cpu, tconn->cpu_mask); return; } else { } ldv_52625: tmp___3 = cpumask_next(cpu, cpu_online_mask); cpu = (int )tmp___3; if (cpu < nr_cpu_ids) { goto ldv_52624; } else { } cpumask_setall(tconn->cpu_mask); return; } } void drbd_thread_current_set_cpu(struct drbd_thread *thi ) { struct task_struct *p ; struct task_struct *tmp ; { tmp = get_current(); p = tmp; if (thi->reset_cpu_mask == 0) { return; } else { } thi->reset_cpu_mask = 0; set_cpus_allowed_ptr(p, (struct cpumask const *)(thi->tconn)->cpu_mask); return; } } unsigned int drbd_header_size(struct drbd_tconn *tconn ) { { if (tconn->agreed_pro_version > 99) { return (16U); } else { return (8U); } } } static unsigned int prepare_header80(struct p_header80 *h , enum drbd_packet cmd , int size ) { __u16 tmp ; __u16 tmp___0 ; { h->magic = 1728214147U; tmp = __fswab16((int )((__u16 )cmd)); h->command = tmp; tmp___0 = __fswab16((int )((__u16 )size)); h->length = tmp___0; return (8U); } } static unsigned int prepare_header95(struct p_header95 *h , enum drbd_packet cmd , int size ) { __u16 tmp ; __u32 tmp___0 ; { h->magic = 23171U; tmp = __fswab16((int )((__u16 )cmd)); h->command = tmp; tmp___0 = __fswab32((__u32 )size); h->length = tmp___0; return (8U); } } static unsigned int prepare_header100(struct p_header100 *h , enum drbd_packet cmd , int size , int vnr ) { __u16 tmp ; __u16 tmp___0 ; __u32 tmp___1 ; { h->magic = 552345734U; tmp = __fswab16((int )((__u16 )vnr)); h->volume = tmp; tmp___0 = __fswab16((int )((__u16 )cmd)); h->command = tmp___0; tmp___1 = __fswab32((__u32 )size); h->length = tmp___1; h->pad = 0U; return (16U); } } static unsigned int prepare_header(struct drbd_tconn *tconn , int vnr , void *buffer , enum drbd_packet cmd , int size ) { unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; { if (tconn->agreed_pro_version > 99) { tmp = prepare_header100((struct p_header100 *)buffer, cmd, size, vnr); return (tmp); } else if (tconn->agreed_pro_version > 94 && (unsigned int )size > 32768U) { tmp___0 = prepare_header95((struct p_header95 *)buffer, cmd, size); return (tmp___0); } else { tmp___1 = prepare_header80((struct p_header80 *)buffer, cmd, size); return (tmp___1); } } } static void *__conn_prepare_command(struct drbd_tconn *tconn , struct drbd_socket *sock ) { unsigned int tmp ; { if ((unsigned long )sock->socket == (unsigned long )((struct socket *)0)) { return (0); } else { } tmp = drbd_header_size(tconn); return (sock->sbuf + (unsigned long )tmp); } } void *conn_prepare_command(struct drbd_tconn *tconn , struct drbd_socket *sock ) { void *p ; { 
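/* conn_prepare_command(): take the per-socket mutex (ldv_mutex_lock_* are
 * the LDV-instrumented wrappers around mutex_lock/mutex_unlock) and return
 * a pointer into the send buffer just past the packet header. On success
 * the mutex stays held and is released later by conn_send_command() or
 * drbd_send_command(); on failure it is dropped right here. */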
ldv_mutex_lock_204(& sock->mutex); p = __conn_prepare_command(tconn, sock); if ((unsigned long )p == (unsigned long )((void *)0)) { ldv_mutex_unlock_205(& sock->mutex); } else { } return (p); } } void *drbd_prepare_command(struct drbd_conf *mdev , struct drbd_socket *sock ) { void *tmp ; { tmp = conn_prepare_command(mdev->tconn, sock); return (tmp); } } static int __send_command(struct drbd_tconn *tconn , int vnr , struct drbd_socket *sock , enum drbd_packet cmd , unsigned int header_size , void *data , unsigned int size ) { int msg_flags ; int err ; unsigned int tmp ; { msg_flags = (unsigned long )data != (unsigned long )((void *)0) ? 32768 : 0; tmp = prepare_header(tconn, vnr, sock->sbuf, cmd, (int )(header_size + size)); header_size = tmp + header_size; err = drbd_send_all(tconn, sock->socket, sock->sbuf, (size_t )header_size, (unsigned int )msg_flags); if ((unsigned long )data != (unsigned long )((void *)0) && err == 0) { err = drbd_send_all(tconn, sock->socket, data, (size_t )size, 0U); } else { } return (err); } } static int __conn_send_command(struct drbd_tconn *tconn , struct drbd_socket *sock , enum drbd_packet cmd , unsigned int header_size , void *data , unsigned int size ) { int tmp ; { tmp = __send_command(tconn, 0, sock, cmd, header_size, data, size); return (tmp); } } int conn_send_command(struct drbd_tconn *tconn , struct drbd_socket *sock , enum drbd_packet cmd , unsigned int header_size , void *data , unsigned int size ) { int err ; { err = __conn_send_command(tconn, sock, cmd, header_size, data, size); ldv_mutex_unlock_206(& sock->mutex); return (err); } } int drbd_send_command(struct drbd_conf *mdev , struct drbd_socket *sock , enum drbd_packet cmd , unsigned int header_size , void *data , unsigned int size ) { int err ; { err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size, data, size); ldv_mutex_unlock_207(& sock->mutex); return (err); } } int drbd_send_ping(struct drbd_tconn *tconn ) { struct drbd_socket *sock ; void *tmp ; int tmp___0 ; { sock = & tconn->meta; tmp = conn_prepare_command(tconn, sock); if ((unsigned long )tmp == (unsigned long )((void *)0)) { return (-5); } else { } tmp___0 = conn_send_command(tconn, sock, P_PING, 0U, 0, 0U); return (tmp___0); } } int drbd_send_ping_ack(struct drbd_tconn *tconn ) { struct drbd_socket *sock ; void *tmp ; int tmp___0 ; { sock = & tconn->meta; tmp = conn_prepare_command(tconn, sock); if ((unsigned long )tmp == (unsigned long )((void *)0)) { return (-5); } else { } tmp___0 = conn_send_command(tconn, sock, P_PING_ACK, 0U, 0, 0U); return (tmp___0); } } int drbd_send_sync_param(struct drbd_conf *mdev ) { struct drbd_socket *sock ; struct p_rs_param_95 *p ; int size ; int apv ; enum drbd_packet cmd ; struct net_conf *nc ; struct disk_conf *dc ; void *tmp ; struct net_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; size_t tmp___2 ; int tmp___3 ; struct disk_conf *_________p1___0 ; bool __warned___0 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; { apv = (mdev->tconn)->agreed_pro_version; sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_rs_param_95 *)tmp; if ((unsigned long )p == (unsigned long )((struct p_rs_param_95 *)0)) { return (-5); } else { } rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
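/* this tmp___0/__warned test is the inlined expansion of
 * rcu_dereference_check(): if lockdep is enabled and rcu_read_lock is not
 * held, lockdep_rcu_suspicious() fires once per call site */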
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 820, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if (apv > 87) { if (apv == 88) { tmp___2 = strlen((char const *)(& nc->verify_alg)); tmp___3 = (int )((unsigned int )tmp___2 + 5U); } else { tmp___3 = apv <= 94 ? 132 : 148; } size = tmp___3; } else { size = 4; } cmd = apv > 88 ? P_SYNC_PARAM89 : P_SYNC_PARAM; memset((void *)(& p->verify_alg), 0, 128UL); tmp___6 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___6 != 0) { _________p1___0 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___4 = debug_lockdep_rcu_enabled(); if (tmp___4 != 0 && ! __warned___0) { tmp___5 = rcu_read_lock_held(); if (tmp___5 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 834, "suspicious rcu_dereference_check() usage"); } else { } } else { } dc = _________p1___0; p->resync_rate = __fswab32(dc->resync_rate); p->c_plan_ahead = __fswab32(dc->c_plan_ahead); p->c_delay_target = __fswab32(dc->c_delay_target); p->c_fill_target = __fswab32(dc->c_fill_target); p->c_max_rate = __fswab32(dc->c_max_rate); put_ldev(mdev); } else { p->resync_rate = 4194304000U; p->c_plan_ahead = 335544320U; p->c_delay_target = 167772160U; p->c_fill_target = 1677721600U; p->c_max_rate = 9437440U; } if (apv > 87) { strcpy((char *)(& p->verify_alg), (char const *)(& nc->verify_alg)); } else { } if (apv > 88) { strcpy((char *)(& p->csums_alg), (char const *)(& nc->csums_alg)); } else { } rcu_read_unlock___5(); tmp___7 = drbd_send_command(mdev, sock, cmd, (unsigned int )size, 0, 0U); return (tmp___7); } } int __drbd_send_protocol(struct drbd_tconn *tconn , enum drbd_packet cmd ) { struct drbd_socket *sock ; struct p_protocol *p ; struct net_conf *nc ; int size ; int cf ; void *tmp ; struct net_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; size_t tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; int tmp___5 ; { sock = & tconn->data; tmp = __conn_prepare_command(tconn, sock); p = (struct p_protocol *)tmp; if ((unsigned long )p == (unsigned long )((struct p_protocol *)0)) { return (-5); } else { } rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 871, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((int )((signed char )nc->tentative) != 0 && tconn->agreed_pro_version <= 91) { rcu_read_unlock___5(); ldv_mutex_unlock_208(& sock->mutex); printk("\vd-con %s: --dry-run is not supported by peer", tconn->name); return (-95); } else { } size = 24; if (tconn->agreed_pro_version > 86) { tmp___2 = strlen((char const *)(& nc->integrity_alg)); size = (int )(((unsigned int )tmp___2 + (unsigned int )size) + 1U); } else { } p->protocol = __fswab32(nc->wire_protocol); p->after_sb_0p = __fswab32(nc->after_sb_0p); p->after_sb_1p = __fswab32(nc->after_sb_1p); p->after_sb_2p = __fswab32(nc->after_sb_2p); tmp___3 = __fswab32((__u32 )nc->two_primaries); p->two_primaries = tmp___3; cf = 0; if ((int )((signed char )nc->discard_my_data) != 0) { cf = cf | 1; } else { } if ((int )((signed char )nc->tentative) != 0) { cf = cf | 2; } else { } tmp___4 = __fswab32((__u32 )cf); p->conn_flags = tmp___4; if (tconn->agreed_pro_version > 86) { strcpy((char *)(& p->integrity_alg), (char const *)(& nc->integrity_alg)); } else { } rcu_read_unlock___5(); tmp___5 = __conn_send_command(tconn, sock, cmd, (unsigned int )size, 0, 0U); return (tmp___5); } } int drbd_send_protocol(struct drbd_tconn *tconn ) { int err ; { ldv_mutex_lock_209(& tconn->data.mutex); err = __drbd_send_protocol(tconn, P_PROTOCOL); ldv_mutex_unlock_210(& tconn->data.mutex); return (err); } } int _drbd_send_uuids(struct drbd_conf *mdev , u64 uuid_flags ) { struct drbd_socket *sock ; struct p_uuids *p ; int i ; int tmp ; void *tmp___0 ; __u64 tmp___1 ; __u64 tmp___2 ; struct net_conf *_________p1 ; bool __warned ; int tmp___3 ; int tmp___4 ; int tmp___5 ; __u64 tmp___6 ; int tmp___7 ; { tmp = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp == 0) { return (0); } else { } sock = & (mdev->tconn)->data; tmp___0 = drbd_prepare_command(mdev, sock); p = (struct p_uuids *)tmp___0; if ((unsigned long )p == (unsigned long )((struct p_uuids *)0)) { put_ldev(mdev); return (-5); } else { } spin_lock_irq(& (mdev->ldev)->md.uuid_lock); i = 0; goto ldv_52755; ldv_52754: ; if ((unsigned long )mdev->ldev != (unsigned long )((struct drbd_backing_dev *)0)) { tmp___1 = __fswab64((mdev->ldev)->md.uuid[i]); p->uuid[i] = tmp___1; } else { p->uuid[i] = 0ULL; } i = i + 1; ldv_52755: ; if (i <= 3) { goto ldv_52754; } else { } spin_unlock_irq(& (mdev->ldev)->md.uuid_lock); mdev->comm_bm_set = drbd_bm_total_weight(mdev); tmp___2 = __fswab64((__u64 )mdev->comm_bm_set); p->uuid[4] = tmp___2; rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned) { tmp___4 = rcu_read_lock_held(); if (tmp___4 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 937, "suspicious rcu_dereference_check() usage"); } else { } } else { } uuid_flags = ((int )((signed char )_________p1->discard_my_data) != 0 ? 
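/* uuid_flags as in the upstream driver: 1 = discard-my-data,
 * 2 = crashed primary (flag bit 5 tested below), 4 = disk inconsistent,
 * 8 = skip initial sync */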
1ULL : 0ULL) | uuid_flags; rcu_read_unlock___5(); tmp___5 = constant_test_bit(5U, (unsigned long const volatile *)(& mdev->flags)); uuid_flags = (tmp___5 != 0 ? 2ULL : 0ULL) | uuid_flags; uuid_flags = ((unsigned int )*((unsigned char *)mdev + 745UL) == 8U ? 4ULL : 0ULL) | uuid_flags; tmp___6 = __fswab64(uuid_flags); p->uuid[5] = tmp___6; put_ldev(mdev); tmp___7 = drbd_send_command(mdev, sock, P_UUIDS, 48U, 0, 0U); return (tmp___7); } } int drbd_send_uuids(struct drbd_conf *mdev ) { int tmp ; { tmp = _drbd_send_uuids(mdev, 0ULL); return (tmp); } } int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev ) { int tmp ; { tmp = _drbd_send_uuids(mdev, 8ULL); return (tmp); } } void drbd_print_uuids(struct drbd_conf *mdev , char const *text ) { u64 *uuid ; int tmp ; { tmp = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp != 0) { uuid = (u64 *)(& (mdev->ldev)->md.uuid); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s %016llX:%016llX:%016llX:%016llX\n", text, *uuid, *(uuid + 1UL), *(uuid + 2UL), *(uuid + 3UL)); put_ldev(mdev); } else { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s effective data uuid: %016llX\n", text, mdev->ed_uuid); } return; } } void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev ) { struct drbd_socket *sock ; struct p_rs_uuid *p ; u64 uuid ; void *tmp ; __u64 tmp___0 ; { if ((unsigned int )*((unsigned char *)mdev + 749UL) != 16U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->state.disk == D_UP_TO_DATE ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 981); } else { } uuid = (mdev->ldev)->md.uuid[1]; if (uuid != 0ULL && uuid != 4ULL) { uuid = uuid + 281474976710656ULL; } else { get_random_bytes((void *)(& uuid), 8); } drbd_uuid_set(mdev, 1, uuid); drbd_print_uuids(mdev, "updated sync UUID"); drbd_md_sync(mdev); sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_rs_uuid *)tmp; if ((unsigned long )p != (unsigned long )((struct p_rs_uuid *)0)) { tmp___0 = __fswab64(uuid); p->uuid = tmp___0; drbd_send_command(mdev, sock, P_SYNC_UUID, 8U, 0, 0U); } else { } return; } } int drbd_send_sizes(struct drbd_conf *mdev , int trigger_reply , enum dds_flags flags ) { struct drbd_socket *sock ; struct p_sizes *p ; sector_t d_size ; sector_t u_size ; int q_order_type ; unsigned int max_bio_size ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; unsigned int tmp___1 ; unsigned int _min1 ; unsigned int _min2 ; int tmp___2 ; void *tmp___3 ; unsigned int _min1___0 ; unsigned int _min2___0 ; unsigned int _min1___1 ; unsigned int _min2___1 ; __u64 tmp___4 ; __u64 tmp___5 ; sector_t tmp___6 ; __u64 tmp___7 ; __u64 tmp___8 ; __u32 tmp___9 ; __u16 tmp___10 ; __u16 tmp___11 ; int tmp___12 ; { tmp___2 = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp___2 != 0) { if ((unsigned long )(mdev->ldev)->backing_bdev == (unsigned long )((struct block_device *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->ldev->backing_bdev ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 1009); } else { } d_size = drbd_get_max_capacity___0(mdev->ldev); rcu_read_lock___5(); _________p1 = *((struct disk_conf * 
volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 1012, "suspicious rcu_dereference_check() usage"); } else { } } else { } u_size = (sector_t )_________p1->disk_size; rcu_read_unlock___5(); q_order_type = drbd_queue_order_type(mdev); tmp___1 = queue_max_hw_sectors((((mdev->ldev)->backing_bdev)->bd_disk)->queue); max_bio_size = tmp___1 << 9; _min1 = max_bio_size; _min2 = 1048576U; max_bio_size = _min1 < _min2 ? _min1 : _min2; put_ldev(mdev); } else { d_size = 0UL; u_size = 0UL; q_order_type = 0; max_bio_size = 1048576U; } sock = & (mdev->tconn)->data; tmp___3 = drbd_prepare_command(mdev, sock); p = (struct p_sizes *)tmp___3; if ((unsigned long )p == (unsigned long )((struct p_sizes *)0)) { return (-5); } else { } if ((mdev->tconn)->agreed_pro_version <= 94) { _min1___0 = max_bio_size; _min2___0 = 32768U; max_bio_size = _min1___0 < _min2___0 ? _min1___0 : _min2___0; } else if ((mdev->tconn)->agreed_pro_version <= 99) { _min1___1 = max_bio_size; _min2___1 = 131072U; max_bio_size = _min1___1 < _min2___1 ? _min1___1 : _min2___1; } else { } tmp___4 = __fswab64((__u64 )d_size); p->d_size = tmp___4; tmp___5 = __fswab64((__u64 )u_size); p->u_size = tmp___5; if (trigger_reply == 0) { tmp___6 = drbd_get_capacity(mdev->this_bdev); tmp___7 = (__u64 )tmp___6; } else { tmp___7 = 0ULL; } tmp___8 = __fswab64(tmp___7); p->c_size = tmp___8; tmp___9 = __fswab32(max_bio_size); p->max_bio_size = tmp___9; tmp___10 = __fswab16((int )((__u16 )q_order_type)); p->queue_order_type = tmp___10; tmp___11 = __fswab16((int )((__u16 )flags)); p->dds_flags = tmp___11; tmp___12 = drbd_send_command(mdev, sock, P_SIZES, 32U, 0, 0U); return (tmp___12); } } int drbd_send_current_state(struct drbd_conf *mdev ) { struct drbd_socket *sock ; struct p_state *p ; void *tmp ; __u32 tmp___0 ; int tmp___1 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_state *)tmp; if ((unsigned long )p == (unsigned long )((struct p_state *)0)) { return (-5); } else { } tmp___0 = __fswab32(mdev->state.i); p->state = tmp___0; tmp___1 = drbd_send_command(mdev, sock, P_STATE, 4U, 0, 0U); return (tmp___1); } } int drbd_send_state(struct drbd_conf *mdev , union drbd_state state ) { struct drbd_socket *sock ; struct p_state *p ; void *tmp ; __u32 tmp___0 ; int tmp___1 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_state *)tmp; if ((unsigned long )p == (unsigned long )((struct p_state *)0)) { return (-5); } else { } tmp___0 = __fswab32(state.i); p->state = tmp___0; tmp___1 = drbd_send_command(mdev, sock, P_STATE, 4U, 0, 0U); return (tmp___1); } } int drbd_send_state_req(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val ) { struct drbd_socket *sock ; struct p_req_state *p ; void *tmp ; __u32 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_req_state *)tmp; if ((unsigned long )p == (unsigned long )((struct p_req_state *)0)) { return (-5); } else { } tmp___0 = __fswab32(mask.i); p->mask = tmp___0; tmp___1 = __fswab32(val.i); p->val = tmp___1; tmp___2 = drbd_send_command(mdev, sock, P_STATE_CHG_REQ, 8U, 0, 0U); return (tmp___2); } } int 
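/* conn_send_state_req(): connection-wide state change request; peers at
 * protocol 100 or newer get the dedicated P_CONN_ST_CHG_REQ packet, older
 * peers the per-volume P_STATE_CHG_REQ */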
conn_send_state_req(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val ) { enum drbd_packet cmd ; struct drbd_socket *sock ; struct p_req_state *p ; void *tmp ; __u32 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { cmd = tconn->agreed_pro_version <= 99 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ; sock = & tconn->data; tmp = conn_prepare_command(tconn, sock); p = (struct p_req_state *)tmp; if ((unsigned long )p == (unsigned long )((struct p_req_state *)0)) { return (-5); } else { } tmp___0 = __fswab32(mask.i); p->mask = tmp___0; tmp___1 = __fswab32(val.i); p->val = tmp___1; tmp___2 = conn_send_command(tconn, sock, cmd, 8U, 0, 0U); return (tmp___2); } } void drbd_send_sr_reply(struct drbd_conf *mdev , enum drbd_state_rv retcode ) { struct drbd_socket *sock ; struct p_req_state_reply *p ; void *tmp ; __u32 tmp___0 ; { sock = & (mdev->tconn)->meta; tmp = drbd_prepare_command(mdev, sock); p = (struct p_req_state_reply *)tmp; if ((unsigned long )p != (unsigned long )((struct p_req_state_reply *)0)) { tmp___0 = __fswab32((__u32 )retcode); p->retcode = tmp___0; drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, 4U, 0, 0U); } else { } return; } } void conn_send_sr_reply(struct drbd_tconn *tconn , enum drbd_state_rv retcode ) { struct drbd_socket *sock ; struct p_req_state_reply *p ; enum drbd_packet cmd ; void *tmp ; __u32 tmp___0 ; { cmd = tconn->agreed_pro_version <= 99 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY; sock = & tconn->meta; tmp = conn_prepare_command(tconn, sock); p = (struct p_req_state_reply *)tmp; if ((unsigned long )p != (unsigned long )((struct p_req_state_reply *)0)) { tmp___0 = __fswab32((__u32 )retcode); p->retcode = tmp___0; conn_send_command(tconn, sock, cmd, 4U, 0, 0U); } else { } return; } } static void dcbp_set_code(struct p_compressed_bm *p , enum drbd_bitmap_code code ) { long tmp ; { tmp = ldv__builtin_expect(((unsigned int )code & 4294967280U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared"), "i" (1143), "i" (12UL)); ldv_52843: ; goto ldv_52843; } else { } p->encoding = ((unsigned int )p->encoding & 240U) | (unsigned int )((u8 )code); return; } } static void dcbp_set_start(struct p_compressed_bm *p , int set ) { { p->encoding = (u8 )(((int )((signed char )p->encoding) & 127) | (set != 0 ? 
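/* -128 (0x80) sets the high bit of the encoding byte, recording the
 * initial toggle state of the RLE stream */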
-128 : 0)); return; } } static void dcbp_set_pad_bits(struct p_compressed_bm *p , int n ) { long tmp ; { tmp = ldv__builtin_expect((n & -8) != 0, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared"), "i" (1154), "i" (12UL)); ldv_52852: ; goto ldv_52852; } else { } p->encoding = (u8 )(((int )((signed char )p->encoding) & -128) | (int )((signed char )(n << 4))); return; } } int fill_bitmap_rle_bits(struct drbd_conf *mdev , struct p_compressed_bm *p , unsigned int size , struct bm_xfer_ctx *c ) { struct bitstream bs ; unsigned long plain_bits ; unsigned long tmp ; unsigned long rl ; unsigned int len ; unsigned int toggle ; int bits ; int use_rle ; struct net_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; unsigned long tmp___2 ; unsigned long tmp___3 ; { rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 1173, "suspicious rcu_dereference_check() usage"); } else { } } else { } use_rle = (int )_________p1->use_rle; rcu_read_unlock___5(); if (use_rle == 0 || (mdev->tconn)->agreed_pro_version <= 89) { return (0); } else { } if (c->bit_offset >= c->bm_bits) { return (0); } else { } bitstream_init(& bs, (void *)(& p->code), (size_t )size, 0U); memset((void *)(& p->code), 0, (size_t )size); plain_bits = 0UL; toggle = 2U; ldv_52872: ; if (toggle == 0U) { tmp___2 = _drbd_bm_find_next_zero(mdev, c->bit_offset); tmp = tmp___2; } else { tmp___3 = _drbd_bm_find_next(mdev, c->bit_offset); tmp = tmp___3; } if (tmp == 0xffffffffffffffffUL) { tmp = c->bm_bits; } else { } rl = tmp - c->bit_offset; if (toggle == 2U) { if (rl == 0UL) { dcbp_set_start(p, 1); toggle = toggle == 0U; goto ldv_52870; } else { } dcbp_set_start(p, 0); } else { } if (rl == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "unexpected zero runlength while encoding bitmap t:%u bo:%lu\n", toggle, c->bit_offset); return (-1); } else { } bits = vli_encode_bits(& bs, (u64 )rl); if (bits == -105) { goto ldv_52871; } else { } if (bits <= 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "error while encoding bitmap: %d\n", bits); return (0); } else { } toggle = toggle == 0U; plain_bits = plain_bits + rl; c->bit_offset = tmp; ldv_52870: ; if (c->bit_offset < c->bm_bits) { goto ldv_52872; } else { } ldv_52871: len = ((unsigned int )((long )bs.cur.b) - (unsigned int )((long )(& p->code))) + (unsigned int )(bs.cur.bit != 0U); if ((unsigned long )(len << 3) > plain_bits) { c->bit_offset = c->bit_offset - plain_bits; bm_xfer_ctx_bit_to_word_offset(c); c->bit_offset = c->word_offset * 64UL; return (0); } else { } bm_xfer_ctx_bit_to_word_offset(c); dcbp_set_pad_bits(p, (int )(- bs.cur.bit) & 7); return ((int )len); } } static int send_bitmap_rle_or_plain(struct drbd_conf *mdev , struct bm_xfer_ctx *c ) { struct drbd_socket *sock ; unsigned int header_size ; unsigned int tmp ; struct 
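/* send_bitmap_rle_or_plain(): try the RLE-compressed bitmap encoding
 * first; when fill_bitmap_rle_bits() returns 0 (compression disabled or
 * not worthwhile) fall back to a plain chunk of bitmap words */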
p_compressed_bm *p ; int len ; int err ; unsigned int data_size ; unsigned long num_words ; unsigned long *p___0 ; size_t __min1 ; size_t __min2 ; { sock = & (mdev->tconn)->data; tmp = drbd_header_size(mdev->tconn); header_size = tmp; p = (struct p_compressed_bm *)sock->sbuf + (unsigned long )header_size; len = fill_bitmap_rle_bits(mdev, p, 4095U - header_size, c); if (len < 0) { return (-5); } else { } if (len != 0) { dcbp_set_code(p, RLE_VLI_Bits); err = __send_command(mdev->tconn, mdev->vnr, sock, P_COMPRESSED_BITMAP, (unsigned int )len + 1U, 0, 0U); c->packets[0] = c->packets[0] + 1U; c->bytes[0] = (c->bytes[0] + (header_size + (unsigned int )len)) + 1U; if (c->bit_offset >= c->bm_bits) { len = 0; } else { } } else { p___0 = (unsigned long *)sock->sbuf + (unsigned long )header_size; data_size = 4096U - header_size; __min1 = (unsigned long )(data_size / 8U); __min2 = c->bm_words - c->word_offset; num_words = __min1 < __min2 ? __min1 : __min2; len = (int )((unsigned int )num_words * 8U); if (len != 0) { drbd_bm_get_lel(mdev, c->word_offset, num_words, p___0); } else { } err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, (unsigned int )len, 0, 0U); c->word_offset = c->word_offset + num_words; c->bit_offset = c->word_offset * 64UL; c->packets[1] = c->packets[1] + 1U; c->bytes[1] = c->bytes[1] + (header_size + (unsigned int )len); if (c->bit_offset > c->bm_bits) { c->bit_offset = c->bm_bits; } else { } } if (err == 0) { if (len == 0) { INFO_bm_xfer_stats(mdev, "send", c); return (0); } else { return (1); } } else { } return (-5); } } static int _drbd_send_bitmap(struct drbd_conf *mdev ) { struct bm_xfer_ctx c ; int err ; bool _bool ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; struct bm_xfer_ctx __constr_expr_0 ; unsigned long tmp___3 ; size_t tmp___4 ; { _bool = (unsigned long )mdev->bitmap != (unsigned long )((struct drbd_bitmap *)0); if (! 
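/* expansion of the driver's expect(mdev->bitmap) sanity check */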
_bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"mdev->bitmap", "_drbd_send_bitmap"); } else { } if (_bool) { tmp = 0; } else { tmp = 1; } if (tmp) { return (0); } else { } tmp___2 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___2 != 0) { tmp___1 = drbd_md_test_flag(mdev->ldev, 8); if (tmp___1 != 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Writing the whole bitmap, MDF_FullSync was set.\n"); drbd_bm_set_all(mdev); tmp___0 = drbd_bm_write(mdev); if (tmp___0 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Failed to write bitmap to disk!\n"); } else { drbd_md_clear_flag(mdev, 8); drbd_md_sync(mdev); } } else { } put_ldev(mdev); } else { } tmp___3 = drbd_bm_bits(mdev); tmp___4 = drbd_bm_words(mdev); __constr_expr_0.bm_bits = tmp___3; __constr_expr_0.bm_words = tmp___4; __constr_expr_0.bit_offset = 0UL; __constr_expr_0.word_offset = 0UL; __constr_expr_0.packets[0] = 0U; __constr_expr_0.packets[1] = 0U; __constr_expr_0.bytes[0] = 0U; __constr_expr_0.bytes[1] = 0U; c = __constr_expr_0; ldv_52897: err = send_bitmap_rle_or_plain(mdev, & c); if (err > 0) { goto ldv_52897; } else { } return (err == 0); } } int drbd_send_bitmap(struct drbd_conf *mdev ) { struct drbd_socket *sock ; int err ; int tmp ; { sock = & (mdev->tconn)->data; err = -1; ldv_mutex_lock_211(& sock->mutex); if ((unsigned long )sock->socket != (unsigned long )((struct socket *)0)) { tmp = _drbd_send_bitmap(mdev); err = tmp == 0; } else { } ldv_mutex_unlock_212(& sock->mutex); return (err); } } void drbd_send_b_ack(struct drbd_tconn *tconn , u32 barrier_nr , u32 set_size ) { struct drbd_socket *sock ; struct p_barrier_ack *p ; void *tmp ; __u32 tmp___0 ; { if ((unsigned int )tconn->cstate <= 8U) { return; } else { } sock = & tconn->meta; tmp = conn_prepare_command(tconn, sock); p = (struct p_barrier_ack *)tmp; if ((unsigned long )p == (unsigned long )((struct p_barrier_ack *)0)) { return; } else { } p->barrier = barrier_nr; tmp___0 = __fswab32(set_size); p->set_size = tmp___0; conn_send_command(tconn, sock, P_BARRIER_ACK, 8U, 0, 0U); return; } } static int _drbd_send_ack(struct drbd_conf *mdev , enum drbd_packet cmd , u64 sector , u32 blksize , u64 block_id ) { struct drbd_socket *sock ; struct p_block_ack *p ; void *tmp ; int tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { if ((int )mdev->state.ldv_49522.conn <= 9) { return (-5); } else { } sock = & (mdev->tconn)->meta; tmp = drbd_prepare_command(mdev, sock); p = (struct p_block_ack *)tmp; if ((unsigned long )p == (unsigned long )((struct p_block_ack *)0)) { return (-5); } else { } p->sector = sector; p->block_id = block_id; p->blksize = blksize; tmp___0 = atomic_add_return(1, & mdev->packet_seq); tmp___1 = __fswab32((__u32 )tmp___0); p->seq_num = tmp___1; tmp___2 = drbd_send_command(mdev, sock, cmd, 24U, 0, 0U); return (tmp___2); } } void drbd_send_ack_dp(struct drbd_conf *mdev , enum drbd_packet cmd , struct p_data *dp , int data_size ) { unsigned int tmp ; __u32 tmp___0 ; { if ((unsigned long )(mdev->tconn)->peer_integrity_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp = crypto_hash_digestsize((mdev->tconn)->peer_integrity_tfm); data_size = (int )((unsigned int )data_size - tmp); } else { } tmp___0 = __fswab32((__u32 )data_size); _drbd_send_ack(mdev, cmd, dp->sector, tmp___0, dp->block_id); return; } } void drbd_send_ack_rp(struct drbd_conf *mdev , enum drbd_packet cmd , struct p_block_req *rp ) { { _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, 
rp->block_id); return; } } int drbd_send_ack(struct drbd_conf *mdev , enum drbd_packet cmd , struct drbd_peer_request *peer_req ) { __u32 tmp ; __u64 tmp___0 ; int tmp___1 ; { tmp = __fswab32(peer_req->i.size); tmp___0 = __fswab64((__u64 )peer_req->i.sector); tmp___1 = _drbd_send_ack(mdev, cmd, tmp___0, tmp, peer_req->ldv_50726.block_id); return (tmp___1); } } int drbd_send_ack_ex(struct drbd_conf *mdev , enum drbd_packet cmd , sector_t sector , int blksize , u64 block_id ) { __u64 tmp ; __u32 tmp___0 ; __u64 tmp___1 ; int tmp___2 ; { tmp = __fswab64(block_id); tmp___0 = __fswab32((__u32 )blksize); tmp___1 = __fswab64((__u64 )sector); tmp___2 = _drbd_send_ack(mdev, cmd, tmp___1, tmp___0, tmp); return (tmp___2); } } int drbd_send_drequest(struct drbd_conf *mdev , int cmd , sector_t sector , int size , u64 block_id ) { struct drbd_socket *sock ; struct p_block_req *p ; void *tmp ; __u64 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_block_req *)tmp; if ((unsigned long )p == (unsigned long )((struct p_block_req *)0)) { return (-5); } else { } tmp___0 = __fswab64((__u64 )sector); p->sector = tmp___0; p->block_id = block_id; tmp___1 = __fswab32((__u32 )size); p->blksize = tmp___1; tmp___2 = drbd_send_command(mdev, sock, (enum drbd_packet )cmd, 24U, 0, 0U); return (tmp___2); } } int drbd_send_drequest_csum(struct drbd_conf *mdev , sector_t sector , int size , void *digest , int digest_size , enum drbd_packet cmd ) { struct drbd_socket *sock ; struct p_block_req *p ; void *tmp ; __u64 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_block_req *)tmp; if ((unsigned long )p == (unsigned long )((struct p_block_req *)0)) { return (-5); } else { } tmp___0 = __fswab64((__u64 )sector); p->sector = tmp___0; p->block_id = 0xffffffffffffffffULL; tmp___1 = __fswab32((__u32 )size); p->blksize = tmp___1; tmp___2 = drbd_send_command(mdev, sock, cmd, 24U, digest, (unsigned int )digest_size); return (tmp___2); } } int drbd_send_ov_request(struct drbd_conf *mdev , sector_t sector , int size ) { struct drbd_socket *sock ; struct p_block_req *p ; void *tmp ; __u64 tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_block_req *)tmp; if ((unsigned long )p == (unsigned long )((struct p_block_req *)0)) { return (-5); } else { } tmp___0 = __fswab64((__u64 )sector); p->sector = tmp___0; p->block_id = 0xffffffffffffffffULL; tmp___1 = __fswab32((__u32 )size); p->blksize = tmp___1; tmp___2 = drbd_send_command(mdev, sock, P_OV_REQUEST, 24U, 0, 0U); return (tmp___2); } } static int we_should_drop_the_connection(struct drbd_tconn *tconn , struct socket *sock ) { int drop_it ; enum drbd_thread_state tmp ; int tmp___0 ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; { if ((unsigned long )tconn->meta.socket == (unsigned long )sock || (unsigned long )tconn->asender.task == (unsigned long )((struct task_struct *)0)) { tmp___0 = 1; } else { tmp = get_t_state(& tconn->asender); if ((unsigned int )tmp != 1U) { tmp___0 = 1; } else if ((unsigned int )tconn->cstate <= 8U) { tmp___0 = 1; } else { tmp___0 = 0; } } drop_it = tmp___0; if (drop_it != 0) { return (1); } else { } tconn->ko_count = tconn->ko_count - 1U; drop_it = tconn->ko_count == 0U; if (drop_it == 0) { tmp___1 = get_current(); tmp___2 = get_current(); printk("\vd-con %s: [%s/%d] sock_sendmsg time expired, ko = %u\n", tconn->name, (char 
*)(& tmp___2->comm), tmp___1->pid, tconn->ko_count); request_ping(tconn); } else { } return (drop_it); } } static void drbd_update_congested(struct drbd_tconn *tconn ) { struct sock *sk ; { sk = (tconn->data.socket)->sk; if (sk->sk_wmem_queued > (sk->sk_sndbuf * 4) / 5) { set_bit(0U, (unsigned long volatile *)(& tconn->flags)); } else { } return; } } static int _drbd_no_send_page(struct drbd_conf *mdev , struct page *page , int offset , size_t size , unsigned int msg_flags ) { struct socket *socket ; void *addr ; int err ; void *tmp ; { socket = (mdev->tconn)->data.socket; tmp = kmap(page); addr = tmp + (unsigned long )offset; err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags); kunmap(page); if (err == 0) { mdev->send_cnt = mdev->send_cnt + (unsigned int )(size >> 9); } else { } return (err); } } static int _drbd_send_page(struct drbd_conf *mdev , struct page *page , int offset , size_t size , unsigned int msg_flags ) { struct socket *socket ; mm_segment_t oldfs ; struct thread_info *tmp ; int len ; int err ; int tmp___0 ; int tmp___1 ; int tmp___2 ; struct thread_info *tmp___3 ; mm_segment_t __constr_expr_0 ; int sent ; ssize_t tmp___4 ; int tmp___5 ; struct thread_info *tmp___6 ; { socket = (mdev->tconn)->data.socket; tmp = current_thread_info___5(); oldfs = tmp->addr_limit; len = (int )size; err = -5; if ((int )disable_sendpage) { tmp___0 = _drbd_no_send_page(mdev, page, offset, size, msg_flags); return (tmp___0); } else { tmp___1 = page_count(page); if (tmp___1 <= 0) { tmp___0 = _drbd_no_send_page(mdev, page, offset, size, msg_flags); return (tmp___0); } else { tmp___2 = PageSlab((struct page const *)page); if (tmp___2 != 0) { tmp___0 = _drbd_no_send_page(mdev, page, offset, size, msg_flags); return (tmp___0); } else { } } } msg_flags = msg_flags | 16384U; drbd_update_congested(mdev->tconn); tmp___3 = current_thread_info___5(); __constr_expr_0.seg = 0xffffffffffffffffUL; tmp___3->addr_limit = __constr_expr_0; ldv_53004: tmp___4 = (*((socket->ops)->sendpage))(socket, page, offset, (size_t )len, (int )msg_flags); sent = (int )tmp___4; if (sent <= 0) { if (sent == -11) { tmp___5 = we_should_drop_the_connection(mdev->tconn, socket); if (tmp___5 != 0) { goto ldv_53001; } else { } goto ldv_53002; } else { } dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s: size=%d len=%d sent=%d\n", "_drbd_send_page", (int )size, len, sent); if (sent < 0) { err = sent; } else { } goto ldv_53001; } else { } len = len - sent; offset = offset + sent; ldv_53002: ; if (len > 0) { goto ldv_53004; } else { } ldv_53001: tmp___6 = current_thread_info___5(); tmp___6->addr_limit = oldfs; clear_bit(0, (unsigned long volatile *)(& (mdev->tconn)->flags)); if (len == 0) { err = 0; mdev->send_cnt = mdev->send_cnt + (unsigned int )(size >> 9); } else { } return (err); } } static int _drbd_send_bio(struct drbd_conf *mdev , struct bio *bio ) { struct bio_vec *bvec ; int i ; int err ; { bvec = bio->bi_io_vec + (unsigned long )bio->bi_idx; i = (int )bio->bi_idx; goto ldv_53013; ldv_53012: err = _drbd_no_send_page(mdev, bvec->bv_page, (int )bvec->bv_offset, (size_t )bvec->bv_len, (int )bio->bi_vcnt + -1 == i ? 
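/* 32768U is MSG_MORE: the last bio_vec sends without it, every earlier
 * one keeps it set */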
0U : 32768U); if (err != 0) { return (err); } else { } bvec = bvec + 1; i = i + 1; ldv_53013: ; if ((int )bio->bi_vcnt > i) { goto ldv_53012; } else { } return (0); } } static int _drbd_send_zc_bio(struct drbd_conf *mdev , struct bio *bio ) { struct bio_vec *bvec ; int i ; int err ; { bvec = bio->bi_io_vec + (unsigned long )bio->bi_idx; i = (int )bio->bi_idx; goto ldv_53023; ldv_53022: err = _drbd_send_page(mdev, bvec->bv_page, (int )bvec->bv_offset, (size_t )bvec->bv_len, (int )bio->bi_vcnt + -1 == i ? 0U : 32768U); if (err != 0) { return (err); } else { } bvec = bvec + 1; i = i + 1; ldv_53023: ; if ((int )bio->bi_vcnt > i) { goto ldv_53022; } else { } return (0); } } static int _drbd_send_zc_ee(struct drbd_conf *mdev , struct drbd_peer_request *peer_req ) { struct page *page ; unsigned int len ; int err ; unsigned int l ; unsigned int __min1 ; unsigned int __min2 ; struct page *tmp ; struct page *tmp___0 ; { page = peer_req->pages; len = peer_req->i.size; goto ldv_53038; ldv_53037: __min1 = len; __min2 = 4096U; l = __min1 < __min2 ? __min1 : __min2; tmp = page_chain_next(page); err = _drbd_send_page(mdev, page, 0, (size_t )l, (unsigned long )tmp != (unsigned long )((struct page *)0) ? 32768U : 0U); if (err != 0) { return (err); } else { } len = len - l; page = page_chain_next(page); ldv_53038: ; if ((unsigned long )page != (unsigned long )((struct page *)0)) { tmp___0 = page_chain_next(page); __builtin_prefetch((void const *)tmp___0); if (1 != 0) { goto ldv_53037; } else { goto ldv_53039; } } else { } ldv_53039: ; return (0); } } static u32 bio_flags_to_wire(struct drbd_conf *mdev , unsigned long bi_rw ) { { if ((mdev->tconn)->agreed_pro_version > 94) { return ((u32 )(((((bi_rw & 16UL) != 0UL ? 2 : 0) | ((bi_rw & 2048UL) != 0UL ? 16 : 0)) | ((bi_rw & 4096UL) != 0UL ? 32 : 0)) | ((bi_rw & 128UL) != 0UL ? 64 : 0))); } else { return ((bi_rw & 16UL) != 0UL ? 
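/* peers older than protocol 95 only understand the sync hint, wire flag
 * 2 (DP_RW_SYNC in the driver sources) */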
2U : 0U); } } } int drbd_send_dblock(struct drbd_conf *mdev , struct drbd_request *req ) { struct drbd_socket *sock ; struct p_data *p ; unsigned int dp_flags ; int dgs ; int err ; void *tmp ; unsigned int tmp___0 ; __u64 tmp___1 ; int tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; unsigned char digest[64U] ; int tmp___5 ; { dp_flags = 0U; sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_data *)tmp; if ((unsigned long )(mdev->tconn)->integrity_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___0 = crypto_hash_digestsize((mdev->tconn)->integrity_tfm); dgs = (int )tmp___0; } else { dgs = 0; } if ((unsigned long )p == (unsigned long )((struct p_data *)0)) { return (-5); } else { } tmp___1 = __fswab64((__u64 )req->i.sector); p->sector = tmp___1; p->block_id = (u64 )req; tmp___2 = atomic_add_return(1, & mdev->packet_seq); tmp___3 = __fswab32((__u32 )tmp___2); p->seq_num = tmp___3; dp_flags = bio_flags_to_wire(mdev, (req->master_bio)->bi_rw); if ((int )mdev->state.ldv_49522.conn > 15 && (int )mdev->state.ldv_49522.conn <= 21) { dp_flags = dp_flags | 4U; } else { } if ((mdev->tconn)->agreed_pro_version > 99) { if (((unsigned long )req->rq_state & 32768UL) != 0UL) { dp_flags = dp_flags | 128U; } else { } if (((unsigned long )req->rq_state & 65536UL) != 0UL) { dp_flags = dp_flags | 256U; } else { } } else { } tmp___4 = __fswab32(dp_flags); p->dp_flags = tmp___4; if (dgs != 0) { drbd_csum_bio(mdev, (mdev->tconn)->integrity_tfm, req->master_bio, (void *)p + 1U); } else { } err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, (unsigned int )dgs + 24U, 0, req->i.size); if (err == 0) { if (((unsigned long )req->rq_state & 98304UL) == 0UL || dgs != 0) { err = _drbd_send_bio(mdev, req->master_bio); } else { err = _drbd_send_zc_bio(mdev, req->master_bio); } if (dgs > 0 && dgs <= 64) { drbd_csum_bio(mdev, (mdev->tconn)->integrity_tfm, req->master_bio, (void *)(& digest)); tmp___5 = memcmp((void const *)p + 1U, (void const *)(& digest), (size_t )dgs); if (tmp___5 != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", (unsigned long long )req->i.sector, req->i.size); } else { } } else { } } else { } ldv_mutex_unlock_213(& sock->mutex); return (err); } } int drbd_send_block(struct drbd_conf *mdev , enum drbd_packet cmd , struct drbd_peer_request *peer_req ) { struct drbd_socket *sock ; struct p_data *p ; int err ; int dgs ; void *tmp ; unsigned int tmp___0 ; __u64 tmp___1 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_data *)tmp; if ((unsigned long )(mdev->tconn)->integrity_tfm != (unsigned long )((struct crypto_hash *)0)) { tmp___0 = crypto_hash_digestsize((mdev->tconn)->integrity_tfm); dgs = (int )tmp___0; } else { dgs = 0; } if ((unsigned long )p == (unsigned long )((struct p_data *)0)) { return (-5); } else { } tmp___1 = __fswab64((__u64 )peer_req->i.sector); p->sector = tmp___1; p->block_id = peer_req->ldv_50726.block_id; p->seq_num = 0U; p->dp_flags = 0U; if (dgs != 0) { drbd_csum_ee(mdev, (mdev->tconn)->integrity_tfm, peer_req, (void *)p + 1U); } else { } err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, (unsigned int )dgs + 24U, 0, peer_req->i.size); if (err == 0) { err = _drbd_send_zc_ee(mdev, peer_req); } else { } ldv_mutex_unlock_214(& sock->mutex); return (err); } } int drbd_send_out_of_sync(struct drbd_conf *mdev , struct drbd_request *req ) { struct drbd_socket *sock ; struct p_block_desc *p ; void *tmp ; __u64 
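/* drbd_send_out_of_sync(): announce via P_OUT_OF_SYNC that this request's
 * sector range must be resynchronized later */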
tmp___0 ; __u32 tmp___1 ; int tmp___2 ; { sock = & (mdev->tconn)->data; tmp = drbd_prepare_command(mdev, sock); p = (struct p_block_desc *)tmp; if ((unsigned long )p == (unsigned long )((struct p_block_desc *)0)) { return (-5); } else { } tmp___0 = __fswab64((__u64 )req->i.sector); p->sector = tmp___0; tmp___1 = __fswab32(req->i.size); p->blksize = tmp___1; tmp___2 = drbd_send_command(mdev, sock, P_OUT_OF_SYNC, 16U, 0, 0U); return (tmp___2); } } int drbd_send(struct drbd_tconn *tconn , struct socket *sock , void *buf , size_t size , unsigned int msg_flags ) { struct kvec iov ; struct msghdr msg ; int rv ; int sent ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; struct task_struct *tmp___2 ; union drbd_state val ; union drbd_state mask ; union drbd_state val___0 ; union drbd_state mask___0 ; { sent = 0; if ((unsigned long )sock == (unsigned long )((struct socket *)0)) { return (-53); } else { } iov.iov_base = buf; iov.iov_len = size; msg.msg_name = 0; msg.msg_namelen = 0; msg.msg_control = 0; msg.msg_controllen = 0UL; msg.msg_flags = msg_flags | 16384U; if ((unsigned long )tconn->data.socket == (unsigned long )sock) { rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 1848, "suspicious rcu_dereference_check() usage"); } else { } } else { } tconn->ko_count = _________p1->ko_count; rcu_read_unlock___5(); drbd_update_congested(tconn); } else { } ldv_53085: rv = kernel_sendmsg(sock, & msg, & iov, 1UL, size); if (rv == -11) { tmp___1 = we_should_drop_the_connection(tconn, sock); if (tmp___1 != 0) { goto ldv_53083; } else { goto ldv_53084; } } else { } if (rv == -4) { tmp___2 = get_current(); flush_signals(tmp___2); rv = 0; } else { } if (rv < 0) { goto ldv_53083; } else { } sent = sent + rv; iov.iov_base = iov.iov_base + (unsigned long )rv; iov.iov_len = iov.iov_len - (size_t )rv; ldv_53084: ; if ((size_t )sent < size) { goto ldv_53085; } else { } ldv_53083: ; if ((unsigned long )tconn->data.socket == (unsigned long )sock) { clear_bit(0, (unsigned long volatile *)(& tconn->flags)); } else { } if (rv <= 0) { if (rv != -11) { printk("\vd-con %s: %s_sendmsg returned %d\n", tconn->name, (unsigned long )tconn->meta.socket == (unsigned long )sock ? 
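/* name the failing socket in the log: the meta socket reports as "msock",
 * the data socket as "sock"; a hard error then forces conn state 4
 * (C_BROKEN_PIPE), an interrupted send conn state 3 (C_TIMEOUT) */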
(char *)"msock" : (char *)"sock", rv); val.i = 0U; val.ldv_40024.conn = 4U; mask.i = 0U; mask.ldv_40024.conn = 31U; conn_request_state(tconn, mask, val, CS_HARD); } else { val___0.i = 0U; val___0.ldv_40024.conn = 3U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; conn_request_state(tconn, mask___0, val___0, CS_HARD); } } else { } return (sent); } } int drbd_send_all(struct drbd_tconn *tconn , struct socket *sock , void *buffer , size_t size , unsigned int msg_flags ) { int err ; { err = drbd_send(tconn, sock, buffer, size, msg_flags); if (err < 0) { return (err); } else { } if ((size_t )err != size) { return (-5); } else { } return (0); } } static int drbd_open(struct block_device *bdev , fmode_t mode ) { struct drbd_conf *mdev ; unsigned long flags ; int rv ; raw_spinlock_t *tmp ; { mdev = (struct drbd_conf *)(bdev->bd_disk)->private_data; rv = 0; ldv_mutex_lock_215(& drbd_main_mutex); tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); if ((unsigned int )*((unsigned char *)mdev + 748UL) != 1U) { if ((mode & 2U) != 0U) { rv = -30; } else if (! allow_oos) { rv = -124; } else { } } else { } if (rv == 0) { mdev->open_cnt = mdev->open_cnt + 1; } else { } spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); ldv_mutex_unlock_216(& drbd_main_mutex); return (rv); } } static int drbd_release(struct gendisk *gd , fmode_t mode ) { struct drbd_conf *mdev ; { mdev = (struct drbd_conf *)gd->private_data; ldv_mutex_lock_217(& drbd_main_mutex); mdev->open_cnt = mdev->open_cnt - 1; ldv_mutex_unlock_218(& drbd_main_mutex); return (0); } } static void drbd_set_defaults(struct drbd_conf *mdev ) { union drbd_dev_state __constr_expr_0 ; { __constr_expr_0.ldv_49522.role = 2U; __constr_expr_0.ldv_49522.peer = 0U; __constr_expr_0.ldv_49522.conn = 0U; __constr_expr_0.ldv_49522.disk = 0U; __constr_expr_0.ldv_49522.pdsk = 6U; __constr_expr_0.ldv_49522._unused = (unsigned char)0; __constr_expr_0.ldv_49522.aftr_isp = (unsigned char)0; __constr_expr_0.ldv_49522.peer_isp = (unsigned char)0; __constr_expr_0.ldv_49522.user_isp = (unsigned char)0; __constr_expr_0.ldv_49522._pad = (unsigned short)0; mdev->state = __constr_expr_0; return; } } void drbd_init_set_defaults(struct drbd_conf *mdev ) { struct lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; struct lock_class_key __key___6 ; struct lock_class_key __key___7 ; struct lock_class_key __key___8 ; struct lock_class_key __key___9 ; struct lock_class_key __key___10 ; { drbd_set_defaults(mdev); atomic_set(& mdev->ap_bio_cnt, 0); atomic_set(& mdev->ap_pending_cnt, 0); atomic_set(& mdev->rs_pending_cnt, 0); atomic_set(& mdev->unacked_cnt, 0); atomic_set(& mdev->local_cnt, 0); atomic_set(& mdev->pp_in_use_by_net, 0); atomic_set(& mdev->rs_sect_in, 0); atomic_set(& mdev->rs_sect_ev, 0); atomic_set(& mdev->ap_in_flight, 0); atomic_set(& mdev->md_io_in_use, 0); __mutex_init(& mdev->own_state_mutex, "&mdev->own_state_mutex", & __key); mdev->state_mutex = & mdev->own_state_mutex; spinlock_check(& mdev->al_lock); __raw_spin_lock_init(& mdev->al_lock.ldv_5957.rlock, "&(&mdev->al_lock)->rlock", & __key___0); spinlock_check(& mdev->peer_seq_lock); __raw_spin_lock_init(& mdev->peer_seq_lock.ldv_5957.rlock, "&(&mdev->peer_seq_lock)->rlock", & __key___1); INIT_LIST_HEAD(& mdev->active_ee); INIT_LIST_HEAD(& mdev->sync_ee); INIT_LIST_HEAD(& mdev->done_ee); INIT_LIST_HEAD(& 
mdev->read_ee); INIT_LIST_HEAD(& mdev->net_ee); INIT_LIST_HEAD(& mdev->resync_reads); INIT_LIST_HEAD(& mdev->resync_work.list); INIT_LIST_HEAD(& mdev->unplug_work.list); INIT_LIST_HEAD(& mdev->go_diskless.list); INIT_LIST_HEAD(& mdev->md_sync_work.list); INIT_LIST_HEAD(& mdev->start_resync_work.list); INIT_LIST_HEAD(& mdev->bm_io_work.w.list); mdev->resync_work.cb = & w_resync_timer; mdev->unplug_work.cb = & w_send_write_hint; mdev->go_diskless.cb = & w_go_diskless; mdev->md_sync_work.cb = & w_md_sync; mdev->bm_io_work.w.cb = & w_bitmap_io; mdev->start_resync_work.cb = & w_start_resync; mdev->resync_work.ldv_49807.mdev = mdev; mdev->unplug_work.ldv_49807.mdev = mdev; mdev->go_diskless.ldv_49807.mdev = mdev; mdev->md_sync_work.ldv_49807.mdev = mdev; mdev->bm_io_work.w.ldv_49807.mdev = mdev; mdev->start_resync_work.ldv_49807.mdev = mdev; init_timer_key(& mdev->resync_timer, 0U, "(&mdev->resync_timer)", & __key___2); init_timer_key(& mdev->md_sync_timer, 0U, "(&mdev->md_sync_timer)", & __key___3); init_timer_key(& mdev->start_resync_timer, 0U, "(&mdev->start_resync_timer)", & __key___4); init_timer_key(& mdev->request_timer, 0U, "(&mdev->request_timer)", & __key___5); mdev->resync_timer.function = & resync_timer_fn; mdev->resync_timer.data = (unsigned long )mdev; mdev->md_sync_timer.function = & md_sync_timer_fn; mdev->md_sync_timer.data = (unsigned long )mdev; mdev->start_resync_timer.function = & start_resync_timer_fn; mdev->start_resync_timer.data = (unsigned long )mdev; mdev->request_timer.function = & request_timer_fn; mdev->request_timer.data = (unsigned long )mdev; __init_waitqueue_head(& mdev->misc_wait, "&mdev->misc_wait", & __key___6); __init_waitqueue_head(& mdev->state_wait, "&mdev->state_wait", & __key___7); __init_waitqueue_head(& mdev->ee_wait, "&mdev->ee_wait", & __key___8); __init_waitqueue_head(& mdev->al_wait, "&mdev->al_wait", & __key___9); __init_waitqueue_head(& mdev->seq_wait, "&mdev->seq_wait", & __key___10); mdev->resync_wenr = 4294967295U; mdev->peer_max_bio_size = 4096U; mdev->local_max_bio_size = 4096U; return; } } void drbd_mdev_cleanup(struct drbd_conf *mdev ) { int i ; unsigned int tmp ; unsigned int tmp___0 ; unsigned int tmp___1 ; unsigned int tmp___2 ; unsigned int tmp___3 ; sector_t tmp___4 ; unsigned long tmp___5 ; unsigned long tmp___6 ; unsigned long tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; { if ((unsigned int )(mdev->tconn)->receiver.t_state != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: receiver t_state == %d expected 0.\n", (unsigned int )(mdev->tconn)->receiver.t_state); } else { } tmp___7 = 0UL; mdev->rs_failed = tmp___7; tmp___6 = tmp___7; mdev->rs_total = tmp___6; tmp___5 = tmp___6; mdev->rs_start = tmp___5; tmp___4 = tmp___5; mdev->p_size = tmp___4; tmp___3 = (unsigned int )tmp___4; mdev->writ_cnt = tmp___3; tmp___2 = tmp___3; mdev->send_cnt = tmp___2; tmp___1 = tmp___2; mdev->recv_cnt = tmp___1; tmp___0 = tmp___1; mdev->read_cnt = tmp___0; tmp = tmp___0; mdev->bm_writ_cnt = tmp; mdev->al_writ_cnt = tmp; mdev->rs_last_events = 0; mdev->rs_last_sect_ev = 0; i = 0; goto ldv_53141; ldv_53140: mdev->rs_mark_left[i] = 0UL; mdev->rs_mark_time[i] = 0UL; i = i + 1; ldv_53141: ; if (i <= 7) { goto ldv_53140; } else { } if ((unsigned long )(mdev->tconn)->net_conf != (unsigned long )((struct net_conf *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->tconn->net_conf == 
NULL ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2060); } else { } drbd_set_my_capacity(mdev, 0UL); if ((unsigned long )mdev->bitmap != (unsigned long )((struct drbd_bitmap *)0)) { drbd_bm_resize(mdev, 0UL, 1); drbd_bm_cleanup(mdev); } else { } drbd_free_bc(mdev->ldev); mdev->ldev = 0; clear_bit(18, (unsigned long volatile *)(& mdev->flags)); tmp___8 = list_empty((struct list_head const *)(& mdev->active_ee)); if (tmp___8 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->active_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2074); } else { } tmp___9 = list_empty((struct list_head const *)(& mdev->sync_ee)); if (tmp___9 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->sync_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2075); } else { } tmp___10 = list_empty((struct list_head const *)(& mdev->done_ee)); if (tmp___10 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->done_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2076); } else { } tmp___11 = list_empty((struct list_head const *)(& mdev->read_ee)); if (tmp___11 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->read_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2077); } else { } tmp___12 = list_empty((struct list_head const *)(& mdev->net_ee)); if (tmp___12 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->net_ee) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2078); } else { } tmp___13 = list_empty((struct list_head const *)(& mdev->resync_reads)); if (tmp___13 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->resync_reads) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2079); } else { } tmp___14 = list_empty((struct list_head const *)(& (mdev->tconn)->sender_work.q)); if (tmp___14 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->tconn->sender_work.q) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2080); } 
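/* Each dev_err() block in this stretch of drbd_mdev_cleanup() is the
 * expanded form of a D_ASSERT()-style macro: list_empty() is tested
 * explicitly and an "ASSERT( ... ) in %s:%d" message is printed on
 * violation, with the original source path and line number baked in
 * as string and integer literals. */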
else { } tmp___15 = list_empty((struct list_head const *)(& mdev->resync_work.list)); if (tmp___15 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->resync_work.list) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2081); } else { } tmp___16 = list_empty((struct list_head const *)(& mdev->unplug_work.list)); if (tmp___16 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->unplug_work.list) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2082); } else { } tmp___17 = list_empty((struct list_head const *)(& mdev->go_diskless.list)); if (tmp___17 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->go_diskless.list) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2083); } else { } drbd_set_defaults(mdev); return; } } static void drbd_destroy_mempools(void) { struct page *page ; { goto ldv_53148; ldv_53147: page = drbd_pp_pool; drbd_pp_pool = (struct page *)page->ldv_14746.private; __free_pages(page, 0U); drbd_pp_vacant = drbd_pp_vacant - 1; ldv_53148: ; if ((unsigned long )drbd_pp_pool != (unsigned long )((struct page *)0)) { goto ldv_53147; } else { } if ((unsigned long )drbd_md_io_bio_set != (unsigned long )((struct bio_set *)0)) { bioset_free(drbd_md_io_bio_set); } else { } if ((unsigned long )drbd_md_io_page_pool != (unsigned long )((mempool_t *)0)) { mempool_destroy(drbd_md_io_page_pool); } else { } if ((unsigned long )drbd_ee_mempool != (unsigned long )((mempool_t *)0)) { mempool_destroy(drbd_ee_mempool); } else { } if ((unsigned long )drbd_request_mempool != (unsigned long )((mempool_t *)0)) { mempool_destroy(drbd_request_mempool); } else { } if ((unsigned long )drbd_ee_cache != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(drbd_ee_cache); } else { } if ((unsigned long )drbd_request_cache != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(drbd_request_cache); } else { } if ((unsigned long )drbd_bm_ext_cache != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(drbd_bm_ext_cache); } else { } if ((unsigned long )drbd_al_ext_cache != (unsigned long )((struct kmem_cache *)0)) { kmem_cache_destroy(drbd_al_ext_cache); } else { } drbd_md_io_bio_set = 0; drbd_md_io_page_pool = 0; drbd_ee_mempool = 0; drbd_request_mempool = 0; drbd_ee_cache = 0; drbd_request_cache = 0; drbd_bm_ext_cache = 0; drbd_al_ext_cache = 0; return; } } static int drbd_create_mempools(void) { struct page *page ; int number ; int i ; struct lock_class_key __key ; { number = (int const )(minor_count * 256U); drbd_request_mempool = 0; drbd_ee_cache = 0; drbd_request_cache = 0; drbd_bm_ext_cache = 0; drbd_al_ext_cache = 0; drbd_pp_pool = 0; drbd_md_io_page_pool = 0; drbd_md_io_bio_set = 0; drbd_request_cache = kmem_cache_create("drbd_req", 152UL, 0UL, 0UL, 0); if ((unsigned long )drbd_request_cache == (unsigned long )((struct kmem_cache *)0)) { goto Enomem; } else { } drbd_ee_cache = kmem_cache_create("drbd_ee", 128UL, 0UL, 0UL, 0); 
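/* drbd_create_mempools() sizing: "number" above is minor_count * 256
 * preallocated pages for the page pool, and the kmem_cache object sizes
 * (152, 128, 64, 48 bytes) are most likely sizeof() values for the
 * request, peer-request, bitmap-extent and AL-extent structures that the
 * preprocessor folded into constants; the mapping is inferred from the
 * cache names only. */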
if ((unsigned long )drbd_ee_cache == (unsigned long )((struct kmem_cache *)0)) { goto Enomem; } else { } drbd_bm_ext_cache = kmem_cache_create("drbd_bm", 64UL, 0UL, 0UL, 0); if ((unsigned long )drbd_bm_ext_cache == (unsigned long )((struct kmem_cache *)0)) { goto Enomem; } else { } drbd_al_ext_cache = kmem_cache_create("drbd_al", 48UL, 0UL, 0UL, 0); if ((unsigned long )drbd_al_ext_cache == (unsigned long )((struct kmem_cache *)0)) { goto Enomem; } else { } drbd_md_io_bio_set = bioset_create(128U, 0U); if ((unsigned long )drbd_md_io_bio_set == (unsigned long )((struct bio_set *)0)) { goto Enomem; } else { } drbd_md_io_page_pool = mempool_create_page_pool(128, 0); if ((unsigned long )drbd_md_io_page_pool == (unsigned long )((mempool_t *)0)) { goto Enomem; } else { } drbd_request_mempool = mempool_create(number, & mempool_alloc_slab, & mempool_free_slab, (void *)drbd_request_cache); if ((unsigned long )drbd_request_mempool == (unsigned long )((mempool_t *)0)) { goto Enomem; } else { } drbd_ee_mempool = mempool_create(number, & mempool_alloc_slab, & mempool_free_slab, (void *)drbd_ee_cache); if ((unsigned long )drbd_ee_mempool == (unsigned long )((mempool_t *)0)) { goto Enomem; } else { } spinlock_check(& drbd_pp_lock); __raw_spin_lock_init(& drbd_pp_lock.ldv_5957.rlock, "&(&drbd_pp_lock)->rlock", & __key); i = 0; goto ldv_53159; ldv_53158: page = alloc_pages(131282U, 0U); if ((unsigned long )page == (unsigned long )((struct page *)0)) { goto Enomem; } else { } page->ldv_14746.private = (unsigned long )drbd_pp_pool; drbd_pp_pool = page; i = i + 1; ldv_53159: ; if (i < number) { goto ldv_53158; } else { } drbd_pp_vacant = number; return (0); Enomem: drbd_destroy_mempools(); return (-12); } } static int drbd_notify_sys(struct notifier_block *this , unsigned long code , void *unused ) { { return (0); } } static struct notifier_block drbd_notifier = {& drbd_notify_sys, 0, 0}; static void drbd_release_all_peer_reqs(struct drbd_conf *mdev ) { int rr ; { rr = drbd_free_peer_reqs(mdev, & mdev->active_ee); if (rr != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%d EEs in active list found!\n", rr); } else { } rr = drbd_free_peer_reqs(mdev, & mdev->sync_ee); if (rr != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%d EEs in sync list found!\n", rr); } else { } rr = drbd_free_peer_reqs(mdev, & mdev->read_ee); if (rr != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%d EEs in read list found!\n", rr); } else { } rr = drbd_free_peer_reqs(mdev, & mdev->done_ee); if (rr != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%d EEs in done list found!\n", rr); } else { } rr = drbd_free_peer_reqs(mdev, & mdev->net_ee); if (rr != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "%d EEs in net list found!\n", rr); } else { } return; } } void drbd_minor_destroy(struct kref *kref ) { struct drbd_conf *mdev ; struct kref const *__mptr ; struct drbd_tconn *tconn ; { __mptr = (struct kref const *)kref; mdev = (struct drbd_conf *)__mptr + 0xfffffffffffffff4UL; tconn = mdev->tconn; del_timer_sync(& mdev->request_timer); if (mdev->open_cnt != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->open_cnt == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2254); } else { } if ((unsigned long )mdev->this_bdev != 
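/* drbd_minor_destroy() above recovers the drbd_conf from its embedded
 * kref via container_of(), which CIL rendered as
 * "__mptr + 0xfffffffffffffff4UL", a negative offset of -12 bytes.
 * The teardown below releases the block_device, backing device, activity
 * log, resync LRU, bitmap, gendisk and request queue before dropping the
 * connection reference. */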
(unsigned long )((struct block_device *)0)) { bdput(mdev->this_bdev); } else { } drbd_free_bc(mdev->ldev); mdev->ldev = 0; drbd_release_all_peer_reqs(mdev); lc_destroy(mdev->act_log); lc_destroy(mdev->resync); kfree((void const *)mdev->p_uuid); if ((unsigned long )mdev->bitmap != (unsigned long )((struct drbd_bitmap *)0)) { drbd_bm_cleanup(mdev); } else { } __free_pages(mdev->md_io_page, 0U); put_disk(mdev->vdisk); blk_cleanup_queue(mdev->rq_queue); kfree((void const *)mdev->rs_plan_s); kfree((void const *)mdev); kref_put(& tconn->kref, & conn_destroy); return; } } static struct retry_worker retry ; static void do_retry(struct work_struct *ws ) { struct retry_worker *retry___0 ; struct work_struct const *__mptr ; struct list_head writes ; struct drbd_request *req ; struct drbd_request *tmp ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; struct drbd_conf *mdev ; struct bio *bio ; unsigned long start_time ; bool expected ; bool _bool ; int tmp___0 ; bool _bool___0 ; bool _bool___1 ; int tmp___1 ; int tmp___2 ; struct list_head const *__mptr___2 ; { __mptr = (struct work_struct const *)ws; retry___0 = (struct retry_worker *)__mptr + 0xfffffffffffffff8UL; writes.next = & writes; writes.prev = & writes; spin_lock_irq(& retry___0->lock); list_splice_init(& retry___0->writes, & writes); spin_unlock_irq(& retry___0->lock); __mptr___0 = (struct list_head const *)writes.next; req = (struct drbd_request *)__mptr___0 + 0xffffffffffffff98UL; __mptr___1 = (struct list_head const *)req->tl_requests.next; tmp = (struct drbd_request *)__mptr___1 + 0xffffffffffffff98UL; goto ldv_53211; ldv_53210: mdev = req->w.ldv_49807.mdev; bio = req->master_bio; start_time = req->start_time; tmp___0 = atomic_read((atomic_t const *)(& req->completion_ref)); _bool = tmp___0 == 0; if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"atomic_read(&req->completion_ref) == 0", "do_retry"); } else { } if ((int )_bool) { _bool___0 = ((unsigned long )req->rq_state & 8192UL) != 0UL; if (! _bool___0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"req->rq_state & RQ_POSTPONED", "do_retry"); } else { } if ((int )_bool___0) { _bool___1 = (bool )(((unsigned long )req->rq_state & 1UL) == 0UL || ((unsigned long )req->rq_state & 8UL) != 0UL); if (! _bool___1) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"(req->rq_state & RQ_LOCAL_PENDING) == 0 || (req->rq_state & RQ_LOCAL_ABORTED) != 0", "do_retry"); } else { } if ((int )_bool___1) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } } else { tmp___1 = 0; } expected = (bool )tmp___1; if (! 
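/* "expected" aggregates three assertion-style checks on the request being
 * retried by do_retry(): completion_ref must be 0, RQ_POSTPONED must be
 * set, and a locally pending request must already be aborted.  When it is
 * false, the branch below dumps the request pointer, refcount and
 * rq_state, and the retry proceeds anyway. */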
expected) { tmp___2 = atomic_read((atomic_t const *)(& req->completion_ref)); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "req=%p completion_ref=%d rq_state=%x\n", req, tmp___2, req->rq_state); } else { } kref_put(& req->kref, & drbd_req_destroy); inc_ap_bio___0(mdev); __drbd_make_request(mdev, bio, start_time); req = tmp; __mptr___2 = (struct list_head const *)tmp->tl_requests.next; tmp = (struct drbd_request *)__mptr___2 + 0xffffffffffffff98UL; ldv_53211: ; if ((unsigned long )(& req->tl_requests) != (unsigned long )(& writes)) { goto ldv_53210; } else { } return; } } void drbd_restart_request(struct drbd_request *req ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& retry.lock); flags = _raw_spin_lock_irqsave(tmp); list_move_tail(& req->tl_requests, & retry.writes); spin_unlock_irqrestore(& retry.lock, flags); dec_ap_bio___0(req->w.ldv_49807.mdev); queue_work(retry.wq, & retry.worker); return; } } static void drbd_cleanup(void) { unsigned int i ; struct drbd_conf *mdev ; struct drbd_tconn *tconn ; struct drbd_tconn *tmp ; void *tmp___0 ; unsigned int tmp___1 ; void *tmp___2 ; struct list_head const *__mptr ; struct list_head const *__mptr___0 ; struct list_head const *__mptr___1 ; { unregister_reboot_notifier(& drbd_notifier); if ((unsigned long )drbd_proc != (unsigned long )((struct proc_dir_entry *)0)) { remove_proc_entry("drbd", 0); } else { } if ((unsigned long )retry.wq != (unsigned long )((struct workqueue_struct *)0)) { destroy_workqueue(retry.wq); } else { } drbd_genl_unregister(); i = 0U; tmp___0 = idr_get_next(& minors, (int *)(& i)); mdev = (struct drbd_conf *)tmp___0; goto ldv_53228; ldv_53227: tmp___1 = mdev_to_minor(mdev); idr_remove(& minors, (int )tmp___1); idr_remove(& (mdev->tconn)->volumes, mdev->vnr); del_gendisk(mdev->vdisk); kref_put(& mdev->kref, & drbd_minor_destroy); i = i + 1U; tmp___2 = idr_get_next(& minors, (int *)(& i)); mdev = (struct drbd_conf *)tmp___2; ldv_53228: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53227; } else { } __mptr = (struct list_head const *)drbd_tconns.next; tconn = (struct drbd_tconn *)__mptr + 0xfffffffffffffff8UL; __mptr___0 = (struct list_head const *)tconn->all_tconn.next; tmp = (struct drbd_tconn *)__mptr___0 + 0xfffffffffffffff8UL; goto ldv_53237; ldv_53236: list_del(& tconn->all_tconn); kref_put(& tconn->kref, & conn_destroy); tconn = tmp; __mptr___1 = (struct list_head const *)tmp->all_tconn.next; tmp = (struct drbd_tconn *)__mptr___1 + 0xfffffffffffffff8UL; ldv_53237: ; if ((unsigned long )(& tconn->all_tconn) != (unsigned long )(& drbd_tconns)) { goto ldv_53236; } else { } drbd_destroy_mempools(); unregister_blkdev(147U, "drbd"); idr_destroy(& minors); printk("\016drbd: module cleanup done.\n"); return; } } static int drbd_congested(void *congested_data , int bdi_bits ) { struct drbd_conf *mdev ; struct request_queue *q ; char reason ; int r ; bool tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { mdev = (struct drbd_conf *)congested_data; reason = 45; r = 0; tmp = may_inc_ap_bio___0(mdev); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { r = bdi_bits; reason = 100; goto out; } else { } tmp___2 = constant_test_bit(11U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); if (tmp___2 != 0) { r = r | 4; tmp___1 = _get_ldev_if_state(mdev, D_UP_TO_DATE); if (tmp___1 == 0) { r = r | 8; } else { put_ldev(mdev); } r = r & bdi_bits; reason = 99; goto out; } else { } tmp___3 = _get_ldev_if_state(mdev, 
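/* drbd_congested() records its diagnosis as a single ASCII character in
 * mdev->congestion_reason: 45 is '-', 100 'd', 99 'c', 98 'b', 97 'a',
 * 110 'n'.  The character codes are plain ASCII; their meanings (no
 * application IO possible, congestion policy, backing device, network)
 * follow the upstream driver and are inferred here. */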
D_INCONSISTENT); if (tmp___3 != 0) { q = bdev_get_queue((mdev->ldev)->backing_bdev); r = bdi_congested(& q->backing_dev_info, bdi_bits); put_ldev(mdev); if (r != 0) { reason = 98; } else { } } else { } if ((bdi_bits & 4) != 0) { tmp___4 = constant_test_bit(0U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); if (tmp___4 != 0) { r = r | 4; reason = (int )((signed char )reason) == 98 ? 97 : 110; } else { } } else { } out: mdev->congestion_reason = reason; return (r); } } static void drbd_init_workqueue(struct drbd_work_queue *wq ) { struct lock_class_key __key ; struct lock_class_key __key___0 ; { spinlock_check(& wq->q_lock); __raw_spin_lock_init(& wq->q_lock.ldv_5957.rlock, "&(&wq->q_lock)->rlock", & __key); INIT_LIST_HEAD(& wq->q); __init_waitqueue_head(& wq->q_wait, "&wq->q_wait", & __key___0); return; } } struct drbd_tconn *conn_get_by_name(char const *name ) { struct drbd_tconn *tconn ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; { if ((unsigned long )name == (unsigned long )((char const *)0) || (int )((signed char )*name) == 0) { return (0); } else { } rcu_read_lock___5(); __ptr = drbd_tconns.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; tconn = (struct drbd_tconn *)__mptr + 0xfffffffffffffff8UL; goto ldv_53273; ldv_53272: tmp___0 = strcmp((char const *)tconn->name, name); if (tmp___0 == 0) { kref_get(& tconn->kref); goto found; } else { } __ptr___0 = tconn->all_tconn.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; tconn = (struct drbd_tconn *)__mptr___0 + 0xfffffffffffffff8UL; ldv_53273: ; if ((unsigned long )(& tconn->all_tconn) != (unsigned long )(& drbd_tconns)) { goto ldv_53272; } else { } tconn = 0; found: rcu_read_unlock___5(); return (tconn); } } struct drbd_tconn *conn_get_by_addrs(void *my_addr , int my_addr_len , void *peer_addr , int peer_addr_len ) { struct drbd_tconn *tconn ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___2 ; { rcu_read_lock___5(); __ptr = drbd_tconns.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
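/* The _________p1 / __warned / debug_lockdep_rcu_enabled() pattern here,
 * as in conn_get_by_name() above, is the expanded form of roughly
 * list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) together with
 * rcu_dereference_check()'s lockdep instrumentation; __warned throttles
 * the "suspicious rcu_dereference" report to once per call site. */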
__warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; tconn = (struct drbd_tconn *)__mptr + 0xfffffffffffffff8UL; goto ldv_53298; ldv_53297: ; if (tconn->my_addr_len == my_addr_len && tconn->peer_addr_len == peer_addr_len) { tmp___0 = memcmp((void const *)(& tconn->my_addr), (void const *)my_addr, (size_t )my_addr_len); if (tmp___0 == 0) { tmp___1 = memcmp((void const *)(& tconn->peer_addr), (void const *)peer_addr, (size_t )peer_addr_len); if (tmp___1 == 0) { kref_get(& tconn->kref); goto found; } else { } } else { } } else { } __ptr___0 = tconn->all_tconn.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; tconn = (struct drbd_tconn *)__mptr___0 + 0xfffffffffffffff8UL; ldv_53298: ; if ((unsigned long )(& tconn->all_tconn) != (unsigned long )(& drbd_tconns)) { goto ldv_53297; } else { } tconn = 0; found: rcu_read_unlock___5(); return (tconn); } } static int drbd_alloc_socket(struct drbd_socket *socket ) { unsigned long tmp ; unsigned long tmp___0 ; { tmp = __get_free_pages(208U, 0U); socket->rbuf = (void *)tmp; if ((unsigned long )socket->rbuf == (unsigned long )((void *)0)) { return (-12); } else { } tmp___0 = __get_free_pages(208U, 0U); socket->sbuf = (void *)tmp___0; if ((unsigned long )socket->sbuf == (unsigned long )((void *)0)) { return (-12); } else { } return (0); } } static void drbd_free_socket(struct drbd_socket *socket ) { { free_pages((unsigned long )socket->sbuf, 0U); free_pages((unsigned long )socket->rbuf, 0U); return; } } void conn_free_crypto(struct drbd_tconn *tconn ) { { drbd_free_sock(tconn); crypto_free_hash(tconn->csums_tfm); crypto_free_hash(tconn->verify_tfm); crypto_free_hash(tconn->cram_hmac_tfm); crypto_free_hash(tconn->integrity_tfm); crypto_free_hash(tconn->peer_integrity_tfm); kfree((void const *)tconn->int_dig_in); kfree((void const *)tconn->int_dig_vv); tconn->csums_tfm = 0; tconn->verify_tfm = 0; tconn->cram_hmac_tfm = 0; tconn->integrity_tfm = 0; tconn->peer_integrity_tfm = 0; tconn->int_dig_in = 0; tconn->int_dig_vv = 0; return; } } int set_resource_options(struct drbd_tconn *tconn , struct res_opts *res_opts ) { cpumask_var_t new_cpu_mask ; int err ; bool tmp ; int tmp___0 ; bool tmp___1 ; int tmp___2 ; { tmp = zalloc_cpumask_var(& new_cpu_mask, 208U); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-12); } else { } if (nr_cpu_ids > 1 && (int )((signed char )res_opts->cpu_mask[0]) != 0) { err = bitmap_parse((char const *)(& res_opts->cpu_mask), 32U, (unsigned long *)(& new_cpu_mask->bits), nr_cpu_ids); if (err != 0) { printk("\fd-con %s: bitmap_parse() failed with %d\n", tconn->name, err); goto fail; } else { } } else { } tconn->res_opts = *res_opts; tmp___1 = cpumask_equal((struct cpumask const *)tconn->cpu_mask, (struct cpumask const *)new_cpu_mask); if (tmp___1) { tmp___2 = 0; } else { tmp___2 = 1; } if (tmp___2) { cpumask_copy(tconn->cpu_mask, (struct cpumask const *)new_cpu_mask); drbd_calc_cpu_mask(tconn); tconn->receiver.reset_cpu_mask = 1; tconn->asender.reset_cpu_mask = 1; tconn->worker.reset_cpu_mask = 1; } else { } err = 0; fail: free_cpumask_var(new_cpu_mask); return (err); } } struct drbd_tconn *conn_create(char const *name , struct res_opts *res_opts ) { struct drbd_tconn *tconn ; void *tmp ; int tmp___0 ; int tmp___1 ; bool tmp___2 ; int tmp___3 ; int tmp___4 ; void *tmp___5 ; struct 
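/* conn_create() locals: every lock_class_key below stems from lockdep's
 * mutex_init() / spin_lock_init() macro expansion, one static key per
 * initialization site.  The 2392UL handed to kzalloc() for the tconn is
 * presumably sizeof(struct drbd_tconn) folded to a constant; that size
 * is an inference, not something visible at this point in the file. */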
lock_class_key __key ; struct lock_class_key __key___0 ; struct lock_class_key __key___1 ; struct lock_class_key __key___2 ; struct lock_class_key __key___3 ; struct lock_class_key __key___4 ; struct lock_class_key __key___5 ; { tmp = kzalloc(2392UL, 208U); tconn = (struct drbd_tconn *)tmp; if ((unsigned long )tconn == (unsigned long )((struct drbd_tconn *)0)) { return (0); } else { } tconn->name = kstrdup(name, 208U); if ((unsigned long )tconn->name == (unsigned long )((char *)0)) { goto fail; } else { } tmp___0 = drbd_alloc_socket(& tconn->data); if (tmp___0 != 0) { goto fail; } else { } tmp___1 = drbd_alloc_socket(& tconn->meta); if (tmp___1 != 0) { goto fail; } else { } tmp___2 = zalloc_cpumask_var(& tconn->cpu_mask, 208U); if (tmp___2) { tmp___3 = 0; } else { tmp___3 = 1; } if (tmp___3) { goto fail; } else { } tmp___4 = set_resource_options(tconn, res_opts); if (tmp___4 != 0) { goto fail; } else { } tmp___5 = kzalloc(48UL, 208U); tconn->current_epoch = (struct drbd_epoch *)tmp___5; if ((unsigned long )tconn->current_epoch == (unsigned long )((struct drbd_epoch *)0)) { goto fail; } else { } INIT_LIST_HEAD(& tconn->transfer_log); INIT_LIST_HEAD(& (tconn->current_epoch)->list); tconn->epochs = 1U; spinlock_check(& tconn->epoch_lock); __raw_spin_lock_init(& tconn->epoch_lock.ldv_5957.rlock, "&(&tconn->epoch_lock)->rlock", & __key); tconn->write_ordering = WO_bdev_flush; tconn->send.seen_any_write_yet = 0; tconn->send.current_epoch_nr = 0; tconn->send.current_epoch_writes = 0U; tconn->cstate = C_STANDALONE; __mutex_init(& tconn->cstate_mutex, "&tconn->cstate_mutex", & __key___0); spinlock_check(& tconn->req_lock); __raw_spin_lock_init(& tconn->req_lock.ldv_5957.rlock, "&(&tconn->req_lock)->rlock", & __key___1); __mutex_init(& tconn->conf_update, "&tconn->conf_update", & __key___2); __init_waitqueue_head(& tconn->ping_wait, "&tconn->ping_wait", & __key___3); idr_init(& tconn->volumes); drbd_init_workqueue(& tconn->sender_work); __mutex_init(& tconn->data.mutex, "&tconn->data.mutex", & __key___4); __mutex_init(& tconn->meta.mutex, "&tconn->meta.mutex", & __key___5); drbd_thread_init(tconn, & tconn->receiver, & drbdd_init, (char *)"receiver"); drbd_thread_init(tconn, & tconn->worker, & drbd_worker, (char *)"worker"); drbd_thread_init(tconn, & tconn->asender, & drbd_asender, (char *)"asender"); kref_init(& tconn->kref); list_add_tail_rcu(& tconn->all_tconn, & drbd_tconns); return (tconn); fail: kfree((void const *)tconn->current_epoch); free_cpumask_var(tconn->cpu_mask); drbd_free_socket(& tconn->meta); drbd_free_socket(& tconn->data); kfree((void const *)tconn->name); kfree((void const *)tconn); return (0); } } void conn_destroy(struct kref *kref ) { struct drbd_tconn *tconn ; struct kref const *__mptr ; int tmp ; int tmp___0 ; { __mptr = (struct kref const *)kref; tconn = (struct drbd_tconn *)__mptr + 0xffffffffffffffe8UL; tmp___0 = atomic_read((atomic_t const *)(& (tconn->current_epoch)->epoch_size)); if (tmp___0 != 0) { tmp = atomic_read((atomic_t const *)(& (tconn->current_epoch)->epoch_size)); printk("\vd-con %s: epoch_size:%d\n", tconn->name, tmp); } else { } kfree((void const *)tconn->current_epoch); idr_destroy(& tconn->volumes); free_cpumask_var(tconn->cpu_mask); drbd_free_socket(& tconn->meta); drbd_free_socket(& tconn->data); kfree((void const *)tconn->name); kfree((void const *)tconn->int_dig_in); kfree((void const *)tconn->int_dig_vv); kfree((void const *)tconn); return; } } enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn , unsigned int minor , int vnr ) { struct 
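/* conn_new_minor() builds one replicated volume end to end: allocate the
 * drbd_conf, a request queue and a gendisk on block major 147 (DRBD's
 * registered major), the metadata IO page and the bitmap, then register
 * the minor and volume number in their idr trees.  Failures unwind
 * through the goto chain at the bottom in exact reverse order of
 * construction. */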
drbd_conf *mdev ; struct gendisk *disk ; struct request_queue *q ; int vnr_got ; int minor_got ; enum drbd_ret_code err ; void *tmp ; int tmp___0 ; struct rb_root __constr_expr_0 ; struct rb_root __constr_expr_1 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { vnr_got = vnr; minor_got = (int )minor; err = ERR_NOMEM; mdev = minor_to_mdev(minor); if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { return (ERR_MINOR_EXISTS); } else { } tmp = kzalloc(2160UL, 208U); mdev = (struct drbd_conf *)tmp; if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { return (ERR_NOMEM); } else { } kref_get(& tconn->kref); mdev->tconn = tconn; mdev->minor = minor; mdev->vnr = vnr; drbd_init_set_defaults(mdev); q = blk_alloc_queue(208U); if ((unsigned long )q == (unsigned long )((struct request_queue *)0)) { goto out_no_q; } else { } mdev->rq_queue = q; q->queuedata = (void *)mdev; disk = alloc_disk(1); if ((unsigned long )disk == (unsigned long )((struct gendisk *)0)) { goto out_no_disk; } else { } mdev->vdisk = disk; set_disk_ro(disk, 1); disk->queue = q; disk->major = 147; disk->first_minor = (int )minor; disk->fops = & drbd_ops; sprintf((char *)(& disk->disk_name), "drbd%d", minor); disk->private_data = (void *)mdev; mdev->this_bdev = bdget(minor | 154140672U); (mdev->this_bdev)->bd_contains = mdev->this_bdev; q->backing_dev_info.congested_fn = & drbd_congested; q->backing_dev_info.congested_data = (void *)mdev; blk_queue_make_request(q, & drbd_make_request); blk_queue_flush(q, 6144U); blk_queue_max_hw_sectors(q, 16U); blk_queue_bounce_limit(q, 0xffffffffffffffffULL); blk_queue_merge_bvec(q, & drbd_merge_bvec); q->queue_lock = & (mdev->tconn)->req_lock; mdev->md_io_page = alloc_pages(208U, 0U); if ((unsigned long )mdev->md_io_page == (unsigned long )((struct page *)0)) { goto out_no_io_page; } else { } tmp___0 = drbd_bm_init(mdev); if (tmp___0 != 0) { goto out_no_bitmap; } else { } __constr_expr_0.rb_node = 0; mdev->read_requests = __constr_expr_0; __constr_expr_1.rb_node = 0; mdev->write_requests = __constr_expr_1; tmp___1 = idr_pre_get(& minors, 208U); if (tmp___1 == 0) { goto out_no_minor_idr; } else { } tmp___2 = idr_get_new_above(& minors, (void *)mdev, (int )minor, & minor_got); if (tmp___2 != 0) { goto out_no_minor_idr; } else { } if ((unsigned int )minor_got != minor) { err = ERR_MINOR_EXISTS; drbd_msg_put_info("requested minor exists already"); goto out_idr_remove_minor; } else { } tmp___3 = idr_pre_get(& tconn->volumes, 208U); if (tmp___3 == 0) { goto out_idr_remove_minor; } else { } tmp___4 = idr_get_new_above(& tconn->volumes, (void *)mdev, vnr, & vnr_got); if (tmp___4 != 0) { goto out_idr_remove_minor; } else { } if (vnr_got != vnr) { err = ERR_INVALID_REQUEST; drbd_msg_put_info("requested volume exists already"); goto out_idr_remove_vol; } else { } add_disk(disk); kref_init(& mdev->kref); mdev->state.ldv_49522.conn = (unsigned char )tconn->cstate; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 144U) { drbd_connected(mdev); } else { } return (NO_ERROR); out_idr_remove_vol: idr_remove(& tconn->volumes, vnr_got); out_idr_remove_minor: idr_remove(& minors, minor_got); synchronize_rcu(); out_no_minor_idr: drbd_bm_cleanup(mdev); out_no_bitmap: __free_pages(mdev->md_io_page, 0U); out_no_io_page: put_disk(disk); out_no_disk: blk_cleanup_queue(q); out_no_q: kfree((void const *)mdev); kref_put(& tconn->kref, & conn_destroy); return (err); } } int drbd_init(void) { int err ; struct lock_class_key __key ; struct lock_class_key __key___0 ; struct 
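/* drbd_init() is the module entry point: it validates minor_count
 * (1..255), registers block major 147 and the generic netlink family,
 * creates /proc/drbd and the "drbd-reissue" retry workqueue, and unwinds
 * through drbd_cleanup() on any failure. */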
lock_class_key __key___1 ; char const *__lock_name ; struct workqueue_struct *tmp ; struct lock_class_key __key___2 ; atomic_long_t __constr_expr_0 ; struct lock_class_key __key___3 ; char const *tmp___0 ; { if (minor_count == 0U || minor_count > 255U) { printk("\vdrbd: invalid minor_count (%d)\n", minor_count); return (-22); } else { } err = register_blkdev(147U, "drbd"); if (err != 0) { printk("\vdrbd: unable to register block device major %d\n", 147); return (err); } else { } err = drbd_genl_register(); if (err != 0) { printk("\vdrbd: unable to register generic netlink family\n"); goto fail; } else { } register_reboot_notifier(& drbd_notifier); err = -12; __init_waitqueue_head(& drbd_pp_wait, "&drbd_pp_wait", & __key); drbd_proc = 0; idr_init(& minors); err = drbd_create_mempools(); if (err != 0) { goto fail; } else { } drbd_proc = proc_create_data("drbd", 33060, 0, & drbd_proc_fops, 0); if ((unsigned long )drbd_proc == (unsigned long )((struct proc_dir_entry *)0)) { printk("\vdrbd: unable to register proc file\n"); goto fail; } else { } __rwlock_init(& global_state_lock, "&global_state_lock", & __key___0); INIT_LIST_HEAD(& drbd_tconns); __lock_name = "drbd-reissue"; tmp = __alloc_workqueue_key("drbd-reissue", 10U, 1, & __key___1, __lock_name); retry.wq = tmp; if ((unsigned long )retry.wq == (unsigned long )((struct workqueue_struct *)0)) { printk("\vdrbd: unable to create retry workqueue\n"); goto fail; } else { } __init_work(& retry.worker, 0); __constr_expr_0.counter = 4195328L; retry.worker.data = __constr_expr_0; lockdep_init_map(& retry.worker.lockdep_map, "(&retry.worker)", & __key___2, 0); INIT_LIST_HEAD(& retry.worker.entry); retry.worker.func = & do_retry; spinlock_check(& retry.lock); __raw_spin_lock_init(& retry.lock.ldv_5957.rlock, "&(&retry.lock)->rlock", & __key___3); INIT_LIST_HEAD(& retry.writes); printk("\016drbd: initialized. 
Version: 8.4.2 (api:%d/proto:%d-%d)\n", 1, 86, 101); tmp___0 = drbd_buildtag(); printk("\016drbd: %s\n", tmp___0); printk("\016drbd: registered as block device major %d\n", 147); return (0); fail: drbd_cleanup(); if (err == -12) { printk("\vdrbd: ran out of memory\n"); } else { printk("\vdrbd: initialization failure\n"); } return (err); } } void drbd_free_bc(struct drbd_backing_dev *ldev ) { { if ((unsigned long )ldev == (unsigned long )((struct drbd_backing_dev *)0)) { return; } else { } blkdev_put(ldev->backing_bdev, 131U); blkdev_put(ldev->md_bdev, 131U); kfree((void const *)ldev); return; } } void drbd_free_sock(struct drbd_tconn *tconn ) { { if ((unsigned long )tconn->data.socket != (unsigned long )((struct socket *)0)) { ldv_mutex_lock_219(& tconn->data.mutex); kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR); sock_release(tconn->data.socket); tconn->data.socket = 0; ldv_mutex_unlock_220(& tconn->data.mutex); } else { } if ((unsigned long )tconn->meta.socket != (unsigned long )((struct socket *)0)) { ldv_mutex_lock_221(& tconn->meta.mutex); kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR); sock_release(tconn->meta.socket); tconn->meta.socket = 0; ldv_mutex_unlock_222(& tconn->meta.mutex); } else { } return; } } void conn_md_sync(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; int vnr ; void *tmp ; void *tmp___0 ; { rcu_read_lock___5(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53380; ldv_53379: kref_get(& mdev->kref); rcu_read_unlock___5(); drbd_md_sync(mdev); kref_put(& mdev->kref, & drbd_minor_destroy); rcu_read_lock___5(); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_53380: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53379; } else { } rcu_read_unlock___5(); return; } } void drbd_md_sync(struct drbd_conf *mdev ) { struct meta_data_on_disk *buffer ; sector_t sector ; int i ; int tmp ; int tmp___0 ; void *tmp___1 ; sector_t tmp___2 ; __u64 tmp___3 ; __u64 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; __u32 tmp___7 ; __u32 tmp___8 ; __u64 tmp___9 ; __u32 tmp___10 ; __u32 tmp___11 ; sector_t tmp___12 ; int tmp___13 ; sector_t tmp___14 ; { del_timer(& mdev->md_sync_timer); tmp = test_and_clear_bit(1, (unsigned long volatile *)(& mdev->flags)); if (tmp == 0) { return; } else { } tmp___0 = _get_ldev_if_state(mdev, D_FAILED); if (tmp___0 == 0) { return; } else { } tmp___1 = drbd_md_get_buffer(mdev); buffer = (struct meta_data_on_disk *)tmp___1; if ((unsigned long )buffer == (unsigned long )((struct meta_data_on_disk *)0)) { goto out; } else { } memset((void *)buffer, 0, 512UL); tmp___2 = drbd_get_capacity(mdev->this_bdev); tmp___3 = __fswab64((__u64 )tmp___2); buffer->la_size = tmp___3; i = 0; goto ldv_53404; ldv_53403: tmp___4 = __fswab64((mdev->ldev)->md.uuid[i]); buffer->uuid[i] = tmp___4; i = i + 1; ldv_53404: ; if (i <= 3) { goto ldv_53403; } else { } tmp___5 = __fswab32((mdev->ldev)->md.flags); buffer->flags = tmp___5; buffer->magic = 1812100227U; tmp___6 = __fswab32((mdev->ldev)->md.md_size_sect); buffer->md_size_sect = tmp___6; tmp___7 = __fswab32((__u32 )(mdev->ldev)->md.al_offset); buffer->al_offset = tmp___7; tmp___8 = __fswab32((mdev->act_log)->nr_elements); buffer->al_nr_extents = tmp___8; buffer->bm_bytes_per_bit = 1048576U; tmp___9 = __fswab64((mdev->ldev)->md.device_uuid); buffer->device_uuid = tmp___9; tmp___10 = __fswab32((__u32 )(mdev->ldev)->md.bm_offset); buffer->bm_offset = tmp___10; tmp___11 = 
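/* drbd_md_sync() writes the 512-byte on-disk superblock in big-endian
 * layout: each __fswab64 / __fswab32 in this function is cpu_to_be64/32
 * after preprocessing, and the magic stored above (1812100227U) appears
 * to be a DRBD metadata magic constant already byte-swapped at compile
 * time. */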
__fswab32(mdev->peer_max_bio_size); buffer->la_peer_max_bio_size = tmp___11; tmp___12 = drbd_md_ss__(mdev, mdev->ldev); if ((unsigned long long )tmp___12 != (mdev->ldev)->md.md_offset) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 2987); } else { } sector = (sector_t )(mdev->ldev)->md.md_offset; tmp___13 = drbd_md_sync_page_io(mdev, mdev->ldev, sector, 1); if (tmp___13 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "meta data update failed!\n"); drbd_chk_io_error____1(mdev, 1, DRBD_META_IO_ERROR, "drbd_md_sync"); } else { } tmp___14 = drbd_get_capacity(mdev->this_bdev); (mdev->ldev)->md.la_size_sect = (u64 )tmp___14; drbd_md_put_buffer(mdev); out: put_ldev(mdev); return; } } int drbd_md_read(struct drbd_conf *mdev , struct drbd_backing_dev *bdev ) { struct meta_data_on_disk *buffer ; u32 magic ; u32 flags ; int i ; int rv ; int tmp ; void *tmp___0 ; int tmp___1 ; __u32 tmp___2 ; __u32 tmp___3 ; __u32 tmp___4 ; __u32 tmp___5 ; __u32 tmp___6 ; __u32 tmp___7 ; __u32 tmp___8 ; __u32 tmp___9 ; __u32 tmp___10 ; __u32 tmp___11 ; __u64 tmp___12 ; __u64 tmp___13 ; __u32 tmp___14 ; __u64 tmp___15 ; unsigned int peer ; __u32 tmp___16 ; unsigned int _max1 ; unsigned int _max2 ; { rv = 101; tmp = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp == 0) { return (118); } else { } tmp___0 = drbd_md_get_buffer(mdev); buffer = (struct meta_data_on_disk *)tmp___0; if ((unsigned long )buffer == (unsigned long )((struct meta_data_on_disk *)0)) { goto out; } else { } tmp___1 = drbd_md_sync_page_io(mdev, bdev, (sector_t )bdev->md.md_offset, 0); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Error while reading metadata.\n"); rv = 118; goto err; } else { } tmp___2 = __fswab32(buffer->magic); magic = tmp___2; tmp___3 = __fswab32(buffer->flags); flags = tmp___3; if (magic == 2205418092U || (magic == 2205418091U && (flags & 128U) == 0U)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Found unclean meta data. Did you \"drbdadm apply-al\"?\n"); rv = 165; goto err; } else { } if (magic != 2205418091U) { if (magic == 2205418090U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Meta data magic not found. 
Did you \"drbdadm create-md\"?\n"); } rv = 119; goto err; } else { } tmp___5 = __fswab32(buffer->al_offset); if (tmp___5 != (unsigned int )bdev->md.al_offset) { tmp___4 = __fswab32(buffer->al_offset); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "unexpected al_offset: %d (expected %d)\n", tmp___4, bdev->md.al_offset); rv = 119; goto err; } else { } tmp___7 = __fswab32(buffer->bm_offset); if (tmp___7 != (unsigned int )bdev->md.bm_offset) { tmp___6 = __fswab32(buffer->bm_offset); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "unexpected bm_offset: %d (expected %d)\n", tmp___6, bdev->md.bm_offset); rv = 119; goto err; } else { } tmp___9 = __fswab32(buffer->md_size_sect); if (tmp___9 != bdev->md.md_size_sect) { tmp___8 = __fswab32(buffer->md_size_sect); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "unexpected md_size: %u (expected %u)\n", tmp___8, bdev->md.md_size_sect); rv = 119; goto err; } else { } tmp___11 = __fswab32(buffer->bm_bytes_per_bit); if (tmp___11 != 4096U) { tmp___10 = __fswab32(buffer->bm_bytes_per_bit); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "unexpected bm_bytes_per_bit: %u (expected %u)\n", tmp___10, 4096); rv = 119; goto err; } else { } tmp___12 = __fswab64(buffer->la_size); bdev->md.la_size_sect = tmp___12; i = 0; goto ldv_53419; ldv_53418: tmp___13 = __fswab64(buffer->uuid[i]); bdev->md.uuid[i] = tmp___13; i = i + 1; ldv_53419: ; if (i <= 3) { goto ldv_53418; } else { } tmp___14 = __fswab32(buffer->flags); bdev->md.flags = tmp___14; tmp___15 = __fswab64(buffer->device_uuid); bdev->md.device_uuid = tmp___15; spin_lock_irq(& (mdev->tconn)->req_lock); if ((int )mdev->state.ldv_49522.conn <= 9) { tmp___16 = __fswab32(buffer->la_peer_max_bio_size); peer = tmp___16; _max1 = peer; _max2 = 4096U; peer = _max1 > _max2 ? 
_max1 : _max2; mdev->peer_max_bio_size = peer; } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); err: drbd_md_put_buffer(mdev); out: put_ldev(mdev); return (rv); } } void drbd_md_mark_dirty(struct drbd_conf *mdev ) { int tmp ; { tmp = test_and_set_bit(1, (unsigned long volatile *)(& mdev->flags)); if (tmp == 0) { mod_timer(& mdev->md_sync_timer, (unsigned long )jiffies + 1250UL); } else { } return; } } void drbd_uuid_move_history(struct drbd_conf *mdev ) { int i ; { i = 2; goto ldv_53433; ldv_53432: (mdev->ldev)->md.uuid[i + 1] = (mdev->ldev)->md.uuid[i]; i = i + 1; ldv_53433: ; if (i <= 2) { goto ldv_53432; } else { } return; } } void __drbd_uuid_set(struct drbd_conf *mdev , int idx , u64 val ) { { if (idx == 0) { if ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U) { val = val | 1ULL; } else { val = val & 0xfffffffffffffffeULL; } drbd_set_ed_uuid(mdev, val); } else { } (mdev->ldev)->md.uuid[idx] = val; drbd_md_mark_dirty(mdev); return; } } void _drbd_uuid_set(struct drbd_conf *mdev , int idx , u64 val ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& (mdev->ldev)->md.uuid_lock); flags = _raw_spin_lock_irqsave(tmp); __drbd_uuid_set(mdev, idx, val); spin_unlock_irqrestore(& (mdev->ldev)->md.uuid_lock, flags); return; } } void drbd_uuid_set(struct drbd_conf *mdev , int idx , u64 val ) { unsigned long flags ; raw_spinlock_t *tmp ; { tmp = spinlock_check(& (mdev->ldev)->md.uuid_lock); flags = _raw_spin_lock_irqsave(tmp); if ((mdev->ldev)->md.uuid[idx] != 0ULL) { drbd_uuid_move_history(mdev); (mdev->ldev)->md.uuid[2] = (mdev->ldev)->md.uuid[idx]; } else { } __drbd_uuid_set(mdev, idx, val); spin_unlock_irqrestore(& (mdev->ldev)->md.uuid_lock, flags); return; } } void drbd_uuid_new_current(struct drbd_conf *mdev ) { u64 val ; unsigned long long bm_uuid ; { get_random_bytes((void *)(& val), 8); spin_lock_irq(& (mdev->ldev)->md.uuid_lock); bm_uuid = (mdev->ldev)->md.uuid[1]; if (bm_uuid != 0ULL) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "bm UUID was already set: %llX\n", bm_uuid); } else { } (mdev->ldev)->md.uuid[1] = (mdev->ldev)->md.uuid[0]; __drbd_uuid_set(mdev, 0, val); spin_unlock_irq(& (mdev->ldev)->md.uuid_lock); drbd_print_uuids(mdev, "new current UUID"); drbd_md_sync(mdev); return; } } void drbd_uuid_set_bm(struct drbd_conf *mdev , u64 val ) { unsigned long flags ; raw_spinlock_t *tmp ; unsigned long long bm_uuid ; { if ((mdev->ldev)->md.uuid[1] == 0ULL && val == 0ULL) { return; } else { } tmp = spinlock_check(& (mdev->ldev)->md.uuid_lock); flags = _raw_spin_lock_irqsave(tmp); if (val == 0ULL) { drbd_uuid_move_history(mdev); (mdev->ldev)->md.uuid[2] = (mdev->ldev)->md.uuid[1]; (mdev->ldev)->md.uuid[1] = 0ULL; } else { bm_uuid = (mdev->ldev)->md.uuid[1]; if (bm_uuid != 0ULL) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "bm UUID was already set: %llX\n", bm_uuid); } else { } (mdev->ldev)->md.uuid[1] = val & 0xfffffffffffffffeULL; } spin_unlock_irqrestore(& (mdev->ldev)->md.uuid_lock, flags); drbd_md_mark_dirty(mdev); return; } } int drbd_bmio_set_n_write(struct drbd_conf *mdev ) { int rv ; int tmp ; { rv = -5; tmp = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp != 0) { drbd_md_set_flag(mdev, 8); drbd_md_sync(mdev); drbd_bm_set_all(mdev); rv = drbd_bm_write(mdev); if (rv == 0) { drbd_md_clear_flag(mdev, 8); drbd_md_sync(mdev); } else { } put_ldev(mdev); } else { } return (rv); } } int drbd_bmio_clear_n_write(struct drbd_conf *mdev ) { int rv ; int tmp ; { rv = -5; drbd_resume_al(mdev); tmp = 
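/* drbd_bmio_set_n_write() above and drbd_bmio_clear_n_write() here are
 * the two whole-bitmap writers: the first sets every bit and persists it
 * with metadata flag 8 (a full-sync marker, by inference) framing the
 * write; this one clears the bitmap after resuming the activity log.
 * Both return -EIO (-5) unless the backing device can be grabbed while
 * at least D_ATTACHING. */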
_get_ldev_if_state(mdev, D_ATTACHING); if (tmp != 0) { drbd_bm_clear_all(mdev); rv = drbd_bm_write(mdev); put_ldev(mdev); } else { } return (rv); } } static int w_bitmap_io(struct drbd_work *w , int unused ) { struct bm_io_work *work ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; int rv ; int tmp ; int tmp___0 ; { __mptr = (struct drbd_work const *)w; work = (struct bm_io_work *)__mptr; mdev = w->ldv_49807.mdev; rv = -5; tmp = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); if (tmp != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( atomic_read(&mdev->ap_bio_cnt) == 0 ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3274); } else { } tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 != 0) { drbd_bm_lock(mdev, work->why, work->flags); rv = (*(work->io_fn))(mdev); drbd_bm_unlock(mdev); put_ldev(mdev); } else { } clear_bit_unlock(9U, (unsigned long volatile *)(& mdev->flags)); __wake_up(& mdev->misc_wait, 3U, 1, 0); if ((unsigned long )work->done != (unsigned long )((void (*)(struct drbd_conf * , int ))0)) { (*(work->done))(mdev, rv); } else { } clear_bit(10, (unsigned long volatile *)(& mdev->flags)); work->why = 0; work->flags = 0; return (0); } } void drbd_ldev_destroy(struct drbd_conf *mdev ) { { lc_destroy(mdev->resync); mdev->resync = 0; lc_destroy(mdev->act_log); mdev->act_log = 0; drbd_free_bc(mdev->ldev); mdev->ldev = 0; clear_bit(11, (unsigned long volatile *)(& mdev->flags)); return; } } static int w_go_diskless(struct drbd_work *w , int unused ) { struct drbd_conf *mdev ; int tmp ; int tmp___0 ; union drbd_state val ; union drbd_state mask ; { mdev = w->ldv_49807.mdev; if ((unsigned int )*((unsigned char *)mdev + 749UL) != 4U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->state.disk == D_FAILED ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3313); } else { } if ((unsigned long )mdev->bitmap != (unsigned long )((struct drbd_bitmap *)0) && (unsigned long )mdev->ldev != (unsigned long )((struct drbd_backing_dev *)0)) { tmp___0 = drbd_bitmap_io_from_worker(mdev, & drbd_bm_write, (char *)"detach", BM_LOCKED_MASK); if (tmp___0 != 0) { tmp = constant_test_bit(13U, (unsigned long const volatile *)(& mdev->flags)); if (tmp != 0) { drbd_md_set_flag(mdev, 8); drbd_md_sync(mdev); } else { } } else { } } else { } val.i = 0U; val.ldv_40024.disk = 0U; mask.i = 0U; mask.ldv_40024.disk = 15U; drbd_force_state(mdev, mask, val); return (0); } } void drbd_go_diskless(struct drbd_conf *mdev ) { int tmp ; { if ((unsigned int )*((unsigned char *)mdev + 749UL) != 4U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->state.disk == D_FAILED ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3348); } else { } tmp = test_and_set_bit(11, (unsigned long volatile *)(& mdev->flags)); if (tmp == 0) { drbd_queue_work(& (mdev->tconn)->sender_work, & mdev->go_diskless); } else { } return; } } void drbd_queue_bitmap_io(struct drbd_conf *mdev , int (*io_fn)(struct drbd_conf * ) , void 
(*done)(struct drbd_conf * , int ) , char *why , enum bm_flag flags ) { struct task_struct *tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; { tmp = get_current(); if ((unsigned long )tmp != (unsigned long )(mdev->tconn)->worker.task) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( current == mdev->tconn->worker.task ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3370); } else { } tmp___0 = constant_test_bit(10U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___0 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !test_bit(BITMAP_IO_QUEUED, &mdev->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3372); } else { } tmp___1 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( !test_bit(BITMAP_IO, &mdev->flags) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3373); } else { } tmp___2 = list_empty((struct list_head const *)(& mdev->bm_io_work.w.list)); if (tmp___2 == 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( list_empty(&mdev->bm_io_work.w.list) ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3374); } else { } if ((unsigned long )mdev->bm_io_work.why != (unsigned long )((char *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "FIXME going to queue \'%s\' but \'%s\' still pending?\n", why, mdev->bm_io_work.why); } else { } mdev->bm_io_work.io_fn = io_fn; mdev->bm_io_work.done = done; mdev->bm_io_work.why = why; mdev->bm_io_work.flags = flags; spin_lock_irq(& (mdev->tconn)->req_lock); set_bit(9U, (unsigned long volatile *)(& mdev->flags)); tmp___4 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); if (tmp___4 == 0) { tmp___3 = test_and_set_bit(10, (unsigned long volatile *)(& mdev->flags)); if (tmp___3 == 0) { drbd_queue_work(& (mdev->tconn)->sender_work, & mdev->bm_io_work.w); } else { } } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); return; } } int drbd_bitmap_io(struct drbd_conf *mdev , int (*io_fn)(struct drbd_conf * ) , char *why , enum bm_flag flags ) { int rv ; struct task_struct *tmp ; { tmp = get_current(); if ((unsigned long )tmp == (unsigned long )(mdev->tconn)->worker.task) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( current != mdev->tconn->worker.task ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3407); } else { } if (((unsigned int )flags & 9U) == 0U) { drbd_suspend_io(mdev); } else { } drbd_bm_lock(mdev, why, flags); rv = (*io_fn)(mdev); drbd_bm_unlock(mdev); if (((unsigned int )flags & 9U) == 0U) { 
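/* flags & 9U tests the bitmap-lock bits that decide whether application
 * IO was suspended around the bitmap transfer; the resume below mirrors
 * the drbd_suspend_io() taken under the same condition before io_fn ran. */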
drbd_resume_io(mdev); } else { } return (rv); } } void drbd_md_set_flag(struct drbd_conf *mdev , int flag ) { { if (((mdev->ldev)->md.flags & (u32 )flag) != (u32 )flag) { drbd_md_mark_dirty(mdev); (mdev->ldev)->md.flags = (mdev->ldev)->md.flags | (u32 )flag; } else { } return; } } void drbd_md_clear_flag(struct drbd_conf *mdev , int flag ) { { if (((mdev->ldev)->md.flags & (u32 )flag) != 0U) { drbd_md_mark_dirty(mdev); (mdev->ldev)->md.flags = (mdev->ldev)->md.flags & (u32 )(~ flag); } else { } return; } } int drbd_md_test_flag(struct drbd_backing_dev *bdev , int flag ) { { return ((bdev->md.flags & (u32 )flag) != 0U); } } static void md_sync_timer_fn(unsigned long data ) { struct drbd_conf *mdev ; int tmp ; { mdev = (struct drbd_conf *)data; tmp = list_empty((struct list_head const *)(& mdev->md_sync_work.list)); if (tmp != 0) { drbd_queue_work_front(& (mdev->tconn)->sender_work, & mdev->md_sync_work); } else { } return; } } static int w_md_sync(struct drbd_work *w , int unused ) { struct drbd_conf *mdev ; { mdev = w->ldv_49807.mdev; dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "md_sync_timer expired! Worker calls drbd_md_sync().\n"); drbd_md_sync(mdev); return (0); } } char const *cmdname(enum drbd_packet cmd ) { char const *cmdnames[46U] ; { cmdnames[0] = "Data"; cmdnames[1] = "DataReply"; cmdnames[2] = "RSDataReply"; cmdnames[3] = "Barrier"; cmdnames[4] = "ReportBitMap"; cmdnames[5] = "BecomeSyncTarget"; cmdnames[6] = "BecomeSyncSource"; cmdnames[7] = "UnplugRemote"; cmdnames[8] = "DataRequest"; cmdnames[9] = "RSDataRequest"; cmdnames[10] = "SyncParam"; cmdnames[11] = "ReportProtocol"; cmdnames[12] = "ReportUUIDs"; cmdnames[13] = "ReportSizes"; cmdnames[14] = "ReportState"; cmdnames[15] = "ReportSyncUUID"; cmdnames[16] = "AuthChallenge"; cmdnames[17] = "AuthResponse"; cmdnames[18] = "StateChgRequest"; cmdnames[19] = "Ping"; cmdnames[20] = "PingAck"; cmdnames[21] = "RecvAck"; cmdnames[22] = "WriteAck"; cmdnames[23] = "RSWriteAck"; cmdnames[24] = "Superseded"; cmdnames[25] = "NegAck"; cmdnames[26] = "NegDReply"; cmdnames[27] = "NegRSDReply"; cmdnames[28] = "BarrierAck"; cmdnames[29] = "StateChgReply"; cmdnames[30] = "OVRequest"; cmdnames[31] = "OVReply"; cmdnames[32] = "OVResult"; cmdnames[33] = "CsumRSRequest"; cmdnames[34] = "CsumRSIsInSync"; cmdnames[35] = "SyncParam89"; cmdnames[36] = "CBitmap"; cmdnames[37] = 0; cmdnames[38] = 0; cmdnames[39] = "DelayProbe"; cmdnames[40] = "OutOfSync"; cmdnames[41] = "RSCancel"; cmdnames[42] = "conn_st_chg_req"; cmdnames[43] = "conn_st_chg_reply"; cmdnames[44] = "retry_write"; cmdnames[45] = "protocol_update"; if ((unsigned int )cmd == 65521U) { return ("InitialMeta"); } else { } if ((unsigned int )cmd == 65522U) { return ("InitialData"); } else { } if ((unsigned int )cmd == 65534U) { return ("ConnectionFeatures"); } else { } if ((unsigned int )cmd > (unsigned int )P_PROTOCOL_UPDATE) { return ("Unknown"); } else { } return (cmdnames[(unsigned int )cmd]); } } int drbd_wait_misc(struct drbd_conf *mdev , struct drbd_interval *i ) { struct net_conf *nc ; wait_queue_t wait ; struct task_struct *tmp ; long timeout ; struct net_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; struct task_struct *tmp___2 ; int tmp___3 ; { tmp = get_current(); wait.flags = 0U; wait.private = (void *)tmp; wait.func = & autoremove_wake_function; wait.task_list.next = & wait.task_list; wait.task_list.prev = & wait.task_list; rcu_read_lock___5(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___0 = 
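/* drbd_wait_misc() timeout arithmetic: nc->timeout is in tenths of a
 * second, so (timeout * 250U) / 10U converts to jiffies at HZ == 250,
 * multiplied by ko_count retries; a ko_count of 0 waits forever, since
 * 9223372036854775807L is LONG_MAX, i.e. MAX_SCHEDULE_TIMEOUT.  The -110
 * and -512 returned below are -ETIMEDOUT and -ERESTARTSYS. */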
debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_main.c.prepared", 3547, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((unsigned long )nc == (unsigned long )((struct net_conf *)0)) { rcu_read_unlock___5(); return (-110); } else { } timeout = nc->ko_count != 0U ? (long )(((nc->timeout * 250U) / 10U) * nc->ko_count) : 9223372036854775807L; rcu_read_unlock___5(); i->waiting = -1; prepare_to_wait(& mdev->misc_wait, & wait, 1); spin_unlock_irq(& (mdev->tconn)->req_lock); timeout = schedule_timeout(timeout); finish_wait(& mdev->misc_wait, & wait); spin_lock_irq(& (mdev->tconn)->req_lock); if (timeout == 0L || (int )mdev->state.ldv_49522.conn <= 9) { return (-110); } else { } tmp___2 = get_current(); tmp___3 = signal_pending(tmp___2); if (tmp___3 != 0) { return (-512); } else { } return (0); } } static unsigned long _drbd_fault_random(struct fault_random_state *rsp ) { long refresh ; unsigned long tmp ; __u32 tmp___0 ; { tmp = rsp->count; rsp->count = rsp->count - 1UL; if (tmp == 0UL) { get_random_bytes((void *)(& refresh), 8); rsp->state = rsp->state + (unsigned long )refresh; rsp->count = 10000UL; } else { } rsp->state = rsp->state * 39916801UL + 479001701UL; tmp___0 = __fswahw32((__u32 )rsp->state); return ((unsigned long )tmp___0); } } static char *_drbd_fault_str(unsigned int type ) { char *_faults[10U] ; { _faults[0] = (char *)"Meta-data write"; _faults[1] = (char *)"Meta-data read"; _faults[2] = (char *)"Resync write"; _faults[3] = (char *)"Resync read"; _faults[4] = (char *)"Data write"; _faults[5] = (char *)"Data read"; _faults[6] = (char *)"Data read ahead"; _faults[7] = (char *)"BM allocation"; _faults[8] = (char *)"EE allocation"; _faults[9] = (char *)"receive data corruption"; return (type <= 9U ? 
_faults[type] : (char *)"**Unknown**"); } } /* Fault injection helper: a request fails with probability fault_rate percent, on minors enabled in the fault_devs mask (0 selects all devices). */ unsigned int _drbd_insert_fault(struct drbd_conf *mdev , unsigned int type ) { struct fault_random_state rrs ; unsigned int ret ; unsigned int tmp ; unsigned long tmp___0 ; int tmp___1 ; char *tmp___2 ; int tmp___3 ; { rrs.state = 0UL; rrs.count = 0UL; if (fault_devs == 0) { goto _L; } else { tmp = mdev_to_minor(mdev); if ((fault_devs >> (int )tmp) & 1) { _L: /* CIL Label */ tmp___0 = _drbd_fault_random(& rrs); if (tmp___0 % 100UL + 1UL <= (unsigned long )fault_rate) { tmp___1 = 1; } else { tmp___1 = 0; } } else { tmp___1 = 0; } } ret = (unsigned int )tmp___1; if (ret != 0U) { fault_count = fault_count + 1; tmp___3 = ___ratelimit(& drbd_ratelimit_state, "_drbd_insert_fault"); if (tmp___3 != 0) { tmp___2 = _drbd_fault_str(type); dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "***Simulating %s failure\n", tmp___2); } else { } } else { } return (ret); } } char const *drbd_buildtag(void) { static char buildtag[38U] ; /* 'static' restored as in upstream drbd_buildtag(); returning a non-static local buffer would hand the caller a dangling pointer */ unsigned int tmp ; { buildtag[0] = '\000'; tmp = 1U; while (1) { if (tmp >= 38U) { break; } else { } buildtag[tmp] = (char)0; tmp = tmp + 1U; } if ((int )((signed char )buildtag[0]) == 0) { sprintf((char *)(& buildtag), "srcversion: %-24s", __this_module.srcversion); } else { } return ((char const *)(& buildtag)); } } /* LDV environment model entry point: after drbd_init() succeeds, it nondeterministically invokes the module's block_device_operations (open/release, with release only after a successful open), the sysfs notifier callback and the md_sync timer, until the verifier chooses to exit through drbd_cleanup(). */ void ldv_main6_sequence_infinite_withcheck_stateful(void) { struct block_device *var_group1 ; fmode_t var_drbd_open_73_p1 ; int res_drbd_open_73 ; struct gendisk *var_group2 ; fmode_t var_drbd_release_74_p1 ; struct notifier_block *var_group3 ; unsigned long var_drbd_notify_sys_80_p1 ; void *var_drbd_notify_sys_80_p2 ; unsigned long var_md_sync_timer_fn_120_p0 ; int ldv_s_drbd_ops_block_device_operations ; int tmp ; int tmp___0 ; int tmp___1 ; { ldv_s_drbd_ops_block_device_operations = 0; LDV_IN_INTERRUPT = 1; ldv_initialize(); ldv_handler_precall(); tmp = drbd_init(); if (tmp != 0) { goto ldv_final; } else { } goto ldv_53646; ldv_53645: tmp___0 = nondet_int(); switch (tmp___0) { case 0: ; if (ldv_s_drbd_ops_block_device_operations == 0) { ldv_handler_precall(); res_drbd_open_73 = drbd_open(var_group1, var_drbd_open_73_p1); ldv_check_return_value(res_drbd_open_73); if (res_drbd_open_73 != 0) { goto ldv_module_exit; } else { } ldv_s_drbd_ops_block_device_operations = ldv_s_drbd_ops_block_device_operations + 1; } else { } goto ldv_53640; case 1: ; if (ldv_s_drbd_ops_block_device_operations == 1) { ldv_handler_precall(); drbd_release(var_group2, var_drbd_release_74_p1); ldv_s_drbd_ops_block_device_operations = 0; } else { } goto ldv_53640; case 2: ldv_handler_precall(); drbd_notify_sys(var_group3, var_drbd_notify_sys_80_p1, var_drbd_notify_sys_80_p2); goto ldv_53640; case 3: ldv_handler_precall(); md_sync_timer_fn(var_md_sync_timer_fn_120_p0); goto ldv_53640; default: ; goto ldv_53640; } ldv_53640: ; ldv_53646: tmp___1 = nondet_int(); if (tmp___1 != 0 || ldv_s_drbd_ops_block_device_operations != 0) { goto ldv_53645; } else { } ldv_module_exit: ldv_handler_precall(); drbd_cleanup(); ldv_final: ldv_check_final_state(); return; } } void ldv_mutex_lock_193(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_194(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_195(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_196(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 
ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_197(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_198(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_199(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_200(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_201(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_202(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_203(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_204(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_205(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_206(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_207(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_208(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_209(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_210(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_211(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_212(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_213(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_214(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_215(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_drbd_main_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_216(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_drbd_main_mutex(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_217(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_drbd_main_mutex(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_218(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_drbd_main_mutex(ldv_func_arg1); 
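/* Editor's note: the ldv_mutex_{lock,unlock,trylock}_NNN functions here and
 * below are Linux Driver Verification (LDV) instrumentation. Each call site of
 * the real mutex API was redirected to a numbered wrapper that first notifies a
 * per-lock-class model function (e.g. ldv_mutex_lock_mutex_of_drbd_socket) so
 * that lock/unlock pairing can be checked, and then performs the real
 * mutex_lock()/mutex_unlock(). In the trylock wrappers the model's verdict is
 * returned directly, which leaves the generated `return (ldv_func_res);` behind
 * as unreachable code. */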
mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_219(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_220(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_221(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_222(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_256(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_254(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_257(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_259(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_261(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_263(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_253(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_255(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_258(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_260(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_262(struct mutex *ldv_func_arg1 ) ; static char const *drbd_conn_s_names[24U] = { "StandAlone", "Disconnecting", "Unconnected", "Timeout", "BrokenPipe", "NetworkFailure", "ProtocolError", "TearDown", "WFConnection", "WFReportParams", "Connected", "StartingSyncS", "StartingSyncT", "WFBitMapS", "WFBitMapT", "WFSyncUUID", "SyncSource", "SyncTarget", "VerifyS", "VerifyT", "PausedSyncS", "PausedSyncT", "Ahead", "Behind"}; static char const *drbd_role_s_names[3U] = { "Unknown", "Primary", "Secondary"}; static char const *drbd_disk_s_names[9U] = { "Diskless", "Attaching", "Failed", "Negotiating", "Inconsistent", "Outdated", "DUnknown", "Consistent", "UpToDate"}; static char const *drbd_state_sw_errors[21U] = { 0, "Multiple primaries not allowed by config", "Need access to UpToDate data", 0, "Can not resync without local disk", "Can not resync without remote disk", "Refusing to be Outdated while Connected", "Refusing to be Primary while peer is not outdated", "Can not start OV/resync since it is already active", "Can not disconnect a StandAlone device", "State change was refused by peer node", "Device is diskless, the requested operation requires a disk", "Device is held open by someone", "Have no net/connection configuration", "Need a verify algorithm to start online verify", "Need a connection to start verify or resync", "Disk state is lower than outdated", "Peer does not support protocol", "In transient state, retry after next state change", "Concurrent state changes detected and aborted", "Other vol primary on peer not allowed by config"}; char const *drbd_conn_str(enum drbd_conns s ) { { return ((unsigned int )s <= 23U ? drbd_conn_s_names[(unsigned int )s] : "TOO_LARGE"); } } char const *drbd_role_str(enum drbd_role s ) { { return ((unsigned int )s <= 2U ? drbd_role_s_names[(unsigned int )s] : "TOO_LARGE"); } } char const *drbd_disk_str(enum drbd_disk_state s ) { { return ((unsigned int )s <= 8U ? drbd_disk_s_names[(unsigned int )s] : "TOO_LARGE"); } } char const *drbd_set_st_err_str(enum drbd_state_rv err ) { { return ((int )err >= -20 ? ((int )err < 0 ? 
drbd_state_sw_errors[- ((int )err)] : "TOO_LARGE") : "TOO_SMALL"); } } void ldv_mutex_lock_253(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_254(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_255(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_256(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_257(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_258(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_259(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_260(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_261(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_262(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_263(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } extern char *strchr(char const * , int ) ; int ldv_mutex_trylock_278(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_276(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_281(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_283(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_285(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_288(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_289(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_291(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_292(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_295(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_296(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_297(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_298(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_300(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_301(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_303(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_305(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_275(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_277(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_280(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_282(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_284(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_286(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_287(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_290(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_293(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_294(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_lock_299(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_302(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_304(struct mutex *ldv_func_arg1 ) ; __inline static struct thread_info *current_thread_info___6(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6398; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6398; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6398; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6398; default: __bad_percpu_size(); } ldv_6398: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } __inline static void __rcu_read_lock___6(void) { struct thread_info *tmp ; { tmp = current_thread_info___6(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___6(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___6(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___6(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___6(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___6(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___6(); return; } } extern int call_usermodehelper_fns(char * , char ** , char ** , int , int (*)(struct subprocess_info * , struct cred * ) , void (*)(struct subprocess_info * ) , void * ) ; __inline static int call_usermodehelper(char *path , char **argv , char **envp , int wait ) { int tmp ; { tmp = call_usermodehelper_fns(path, argv, envp, wait, 0, 0, 0); return (tmp); } } extern int kobject_uevent(struct kobject * , enum kobject_action ) ; __inline static void list_del_rcu(struct list_head *entry ) { { __list_del_entry(entry); entry->prev = 0xdead000000200200UL; return; } } extern bool capable(int ) ; extern struct block_device *blkdev_get_by_path(char const * , fmode_t , void * ) ; extern void kfree_skb(struct sk_buff * ) ; extern struct sk_buff *__alloc_skb(unsigned int , gfp_t , int , int ) ; __inline static struct sk_buff *alloc_skb(unsigned int size , gfp_t priority ) { struct sk_buff *tmp ; { tmp = __alloc_skb(size, priority, 0, -1); return (tmp); } } __inline static unsigned char *skb_tail_pointer(struct sk_buff const *skb ) { { return ((unsigned char *)skb->head + (unsigned long )skb->tail); } } extern void skb_trim(struct sk_buff * , unsigned int ) ; extern struct net init_net ; __inline static struct net *read_pnet(struct net * const *pnet ) { { return ((struct net *)*pnet); } } __inline static struct nlmsghdr *nlmsg_hdr(struct sk_buff const *skb ) { { return ((struct nlmsghdr *)skb->data); } } extern int netlink_unicast(struct sock * , struct sk_buff * , __u32 , int ) ; extern int netlink_broadcast(struct sock * , struct sk_buff * , __u32 , __u32 , gfp_t ) ; extern struct nlattr *nla_find(struct nlattr const * , int , int ) ; extern size_t nla_strlcpy(char * , struct nlattr const * , size_t ) ; extern int nla_memcpy(void * , struct nlattr const * , int ) ; extern int nla_put(struct sk_buff * , int , int , void const * ) ; extern int nla_put_nohdr(struct sk_buff * , int , void const * ) ; __inline static int nlmsg_msg_size(int payload ) { { return (payload + 16); } } __inline static int nlmsg_total_size(int payload ) { int tmp ; { tmp = nlmsg_msg_size(payload); return ((int )((unsigned int )tmp + 3U) & -4); } } __inline static void *nlmsg_data(struct nlmsghdr const *nlh ) { { return ((void *)nlh + 16U); } } __inline static int nlmsg_len(struct nlmsghdr const *nlh ) { { return ((int )((unsigned int )nlh->nlmsg_len - 16U)); } } __inline static struct nlattr *nlmsg_attrdata(struct nlmsghdr const *nlh , int hdrlen ) { unsigned char *data ; void *tmp ; { tmp = nlmsg_data(nlh); data = (unsigned char *)tmp; return ((struct nlattr *)(data + ((unsigned long )((unsigned int )hdrlen + 3U) & 4294967292UL))); } } __inline static int nlmsg_attrlen(struct nlmsghdr const *nlh , int hdrlen ) { int tmp ; { tmp = nlmsg_len(nlh); return ((int )((unsigned int )tmp - (((unsigned int )hdrlen + 3U) & 4294967292U))); } } __inline static struct sk_buff *nlmsg_new(size_t payload , gfp_t flags ) { int tmp ; struct sk_buff *tmp___0 ; { tmp = nlmsg_total_size((int )payload); tmp___0 = alloc_skb((unsigned int )tmp, flags); return (tmp___0); } } __inline static int nlmsg_end(struct sk_buff *skb , struct nlmsghdr *nlh ) { unsigned char *tmp ; { tmp = skb_tail_pointer((struct sk_buff const *)skb); nlh->nlmsg_len = (__u32 )((long )tmp) - (__u32 )((long )nlh); 
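/* nlmsg_end() finalizes nlmsg_len as the distance from the message header to
 * the current skb tail and reports the total skb length; nlmsg_trim() and
 * nlmsg_cancel() below are the inverse, trimming a partially built message
 * back to a saved mark. */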
return ((int )skb->len); } } __inline static void nlmsg_trim(struct sk_buff *skb , void const *mark ) { { if ((unsigned long )mark != (unsigned long )((void const *)0)) { skb_trim(skb, (unsigned int )((long )mark) - (unsigned int )((long )skb->data)); } else { } return; } } __inline static void nlmsg_cancel(struct sk_buff *skb , struct nlmsghdr *nlh ) { { nlmsg_trim(skb, (void const *)nlh); return; } } __inline static void nlmsg_free(struct sk_buff *skb ) { { kfree_skb(skb); return; } } __inline static int nlmsg_multicast(struct sock *sk , struct sk_buff *skb , u32 portid , unsigned int group , gfp_t flags ) { int err ; { ((struct netlink_skb_parms *)(& skb->cb))->dst_group = group; err = netlink_broadcast(sk, skb, portid, group, flags); if (err > 0) { err = 0; } else { } return (err); } } __inline static int nlmsg_unicast(struct sock *sk , struct sk_buff *skb , u32 portid ) { int err ; { err = netlink_unicast(sk, skb, portid, 64); if (err > 0) { err = 0; } else { } return (err); } } __inline static void *nla_data(struct nlattr const *nla ) { { return ((void *)nla + 4U); } } __inline static int nla_len(struct nlattr const *nla ) { { return ((int )nla->nla_len + -4); } } __inline static int nla_put_u8(struct sk_buff *skb , int attrtype , u8 value ) { int tmp ; { tmp = nla_put(skb, attrtype, 1, (void const *)(& value)); return (tmp); } } __inline static int nla_put_u32(struct sk_buff *skb , int attrtype , u32 value ) { int tmp ; { tmp = nla_put(skb, attrtype, 4, (void const *)(& value)); return (tmp); } } __inline static int nla_put_u64(struct sk_buff *skb , int attrtype , u64 value ) { int tmp ; { tmp = nla_put(skb, attrtype, 8, (void const *)(& value)); return (tmp); } } __inline static int nla_put_string(struct sk_buff *skb , int attrtype , char const *str ) { size_t tmp ; int tmp___0 ; { tmp = strlen(str); tmp___0 = nla_put(skb, attrtype, (int )((unsigned int )tmp + 1U), (void const *)str); return (tmp___0); } } __inline static u32 nla_get_u32(struct nlattr const *nla ) { void *tmp ; { tmp = nla_data(nla); return (*((u32 *)tmp)); } } __inline static u8 nla_get_u8(struct nlattr const *nla ) { void *tmp ; { tmp = nla_data(nla); return (*((u8 *)tmp)); } } __inline static u64 nla_get_u64(struct nlattr const *nla ) { u64 tmp ; { nla_memcpy((void *)(& tmp), nla, 8); return (tmp); } } __inline static struct nlattr *nla_nest_start(struct sk_buff *skb , int attrtype ) { struct nlattr *start ; unsigned char *tmp ; int tmp___0 ; { tmp = skb_tail_pointer((struct sk_buff const *)skb); start = (struct nlattr *)tmp; tmp___0 = nla_put(skb, attrtype, 0, 0); if (tmp___0 < 0) { return (0); } else { } return (start); } } __inline static int nla_nest_end(struct sk_buff *skb , struct nlattr *start ) { unsigned char *tmp ; { tmp = skb_tail_pointer((struct sk_buff const *)skb); start->nla_len = (int )((__u16 )((long )tmp)) - (int )((__u16 )((long )start)); return ((int )skb->len); } } __inline static void nla_nest_cancel(struct sk_buff *skb , struct nlattr *start ) { { nlmsg_trim(skb, (void const *)start); return; } } extern void blk_queue_max_segments(struct request_queue * , unsigned short ) ; extern void blk_queue_logical_block_size(struct request_queue * , unsigned short ) ; extern void blk_queue_stack_limits(struct request_queue * , struct request_queue * ) ; extern void blk_queue_segment_boundary(struct request_queue * , unsigned long ) ; extern struct lru_cache *lc_create(char const * , struct kmem_cache * , unsigned int , unsigned int , size_t , size_t ) ; extern int lc_try_lock(struct lru_cache * ) ; 
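/*
 * Editor's sketch (not part of the original module): it illustrates how the
 * nla_* helpers defined above compose when building a nested netlink attribute,
 * the pattern the DRBD genl code further below relies on. The function name and
 * the attribute type numbers (7, 1, 2) are arbitrary placeholders.
 */
static int ldv_example_put_nested(struct sk_buff *skb , u32 volume , u8 flag )
{
  struct nlattr *nest ;
  nest = nla_nest_start(skb, 7); /* reserves a zero-length header attribute */
  if ((unsigned long )nest == (unsigned long )((struct nlattr *)0)) {
    return (-12); /* no tailroom left in the skb (-ENOMEM) */
  }
  if (nla_put_u32(skb, 1, volume) < 0) {
    goto fail;
  }
  if (nla_put_u8(skb, 2, flag) < 0) {
    goto fail;
  }
  /* patch nest->nla_len to cover everything emitted since nla_nest_start() */
  return (nla_nest_end(skb, nest));
  fail:
  nla_nest_cancel(skb, nest); /* trim the skb back to where the nest began */
  return (-90); /* -EMSGSIZE */
}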
bool conn_all_vols_unconf(struct drbd_tconn *tconn ) ; __inline static int drbd_request_state(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val ) { enum drbd_state_rv tmp ; { tmp = _drbd_request_state(mdev, mask, val, 14); return ((int )tmp); } } enum drbd_role conn_highest_peer(struct drbd_tconn *tconn ) ; enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn ) ; bool conn_try_outdate_peer(struct drbd_tconn *tconn ) ; __inline static sector_t drbd_md_first_sector___0(struct drbd_backing_dev *bdev ) { int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; sector_t tmp___1 ; { rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1775, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___6(); tmp___1 = _drbd_md_first_sector(meta_dev_idx, bdev); return (tmp___1); } } __inline static sector_t drbd_get_max_capacity___1(struct drbd_backing_dev *bdev ) { sector_t s ; int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; sector_t __min1 ; sector_t __min2 ; sector_t tmp___2 ; sector_t tmp___3 ; sector_t __min1___0 ; sector_t __min2___0 ; sector_t tmp___4 ; sector_t __min1___1 ; sector_t __min2___1 ; sector_t __min1___2 ; sector_t __min2___2 ; sector_t tmp___5 ; { rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1824, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___6(); switch (meta_dev_idx) { case -1: ; case -3: tmp___3 = drbd_get_capacity(bdev->backing_bdev); if (tmp___3 != 0UL) { __min1 = 2251799813685248UL; tmp___2 = _drbd_md_first_sector(meta_dev_idx, bdev); __min2 = tmp___2; s = __min1 < __min2 ? __min1 : __min2; } else { s = 0UL; } goto ldv_51783; case -2: __min1___0 = 2251799813685248UL; tmp___4 = drbd_get_capacity(bdev->backing_bdev); __min2___0 = tmp___4; s = __min1___0 < __min2___0 ? __min1___0 : __min2___0; __min1___1 = s; __min2___1 = (unsigned long )(bdev->md.md_size_sect - (u32 )bdev->md.bm_offset) << 15; s = __min1___1 < __min2___1 ? __min1___1 : __min2___1; goto ldv_51783; default: __min1___2 = 8587575296UL; tmp___5 = drbd_get_capacity(bdev->backing_bdev); __min2___2 = tmp___5; s = __min1___2 < __min2___2 ? __min1___2 : __min2___2; } ldv_51783: ; return (s); } } __inline static sector_t drbd_md_ss_____0(struct drbd_conf *mdev , struct drbd_backing_dev *bdev ) { int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; sector_t tmp___2 ; { rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/inst/current/envs/linux/linux/drivers/block/drbd/drbd_int.h", 1861, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; rcu_read_unlock___6(); switch (meta_dev_idx) { default: ; return ((unsigned long )meta_dev_idx * 262144UL); case -1: ; case -3: ; if ((unsigned long )bdev->backing_bdev == (unsigned long )((struct block_device *)0)) { tmp___1 = ___ratelimit(& drbd_ratelimit_state, "drbd_md_ss__"); if (tmp___1 != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "bdev->backing_bdev==NULL\n"); dump_stack(); } else { } return (0UL); } else { } tmp___2 = drbd_get_capacity(bdev->backing_bdev); return ((sector_t )(((unsigned long long )tmp___2 & 0xfffffffffffffff8ULL) - 8ULL)); case -2: ; return (0UL); } } } __inline static struct net *genl_info_net(struct genl_info *info ) { struct net *tmp ; { tmp = read_pnet((struct net * const *)(& info->_net)); return (tmp); } } extern int genl_register_family_with_ops(struct genl_family * , struct genl_ops * , size_t ) ; extern int genl_unregister_family(struct genl_family * ) ; extern int genl_register_mc_group(struct genl_family * , struct genl_multicast_group * ) ; extern void *genlmsg_put(struct sk_buff * , u32 , u32 , struct genl_family * , int , u8 ) ; __inline static void *genlmsg_put_reply(struct sk_buff *skb , struct genl_info *info , struct genl_family *family , int flags , u8 cmd ) { void *tmp ; { tmp = genlmsg_put(skb, info->snd_portid, info->snd_seq, family, flags, (int )cmd); return (tmp); } } __inline static int genlmsg_end(struct sk_buff *skb , void *hdr ) { int tmp ; { tmp = nlmsg_end(skb, (struct nlmsghdr *)hdr + 0xffffffffffffffecUL); return (tmp); } } __inline static void genlmsg_cancel(struct sk_buff *skb , void *hdr ) { { if ((unsigned long )hdr != (unsigned long )((void *)0)) { nlmsg_cancel(skb, (struct nlmsghdr *)hdr + 0xffffffffffffffecUL); } else { } return; } } __inline static int genlmsg_multicast_netns(struct net *net , struct sk_buff *skb , u32 portid , unsigned int group , gfp_t flags ) { int tmp ; { tmp = nlmsg_multicast(net->genl_sock, skb, portid, group, flags); return (tmp); } } __inline static int genlmsg_multicast(struct sk_buff *skb , u32 portid , unsigned int group , gfp_t flags ) { int tmp ; { tmp = genlmsg_multicast_netns(& init_net, skb, portid, group, flags); return (tmp); } } __inline static int genlmsg_unicast(struct net *net , struct sk_buff *skb , u32 portid ) { int tmp ; { tmp = nlmsg_unicast(net->genl_sock, skb, portid); return (tmp); } } __inline static int genlmsg_reply(struct sk_buff *skb , struct genl_info *info ) { struct net *tmp ; int tmp___0 ; { tmp = genl_info_net(info); tmp___0 = genlmsg_unicast(tmp, skb, info->snd_portid); return (tmp___0); } } __inline static void *genlmsg_data(struct genlmsghdr const *gnlh ) { { return ((void *)gnlh + 4U); } } __inline static int genlmsg_msg_size(int payload ) { { return ((int )((unsigned int )payload + 4U)); } } __inline static int genlmsg_total_size(int payload ) { int tmp ; { tmp = genlmsg_msg_size(payload); return ((int )((unsigned int )tmp + 3U) & -4); } } __inline static struct sk_buff *genlmsg_new(size_t payload , gfp_t flags ) { int tmp ; struct sk_buff *tmp___0 ; { tmp = genlmsg_total_size((int )payload); tmp___0 = nlmsg_new((size_t )tmp, flags); return (tmp___0); } } int drbd_adm_add_minor(struct sk_buff *skb , struct genl_info *info ) ; int 
drbd_adm_delete_minor(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_new_resource(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_del_resource(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_down(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_set_role(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_attach(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_disk_opts(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_detach(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_connect(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_net_opts(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_resize(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_start_ov(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_new_c_uuid(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_disconnect(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_invalidate(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_invalidate_peer(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_pause_sync(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_resume_sync(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_suspend_io(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_resume_io(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_outdate(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_resource_opts(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_get_status(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_get_timeout_type(struct sk_buff *skb , struct genl_info *info ) ; int drbd_adm_get_status_all(struct sk_buff *skb , struct netlink_callback *cb ) ; int drbd_nla_parse_nested(struct nlattr **tb , int maxtype , struct nlattr *nla , struct nla_policy const *policy ) ; struct nlattr *drbd_nla_find_nested(int maxtype , struct nlattr *nla , int attrtype ) ; static struct nla_policy drbd_tla_nl_policy[14U] = { {(unsigned short)0, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}, {8U, (unsigned short)0}}; static struct nla_policy drbd_cfg_context_nl_policy[5U] = { {(unsigned short)0, (unsigned short)0}, {3U, (unsigned short)0}, {10U, 127U}, {11U, 128U}, {11U, 128U}}; static struct nla_policy disk_conf_nl_policy[24U] = { {(unsigned short)0, (unsigned short)0}, {10U, 127U}, {10U, 127U}, {3U, (unsigned short)0}, {4U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {(unsigned short)0, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nla_policy res_opts_nl_policy[3U] = { {(unsigned short)0, (unsigned short)0}, {10U, 31U}, {3U, (unsigned short)0}}; static struct nla_policy net_conf_nl_policy[30U] = { {(unsigned short)0, (unsigned short)0}, {10U, 63U}, {10U, 63U}, {10U, 63U}, {10U, 63U}, {10U, 63U}, 
{3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {3U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nla_policy set_role_parms_nl_policy[2U] = { {(unsigned short)0, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nla_policy resize_parms_nl_policy[4U] = { {(unsigned short)0, (unsigned short)0}, {4U, (unsigned short)0}, {1U, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nla_policy start_ov_parms_nl_policy[3U] = { {(unsigned short)0, (unsigned short)0}, {4U, (unsigned short)0}, {4U, (unsigned short)0}}; static struct nla_policy new_c_uuid_parms_nl_policy[2U] = { {(unsigned short)0, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nla_policy disconnect_parms_nl_policy[2U] = { {(unsigned short)0, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nla_policy detach_parms_nl_policy[2U] = { {(unsigned short)0, (unsigned short)0}, {1U, (unsigned short)0}}; static struct nlattr *nested_attr_tb[128U] ; static int __drbd_cfg_context_from_attrs(struct drbd_cfg_context *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; size_t tmp ; int tmp___0 ; int tmp___1 ; { maxtype = 4; tla = *(info->attrs + 2UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& drbd_cfg_context_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct drbd_cfg_context *)0)) { s->ctx_volume = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 2UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct drbd_cfg_context *)0)) { tmp = nla_strlcpy((char *)(& s->ctx_resource_name), (struct nlattr const *)nla, 128UL); s->ctx_resource_name_len = (__u32 )tmp; } else { } } else { } nla = *(ntb + 3UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct drbd_cfg_context *)0)) { tmp___0 = nla_memcpy((void *)(& s->ctx_my_addr), (struct nlattr const *)nla, 128); s->ctx_my_addr_len = (__u32 )tmp___0; } else { } } else { } nla = *(ntb + 4UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct drbd_cfg_context *)0)) { tmp___1 = nla_memcpy((void *)(& s->ctx_peer_addr), (struct nlattr const *)nla, 128); s->ctx_peer_addr_len = (__u32 )tmp___1; } else { } } else { } return (0); } } static int drbd_cfg_context_from_attrs(struct drbd_cfg_context *s , struct genl_info *info ) { int tmp ; { tmp = __drbd_cfg_context_from_attrs(s, info, 0); return (tmp); } } static int __disk_conf_from_attrs(struct disk_conf *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct 
nlattr **ntb ; struct nlattr *nla ; int err ; size_t tmp ; size_t tmp___0 ; u32 tmp___1 ; u32 tmp___2 ; u8 tmp___3 ; u8 tmp___4 ; u8 tmp___5 ; u8 tmp___6 ; u8 tmp___7 ; { maxtype = 23; tla = *(info->attrs + 3UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& disk_conf_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"backing_dev"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp = nla_strlcpy((char *)(& s->backing_dev), (struct nlattr const *)nla, 128UL); s->backing_dev_len = (__u32 )tmp; } else { } } else if ((int )exclude_invariants) { } else { printk("\016<< missing attr: %s\n", (char *)"backing_dev"); return (-42); } nla = *(ntb + 2UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"meta_dev"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___0 = nla_strlcpy((char *)(& s->meta_dev), (struct nlattr const *)nla, 128UL); s->meta_dev_len = (__u32 )tmp___0; } else { } } else if ((int )exclude_invariants) { } else { printk("\016<< missing attr: %s\n", (char *)"meta_dev"); return (-42); } nla = *(ntb + 3UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"meta_dev_idx"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___1 = nla_get_u32((struct nlattr const *)nla); s->meta_dev_idx = (__s32 )tmp___1; } else { } } else if ((int )exclude_invariants) { } else { printk("\016<< missing attr: %s\n", (char *)"meta_dev_idx"); return (-42); } nla = *(ntb + 4UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"disk_size"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->disk_size = nla_get_u64((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 5UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"max_bio_bvecs"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->max_bio_bvecs = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 6UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->on_io_error = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 7UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->fencing = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 8UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->resync_rate = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 9UL); if 
((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___2 = nla_get_u32((struct nlattr const *)nla); s->resync_after = (__s32 )tmp___2; } else { } } else { } nla = *(ntb + 10UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->al_extents = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 11UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->c_plan_ahead = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 12UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->c_delay_target = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 13UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->c_fill_target = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 14UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->c_max_rate = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 15UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->c_min_rate = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 16UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___3 = nla_get_u8((struct nlattr const *)nla); s->disk_barrier = (char )tmp___3; } else { } } else { } nla = *(ntb + 17UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___4 = nla_get_u8((struct nlattr const *)nla); s->disk_flushes = (char )tmp___4; } else { } } else { } nla = *(ntb + 18UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___5 = nla_get_u8((struct nlattr const *)nla); s->disk_drain = (char )tmp___5; } else { } } else { } nla = *(ntb + 19UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___6 = nla_get_u8((struct nlattr const *)nla); s->md_flushes = (char )tmp___6; } else { } } else { } nla = *(ntb + 20UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->disk_timeout = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 21UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { s->read_balancing = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 23UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disk_conf *)0)) { tmp___7 = nla_get_u8((struct nlattr const *)nla); s->al_updates = (char )tmp___7; } else { } } else { } return (0); } } static int disk_conf_from_attrs(struct disk_conf *s , struct genl_info *info ) { int tmp ; { tmp = 
__disk_conf_from_attrs(s, info, 0); return (tmp); } } static int disk_conf_from_attrs_for_change(struct disk_conf *s , struct genl_info *info ) { int tmp ; { tmp = __disk_conf_from_attrs(s, info, 1); return (tmp); } } static int __res_opts_from_attrs(struct res_opts *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; size_t tmp ; { maxtype = 2; tla = *(info->attrs + 4UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& res_opts_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct res_opts *)0)) { tmp = nla_strlcpy((char *)(& s->cpu_mask), (struct nlattr const *)nla, 32UL); s->cpu_mask_len = (__u32 )tmp; } else { } } else { } nla = *(ntb + 2UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct res_opts *)0)) { s->on_no_data = nla_get_u32((struct nlattr const *)nla); } else { } } else { } return (0); } } static int res_opts_from_attrs(struct res_opts *s , struct genl_info *info ) { int tmp ; { tmp = __res_opts_from_attrs(s, info, 0); return (tmp); } } static int __net_conf_from_attrs(struct net_conf *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; size_t tmp ; size_t tmp___0 ; size_t tmp___1 ; size_t tmp___2 ; size_t tmp___3 ; u8 tmp___4 ; u8 tmp___5 ; u8 tmp___6 ; u8 tmp___7 ; u8 tmp___8 ; u8 tmp___9 ; { maxtype = 29; tla = *(info->attrs + 5UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& net_conf_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp = nla_strlcpy((char *)(& s->shared_secret), (struct nlattr const *)nla, 64UL); s->shared_secret_len = (__u32 )tmp; } else { } } else { } nla = *(ntb + 2UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___0 = nla_strlcpy((char *)(& s->cram_hmac_alg), (struct nlattr const *)nla, 64UL); s->cram_hmac_alg_len = (__u32 )tmp___0; } else { } } else { } nla = *(ntb + 3UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___1 = nla_strlcpy((char *)(& s->integrity_alg), (struct nlattr const *)nla, 64UL); s->integrity_alg_len = (__u32 )tmp___1; } else { } } else { } nla = *(ntb + 4UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___2 = nla_strlcpy((char *)(& s->verify_alg), (struct nlattr const *)nla, 64UL); s->verify_alg_len = (__u32 )tmp___2; } else { } } else { } nla = *(ntb + 5UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___3 = nla_strlcpy((char *)(& s->csums_alg), (struct nlattr const *)nla, 64UL); s->csums_alg_len 
= (__u32 )tmp___3; } else { } } else { } nla = *(ntb + 6UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->wire_protocol = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 7UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->connect_int = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 8UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->timeout = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 9UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->ping_int = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 10UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->ping_timeo = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 11UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->sndbuf_size = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 12UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->rcvbuf_size = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 13UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->ko_count = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 14UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->max_buffers = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 15UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->max_epoch_size = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 16UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->unplug_watermark = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 17UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->after_sb_0p = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 18UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->after_sb_1p = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 19UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->after_sb_2p = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 20UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->rr_conflict = nla_get_u32((struct nlattr const *)nla); } else { } } 
else { } nla = *(ntb + 21UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->on_congestion = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 22UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->cong_fill = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 23UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { s->cong_extents = nla_get_u32((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 24UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___4 = nla_get_u8((struct nlattr const *)nla); s->two_primaries = (char )tmp___4; } else { } } else { } nla = *(ntb + 25UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"discard_my_data"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___5 = nla_get_u8((struct nlattr const *)nla); s->discard_my_data = (char )tmp___5; } else { } } else { } nla = *(ntb + 26UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___6 = nla_get_u8((struct nlattr const *)nla); s->tcp_cork = (char )tmp___6; } else { } } else { } nla = *(ntb + 27UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___7 = nla_get_u8((struct nlattr const *)nla); s->always_asbp = (char )tmp___7; } else { } } else { } nla = *(ntb + 28UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((int )exclude_invariants) { printk("\016<< must not change invariant attr: %s\n", (char *)"tentative"); return (-17); } else { } if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___8 = nla_get_u8((struct nlattr const *)nla); s->tentative = (char )tmp___8; } else { } } else { } nla = *(ntb + 29UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct net_conf *)0)) { tmp___9 = nla_get_u8((struct nlattr const *)nla); s->use_rle = (char )tmp___9; } else { } } else { } return (0); } } static int net_conf_from_attrs(struct net_conf *s , struct genl_info *info ) { int tmp ; { tmp = __net_conf_from_attrs(s, info, 0); return (tmp); } } static int net_conf_from_attrs_for_change(struct net_conf *s , struct genl_info *info ) { int tmp ; { tmp = __net_conf_from_attrs(s, info, 1); return (tmp); } } static int __set_role_parms_from_attrs(struct set_role_parms *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; u8 tmp ; { maxtype = 1; tla = *(info->attrs + 6UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& set_role_parms_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct 
set_role_parms *)0)) { tmp = nla_get_u8((struct nlattr const *)nla); s->assume_uptodate = (char )tmp; } else { } } else { } return (0); } } static int set_role_parms_from_attrs(struct set_role_parms *s , struct genl_info *info ) { int tmp ; { tmp = __set_role_parms_from_attrs(s, info, 0); return (tmp); } } static int __resize_parms_from_attrs(struct resize_parms *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; u8 tmp ; u8 tmp___0 ; { maxtype = 3; tla = *(info->attrs + 7UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& resize_parms_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct resize_parms *)0)) { s->resize_size = nla_get_u64((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 2UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct resize_parms *)0)) { tmp = nla_get_u8((struct nlattr const *)nla); s->resize_force = (char )tmp; } else { } } else { } nla = *(ntb + 3UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct resize_parms *)0)) { tmp___0 = nla_get_u8((struct nlattr const *)nla); s->no_resync = (char )tmp___0; } else { } } else { } return (0); } } static int resize_parms_from_attrs(struct resize_parms *s , struct genl_info *info ) { int tmp ; { tmp = __resize_parms_from_attrs(s, info, 0); return (tmp); } } static int __start_ov_parms_from_attrs(struct start_ov_parms *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; { maxtype = 2; tla = *(info->attrs + 9UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& start_ov_parms_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct start_ov_parms *)0)) { s->ov_start_sector = nla_get_u64((struct nlattr const *)nla); } else { } } else { } nla = *(ntb + 2UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct start_ov_parms *)0)) { s->ov_stop_sector = nla_get_u64((struct nlattr const *)nla); } else { } } else { } return (0); } } static int start_ov_parms_from_attrs(struct start_ov_parms *s , struct genl_info *info ) { int tmp ; { tmp = __start_ov_parms_from_attrs(s, info, 0); return (tmp); } } static int __new_c_uuid_parms_from_attrs(struct new_c_uuid_parms *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; u8 tmp ; { maxtype = 1; tla = *(info->attrs + 10UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& new_c_uuid_parms_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 
1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct new_c_uuid_parms *)0)) { tmp = nla_get_u8((struct nlattr const *)nla); s->clear_bm = (char )tmp; } else { } } else { } return (0); } } static int new_c_uuid_parms_from_attrs(struct new_c_uuid_parms *s , struct genl_info *info ) { int tmp ; { tmp = __new_c_uuid_parms_from_attrs(s, info, 0); return (tmp); } } static int __disconnect_parms_from_attrs(struct disconnect_parms *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; u8 tmp ; { maxtype = 1; tla = *(info->attrs + 12UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& disconnect_parms_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct disconnect_parms *)0)) { tmp = nla_get_u8((struct nlattr const *)nla); s->force_disconnect = (char )tmp; } else { } } else { } return (0); } } static int disconnect_parms_from_attrs(struct disconnect_parms *s , struct genl_info *info ) { int tmp ; { tmp = __disconnect_parms_from_attrs(s, info, 0); return (tmp); } } static int __detach_parms_from_attrs(struct detach_parms *s , struct genl_info *info , bool exclude_invariants ) { int maxtype ; struct nlattr *tla ; struct nlattr **ntb ; struct nlattr *nla ; int err ; u8 tmp ; { maxtype = 1; tla = *(info->attrs + 13UL); ntb = (struct nlattr **)(& nested_attr_tb); if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { return (-42); } else { } err = drbd_nla_parse_nested(ntb, maxtype, tla, (struct nla_policy const *)(& detach_parms_nl_policy)); if (err != 0) { return (err); } else { } nla = *(ntb + 1UL); if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { if ((unsigned long )s != (unsigned long )((struct detach_parms *)0)) { tmp = nla_get_u8((struct nlattr const *)nla); s->force_detach = (char )tmp; } else { } } else { } return (0); } } static int detach_parms_from_attrs(struct detach_parms *s , struct genl_info *info ) { int tmp ; { tmp = __detach_parms_from_attrs(s, info, 0); return (tmp); } } char const *drbd_genl_cmd_to_str(__u8 cmd ) { { switch ((int )cmd) { case 2: ; return ("DRBD_ADM_GET_STATUS"); case 5: ; return ("DRBD_ADM_NEW_MINOR"); case 6: ; return ("DRBD_ADM_DEL_MINOR"); case 7: ; return ("DRBD_ADM_NEW_RESOURCE"); case 8: ; return ("DRBD_ADM_DEL_RESOURCE"); case 9: ; return ("DRBD_ADM_RESOURCE_OPTS"); case 10: ; return ("DRBD_ADM_CONNECT"); case 29: ; return ("DRBD_ADM_CHG_NET_OPTS"); case 11: ; return ("DRBD_ADM_DISCONNECT"); case 12: ; return ("DRBD_ADM_ATTACH"); case 28: ; return ("DRBD_ADM_CHG_DISK_OPTS"); case 13: ; return ("DRBD_ADM_RESIZE"); case 14: ; return ("DRBD_ADM_PRIMARY"); case 15: ; return ("DRBD_ADM_SECONDARY"); case 16: ; return ("DRBD_ADM_NEW_C_UUID"); case 17: ; return ("DRBD_ADM_START_OV"); case 18: ; return ("DRBD_ADM_DETACH"); case 19: ; return ("DRBD_ADM_INVALIDATE"); case 20: ; return ("DRBD_ADM_INVAL_PEER"); case 21: ; return ("DRBD_ADM_PAUSE_SYNC"); case 22: ; return ("DRBD_ADM_RESUME_SYNC"); case 23: ; return ("DRBD_ADM_SUSPEND_IO"); case 24: ; return ("DRBD_ADM_RESUME_IO"); case 25: ; return ("DRBD_ADM_OUTDATE"); case 26: ; return ("DRBD_ADM_GET_TIMEOUT_TYPE"); case 27: ; return 
("DRBD_ADM_DOWN"); default: ; return ("unknown"); } } } static struct genl_ops drbd_genl_ops[26U] = { {2U, (unsigned char)0, 0U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_get_status, & drbd_adm_get_status_all, 0, {0, 0}}, {5U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_add_minor, 0, 0, {0, 0}}, {6U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_delete_minor, 0, 0, {0, 0}}, {7U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_new_resource, 0, 0, {0, 0}}, {8U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_del_resource, 0, 0, {0, 0}}, {9U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_resource_opts, 0, 0, {0, 0}}, {10U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_connect, 0, 0, {0, 0}}, {29U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_net_opts, 0, 0, {0, 0}}, {11U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_disconnect, 0, 0, {0, 0}}, {12U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_attach, 0, 0, {0, 0}}, {28U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_disk_opts, 0, 0, {0, 0}}, {13U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_resize, 0, 0, {0, 0}}, {14U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_set_role, 0, 0, {0, 0}}, {15U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_set_role, 0, 0, {0, 0}}, {16U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_new_c_uuid, 0, 0, {0, 0}}, {17U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_start_ov, 0, 0, {0, 0}}, {18U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_detach, 0, 0, {0, 0}}, {19U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_invalidate, 0, 0, {0, 0}}, {20U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_invalidate_peer, 0, 0, {0, 0}}, {21U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_pause_sync, 0, 0, {0, 0}}, {22U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_resume_sync, 0, 0, {0, 0}}, {23U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_suspend_io, 0, 0, {0, 0}}, {24U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_resume_io, 0, 0, {0, 0}}, {25U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_outdate, 0, 0, {0, 0}}, {26U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_get_timeout_type, 0, 0, {0, 0}}, {27U, (unsigned char)0, 1U, (struct nla_policy const *)(& drbd_tla_nl_policy), & drbd_adm_down, 0, 0, {0, 0}}}; static struct genl_family drbd_genl_family = {0U, 8U, {'d', 'r', 'b', 'd', '\000'}, 1U, 13U, (_Bool)0, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}}; static struct genl_multicast_group drbd_mcg_events = {0, {0, 0}, {'e', 'v', 'e', 'n', 't', 's', '\000'}, 0U}; static int drbd_genl_multicast_events(struct sk_buff *skb , gfp_t flags ) { unsigned int group_id ; int tmp ; { group_id = drbd_mcg_events.id; if (group_id == 
0U) { return (-22); } else { } tmp = genlmsg_multicast(skb, 0U, group_id, flags); return (tmp); } } int drbd_genl_register(void) { int err ; int tmp ; { tmp = genl_register_family_with_ops(& drbd_genl_family, (struct genl_ops *)(& drbd_genl_ops), 26UL); err = tmp; if (err != 0) { return (err); } else { } err = genl_register_mc_group(& drbd_genl_family, & drbd_mcg_events); if (err != 0) { goto fail; } else { printk("\016%s: mcg %s: %u\n", (char *)"events", (char *)"drbd", drbd_mcg_events.id); } return (0); fail: genl_unregister_family(& drbd_genl_family); return (err); } } void drbd_genl_unregister(void) { { genl_unregister_family(& drbd_genl_family); return; } } static int disk_conf_to_skb(struct sk_buff *skb , struct disk_conf *s , bool const exclude_sensitive ) { struct nlattr *tla ; struct nlattr *tmp ; int __min1 ; int __min2 ; int tmp___0 ; int __min1___0 ; int __min2___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; { tmp = nla_nest_start(skb, 3); tla = tmp; if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { goto nla_put_failure; } else { } __min1 = 128; __min2 = (int )(s->backing_dev_len + 1U); tmp___0 = nla_put(skb, 1, __min1 < __min2 ? __min1 : __min2, (void const *)(& s->backing_dev)); if (tmp___0 != 0) { goto nla_put_failure; } else { } __min1___0 = 128; __min2___0 = (int )(s->meta_dev_len + 1U); tmp___1 = nla_put(skb, 2, __min1___0 < __min2___0 ? __min1___0 : __min2___0, (void const *)(& s->meta_dev)); if (tmp___1 != 0) { goto nla_put_failure; } else { } tmp___2 = nla_put_u32(skb, 3, (u32 )s->meta_dev_idx); if (tmp___2 != 0) { goto nla_put_failure; } else { } tmp___3 = nla_put_u64(skb, 4, s->disk_size); if (tmp___3 != 0) { goto nla_put_failure; } else { } tmp___4 = nla_put_u32(skb, 5, s->max_bio_bvecs); if (tmp___4 != 0) { goto nla_put_failure; } else { } tmp___5 = nla_put_u32(skb, 6, s->on_io_error); if (tmp___5 != 0) { goto nla_put_failure; } else { } tmp___6 = nla_put_u32(skb, 7, s->fencing); if (tmp___6 != 0) { goto nla_put_failure; } else { } tmp___7 = nla_put_u32(skb, 8, s->resync_rate); if (tmp___7 != 0) { goto nla_put_failure; } else { } tmp___8 = nla_put_u32(skb, 9, (u32 )s->resync_after); if (tmp___8 != 0) { goto nla_put_failure; } else { } tmp___9 = nla_put_u32(skb, 10, s->al_extents); if (tmp___9 != 0) { goto nla_put_failure; } else { } tmp___10 = nla_put_u32(skb, 11, s->c_plan_ahead); if (tmp___10 != 0) { goto nla_put_failure; } else { } tmp___11 = nla_put_u32(skb, 12, s->c_delay_target); if (tmp___11 != 0) { goto nla_put_failure; } else { } tmp___12 = nla_put_u32(skb, 13, s->c_fill_target); if (tmp___12 != 0) { goto nla_put_failure; } else { } tmp___13 = nla_put_u32(skb, 14, s->c_max_rate); if (tmp___13 != 0) { goto nla_put_failure; } else { } tmp___14 = nla_put_u32(skb, 15, s->c_min_rate); if (tmp___14 != 0) { goto nla_put_failure; } else { } tmp___15 = nla_put_u8(skb, 16, (int )((u8 )s->disk_barrier)); if (tmp___15 != 0) { goto nla_put_failure; } else { } tmp___16 = nla_put_u8(skb, 17, (int )((u8 )s->disk_flushes)); if (tmp___16 != 0) { goto nla_put_failure; } else { } tmp___17 = nla_put_u8(skb, 18, (int )((u8 )s->disk_drain)); if (tmp___17 != 0) { goto nla_put_failure; } else { } tmp___18 = nla_put_u8(skb, 19, (int )((u8 )s->md_flushes)); if (tmp___18 != 0) { goto nla_put_failure; } else 
{ } tmp___19 = nla_put_u32(skb, 20, s->disk_timeout); if (tmp___19 != 0) { goto nla_put_failure; } else { } tmp___20 = nla_put_u32(skb, 21, s->read_balancing); if (tmp___20 != 0) { goto nla_put_failure; } else { } tmp___21 = nla_put_u8(skb, 23, (int )((u8 )s->al_updates)); if (tmp___21 != 0) { goto nla_put_failure; } else { } nla_nest_end(skb, tla); return (0); nla_put_failure: ; if ((unsigned long )tla != (unsigned long )((struct nlattr *)0)) { nla_nest_cancel(skb, tla); } else { } return (-90); } } static int res_opts_to_skb(struct sk_buff *skb , struct res_opts *s , bool const exclude_sensitive ) { struct nlattr *tla ; struct nlattr *tmp ; int __min1 ; int __min2 ; int tmp___0 ; int tmp___1 ; { tmp = nla_nest_start(skb, 4); tla = tmp; if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { goto nla_put_failure; } else { } __min1 = 32; __min2 = (int )(s->cpu_mask_len + 1U); tmp___0 = nla_put(skb, 1, __min1 < __min2 ? __min1 : __min2, (void const *)(& s->cpu_mask)); if (tmp___0 != 0) { goto nla_put_failure; } else { } tmp___1 = nla_put_u32(skb, 2, s->on_no_data); if (tmp___1 != 0) { goto nla_put_failure; } else { } nla_nest_end(skb, tla); return (0); nla_put_failure: ; if ((unsigned long )tla != (unsigned long )((struct nlattr *)0)) { nla_nest_cancel(skb, tla); } else { } return (-90); } } static int net_conf_to_skb(struct sk_buff *skb , struct net_conf *s , bool const exclude_sensitive ) { struct nlattr *tla ; struct nlattr *tmp ; int __min1 ; int __min2 ; int tmp___0 ; int __min1___0 ; int __min2___0 ; int tmp___1 ; int __min1___1 ; int __min2___1 ; int tmp___2 ; int __min1___2 ; int __min2___2 ; int tmp___3 ; int __min1___3 ; int __min2___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; int tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; int tmp___27 ; int tmp___28 ; { tmp = nla_nest_start(skb, 5); tla = tmp; if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { goto nla_put_failure; } else { } if (! ((_Bool )exclude_sensitive)) { __min1 = 64; __min2 = (int )(s->shared_secret_len + 1U); tmp___0 = nla_put(skb, 1, __min1 < __min2 ? __min1 : __min2, (void const *)(& s->shared_secret)); if (tmp___0 != 0) { goto nla_put_failure; } else { } } else { } __min1___0 = 64; __min2___0 = (int )(s->cram_hmac_alg_len + 1U); tmp___1 = nla_put(skb, 2, __min1___0 < __min2___0 ? __min1___0 : __min2___0, (void const *)(& s->cram_hmac_alg)); if (tmp___1 != 0) { goto nla_put_failure; } else { } __min1___1 = 64; __min2___1 = (int )(s->integrity_alg_len + 1U); tmp___2 = nla_put(skb, 3, __min1___1 < __min2___1 ? __min1___1 : __min2___1, (void const *)(& s->integrity_alg)); if (tmp___2 != 0) { goto nla_put_failure; } else { } __min1___2 = 64; __min2___2 = (int )(s->verify_alg_len + 1U); tmp___3 = nla_put(skb, 4, __min1___2 < __min2___2 ? __min1___2 : __min2___2, (void const *)(& s->verify_alg)); if (tmp___3 != 0) { goto nla_put_failure; } else { } __min1___3 = 64; __min2___3 = (int )(s->csums_alg_len + 1U); tmp___4 = nla_put(skb, 5, __min1___3 < __min2___3 ? 
__min1___3 : __min2___3, (void const *)(& s->csums_alg)); if (tmp___4 != 0) { goto nla_put_failure; } else { } tmp___5 = nla_put_u32(skb, 6, s->wire_protocol); if (tmp___5 != 0) { goto nla_put_failure; } else { } tmp___6 = nla_put_u32(skb, 7, s->connect_int); if (tmp___6 != 0) { goto nla_put_failure; } else { } tmp___7 = nla_put_u32(skb, 8, s->timeout); if (tmp___7 != 0) { goto nla_put_failure; } else { } tmp___8 = nla_put_u32(skb, 9, s->ping_int); if (tmp___8 != 0) { goto nla_put_failure; } else { } tmp___9 = nla_put_u32(skb, 10, s->ping_timeo); if (tmp___9 != 0) { goto nla_put_failure; } else { } tmp___10 = nla_put_u32(skb, 11, s->sndbuf_size); if (tmp___10 != 0) { goto nla_put_failure; } else { } tmp___11 = nla_put_u32(skb, 12, s->rcvbuf_size); if (tmp___11 != 0) { goto nla_put_failure; } else { } tmp___12 = nla_put_u32(skb, 13, s->ko_count); if (tmp___12 != 0) { goto nla_put_failure; } else { } tmp___13 = nla_put_u32(skb, 14, s->max_buffers); if (tmp___13 != 0) { goto nla_put_failure; } else { } tmp___14 = nla_put_u32(skb, 15, s->max_epoch_size); if (tmp___14 != 0) { goto nla_put_failure; } else { } tmp___15 = nla_put_u32(skb, 16, s->unplug_watermark); if (tmp___15 != 0) { goto nla_put_failure; } else { } tmp___16 = nla_put_u32(skb, 17, s->after_sb_0p); if (tmp___16 != 0) { goto nla_put_failure; } else { } tmp___17 = nla_put_u32(skb, 18, s->after_sb_1p); if (tmp___17 != 0) { goto nla_put_failure; } else { } tmp___18 = nla_put_u32(skb, 19, s->after_sb_2p); if (tmp___18 != 0) { goto nla_put_failure; } else { } tmp___19 = nla_put_u32(skb, 20, s->rr_conflict); if (tmp___19 != 0) { goto nla_put_failure; } else { } tmp___20 = nla_put_u32(skb, 21, s->on_congestion); if (tmp___20 != 0) { goto nla_put_failure; } else { } tmp___21 = nla_put_u32(skb, 22, s->cong_fill); if (tmp___21 != 0) { goto nla_put_failure; } else { } tmp___22 = nla_put_u32(skb, 23, s->cong_extents); if (tmp___22 != 0) { goto nla_put_failure; } else { } tmp___23 = nla_put_u8(skb, 24, (int )((u8 )s->two_primaries)); if (tmp___23 != 0) { goto nla_put_failure; } else { } tmp___24 = nla_put_u8(skb, 25, (int )((u8 )s->discard_my_data)); if (tmp___24 != 0) { goto nla_put_failure; } else { } tmp___25 = nla_put_u8(skb, 26, (int )((u8 )s->tcp_cork)); if (tmp___25 != 0) { goto nla_put_failure; } else { } tmp___26 = nla_put_u8(skb, 27, (int )((u8 )s->always_asbp)); if (tmp___26 != 0) { goto nla_put_failure; } else { } tmp___27 = nla_put_u8(skb, 28, (int )((u8 )s->tentative)); if (tmp___27 != 0) { goto nla_put_failure; } else { } tmp___28 = nla_put_u8(skb, 29, (int )((u8 )s->use_rle)); if (tmp___28 != 0) { goto nla_put_failure; } else { } nla_nest_end(skb, tla); return (0); nla_put_failure: ; if ((unsigned long )tla != (unsigned long )((struct nlattr *)0)) { nla_nest_cancel(skb, tla); } else { } return (-90); } } static int timeout_parms_to_skb(struct sk_buff *skb , struct timeout_parms *s , bool const exclude_sensitive ) { struct nlattr *tla ; struct nlattr *tmp ; int tmp___0 ; { tmp = nla_nest_start(skb, 11); tla = tmp; if ((unsigned long )tla == (unsigned long )((struct nlattr *)0)) { goto nla_put_failure; } else { } tmp___0 = nla_put_u32(skb, 1, s->timeout_type); if (tmp___0 != 0) { goto nla_put_failure; } else { } nla_nest_end(skb, tla); return (0); nla_put_failure: ; if ((unsigned long )tla != (unsigned long )((struct nlattr *)0)) { nla_nest_cancel(skb, tla); } else { } return (-90); } } __inline static int timeout_parms_to_priv_skb(struct sk_buff *skb , struct timeout_parms *s ) { int tmp ; { tmp = timeout_parms_to_skb(skb, s, 0); 
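/*
 * timeout_parms_to_priv_skb() merely wraps the dump with
 * exclude_sensitive = 0.  All the *_to_skb() emitters above follow one
 * generated pattern; a hedged sketch of the likely pre-CIL shape
 * (attribute-constant names assumed):
 *
 *     tla = nla_nest_start(skb, DRBD_NLA_TIMEOUT_PARMS);
 *     if (!tla)
 *             goto nla_put_failure;
 *     if (nla_put_u32(skb, T_timeout_type, s->timeout_type))
 *             goto nla_put_failure;
 *     nla_nest_end(skb, tla);
 *     return 0;
 * nla_put_failure:
 *     if (tla)
 *             nla_nest_cancel(skb, tla);
 *     return -EMSGSIZE;    // the -90 seen above
 *
 * String fields are clamped to min(bufsize, len + 1) before nla_put(),
 * and net_conf_to_skb() skips shared_secret when exclude_sensitive is set.
 */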
return (tmp); } } static void set_disk_conf_defaults(struct disk_conf *x ) ; static void set_disk_conf_defaults(struct disk_conf *x ) { { x->on_io_error = 2U; x->fencing = 0U; x->resync_rate = 250U; x->resync_after = -1; x->al_extents = 1237U; x->c_plan_ahead = 20U; x->c_delay_target = 10U; x->c_fill_target = 100U; x->c_max_rate = 102400U; x->c_min_rate = 250U; x->disk_barrier = 0; x->disk_flushes = 1; x->disk_drain = 1; x->md_flushes = 1; x->disk_timeout = 0U; x->read_balancing = 0U; x->al_updates = 1; return; } } static void set_res_opts_defaults(struct res_opts *x ) ; static void set_res_opts_defaults(struct res_opts *x ) { { memset((void *)(& x->cpu_mask), 0, 32UL); x->cpu_mask_len = 0U; x->on_no_data = 0U; return; } } static void set_net_conf_defaults(struct net_conf *x ) ; static void set_net_conf_defaults(struct net_conf *x ) { { memset((void *)(& x->shared_secret), 0, 64UL); x->shared_secret_len = 0U; memset((void *)(& x->cram_hmac_alg), 0, 64UL); x->cram_hmac_alg_len = 0U; memset((void *)(& x->integrity_alg), 0, 64UL); x->integrity_alg_len = 0U; memset((void *)(& x->verify_alg), 0, 64UL); x->verify_alg_len = 0U; memset((void *)(& x->csums_alg), 0, 64UL); x->csums_alg_len = 0U; x->wire_protocol = 3U; x->connect_int = 10U; x->timeout = 60U; x->ping_int = 10U; x->ping_timeo = 5U; x->sndbuf_size = 0U; x->rcvbuf_size = 0U; x->ko_count = 7U; x->max_buffers = 2048U; x->max_epoch_size = 2048U; x->unplug_watermark = 128U; x->after_sb_0p = 0U; x->after_sb_1p = 0U; x->after_sb_2p = 0U; x->rr_conflict = 0U; x->on_congestion = 0U; x->cong_fill = 0U; x->cong_extents = 1237U; x->two_primaries = 0; x->tcp_cork = 1; x->always_asbp = 0; x->use_rle = 1; return; } } static char *drbd_m_holder = (char *)"Hands off! this is DRBD\'s meta data device."; static struct drbd_config_context adm_ctx ; static void drbd_adm_send_reply(struct sk_buff *skb , struct genl_info *info ) { struct nlmsghdr *tmp ; void *tmp___0 ; void *tmp___1 ; int tmp___2 ; { tmp = nlmsg_hdr((struct sk_buff const *)skb); tmp___0 = nlmsg_data((struct nlmsghdr const *)tmp); tmp___1 = genlmsg_data((struct genlmsghdr const *)tmp___0); genlmsg_end(skb, tmp___1); tmp___2 = genlmsg_reply(skb, info); if (tmp___2 != 0) { printk("\vdrbd: error sending genl reply\n"); } else { } return; } } int drbd_msg_put_info(char const *info ) { struct sk_buff *skb ; struct nlattr *nla ; int err ; { skb = adm_ctx.reply_skb; err = -90; if ((unsigned long )info == (unsigned long )((char const *)0) || (int )((signed char )*info) == 0) { return (0); } else { } nla = nla_nest_start(skb, 1); if ((unsigned long )nla == (unsigned long )((struct nlattr *)0)) { return (err); } else { } err = nla_put_string(skb, 16385, info); if (err != 0) { nla_nest_cancel(skb, nla); return (err); } else { nla_nest_end(skb, nla); } return (0); } } static int drbd_adm_prepare(struct sk_buff *skb , struct genl_info *info , unsigned int flags ) { struct drbd_genlmsghdr *d_in ; u8 cmd ; int err ; bool tmp ; int tmp___0 ; void *tmp___1 ; struct nlattr *nla ; void *tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; void *tmp___6 ; int tmp___7 ; void *tmp___8 ; { d_in = (struct drbd_genlmsghdr *)info->userhdr; cmd = (info->genlhdr)->cmd; memset((void *)(& adm_ctx), 0, 64UL); if ((unsigned int )cmd != 2U) { tmp = capable(12); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { return (-1); } else { } } else { } adm_ctx.reply_skb = genlmsg_new(3776UL, 208U); if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { err = -12; goto fail; } else { } tmp___1 = 
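/*
 * drbd_adm_prepare() builds the per-request admin context: only
 * DRBD_ADM_GET_STATUS (cmd 2) is allowed without capable(12), i.e.
 * CAP_NET_ADMIN; a reply skb is allocated and a drbd_genlmsghdr put into
 * it before the cfg-context attribute is parsed back out to resolve
 * minor, volume, resource name and my/peer addresses.  The numeric
 * returns pair with the drbd_msg_put_info() strings: 101 is the
 * "no error" ret_code preset above, 127 accompanies "unknown minor",
 * and 162 covers the "unknown resource"/"unknown connection" and
 * context-mismatch cases.
 */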
genlmsg_put_reply(adm_ctx.reply_skb, info, & drbd_genl_family, 0, (int )cmd); adm_ctx.reply_dh = (struct drbd_genlmsghdr *)tmp___1; if ((unsigned long )adm_ctx.reply_dh == (unsigned long )((struct drbd_genlmsghdr *)0)) { err = -12; goto fail; } else { } (adm_ctx.reply_dh)->minor = d_in->minor; (adm_ctx.reply_dh)->ldv_49826.ret_code = 101; adm_ctx.volume = 4294967295U; if ((unsigned long )*(info->attrs + 2UL) != (unsigned long )((struct nlattr *)0)) { err = drbd_cfg_context_from_attrs(0, info); if (err != 0) { goto fail; } else { } err = nla_put_nohdr(adm_ctx.reply_skb, (int )(*(info->attrs + 2UL))->nla_len, (void const *)*(info->attrs + 2UL)); if (err != 0) { goto fail; } else { } nla = nested_attr_tb[1]; if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { adm_ctx.volume = nla_get_u32((struct nlattr const *)nla); } else { } nla = nested_attr_tb[2]; if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { tmp___2 = nla_data((struct nlattr const *)nla); adm_ctx.resource_name = (char *)tmp___2; } else { } adm_ctx.my_addr = nested_attr_tb[3]; adm_ctx.peer_addr = nested_attr_tb[4]; if ((unsigned long )adm_ctx.my_addr != (unsigned long )((struct nlattr *)0)) { tmp___3 = nla_len((struct nlattr const *)adm_ctx.my_addr); if ((unsigned int )tmp___3 > 128U) { err = -22; goto fail; } else { goto _L; } } else _L: /* CIL Label */ if ((unsigned long )adm_ctx.peer_addr != (unsigned long )((struct nlattr *)0)) { tmp___4 = nla_len((struct nlattr const *)adm_ctx.peer_addr); if ((unsigned int )tmp___4 > 128U) { err = -22; goto fail; } else { } } else { } } else { } adm_ctx.minor = d_in->minor; adm_ctx.mdev = minor_to_mdev(d_in->minor); adm_ctx.tconn = conn_get_by_name((char const *)adm_ctx.resource_name); if ((unsigned long )adm_ctx.mdev == (unsigned long )((struct drbd_conf *)0) && (int )flags & 1) { drbd_msg_put_info("unknown minor"); return (127); } else { } if ((unsigned long )adm_ctx.tconn == (unsigned long )((struct drbd_tconn *)0) && (flags & 2U) != 0U) { drbd_msg_put_info("unknown resource"); return (162); } else { } if ((flags & 4U) != 0U) { if ((unsigned long )adm_ctx.tconn != (unsigned long )((struct drbd_tconn *)0) && (flags & 2U) == 0U) { drbd_msg_put_info("no resource name expected"); return (162); } else { } if ((unsigned long )adm_ctx.mdev != (unsigned long )((struct drbd_conf *)0)) { drbd_msg_put_info("no minor number expected"); return (162); } else { } if ((unsigned long )adm_ctx.my_addr != (unsigned long )((struct nlattr *)0) && (unsigned long )adm_ctx.peer_addr != (unsigned long )((struct nlattr *)0)) { tmp___5 = nla_len((struct nlattr const *)adm_ctx.peer_addr); tmp___6 = nla_data((struct nlattr const *)adm_ctx.peer_addr); tmp___7 = nla_len((struct nlattr const *)adm_ctx.my_addr); tmp___8 = nla_data((struct nlattr const *)adm_ctx.my_addr); adm_ctx.tconn = conn_get_by_addrs(tmp___8, tmp___7, tmp___6, tmp___5); } else { } if ((unsigned long )adm_ctx.tconn == (unsigned long )((struct drbd_tconn *)0)) { drbd_msg_put_info("unknown connection"); return (162); } else { } } else { } if (((unsigned long )adm_ctx.mdev != (unsigned long )((struct drbd_conf *)0) && (unsigned long )adm_ctx.tconn != (unsigned long )((struct drbd_tconn *)0)) && (unsigned long )(adm_ctx.mdev)->tconn != (unsigned long )adm_ctx.tconn) { printk("\frequest: minor=%u, resource=%s; but that minor belongs to connection %s\n", adm_ctx.minor, adm_ctx.resource_name, ((adm_ctx.mdev)->tconn)->name); drbd_msg_put_info("minor exists in different resource"); return (162); } else { } if (((unsigned long 
)adm_ctx.mdev != (unsigned long )((struct drbd_conf *)0) && adm_ctx.volume != 4294967295U) && adm_ctx.volume != (unsigned int )(adm_ctx.mdev)->vnr) { printk("\frequest: minor=%u, volume=%u; but that minor is volume %u in %s\n", adm_ctx.minor, adm_ctx.volume, (adm_ctx.mdev)->vnr, ((adm_ctx.mdev)->tconn)->name); drbd_msg_put_info("minor exists as different volume"); return (162); } else { } return (101); fail: nlmsg_free(adm_ctx.reply_skb); adm_ctx.reply_skb = 0; return (err); } } static int drbd_adm_finish(struct genl_info *info , int retcode ) { { if ((unsigned long )adm_ctx.tconn != (unsigned long )((struct drbd_tconn *)0)) { kref_put(& (adm_ctx.tconn)->kref, & conn_destroy); adm_ctx.tconn = 0; } else { } if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return (-12); } else { } (adm_ctx.reply_dh)->ldv_49826.ret_code = retcode; drbd_adm_send_reply(adm_ctx.reply_skb, info); return (0); } } static void setup_khelper_env(struct drbd_tconn *tconn , char **envp ) { char *afs ; { if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0) { return; } else { } switch ((int )((struct sockaddr *)(& tconn->peer_addr))->sa_family) { case 10: afs = (char *)"ipv6"; snprintf(*(envp + 4UL), 60UL, "DRBD_PEER_ADDRESS=%pI6", & ((struct sockaddr_in6 *)(& tconn->peer_addr))->sin6_addr); goto ldv_53159; case 2: afs = (char *)"ipv4"; snprintf(*(envp + 4UL), 60UL, "DRBD_PEER_ADDRESS=%pI4", & ((struct sockaddr_in *)(& tconn->peer_addr))->sin_addr); goto ldv_53159; default: afs = (char *)"ssocks"; snprintf(*(envp + 4UL), 60UL, "DRBD_PEER_ADDRESS=%pI4", & ((struct sockaddr_in *)(& tconn->peer_addr))->sin_addr); } ldv_53159: snprintf(*(envp + 3UL), 20UL, "DRBD_PEER_AF=%s", afs); return; } } int drbd_khelper(struct drbd_conf *mdev , char *cmd ) { char *envp[6U] ; char __constr_expr_0[20] ; char __constr_expr_1[60] ; char mb[12U] ; char *argv[4U] ; struct drbd_tconn *tconn ; struct sib_info sib ; int ret ; struct task_struct *tmp ; unsigned int tmp___0 ; struct task_struct *tmp___1 ; { __constr_expr_0[0] = (char)0; __constr_expr_0[1] = (char)0; __constr_expr_0[2] = (char)0; __constr_expr_0[3] = (char)0; __constr_expr_0[4] = (char)0; __constr_expr_0[5] = (char)0; __constr_expr_0[6] = (char)0; __constr_expr_0[7] = (char)0; __constr_expr_0[8] = (char)0; __constr_expr_0[9] = (char)0; __constr_expr_0[10] = (char)0; __constr_expr_0[11] = (char)0; __constr_expr_0[12] = (char)0; __constr_expr_0[13] = (char)0; __constr_expr_0[14] = (char)0; __constr_expr_0[15] = (char)0; __constr_expr_0[16] = (char)0; __constr_expr_0[17] = (char)0; __constr_expr_0[18] = (char)0; __constr_expr_0[19] = (char)0; __constr_expr_1[0] = (char)0; __constr_expr_1[1] = (char)0; __constr_expr_1[2] = (char)0; __constr_expr_1[3] = (char)0; __constr_expr_1[4] = (char)0; __constr_expr_1[5] = (char)0; __constr_expr_1[6] = (char)0; __constr_expr_1[7] = (char)0; __constr_expr_1[8] = (char)0; __constr_expr_1[9] = (char)0; __constr_expr_1[10] = (char)0; __constr_expr_1[11] = (char)0; __constr_expr_1[12] = (char)0; __constr_expr_1[13] = (char)0; __constr_expr_1[14] = (char)0; __constr_expr_1[15] = (char)0; __constr_expr_1[16] = (char)0; __constr_expr_1[17] = (char)0; __constr_expr_1[18] = (char)0; __constr_expr_1[19] = (char)0; __constr_expr_1[20] = (char)0; __constr_expr_1[21] = (char)0; __constr_expr_1[22] = (char)0; __constr_expr_1[23] = (char)0; __constr_expr_1[24] = (char)0; __constr_expr_1[25] = (char)0; __constr_expr_1[26] = (char)0; __constr_expr_1[27] = (char)0; __constr_expr_1[28] = (char)0; __constr_expr_1[29] = (char)0; 
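/*
 * This long run of __constr_expr_*[i] = (char)0 stores is how CIL
 * expands zero-initialized local arrays; in the original drbd_khelper()
 * these are presumably nothing more than (identifier names assumed):
 *
 *     char af_env[20] = "", addr_env[60] = "";
 *     char *envp[] = { "HOME=/", "TERM=linux",
 *                      "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
 *                      af_env, addr_env, NULL };
 *
 * which setup_khelper_env() then fills with DRBD_PEER_AF and
 * DRBD_PEER_ADDRESS before call_usermodehelper() runs the handler.
 */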
__constr_expr_1[30] = (char)0; __constr_expr_1[31] = (char)0; __constr_expr_1[32] = (char)0; __constr_expr_1[33] = (char)0; __constr_expr_1[34] = (char)0; __constr_expr_1[35] = (char)0; __constr_expr_1[36] = (char)0; __constr_expr_1[37] = (char)0; __constr_expr_1[38] = (char)0; __constr_expr_1[39] = (char)0; __constr_expr_1[40] = (char)0; __constr_expr_1[41] = (char)0; __constr_expr_1[42] = (char)0; __constr_expr_1[43] = (char)0; __constr_expr_1[44] = (char)0; __constr_expr_1[45] = (char)0; __constr_expr_1[46] = (char)0; __constr_expr_1[47] = (char)0; __constr_expr_1[48] = (char)0; __constr_expr_1[49] = (char)0; __constr_expr_1[50] = (char)0; __constr_expr_1[51] = (char)0; __constr_expr_1[52] = (char)0; __constr_expr_1[53] = (char)0; __constr_expr_1[54] = (char)0; __constr_expr_1[55] = (char)0; __constr_expr_1[56] = (char)0; __constr_expr_1[57] = (char)0; __constr_expr_1[58] = (char)0; __constr_expr_1[59] = (char)0; envp[0] = (char *)"HOME=/"; envp[1] = (char *)"TERM=linux"; envp[2] = (char *)"PATH=/sbin:/usr/sbin:/bin:/usr/bin"; envp[3] = (char *)(& __constr_expr_0); envp[4] = (char *)(& __constr_expr_1); envp[5] = 0; argv[0] = (char *)(& usermode_helper); argv[1] = cmd; argv[2] = (char *)(& mb); argv[3] = 0; tconn = mdev->tconn; tmp = get_current(); if ((unsigned long )tmp == (unsigned long )tconn->worker.task) { set_bit(11U, (unsigned long volatile *)(& tconn->flags)); } else { } tmp___0 = mdev_to_minor(mdev); snprintf((char *)(& mb), 12UL, "minor-%d", tmp___0); setup_khelper_env(tconn, (char **)(& envp)); drbd_md_sync(mdev); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "helper command: %s %s %s\n", (char *)(& usermode_helper), cmd, (char *)(& mb)); sib.sib_reason = SIB_HELPER_PRE; sib.ldv_50742.ldv_50737.helper_name = cmd; drbd_bcast_event(mdev, (struct sib_info const *)(& sib)); ret = call_usermodehelper((char *)(& usermode_helper), (char **)(& argv), (char **)(& envp), 2); if (ret != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "helper command: %s %s %s exit code %u (0x%x)\n", (char *)(& usermode_helper), cmd, (char *)(& mb), (ret >> 8) & 255, ret); } else { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "helper command: %s %s %s exit code %u (0x%x)\n", (char *)(& usermode_helper), cmd, (char *)(& mb), (ret >> 8) & 255, ret); } sib.sib_reason = SIB_HELPER_POST; sib.ldv_50742.ldv_50737.helper_exit_code = (unsigned int )ret; drbd_bcast_event(mdev, (struct sib_info const *)(& sib)); tmp___1 = get_current(); if ((unsigned long )tmp___1 == (unsigned long )tconn->worker.task) { clear_bit(11, (unsigned long volatile *)(& tconn->flags)); } else { } if (ret < 0) { ret = 0; } else { } return (ret); } } int conn_khelper(struct drbd_tconn *tconn , char *cmd ) { char *envp[6U] ; char __constr_expr_0[20] ; char __constr_expr_1[60] ; char *argv[4U] ; int ret ; { __constr_expr_0[0] = (char)0; __constr_expr_0[1] = (char)0; __constr_expr_0[2] = (char)0; __constr_expr_0[3] = (char)0; __constr_expr_0[4] = (char)0; __constr_expr_0[5] = (char)0; __constr_expr_0[6] = (char)0; __constr_expr_0[7] = (char)0; __constr_expr_0[8] = (char)0; __constr_expr_0[9] = (char)0; __constr_expr_0[10] = (char)0; __constr_expr_0[11] = (char)0; __constr_expr_0[12] = (char)0; __constr_expr_0[13] = (char)0; __constr_expr_0[14] = (char)0; __constr_expr_0[15] = (char)0; __constr_expr_0[16] = (char)0; __constr_expr_0[17] = (char)0; __constr_expr_0[18] = (char)0; __constr_expr_0[19] = (char)0; __constr_expr_1[0] = (char)0; __constr_expr_1[1] = (char)0; __constr_expr_1[2] 
= (char)0; __constr_expr_1[3] = (char)0; __constr_expr_1[4] = (char)0; __constr_expr_1[5] = (char)0; __constr_expr_1[6] = (char)0; __constr_expr_1[7] = (char)0; __constr_expr_1[8] = (char)0; __constr_expr_1[9] = (char)0; __constr_expr_1[10] = (char)0; __constr_expr_1[11] = (char)0; __constr_expr_1[12] = (char)0; __constr_expr_1[13] = (char)0; __constr_expr_1[14] = (char)0; __constr_expr_1[15] = (char)0; __constr_expr_1[16] = (char)0; __constr_expr_1[17] = (char)0; __constr_expr_1[18] = (char)0; __constr_expr_1[19] = (char)0; __constr_expr_1[20] = (char)0; __constr_expr_1[21] = (char)0; __constr_expr_1[22] = (char)0; __constr_expr_1[23] = (char)0; __constr_expr_1[24] = (char)0; __constr_expr_1[25] = (char)0; __constr_expr_1[26] = (char)0; __constr_expr_1[27] = (char)0; __constr_expr_1[28] = (char)0; __constr_expr_1[29] = (char)0; __constr_expr_1[30] = (char)0; __constr_expr_1[31] = (char)0; __constr_expr_1[32] = (char)0; __constr_expr_1[33] = (char)0; __constr_expr_1[34] = (char)0; __constr_expr_1[35] = (char)0; __constr_expr_1[36] = (char)0; __constr_expr_1[37] = (char)0; __constr_expr_1[38] = (char)0; __constr_expr_1[39] = (char)0; __constr_expr_1[40] = (char)0; __constr_expr_1[41] = (char)0; __constr_expr_1[42] = (char)0; __constr_expr_1[43] = (char)0; __constr_expr_1[44] = (char)0; __constr_expr_1[45] = (char)0; __constr_expr_1[46] = (char)0; __constr_expr_1[47] = (char)0; __constr_expr_1[48] = (char)0; __constr_expr_1[49] = (char)0; __constr_expr_1[50] = (char)0; __constr_expr_1[51] = (char)0; __constr_expr_1[52] = (char)0; __constr_expr_1[53] = (char)0; __constr_expr_1[54] = (char)0; __constr_expr_1[55] = (char)0; __constr_expr_1[56] = (char)0; __constr_expr_1[57] = (char)0; __constr_expr_1[58] = (char)0; __constr_expr_1[59] = (char)0; envp[0] = (char *)"HOME=/"; envp[1] = (char *)"TERM=linux"; envp[2] = (char *)"PATH=/sbin:/usr/sbin:/bin:/usr/bin"; envp[3] = (char *)(& __constr_expr_0); envp[4] = (char *)(& __constr_expr_1); envp[5] = 0; argv[0] = (char *)(& usermode_helper); argv[1] = cmd; argv[2] = tconn->name; argv[3] = 0; setup_khelper_env(tconn, (char **)(& envp)); conn_md_sync(tconn); printk("\016d-con %s: helper command: %s %s %s\n", tconn->name, (char *)(& usermode_helper), cmd, tconn->name); ret = call_usermodehelper((char *)(& usermode_helper), (char **)(& argv), (char **)(& envp), 2); if (ret != 0) { printk("\fd-con %s: helper command: %s %s %s exit code %u (0x%x)\n", tconn->name, (char *)(& usermode_helper), cmd, tconn->name, (ret >> 8) & 255, ret); } else { printk("\016d-con %s: helper command: %s %s %s exit code %u (0x%x)\n", tconn->name, (char *)(& usermode_helper), cmd, tconn->name, (ret >> 8) & 255, ret); } if (ret < 0) { ret = 0; } else { } return (ret); } } static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn ) { enum drbd_fencing_p fp ; struct drbd_conf *mdev ; int vnr ; void *tmp ; enum drbd_fencing_p __max1 ; enum drbd_fencing_p __max2 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; int tmp___2 ; void *tmp___3 ; { fp = -1; rcu_read_lock___6(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53196; ldv_53195: tmp___2 = _get_ldev_if_state(mdev, D_CONSISTENT); if (tmp___2 != 0) { __max1 = fp; _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 496, "suspicious rcu_dereference_check() usage"); } else { } } else { } __max2 = (enum drbd_fencing_p )_________p1->fencing; fp = (enum drbd_fencing_p )((int )__max1 > (int )__max2 ? (int )__max1 : (int )__max2); put_ldev(mdev); } else { } vnr = vnr + 1; tmp___3 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___3; ldv_53196: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53195; } else { } rcu_read_unlock___6(); return (fp); } } bool conn_try_outdate_peer(struct drbd_tconn *tconn ) { union drbd_state mask ; union drbd_state val ; enum drbd_fencing_p fp ; char *ex_to_string ; int r ; enum drbd_disk_state tmp ; int tmp___0 ; enum drbd_disk_state tmp___1 ; { mask.ldv_40024.role = (unsigned char)0; mask.ldv_40024.peer = (unsigned char)0; mask.ldv_40024.conn = (unsigned char)0; mask.ldv_40024.disk = (unsigned char)0; mask.ldv_40024.pdsk = (unsigned char)0; mask.ldv_40024.susp = (unsigned char)0; mask.ldv_40024.aftr_isp = (unsigned char)0; mask.ldv_40024.peer_isp = (unsigned char)0; mask.ldv_40024.user_isp = (unsigned char)0; mask.ldv_40024.susp_nod = (unsigned char)0; mask.ldv_40024.susp_fen = (unsigned char)0; mask.ldv_40024._pad = (unsigned short)0; val.ldv_40024.role = (unsigned char)0; val.ldv_40024.peer = (unsigned char)0; val.ldv_40024.conn = (unsigned char)0; val.ldv_40024.disk = (unsigned char)0; val.ldv_40024.pdsk = (unsigned char)0; val.ldv_40024.susp = (unsigned char)0; val.ldv_40024.aftr_isp = (unsigned char)0; val.ldv_40024.peer_isp = (unsigned char)0; val.ldv_40024.user_isp = (unsigned char)0; val.ldv_40024.susp_nod = (unsigned char)0; val.ldv_40024.susp_fen = (unsigned char)0; val.ldv_40024._pad = (unsigned short)0; if ((unsigned int )tconn->cstate > 8U) { printk("\vd-con %s: Expected cstate < C_WF_REPORT_PARAMS\n", tconn->name); return (0); } else { } fp = highest_fencing_policy(tconn); switch ((int )fp) { case -1: printk("\fd-con %s: Not fencing peer, I\'m not even Consistent myself.\n", tconn->name); goto out; case 0: ; return (1); default: ; } r = conn_khelper(tconn, (char *)"fence-peer"); switch ((r >> 8) & 255) { case 3: ex_to_string = (char *)"peer is inconsistent or worse"; mask.ldv_40024.pdsk = 15U; val.ldv_40024.pdsk = 4U; goto ldv_53211; case 4: ex_to_string = (char *)"peer was fenced"; mask.ldv_40024.pdsk = 15U; val.ldv_40024.pdsk = 5U; goto ldv_53211; case 5: tmp = conn_highest_disk(tconn); if ((unsigned int )tmp == 8U) { ex_to_string = (char *)"peer is unreachable, assumed to be dead"; mask.ldv_40024.pdsk = 15U; val.ldv_40024.pdsk = 5U; } else { ex_to_string = (char *)"peer unreachable, doing nothing since disk != UpToDate"; } goto ldv_53211; case 6: ex_to_string = (char *)"peer is active"; printk("\fd-con %s: Peer is primary, outdating myself.\n", tconn->name); mask.ldv_40024.disk = 15U; val.ldv_40024.disk = 5U; goto ldv_53211; case 7: ; if ((int )fp != 2) { printk("\vd-con %s: fence-peer() = 7 && fencing != Stonith !!!\n", tconn->name); } else { } ex_to_string = (char *)"peer was stonithed"; mask.ldv_40024.pdsk = 15U; val.ldv_40024.pdsk = 5U; goto ldv_53211; default: printk("\vd-con %s: fence-peer helper broken, returned %d\n", tconn->name, (r >> 8) & 255); return (0); } ldv_53211: printk("\016d-con %s: 
fence-peer helper returned %d (%s)\n", tconn->name, (r >> 8) & 255, ex_to_string); out: spin_lock_irq(& tconn->req_lock); if ((unsigned int )tconn->cstate <= 8U) { tmp___0 = constant_test_bit(10U, (unsigned long const volatile *)(& tconn->flags)); if (tmp___0 == 0) { _conn_request_state(tconn, mask, val, CS_VERBOSE); } else { } } else { } spin_unlock_irq(& tconn->req_lock); tmp___1 = conn_highest_pdsk(tconn); return ((unsigned int )tmp___1 <= 5U); } } static int _try_outdate_peer_async(void *data ) { struct drbd_tconn *tconn ; { tconn = (struct drbd_tconn *)data; conn_try_outdate_peer(tconn); kref_put(& tconn->kref, & conn_destroy); return (0); } } void conn_try_outdate_peer_async(struct drbd_tconn *tconn ) { struct task_struct *opa ; struct task_struct *__k ; struct task_struct *tmp ; long tmp___0 ; long tmp___1 ; { kref_get(& tconn->kref); tmp = kthread_create_on_node(& _try_outdate_peer_async, (void *)tconn, -1, "drbd_async_h"); __k = tmp; tmp___0 = IS_ERR((void const *)__k); if (tmp___0 == 0L) { wake_up_process(__k); } else { } opa = __k; tmp___1 = IS_ERR((void const *)opa); if (tmp___1 != 0L) { printk("\vd-con %s: out of mem, failed to invoke fence-peer helper\n", tconn->name); kref_put(& tconn->kref, & conn_destroy); } else { } return; } } enum drbd_state_rv drbd_set_role(struct drbd_conf *mdev , enum drbd_role new_role , int force ) { int max_tries ; enum drbd_state_rv rv ; struct net_conf *nc ; int try ; int forced ; union drbd_state mask ; union drbd_state val ; bool tmp ; bool tmp___0 ; int tmp___1 ; int timeo ; struct net_conf *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; int tmp___4 ; int tmp___5 ; wait_queue_t __wait ; struct task_struct *tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { max_tries = 4; rv = 0; try = 0; forced = 0; if ((unsigned int )new_role == 1U) { request_ping(mdev->tconn); } else { } ldv_mutex_lock_286(mdev->state_mutex); mask.i = 0U; mask.ldv_40024.role = 3U; val.i = 0U; val.ldv_40024.role = (unsigned char )new_role; goto ldv_53239; ldv_53246: rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE); if ((int )rv == -10 && *((unsigned int *)(& mask) + 0UL) != 0U) { val.ldv_40024.pdsk = 0U; mask.ldv_40024.pdsk = 0U; goto ldv_53239; } else { } if (((int )rv == -2 && force != 0) && ((int )mdev->state.ldv_49522.disk <= 7 && (int )mdev->state.ldv_49522.disk > 3)) { mask.ldv_40024.disk = 15U; val.ldv_40024.disk = 8U; forced = 1; goto ldv_53239; } else { } if (((int )rv == -2 && (unsigned int )*((unsigned char *)mdev + 749UL) == 14U) && *((unsigned int *)(& mask) + 0UL) == 0U) { if (*((unsigned int *)mdev + 187UL) != 49152U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->state.pdsk == D_UNKNOWN ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 651); } else { } tmp = conn_try_outdate_peer(mdev->tconn); if ((int )tmp) { val.ldv_40024.disk = 8U; mask.ldv_40024.disk = 15U; } else { } goto ldv_53239; } else { } if ((int )rv == 2) { goto out; } else { } if ((int )rv == -7 && *((unsigned int *)(& mask) + 0UL) == 0U) { tmp___0 = conn_try_outdate_peer(mdev->tconn); if (tmp___0) { tmp___1 = 0; } else { tmp___1 = 1; } if (tmp___1 && force != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Forced into split brain situation!\n"); mask.ldv_40024.pdsk = 15U; val.ldv_40024.pdsk = 5U; } else { } goto ldv_53239; } else { } if ((int )rv == 
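/*
 * drbd_set_role() retries _drbd_request_state() up to max_tries times,
 * translating specific failures into a stronger request: on -2 with
 * force set it promotes a not-quite-UpToDate local disk (Inconsistent
 * through Consistent) straight to UpToDate, on -7 it first tries to
 * outdate the peer, and the -1 case handled just below (both nodes
 * racing for primary) sleeps roughly one ping timeout and grants itself
 * one final retry before giving up.
 */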
-1) { rcu_read_lock___6(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 676, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; timeo = (unsigned long )nc != (unsigned long )((struct net_conf *)0) ? (int )(((nc->ping_timeo + 1U) * 250U) / 10U) : 1; rcu_read_unlock___6(); schedule_timeout_interruptible((long )timeo); if (try < max_tries) { try = max_tries + -1; } else { } goto ldv_53239; } else { } if ((int )rv <= 0) { rv = _drbd_request_state(mdev, mask, val, 6); if ((int )rv <= 0) { goto out; } else { } } else { } goto ldv_53245; ldv_53239: tmp___4 = try; try = try + 1; if (tmp___4 < max_tries) { goto ldv_53246; } else { } ldv_53245: ; if ((int )rv <= 0) { goto out; } else { } if (forced != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Forced to consider local data as UpToDate!\n"); } else { } tmp___5 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); if (tmp___5 == 0) { goto ldv_53247; } else { } tmp___6 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___6; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53250: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___7 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); if (tmp___7 == 0) { goto ldv_53249; } else { } schedule(); goto ldv_53250; ldv_53249: finish_wait(& mdev->misc_wait, & __wait); ldv_53247: ; if ((unsigned int )new_role == 2U) { set_disk_ro(mdev->vdisk, 1); tmp___8 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___8 != 0) { (mdev->ldev)->md.uuid[0] = (mdev->ldev)->md.uuid[0] & 0xfffffffffffffffeULL; put_ldev(mdev); } else { } } else { ldv_mutex_lock_287(& (mdev->tconn)->conf_update); nc = (mdev->tconn)->net_conf; if ((unsigned long )nc != (unsigned long )((struct net_conf *)0)) { nc->discard_my_data = 0; } else { } ldv_mutex_unlock_288(& (mdev->tconn)->conf_update); set_disk_ro(mdev->vdisk, 0); tmp___9 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___9 != 0) { if ((((int )mdev->state.ldv_49522.conn <= 9 || (int )mdev->state.ldv_49522.pdsk <= 2) && (mdev->ldev)->md.uuid[1] == 0ULL) || forced != 0) { drbd_uuid_new_current(mdev); } else { } (mdev->ldev)->md.uuid[0] = (mdev->ldev)->md.uuid[0] | 1ULL; put_ldev(mdev); } else { } } if ((int )mdev->state.ldv_49522.conn > 8) { if (forced != 0) { drbd_send_uuids(mdev); } else { } drbd_send_current_state(mdev); } else { } drbd_md_sync(mdev); kobject_uevent(& (mdev->vdisk)->part0.__dev.kobj, KOBJ_CHANGE); out: ldv_mutex_unlock_289(mdev->state_mutex); return (rv); } } static char const *from_attrs_err_to_txt(int err ) { { return (err != -42 ? (err != -95 ? (err == -17 ? 
"can not change invariant setting" : "invalid attribute value") : "unknown mandatory attribute") : "required attribute missing"); } } int drbd_adm_set_role(struct sk_buff *skb , struct genl_info *info ) { struct set_role_parms parms ; int err ; enum drbd_ret_code retcode ; int tmp ; char const *tmp___0 ; enum drbd_state_rv tmp___1 ; enum drbd_state_rv tmp___2 ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } memset((void *)(& parms), 0, 1UL); if ((unsigned long )*(info->attrs + 6UL) != (unsigned long )((struct nlattr *)0)) { err = set_role_parms_from_attrs(& parms, info); if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___0 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___0); goto out; } else { } } else { } if ((unsigned int )(info->genlhdr)->cmd == 14U) { tmp___1 = drbd_set_role(adm_ctx.mdev, R_PRIMARY, (int )parms.assume_uptodate); retcode = (enum drbd_ret_code )tmp___1; } else { tmp___2 = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0); retcode = (enum drbd_ret_code )tmp___2; } out: drbd_adm_finish(info, (int )retcode); return (0); } } static void drbd_md_set_sector_offsets(struct drbd_conf *mdev , struct drbd_backing_dev *bdev ) { sector_t md_size_sect ; int meta_dev_idx ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; sector_t tmp___1 ; sector_t tmp___2 ; sector_t tmp___3 ; { md_size_sect = 0UL; rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& bdev->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 795, "suspicious rcu_dereference_check() usage"); } else { } } else { } meta_dev_idx = _________p1->meta_dev_idx; switch (meta_dev_idx) { default: bdev->md.md_size_sect = 262144U; tmp___1 = drbd_md_ss_____0(mdev, bdev); bdev->md.md_offset = (u64 )tmp___1; bdev->md.al_offset = 8; bdev->md.bm_offset = 72; goto ldv_53272; case -2: tmp___2 = drbd_get_capacity(bdev->md_bdev); bdev->md.md_size_sect = (u32 )tmp___2; bdev->md.md_offset = 0ULL; bdev->md.al_offset = 8; bdev->md.bm_offset = 72; goto ldv_53272; case -1: ; case -3: tmp___3 = drbd_md_ss_____0(mdev, bdev); bdev->md.md_offset = (u64 )tmp___3; bdev->md.al_offset = -64; md_size_sect = drbd_get_capacity(bdev->backing_bdev); md_size_sect = (md_size_sect + 32767UL) & 0xffffffffffff8000UL; md_size_sect = md_size_sect >> 15; md_size_sect = (md_size_sect + 7UL) & 0xfffffffffffffff8UL; md_size_sect = md_size_sect + 72UL; bdev->md.md_size_sect = (u32 )md_size_sect; bdev->md.bm_offset = (s32 )(8U - (unsigned int )md_size_sect); goto ldv_53272; } ldv_53272: rcu_read_unlock___6(); return; } } char *ppsize(char *buf , unsigned long long size ) { char units[6U] ; int base ; { units[0] = 75; units[1] = 77; units[2] = 71; units[3] = 84; units[4] = 80; units[5] = 69; base = 0; goto ldv_53283; ldv_53282: size = (size >> 10) + (unsigned long long )((size & 512ULL) != 0ULL); base = base + 1; ldv_53283: ; if (size > 9999ULL && (unsigned int )base <= 4U) { goto ldv_53282; } else { } sprintf(buf, "%u %cB", (unsigned int )size, (int )units[base]); return (buf); } } void drbd_suspend_io(struct drbd_conf 
*mdev ) { int tmp ; int tmp___0 ; wait_queue_t __wait ; struct task_struct *tmp___1 ; int tmp___2 ; { set_bit(8U, (unsigned long volatile *)(& mdev->flags)); tmp = drbd_suspended(mdev); if (tmp != 0) { return; } else { } tmp___0 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); if (tmp___0 == 0) { goto ldv_53288; } else { } tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53291: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___2 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); if (tmp___2 == 0) { goto ldv_53290; } else { } schedule(); goto ldv_53291; ldv_53290: finish_wait(& mdev->misc_wait, & __wait); ldv_53288: ; return; } } void drbd_resume_io(struct drbd_conf *mdev ) { { clear_bit(8, (unsigned long volatile *)(& mdev->flags)); __wake_up(& mdev->misc_wait, 3U, 1, 0); return; } } enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev , enum dds_flags flags ) { sector_t prev_first_sect ; sector_t prev_size ; sector_t la_size ; sector_t u_size ; sector_t size ; char ppb[10U] ; int md_moved ; int la_size_changed ; enum determine_dev_size rv ; int tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; int tmp___1 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; int err ; sector_t tmp___4 ; long tmp___5 ; char *tmp___6 ; sector_t tmp___7 ; sector_t tmp___8 ; sector_t tmp___9 ; int err___0 ; { rv = 0; drbd_suspend_io(mdev); tmp = lc_try_lock(mdev->act_log); if (tmp != 0) { goto ldv_53308; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53311: prepare_to_wait(& mdev->al_wait, & __wait, 2); tmp___1 = lc_try_lock(mdev->act_log); if (tmp___1 != 0) { goto ldv_53310; } else { } schedule(); goto ldv_53311; ldv_53310: finish_wait(& mdev->al_wait, & __wait); ldv_53308: prev_first_sect = drbd_md_first_sector___0(mdev->ldev); prev_size = (sector_t )(mdev->ldev)->md.md_size_sect; la_size = (sector_t )(mdev->ldev)->md.la_size_sect; drbd_md_set_sector_offsets(mdev, mdev->ldev); rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 924, "suspicious rcu_dereference_check() usage"); } else { } } else { } u_size = (sector_t )_________p1->disk_size; rcu_read_unlock___6(); size = drbd_new_dev_size(mdev, mdev->ldev, u_size, (int )flags & 1); tmp___7 = drbd_get_capacity(mdev->this_bdev); if (tmp___7 != size) { goto _L; } else { tmp___8 = drbd_bm_capacity(mdev); if (tmp___8 != size) { _L: /* CIL Label */ err = drbd_bm_resize(mdev, size, ((unsigned int )flags & 2U) == 0U); tmp___5 = ldv__builtin_expect(err != 0, 0L); if (tmp___5 != 0L) { tmp___4 = drbd_bm_capacity(mdev); size = tmp___4 >> 1; if (size == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "OUT OF MEMORY! Could not allocate bitmap!\n"); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "BM resizing failed. 
Leaving size unchanged at size = %lu KB\n", size); } rv = dev_size_error; } else { } drbd_set_my_capacity(mdev, size); (mdev->ldev)->md.la_size_sect = (u64 )size; tmp___6 = ppsize((char *)(& ppb), (unsigned long long )(size >> 1)); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "size = %s (%llu KB)\n", tmp___6, (unsigned long long )size >> 1); } else { } } if ((int )rv == -1) { goto out; } else { } la_size_changed = (mdev->ldev)->md.la_size_sect != (unsigned long long )la_size; tmp___9 = drbd_md_first_sector___0(mdev->ldev); md_moved = tmp___9 != prev_first_sect || (sector_t )(mdev->ldev)->md.md_size_sect != prev_size; if (la_size_changed != 0 || md_moved != 0) { drbd_al_shrink(mdev); _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Writing the whole bitmap, %s\n", la_size_changed == 0 || md_moved == 0 ? (la_size_changed != 0 ? (char *)"size changed" : (char *)"md moved") : (char *)"size changed and md moved"); err___0 = drbd_bitmap_io(mdev, md_moved != 0 ? & drbd_bm_write_all : & drbd_bm_write, (char *)"size changed", BM_LOCKED_MASK); if (err___0 != 0) { rv = dev_size_error; goto out; } else { } drbd_md_mark_dirty(mdev); } else { } if (size > la_size) { rv = grew; } else { } if (size < la_size) { rv = shrunk; } else { } out: lc_unlock(mdev->act_log); __wake_up(& mdev->al_wait, 3U, 1, 0); drbd_resume_io(mdev); return (rv); } } sector_t drbd_new_dev_size(struct drbd_conf *mdev , struct drbd_backing_dev *bdev , sector_t u_size , int assume_peer_has_space ) { sector_t p_size ; sector_t la_size ; sector_t m_size ; sector_t size ; sector_t __min1 ; sector_t __min2 ; { p_size = mdev->p_size; la_size = (sector_t )bdev->md.la_size_sect; size = 0UL; m_size = drbd_get_max_capacity___1(bdev); if ((int )mdev->state.ldv_49522.conn <= 9 && assume_peer_has_space != 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Resize while not connected was forced by the user!\n"); p_size = m_size; } else { } if (p_size != 0UL && m_size != 0UL) { __min1 = p_size; __min2 = m_size; size = __min1 < __min2 ? 
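/*
 * drbd_new_dev_size() picks the effective device size: with both the
 * peer capacity p_size and the local capacity m_size known it takes
 * min(p_size, m_size); otherwise it falls back to the last agreed size
 * la_size, clamped by whichever capacity is known.  A user-requested
 * u_size may only shrink the result, never grow it past what the
 * backing devices allow.
 */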
__min1 : __min2; } else if (la_size != 0UL) { size = la_size; if (m_size != 0UL && m_size < size) { size = m_size; } else { } if (p_size != 0UL && p_size < size) { size = p_size; } else { } } else { if (m_size != 0UL) { size = m_size; } else { } if (p_size != 0UL) { size = p_size; } else { } } if (size == 0UL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Both nodes diskless!\n"); } else { } if (u_size != 0UL) { if (u_size > size) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Requested disk size is too big (%lu > %lu)\n", u_size >> 1, size >> 1); } else { size = u_size; } } else { } return (size); } } static int drbd_check_al_size(struct drbd_conf *mdev , struct disk_conf *dc ) { struct lru_cache *n ; struct lru_cache *t ; struct lc_element *e ; unsigned int in_use ; int i ; { if ((unsigned long )mdev->act_log != (unsigned long )((struct lru_cache *)0) && (mdev->act_log)->nr_elements == dc->al_extents) { return (0); } else { } in_use = 0U; t = mdev->act_log; n = lc_create("act_log", drbd_al_ext_cache, 64U, dc->al_extents, 48UL, 0UL); if ((unsigned long )n == (unsigned long )((struct lru_cache *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Cannot allocate act_log lru!\n"); return (-12); } else { } spin_lock_irq(& mdev->al_lock); if ((unsigned long )t != (unsigned long )((struct lru_cache *)0)) { i = 0; goto ldv_53341; ldv_53340: e = lc_element_by_index(t, (unsigned int )i); if (e->refcnt != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "refcnt(%d)==%d\n", e->lc_number, e->refcnt); } else { } in_use = e->refcnt + in_use; i = i + 1; ldv_53341: ; if ((unsigned int )i < t->nr_elements) { goto ldv_53340; } else { } } else { } if (in_use == 0U) { mdev->act_log = n; } else { } spin_unlock_irq(& mdev->al_lock); if (in_use != 0U) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Activity log still in use!\n"); lc_destroy(n); return (-16); } else if ((unsigned long )t != (unsigned long )((struct lru_cache *)0)) { lc_destroy(t); } else { } drbd_md_mark_dirty(mdev); return (0); } } static void drbd_setup_queue_param(struct drbd_conf *mdev , unsigned int max_bio_size ) { struct request_queue *q ; unsigned int max_hw_sectors ; unsigned int max_segments ; struct request_queue *b ; unsigned int _min1 ; unsigned int tmp ; unsigned int _min2 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___0 ; int tmp___1 ; int tmp___2 ; struct request_queue *b___0 ; int tmp___3 ; { q = mdev->rq_queue; max_hw_sectors = max_bio_size >> 9; max_segments = 0U; tmp___2 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___2 != 0) { b = (((mdev->ldev)->backing_bdev)->bd_disk)->queue; tmp = queue_max_hw_sectors(b); _min1 = tmp; _min2 = max_bio_size >> 9; max_hw_sectors = _min1 < _min2 ? _min1 : _min2; rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! 
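/*
 * drbd_check_al_size() above swaps in a freshly lc_create()d activity
 * log whenever al_extents changed, refusing with -16 (-EBUSY) while any
 * extent still holds a reference.  drbd_setup_queue_param() here stacks
 * the request-queue limits: max_hw_sectors becomes the minimum of the
 * backing queue's limit and max_bio_size >> 9, and max_bio_bvecs (when
 * non-zero) caps the segment count, 128 otherwise.
 */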
__warned) { tmp___1 = rcu_read_lock_held(); if (tmp___1 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1099, "suspicious rcu_dereference_check() usage"); } else { } } else { } max_segments = _________p1->max_bio_bvecs; rcu_read_unlock___6(); put_ldev(mdev); } else { } blk_queue_logical_block_size(q, 512); blk_queue_max_hw_sectors(q, max_hw_sectors); blk_queue_max_segments(q, max_segments != 0U ? (int )((unsigned short )max_segments) : 128); blk_queue_segment_boundary(q, 4095UL); tmp___3 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___3 != 0) { b___0 = (((mdev->ldev)->backing_bdev)->bd_disk)->queue; blk_queue_stack_limits(q, b___0); if (q->backing_dev_info.ra_pages != b___0->backing_dev_info.ra_pages) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Adjusting my ra_pages to backing device\'s (%lu -> %lu)\n", q->backing_dev_info.ra_pages, b___0->backing_dev_info.ra_pages); q->backing_dev_info.ra_pages = b___0->backing_dev_info.ra_pages; } else { } put_ldev(mdev); } else { } return; } } void drbd_reconsider_max_bio_size(struct drbd_conf *mdev ) { unsigned int now ; unsigned int new ; unsigned int local ; unsigned int peer ; unsigned int tmp ; unsigned int tmp___0 ; int tmp___1 ; unsigned int _min1 ; unsigned int _min2 ; unsigned int _min1___0 ; unsigned int _min2___0 ; unsigned int _min1___1 ; unsigned int _min2___1 ; { tmp = queue_max_hw_sectors(mdev->rq_queue); now = tmp << 9; local = mdev->local_max_bio_size; peer = mdev->peer_max_bio_size; tmp___1 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___1 != 0) { tmp___0 = queue_max_hw_sectors((((mdev->ldev)->backing_bdev)->bd_disk)->queue); local = tmp___0 << 9; mdev->local_max_bio_size = local; put_ldev(mdev); } else { } _min1 = local; _min2 = 1048576U; local = _min1 < _min2 ? _min1 : _min2; if ((int )mdev->state.ldv_49522.conn > 9) { if ((mdev->tconn)->agreed_pro_version <= 93) { _min1___0 = mdev->peer_max_bio_size; _min2___0 = 32768U; peer = _min1___0 < _min2___0 ? _min1___0 : _min2___0; } else if ((mdev->tconn)->agreed_pro_version == 94) { peer = 32768U; } else if ((mdev->tconn)->agreed_pro_version <= 99) { peer = 131072U; } else { peer = 1048576U; } } else { } _min1___1 = local; _min2___1 = peer; new = _min1___1 < _min2___1 ? 
_min1___1 : _min2___1; if ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U && new < now) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED new < now; (%u < %u)\n", new, now); } else { } if (new != now) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "max BIO size = %u\n", new); } else { } drbd_setup_queue_param(mdev, new); return; } } static void conn_reconfig_start(struct drbd_tconn *tconn ) { { drbd_thread_start(& tconn->worker); conn_flush_workqueue(tconn); return; } } static void conn_reconfig_done(struct drbd_tconn *tconn ) { bool stop_threads ; bool tmp ; { spin_lock_irq(& tconn->req_lock); tmp = conn_all_vols_unconf(tconn); stop_threads = (bool )((int )tmp && (unsigned int )tconn->cstate == 0U); spin_unlock_irq(& tconn->req_lock); if ((int )stop_threads) { drbd_thread_stop(& tconn->receiver); drbd_thread_stop(& tconn->worker); } else { } return; } } static void drbd_suspend_al(struct drbd_conf *mdev ) { int s ; int tmp ; int tmp___0 ; { s = 0; tmp = lc_try_lock(mdev->act_log); if (tmp == 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "Failed to lock al in drbd_suspend_al()\n"); return; } else { } drbd_al_shrink(mdev); spin_lock_irq(& (mdev->tconn)->req_lock); if ((int )mdev->state.ldv_49522.conn <= 9) { tmp___0 = test_and_set_bit(18, (unsigned long volatile *)(& mdev->flags)); s = tmp___0 == 0; } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); lc_unlock(mdev->act_log); if (s != 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Suspended AL updates\n"); } else { } return; } } static bool should_set_defaults(struct genl_info *info ) { unsigned int flags ; { flags = ((struct drbd_genlmsghdr *)info->userhdr)->ldv_49826.flags; return (((int )flags & 1) != 0); } } static void enforce_disk_conf_limits(struct disk_conf *dc ) { { if (dc->al_extents <= 6U) { dc->al_extents = 7U; } else { } if (dc->al_extents > 6433U) { dc->al_extents = 6433U; } else { } if (dc->c_plan_ahead > 300U) { dc->c_plan_ahead = 300U; } else { } return; } } int drbd_adm_disk_opts(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; struct drbd_conf *mdev ; struct disk_conf *new_disk_conf ; struct disk_conf *old_disk_conf ; struct fifo_buffer *old_plan ; struct fifo_buffer *new_plan ; int err ; int fifo_size ; int tmp ; int tmp___0 ; void *tmp___1 ; bool tmp___2 ; char const *tmp___3 ; bool _bool ; int tmp___4 ; int tmp___5 ; wait_queue_t __wait ; struct task_struct *tmp___6 ; int tmp___7 ; { old_plan = 0; new_plan = 0; tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } mdev = adm_ctx.mdev; tmp___0 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___0 == 0) { retcode = ERR_NO_DISK; goto out; } else { } tmp___1 = kmalloc(344UL, 208U); new_disk_conf = (struct disk_conf *)tmp___1; if ((unsigned long )new_disk_conf == (unsigned long )((struct disk_conf *)0)) { retcode = ERR_NOMEM; goto fail; } else { } ldv_mutex_lock_290(& (mdev->tconn)->conf_update); old_disk_conf = (mdev->ldev)->disk_conf; *new_disk_conf = *old_disk_conf; tmp___2 = should_set_defaults(info); if ((int )tmp___2) { set_disk_conf_defaults(new_disk_conf); } else { } err = disk_conf_from_attrs_for_change(new_disk_conf, info); if (err != 0 && err != -42) { retcode = ERR_MANDATORY_TAG; tmp___3 = from_attrs_err_to_txt(err); 
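/* Descriptive note on the code that follows (inferred from this file only):
 * the merged option set in new_disk_conf is sanity-checked below --
 * resync_rate is clamped to at least 1, enforce_disk_conf_limits() bounds
 * al_extents (7..6433) and c_plan_ahead (<= 300), the activity log is
 * resized via drbd_check_al_size() once the act_log lock is obtained
 * (sleeping on al_wait if needed) -- and only then is the new disk_conf
 * published while conf_update and global_state_lock are held, with the
 * old copy freed after synchronize_rcu(). */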
drbd_msg_put_info(tmp___3); } else { } _bool = new_disk_conf->resync_rate != 0U; if (! _bool) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERTION %s FAILED in %s\n", (char *)"new_disk_conf->resync_rate >= 1", "drbd_adm_disk_opts"); } else { } if (_bool) { tmp___4 = 0; } else { tmp___4 = 1; } if (tmp___4) { new_disk_conf->resync_rate = 1U; } else { } enforce_disk_conf_limits(new_disk_conf); fifo_size = (int )((new_disk_conf->c_plan_ahead * 250U) / 250U); if ((unsigned int )fifo_size != (mdev->rs_plan_s)->size) { new_plan = fifo_alloc(fifo_size); if ((unsigned long )new_plan == (unsigned long )((struct fifo_buffer *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "kmalloc of fifo_buffer failed"); retcode = ERR_NOMEM; goto fail_unlock; } else { } } else { } drbd_suspend_io(mdev); tmp___5 = lc_try_lock(mdev->act_log); if (tmp___5 != 0) { goto ldv_53410; } else { } tmp___6 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___6; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53413: prepare_to_wait(& mdev->al_wait, & __wait, 2); tmp___7 = lc_try_lock(mdev->act_log); if (tmp___7 != 0) { goto ldv_53412; } else { } schedule(); goto ldv_53413; ldv_53412: finish_wait(& mdev->al_wait, & __wait); ldv_53410: drbd_al_shrink(mdev); err = drbd_check_al_size(mdev, new_disk_conf); lc_unlock(mdev->act_log); __wake_up(& mdev->al_wait, 3U, 1, 0); drbd_resume_io(mdev); if (err != 0) { retcode = ERR_NOMEM; goto fail_unlock; } else { } _raw_write_lock_irq(& global_state_lock); retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after); if ((unsigned int )retcode == 101U) { __asm__ volatile ("": : : "memory"); (mdev->ldev)->disk_conf = new_disk_conf; drbd_resync_after_changed(mdev); } else { } _raw_write_unlock_irq(& global_state_lock); if ((unsigned int )retcode != 101U) { goto fail_unlock; } else { } if ((unsigned long )new_plan != (unsigned long )((struct fifo_buffer *)0)) { old_plan = mdev->rs_plan_s; __asm__ volatile ("": : : "memory"); mdev->rs_plan_s = new_plan; } else { } ldv_mutex_unlock_291(& (mdev->tconn)->conf_update); if ((int )((signed char )new_disk_conf->al_updates) != 0) { (mdev->ldev)->md.flags = (mdev->ldev)->md.flags & 4294967039U; } else { (mdev->ldev)->md.flags = (mdev->ldev)->md.flags | 256U; } drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush); drbd_md_sync(mdev); if ((int )mdev->state.ldv_49522.conn > 9) { drbd_send_sync_param(mdev); } else { } synchronize_rcu(); kfree((void const *)old_disk_conf); kfree((void const *)old_plan); mod_timer(& mdev->request_timer, (unsigned long )jiffies + 250UL); goto success; fail_unlock: ldv_mutex_unlock_292(& (mdev->tconn)->conf_update); fail: kfree((void const *)new_disk_conf); kfree((void const *)new_plan); success: put_ldev(mdev); out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_attach(struct sk_buff *skb , struct genl_info *info ) { struct drbd_conf *mdev ; int err ; enum drbd_ret_code retcode ; enum determine_dev_size dd ; sector_t max_possible_sectors ; sector_t min_md_device_sectors ; struct drbd_backing_dev *nbc ; struct disk_conf *new_disk_conf ; struct block_device *bdev ; struct lru_cache *resync_lru ; struct fifo_buffer *new_plan ; union drbd_state ns ; union drbd_state os ; enum drbd_state_rv rv ; struct net_conf *nc ; int tmp ; int tmp___0 ; wait_queue_t __wait ; struct task_struct *tmp___1 ; int tmp___2 ; void *tmp___3 ; struct lock_class_key __key ; void *tmp___4 
; char const *tmp___5 ; struct net_conf *_________p1 ; bool __warned ; int tmp___6 ; int tmp___7 ; long tmp___8 ; long tmp___9 ; long tmp___10 ; long tmp___11 ; sector_t tmp___12 ; sector_t tmp___13 ; sector_t tmp___14 ; sector_t tmp___15 ; sector_t tmp___16 ; int tmp___17 ; int tmp___18 ; wait_queue_t __wait___0 ; struct task_struct *tmp___19 ; int tmp___20 ; int tmp___21 ; union drbd_state val ; union drbd_state mask ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int tmp___26 ; sector_t tmp___27 ; int tmp___28 ; int tmp___29 ; int tmp___30 ; int tmp___31 ; int tmp___32 ; int tmp___33 ; int tmp___34 ; int tmp___35 ; int tmp___36 ; unsigned long tmp___37 ; unsigned long tmp___38 ; int tmp___39 ; int tmp___40 ; int tmp___41 ; struct disk_conf *_________p1___0 ; bool __warned___0 ; int tmp___42 ; int tmp___43 ; struct disk_conf *_________p1___1 ; bool __warned___1 ; int tmp___44 ; int tmp___45 ; union drbd_state val___0 ; union drbd_state mask___0 ; { nbc = 0; new_disk_conf = 0; resync_lru = 0; new_plan = 0; tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto finish; } else { } mdev = adm_ctx.mdev; conn_reconfig_start(mdev->tconn); if ((int )mdev->state.ldv_49522.disk > 0) { retcode = ERR_DISK_CONFIGURED; goto fail; } else { } tmp___0 = atomic_read((atomic_t const *)(& mdev->local_cnt)); if (tmp___0 == 0) { goto ldv_53436; } else { } tmp___1 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___1; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53439: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___2 = atomic_read((atomic_t const *)(& mdev->local_cnt)); if (tmp___2 == 0) { goto ldv_53438; } else { } schedule(); goto ldv_53439; ldv_53438: finish_wait(& mdev->misc_wait, & __wait); ldv_53436: clear_bit(14, (unsigned long volatile *)(& mdev->flags)); clear_bit(12, (unsigned long volatile *)(& mdev->flags)); clear_bit(13, (unsigned long volatile *)(& mdev->flags)); mdev->rs_total = 0UL; mdev->rs_failed = 0UL; atomic_set(& mdev->rs_pending_cnt, 0); tmp___3 = kzalloc(176UL, 208U); nbc = (struct drbd_backing_dev *)tmp___3; if ((unsigned long )nbc == (unsigned long )((struct drbd_backing_dev *)0)) { retcode = ERR_NOMEM; goto fail; } else { } spinlock_check(& nbc->md.uuid_lock); __raw_spin_lock_init(& nbc->md.uuid_lock.ldv_5957.rlock, "&(&nbc->md.uuid_lock)->rlock", & __key); tmp___4 = kzalloc(344UL, 208U); new_disk_conf = (struct disk_conf *)tmp___4; if ((unsigned long )new_disk_conf == (unsigned long )((struct disk_conf *)0)) { retcode = ERR_NOMEM; goto fail; } else { } nbc->disk_conf = new_disk_conf; set_disk_conf_defaults(new_disk_conf); err = disk_conf_from_attrs(new_disk_conf, info); if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___5 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___5); goto fail; } else { } enforce_disk_conf_limits(new_disk_conf); new_plan = fifo_alloc((int )((new_disk_conf->c_plan_ahead * 250U) / 250U)); if ((unsigned long )new_plan == (unsigned long )((struct fifo_buffer *)0)) { retcode = ERR_NOMEM; goto fail; } else { } if (new_disk_conf->meta_dev_idx < -3) { retcode = ERR_MD_IDX_INVALID; goto fail; } else { } rcu_read_lock___6(); _________p1 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___6 = debug_lockdep_rcu_enabled(); if (tmp___6 != 0 && ! 
__warned) { tmp___7 = rcu_read_lock_held(); if (tmp___7 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1429, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1; if ((unsigned long )nc != (unsigned long )((struct net_conf *)0)) { if (new_disk_conf->fencing == 2U && nc->wire_protocol == 1U) { rcu_read_unlock___6(); retcode = ERR_STONITH_AND_PROT_A; goto fail; } else { } } else { } rcu_read_unlock___6(); bdev = blkdev_get_by_path((char const *)(& new_disk_conf->backing_dev), 131U, (void *)mdev); tmp___9 = IS_ERR((void const *)bdev); if (tmp___9 != 0L) { tmp___8 = PTR_ERR((void const *)bdev); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "open(\"%s\") failed with %ld\n", (char *)(& new_disk_conf->backing_dev), tmp___8); retcode = ERR_OPEN_DISK; goto fail; } else { } nbc->backing_bdev = bdev; bdev = blkdev_get_by_path((char const *)(& new_disk_conf->meta_dev), 131U, new_disk_conf->meta_dev_idx < 0 ? (void *)mdev : (void *)drbd_m_holder); tmp___11 = IS_ERR((void const *)bdev); if (tmp___11 != 0L) { tmp___10 = PTR_ERR((void const *)bdev); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "open(\"%s\") failed with %ld\n", (char *)(& new_disk_conf->meta_dev), tmp___10); retcode = ERR_OPEN_MD_DISK; goto fail; } else { } nbc->md_bdev = bdev; if (((unsigned long )nbc->backing_bdev == (unsigned long )nbc->md_bdev) ^ (int )((_Bool )(new_disk_conf->meta_dev_idx == -1 || new_disk_conf->meta_dev_idx == -3))) { retcode = ERR_MD_IDX_INVALID; goto fail; } else { } resync_lru = lc_create("resync", drbd_bm_ext_cache, 1U, 61U, 64UL, 16UL); if ((unsigned long )resync_lru == (unsigned long )((struct lru_cache *)0)) { retcode = ERR_NOMEM; goto fail; } else { } drbd_md_set_sector_offsets(mdev, nbc); tmp___13 = drbd_get_max_capacity___1(nbc); if ((unsigned long long )tmp___13 < new_disk_conf->disk_size) { tmp___12 = drbd_get_max_capacity___1(nbc); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "max capacity %llu smaller than disk size %llu\n", (unsigned long long )tmp___12, new_disk_conf->disk_size); retcode = ERR_DISK_TOO_SMALL; goto fail; } else { } if (new_disk_conf->meta_dev_idx < 0) { max_possible_sectors = 2251799813685248UL; min_md_device_sectors = 2048UL; } else { max_possible_sectors = 8587575296UL; min_md_device_sectors = (unsigned long )(new_disk_conf->meta_dev_idx + 1) * 262144UL; } tmp___14 = drbd_get_capacity(nbc->md_bdev); if (tmp___14 < min_md_device_sectors) { retcode = ERR_MD_DISK_TOO_SMALL; dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "refusing attach: md-device too small, at least %llu sectors needed for this meta-disk type\n", (unsigned long long )min_md_device_sectors); goto fail; } else { } tmp___15 = drbd_get_max_capacity___1(nbc); tmp___16 = drbd_get_capacity(mdev->this_bdev); if (tmp___15 < tmp___16) { retcode = ERR_DISK_TOO_SMALL; goto fail; } else { } nbc->known_size = drbd_get_capacity(nbc->backing_bdev); if (nbc->known_size > max_possible_sectors) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "==> truncating very big lower level device to currently maximum possible %llu sectors <==\n", (unsigned long long )max_possible_sectors); if (new_disk_conf->meta_dev_idx >= 0) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "==>> using internal or flexible meta data may help <<==\n"); } else { } } else { }
drbd_suspend_io(mdev); tmp___17 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); if (tmp___17 == 0) { goto ldv_53444; } else { tmp___18 = drbd_suspended(mdev); if (tmp___18 != 0) { goto ldv_53444; } else { } } tmp___19 = get_current(); __wait___0.flags = 0U; __wait___0.private = (void *)tmp___19; __wait___0.func = & autoremove_wake_function; __wait___0.task_list.next = & __wait___0.task_list; __wait___0.task_list.prev = & __wait___0.task_list; ldv_53447: prepare_to_wait(& mdev->misc_wait, & __wait___0, 2); tmp___20 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); if (tmp___20 == 0) { goto ldv_53446; } else { tmp___21 = drbd_suspended(mdev); if (tmp___21 != 0) { goto ldv_53446; } else { } } schedule(); goto ldv_53447; ldv_53446: finish_wait(& mdev->misc_wait, & __wait___0); ldv_53444: drbd_flush_workqueue(mdev); val.i = 0U; val.ldv_40024.disk = 1U; mask.i = 0U; mask.ldv_40024.disk = 15U; rv = _drbd_request_state(mdev, mask, val, CS_VERBOSE); retcode = (enum drbd_ret_code )rv; drbd_resume_io(mdev); if ((int )rv <= 0) { goto fail; } else { } tmp___22 = _get_ldev_if_state(mdev, D_ATTACHING); if (tmp___22 == 0) { goto force_diskless; } else { } drbd_md_set_sector_offsets(mdev, nbc); if ((unsigned long )mdev->bitmap == (unsigned long )((struct drbd_bitmap *)0)) { tmp___23 = drbd_bm_init(mdev); if (tmp___23 != 0) { retcode = ERR_NOMEM; goto force_diskless_dec; } else { } } else { } tmp___24 = drbd_md_read(mdev, nbc); retcode = (enum drbd_ret_code )tmp___24; if ((unsigned int )retcode != 101U) { goto force_diskless_dec; } else { } if (((int )mdev->state.ldv_49522.conn <= 9 && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) && ((mdev->ed_uuid ^ nbc->md.uuid[0]) & 0xfffffffffffffffeULL) != 0ULL) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Can only attach to data with current UUID=%016llX\n", mdev->ed_uuid); retcode = ERR_DATA_NOT_CURRENT; goto force_diskless_dec; } else { } tmp___25 = drbd_check_al_size(mdev, new_disk_conf); if (tmp___25 != 0) { retcode = ERR_NOMEM; goto force_diskless_dec; } else { } tmp___26 = drbd_md_test_flag(nbc, 1); if (tmp___26 != 0) { tmp___27 = drbd_new_dev_size(mdev, nbc, (sector_t )(nbc->disk_conf)->disk_size, 0); if ((unsigned long long )tmp___27 < nbc->md.la_size_sect) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "refusing to truncate a consistent device\n"); retcode = ERR_DISK_TOO_SMALL; goto force_diskless_dec; } else { } } else { } if ((int )((signed char )new_disk_conf->md_flushes) != 0) { clear_bit(7, (unsigned long volatile *)(& mdev->flags)); } else { set_bit(7U, (unsigned long volatile *)(& mdev->flags)); } if ((unsigned long )mdev->ldev != (unsigned long )((struct drbd_backing_dev *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->ldev == NULL ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1599); } else { } mdev->ldev = nbc; mdev->resync = resync_lru; mdev->rs_plan_s = new_plan; nbc = 0; resync_lru = 0; new_disk_conf = 0; new_plan = 0; drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush); tmp___28 = drbd_md_test_flag(mdev->ldev, 64); if (tmp___28 != 0) { set_bit(5U, (unsigned long volatile *)(& mdev->flags)); } else { clear_bit(5, (unsigned long volatile *)(& mdev->flags)); } tmp___29 = drbd_md_test_flag(mdev->ldev, 2); if (tmp___29 != 0 &&
((unsigned int )*((unsigned char *)mdev + 748UL) != 1U || (unsigned int )*((unsigned char *)mdev->tconn + 132UL) == 0U)) { set_bit(5U, (unsigned long volatile *)(& mdev->flags)); } else { } mdev->send_cnt = 0U; mdev->recv_cnt = 0U; mdev->read_cnt = 0U; mdev->writ_cnt = 0U; drbd_reconsider_max_bio_size(mdev); clear_bit(2, (unsigned long volatile *)(& mdev->flags)); if ((unsigned int )*((unsigned char *)mdev + 748UL) != 1U) { tmp___30 = drbd_md_test_flag(mdev->ldev, 2); if (tmp___30 != 0) { tmp___31 = drbd_md_test_flag(mdev->ldev, 4); if (tmp___31 == 0) { set_bit(2U, (unsigned long volatile *)(& mdev->flags)); } else { } } else { } } else { } dd = drbd_determine_dev_size(mdev, 0); if ((int )dd == -1) { retcode = ERR_NOMEM_BITMAP; goto force_diskless_dec; } else if ((int )dd == 2) { set_bit(15U, (unsigned long volatile *)(& mdev->flags)); } else { } tmp___34 = drbd_md_test_flag(mdev->ldev, 8); if (tmp___34 != 0) { goto _L; } else { tmp___35 = constant_test_bit(5U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___35 != 0) { tmp___36 = drbd_md_test_flag(mdev->ldev, 256); if (tmp___36 != 0) { _L: /* CIL Label */ _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Assuming that all blocks are out of sync (aka FullSync)\n"); tmp___32 = drbd_bitmap_io(mdev, & drbd_bmio_set_n_write, (char *)"set_n_write from attaching", BM_LOCKED_MASK); if (tmp___32 != 0) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } else { } } else { goto _L___0; } } else { _L___0: /* CIL Label */ tmp___33 = drbd_bitmap_io(mdev, & drbd_bm_read, (char *)"read from attaching", BM_LOCKED_MASK); if (tmp___33 != 0) { retcode = ERR_IO_MD_DISK; goto force_diskless_dec; } else { } } } tmp___37 = _drbd_bm_total_weight(mdev); tmp___38 = drbd_bm_bits(mdev); if (tmp___37 == tmp___38) { drbd_suspend_al(mdev); } else { } spin_lock_irq(& (mdev->tconn)->req_lock); os = drbd_read_state(mdev); ns = os; tmp___40 = drbd_md_test_flag(mdev->ldev, 1); if (tmp___40 != 0) { tmp___39 = drbd_md_test_flag(mdev->ldev, 16); if (tmp___39 != 0) { ns.ldv_40024.disk = 7U; } else { ns.ldv_40024.disk = 5U; } } else { ns.ldv_40024.disk = 4U; } tmp___41 = drbd_md_test_flag(mdev->ldev, 32); if (tmp___41 != 0) { ns.ldv_40024.pdsk = 5U; } else { } rcu_read_lock___6(); if ((unsigned int )*((unsigned char *)(& ns) + 1UL) == 14U) { if (*((unsigned int *)(& ns) + 0UL) == 40960U) { ns.ldv_40024.disk = 8U; } else { _________p1___0 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___42 = debug_lockdep_rcu_enabled(); if (tmp___42 != 0 && ! __warned___0) { tmp___43 = rcu_read_lock_held(); if (tmp___43 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1696, "suspicious rcu_dereference_check() usage"); } else { } } else { } if (_________p1___0->fencing == 0U) { ns.ldv_40024.disk = 8U; } else { } } } else { } _________p1___1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___44 = debug_lockdep_rcu_enabled(); if (tmp___44 != 0 && ! 
__warned___1) { tmp___45 = rcu_read_lock_held(); if (tmp___45 == 0 && 1) { __warned___1 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1704, "suspicious rcu_dereference_check() usage"); } else { } } else { } if ((int )((signed char )_________p1___1->al_updates) != 0) { (mdev->ldev)->md.flags = (mdev->ldev)->md.flags & 4294967039U; } else { (mdev->ldev)->md.flags = (mdev->ldev)->md.flags | 256U; } rcu_read_unlock___6(); if ((unsigned int )*((unsigned short *)mdev + 374UL) == 160U) { mdev->new_state_tmp.i = ns.i; ns.i = os.i; ns.ldv_40024.disk = 3U; kfree((void const *)mdev->p_uuid); mdev->p_uuid = 0; } else { } rv = _drbd_set_state(mdev, ns, CS_VERBOSE, 0); spin_unlock_irq(& (mdev->tconn)->req_lock); if ((int )rv <= 0) { goto force_diskless_dec; } else { } mod_timer(& mdev->request_timer, (unsigned long )jiffies + 250UL); if ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U) { (mdev->ldev)->md.uuid[0] = (mdev->ldev)->md.uuid[0] | 1ULL; } else { (mdev->ldev)->md.uuid[0] = (mdev->ldev)->md.uuid[0] & 0xfffffffffffffffeULL; } drbd_md_mark_dirty(mdev); drbd_md_sync(mdev); kobject_uevent(& (mdev->vdisk)->part0.__dev.kobj, KOBJ_CHANGE); put_ldev(mdev); conn_reconfig_done(mdev->tconn); drbd_adm_finish(info, (int )retcode); return (0); force_diskless_dec: put_ldev(mdev); force_diskless: val___0.i = 0U; val___0.ldv_40024.disk = 0U; mask___0.i = 0U; mask___0.ldv_40024.disk = 15U; drbd_force_state(mdev, mask___0, val___0); drbd_md_sync(mdev); fail: conn_reconfig_done(mdev->tconn); if ((unsigned long )nbc != (unsigned long )((struct drbd_backing_dev *)0)) { if ((unsigned long )nbc->backing_bdev != (unsigned long )((struct block_device *)0)) { blkdev_put(nbc->backing_bdev, 131U); } else { } if ((unsigned long )nbc->md_bdev != (unsigned long )((struct block_device *)0)) { blkdev_put(nbc->md_bdev, 131U); } else { } kfree((void const *)nbc); } else { } kfree((void const *)new_disk_conf); lc_destroy(resync_lru); kfree((void const *)new_plan); finish: drbd_adm_finish(info, (int )retcode); return (0); } } static int adm_detach(struct drbd_conf *mdev , int force ) { enum drbd_state_rv retcode ; int ret ; union drbd_state val ; union drbd_state mask ; union drbd_state val___0 ; union drbd_state mask___0 ; int tmp ; int __ret ; wait_queue_t __wait ; struct task_struct *tmp___0 ; struct task_struct *tmp___1 ; int tmp___2 ; { if (force != 0) { set_bit(14U, (unsigned long volatile *)(& mdev->flags)); val.i = 0U; val.ldv_40024.disk = 2U; mask.i = 0U; mask.ldv_40024.disk = 15U; drbd_force_state(mdev, mask, val); retcode = SS_SUCCESS; goto out; } else { } drbd_suspend_io(mdev); drbd_md_get_buffer(mdev); val___0.i = 0U; val___0.ldv_40024.disk = 2U; mask___0.i = 0U; mask___0.ldv_40024.disk = 15U; tmp = drbd_request_state(mdev, mask___0, val___0); retcode = (enum drbd_state_rv )tmp; drbd_md_put_buffer(mdev); __ret = 0; if ((unsigned int )*((unsigned char *)mdev + 749UL) == 4U) { tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53483: prepare_to_wait(& mdev->misc_wait, & __wait, 1); if ((unsigned int )*((unsigned char *)mdev + 749UL) != 4U) { goto ldv_53481; } else { } tmp___1 = get_current(); tmp___2 = signal_pending(tmp___1); if (tmp___2 == 0) { schedule(); goto ldv_53482; } else 
{ } __ret = -512; goto ldv_53481; ldv_53482: ; goto ldv_53483; ldv_53481: finish_wait(& mdev->misc_wait, & __wait); } else { } ret = __ret; drbd_resume_io(mdev); if ((int )retcode == -11) { retcode = SS_NOTHING_TO_DO; } else { } if (ret != 0) { retcode = 129; } else { } out: ; return ((int )retcode); } } int drbd_adm_detach(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; struct detach_parms parms ; int err ; int tmp ; char const *tmp___0 ; int tmp___1 ; { parms.force_detach = (char)0; tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } if ((unsigned long )*(info->attrs + 13UL) != (unsigned long )((struct nlattr *)0)) { err = detach_parms_from_attrs(& parms, info); if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___0 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___0); goto out; } else { } } else { } tmp___1 = adm_detach(adm_ctx.mdev, (int )parms.force_detach); retcode = (enum drbd_ret_code )tmp___1; out: drbd_adm_finish(info, (int )retcode); return (0); } } static bool conn_resync_running(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; bool rv ; int vnr ; void *tmp ; void *tmp___0 ; { rv = 0; rcu_read_lock___6(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53501; ldv_53500: ; if ((((unsigned int )*((unsigned short *)mdev + 374UL) == 256U || (unsigned int )*((unsigned short *)mdev + 374UL) == 272U) || (unsigned int )*((unsigned short *)mdev + 374UL) == 320U) || (unsigned int )*((unsigned short *)mdev + 374UL) == 336U) { rv = 1; goto ldv_53499; } else { } vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_53501: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53500; } else { } ldv_53499: rcu_read_unlock___6(); return (rv); } } static bool conn_ov_running(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; bool rv ; int vnr ; void *tmp ; void *tmp___0 ; { rv = 0; rcu_read_lock___6(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_53510; ldv_53509: ; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 288U || (unsigned int )*((unsigned short *)mdev + 374UL) == 304U) { rv = 1; goto ldv_53508; } else { } vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_53510: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53509; } else { } ldv_53508: rcu_read_unlock___6(); return (rv); } } static enum drbd_ret_code _check_net_options(struct drbd_tconn *tconn , struct net_conf *old_conf , struct net_conf *new_conf ) { struct drbd_conf *mdev ; int i ; int tmp ; enum drbd_role tmp___0 ; enum drbd_role tmp___1 ; void *tmp___2 ; enum drbd_fencing_p fp ; struct disk_conf *_________p1 ; bool __warned ; int tmp___3 ; int tmp___4 ; int tmp___5 ; void *tmp___6 ; { if (((unsigned long )old_conf != (unsigned long )((struct net_conf *)0) && (unsigned int )tconn->cstate == 9U) && tconn->agreed_pro_version <= 99) { if (new_conf->wire_protocol != old_conf->wire_protocol) { return (ERR_NEED_APV_100); } else { } if ((int )((signed char )new_conf->two_primaries) != (int )((signed char )old_conf->two_primaries)) { return (ERR_NEED_APV_100); } else { } tmp = strcmp((char const *)(& new_conf->integrity_alg), (char const 
*)(& old_conf->integrity_alg)); if (tmp != 0) { return (ERR_NEED_APV_100); } else { } } else { } if ((int )((signed char )new_conf->two_primaries) == 0) { tmp___0 = conn_highest_role(tconn); if ((unsigned int )tmp___0 == 1U) { tmp___1 = conn_highest_peer(tconn); if ((unsigned int )tmp___1 == 1U) { return (ERR_NEED_ALLOW_TWO_PRI); } else { } } else { } } else { } if ((int )((signed char )new_conf->two_primaries) != 0 && new_conf->wire_protocol != 3U) { return (ERR_NOT_PROTO_C); } else { } i = 0; tmp___2 = idr_get_next(& tconn->volumes, & i); mdev = (struct drbd_conf *)tmp___2; goto ldv_53523; ldv_53522: tmp___5 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___5 != 0) { _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned) { tmp___4 = rcu_read_lock_held(); if (tmp___4 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1900, "suspicious rcu_dereference_check() usage"); } else { } } else { } fp = (enum drbd_fencing_p )_________p1->fencing; put_ldev(mdev); if (new_conf->wire_protocol == 1U && (int )fp == 2) { return (ERR_STONITH_AND_PROT_A); } else { } } else { } if ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U && (int )((signed char )new_conf->discard_my_data) != 0) { return (ERR_DISCARD_IMPOSSIBLE); } else { } i = i + 1; tmp___6 = idr_get_next(& tconn->volumes, & i); mdev = (struct drbd_conf *)tmp___6; ldv_53523: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53522; } else { } if (new_conf->on_congestion != 0U && new_conf->wire_protocol != 1U) { return (ERR_CONG_NOT_PROTO_A); } else { } return (NO_ERROR); } } static enum drbd_ret_code check_net_options(struct drbd_tconn *tconn , struct net_conf *new_conf ) { enum drbd_ret_code rv ; struct drbd_conf *mdev ; int i ; struct net_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; void *tmp___1 ; int tmp___2 ; void *tmp___3 ; { rcu_read_lock___6(); _________p1 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 1923, "suspicious rcu_dereference_check() usage"); } else { } } else { } rv = _check_net_options(tconn, _________p1, new_conf); rcu_read_unlock___6(); i = 0; tmp___1 = idr_get_next(& tconn->volumes, & i); mdev = (struct drbd_conf *)tmp___1; goto ldv_53536; ldv_53535: ; if ((unsigned long )mdev->bitmap == (unsigned long )((struct drbd_bitmap *)0)) { tmp___2 = drbd_bm_init(mdev); if (tmp___2 != 0) { return (ERR_NOMEM); } else { } } else { } i = i + 1; tmp___3 = idr_get_next(& tconn->volumes, & i); mdev = (struct drbd_conf *)tmp___3; ldv_53536: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53535; } else { } return (rv); } } static int alloc_hash(struct crypto_hash **tfm , char *tfm_name , int err_alg ) { long tmp ; { if ((int )((signed char )*tfm_name) == 0) { return (101); } else { } *tfm = crypto_alloc_hash((char const *)tfm_name, 0U, 128U); tmp = IS_ERR((void const *)*tfm); if (tmp != 0L) { *tfm = 0; return (err_alg); } else { } return (101); } } static enum drbd_ret_code alloc_crypto(struct crypto *crypto , struct net_conf *new_conf ) { char hmac_name[64U] ; enum drbd_ret_code rv ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { tmp = alloc_hash(& crypto->csums_tfm, (char *)(& new_conf->csums_alg), 144); rv = (enum drbd_ret_code )tmp; if ((unsigned int )rv != 101U) { return (rv); } else { } tmp___0 = alloc_hash(& crypto->verify_tfm, (char *)(& new_conf->verify_alg), 146); rv = (enum drbd_ret_code )tmp___0; if ((unsigned int )rv != 101U) { return (rv); } else { } tmp___1 = alloc_hash(& crypto->integrity_tfm, (char *)(& new_conf->integrity_alg), 141); rv = (enum drbd_ret_code )tmp___1; if ((unsigned int )rv != 101U) { return (rv); } else { } if ((int )((signed char )new_conf->cram_hmac_alg[0]) != 0) { snprintf((char *)(& hmac_name), 64UL, "hmac(%s)", (char *)(& new_conf->cram_hmac_alg)); tmp___2 = alloc_hash(& crypto->cram_hmac_tfm, (char *)(& hmac_name), 120); rv = (enum drbd_ret_code )tmp___2; } else { } return (rv); } } static void free_crypto(struct crypto *crypto ) { { crypto_free_hash(crypto->cram_hmac_tfm); crypto_free_hash(crypto->integrity_tfm); crypto_free_hash(crypto->csums_tfm); crypto_free_hash(crypto->verify_tfm); return; } } int drbd_adm_net_opts(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; struct drbd_tconn *tconn ; struct net_conf *old_conf ; struct net_conf *new_conf ; int err ; int ovr ; int rsr ; struct crypto crypto ; int tmp ; void *tmp___0 ; bool tmp___1 ; char const *tmp___2 ; bool tmp___3 ; int tmp___4 ; bool tmp___5 ; int tmp___6 ; int tmp___7 ; struct drbd_conf *tmp___8 ; { new_conf = 0; crypto.verify_tfm = 0; crypto.csums_tfm = 0; crypto.cram_hmac_tfm = 0; crypto.integrity_tfm = 0; tmp = drbd_adm_prepare(skb, info, 4U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } tconn = adm_ctx.tconn; tmp___0 = kzalloc(420UL, 208U); new_conf = (struct net_conf *)tmp___0; if ((unsigned long )new_conf == (unsigned long )((struct net_conf *)0)) { retcode = ERR_NOMEM; goto out; } else { } conn_reconfig_start(tconn); ldv_mutex_lock_293(& 
tconn->data.mutex); ldv_mutex_lock_294(& tconn->conf_update); old_conf = tconn->net_conf; if ((unsigned long )old_conf == (unsigned long )((struct net_conf *)0)) { drbd_msg_put_info("net conf missing, try connect"); retcode = ERR_INVALID_REQUEST; goto fail; } else { } *new_conf = *old_conf; tmp___1 = should_set_defaults(info); if ((int )tmp___1) { set_net_conf_defaults(new_conf); } else { } err = net_conf_from_attrs_for_change(new_conf, info); if (err != 0 && err != -42) { retcode = ERR_MANDATORY_TAG; tmp___2 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___2); goto fail; } else { } retcode = check_net_options(tconn, new_conf); if ((unsigned int )retcode != 101U) { goto fail; } else { } tmp___3 = conn_resync_running(tconn); rsr = (int )tmp___3; if (rsr != 0) { tmp___4 = strcmp((char const *)(& new_conf->csums_alg), (char const *)(& old_conf->csums_alg)); if (tmp___4 != 0) { retcode = ERR_CSUMS_RESYNC_RUNNING; goto fail; } else { } } else { } tmp___5 = conn_ov_running(tconn); ovr = (int )tmp___5; if (ovr != 0) { tmp___6 = strcmp((char const *)(& new_conf->verify_alg), (char const *)(& old_conf->verify_alg)); if (tmp___6 != 0) { retcode = ERR_VERIFY_RUNNING; goto fail; } else { } } else { } retcode = alloc_crypto(& crypto, new_conf); if ((unsigned int )retcode != 101U) { goto fail; } else { } __asm__ volatile ("": : : "memory"); tconn->net_conf = new_conf; if (rsr == 0) { crypto_free_hash(tconn->csums_tfm); tconn->csums_tfm = crypto.csums_tfm; crypto.csums_tfm = 0; } else { } if (ovr == 0) { crypto_free_hash(tconn->verify_tfm); tconn->verify_tfm = crypto.verify_tfm; crypto.verify_tfm = 0; } else { } crypto_free_hash(tconn->integrity_tfm); tconn->integrity_tfm = crypto.integrity_tfm; if ((unsigned int )tconn->cstate > 8U && tconn->agreed_pro_version > 99) { __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE); } else { } crypto_free_hash(tconn->cram_hmac_tfm); tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; ldv_mutex_unlock_295(& tconn->conf_update); ldv_mutex_unlock_296(& tconn->data.mutex); synchronize_rcu(); kfree((void const *)old_conf); if ((unsigned int )tconn->cstate > 8U) { tmp___7 = conn_lowest_minor(tconn); tmp___8 = minor_to_mdev((unsigned int )tmp___7); drbd_send_sync_param(tmp___8); } else { } goto done; fail: ldv_mutex_unlock_297(& tconn->conf_update); ldv_mutex_unlock_298(& tconn->data.mutex); free_crypto(& crypto); kfree((void const *)new_conf); done: conn_reconfig_done(tconn); out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_connect(struct sk_buff *skb , struct genl_info *info ) { struct drbd_conf *mdev ; struct net_conf *old_conf ; struct net_conf *new_conf ; struct crypto crypto ; struct drbd_tconn *tconn ; enum drbd_ret_code retcode ; int i ; int err ; int tmp ; struct list_head const *__mptr ; int tmp___0 ; void *tmp___1 ; int tmp___2 ; int tmp___3 ; void *tmp___4 ; int tmp___5 ; struct list_head const *__mptr___0 ; void *tmp___6 ; char const *tmp___7 ; size_t __len ; void *__ret ; void *tmp___9 ; size_t __len___0 ; void *__ret___0 ; void *tmp___11 ; void *tmp___12 ; void *tmp___13 ; union drbd_state val ; union drbd_state mask ; enum drbd_state_rv tmp___14 ; { new_conf = 0; crypto.verify_tfm = 0; crypto.csums_tfm = 0; crypto.cram_hmac_tfm = 0; crypto.integrity_tfm = 0; tmp = drbd_adm_prepare(skb, info, 2U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } if ((unsigned long )adm_ctx.my_addr 
== (unsigned long )((struct nlattr *)0) || (unsigned long )adm_ctx.peer_addr == (unsigned long )((struct nlattr *)0)) { drbd_msg_put_info("connection endpoint(s) missing"); retcode = ERR_INVALID_REQUEST; goto out; } else { } __mptr = (struct list_head const *)drbd_tconns.next; tconn = (struct drbd_tconn *)__mptr + 0xfffffffffffffff8UL; goto ldv_53590; ldv_53589: tmp___0 = nla_len((struct nlattr const *)adm_ctx.my_addr); if (tmp___0 == tconn->my_addr_len) { tmp___1 = nla_data((struct nlattr const *)adm_ctx.my_addr); tmp___2 = memcmp((void const *)tmp___1, (void const *)(& tconn->my_addr), (size_t )tconn->my_addr_len); if (tmp___2 == 0) { retcode = ERR_LOCAL_ADDR; goto out; } else { } } else { } tmp___3 = nla_len((struct nlattr const *)adm_ctx.peer_addr); if (tmp___3 == tconn->peer_addr_len) { tmp___4 = nla_data((struct nlattr const *)adm_ctx.peer_addr); tmp___5 = memcmp((void const *)tmp___4, (void const *)(& tconn->peer_addr), (size_t )tconn->peer_addr_len); if (tmp___5 == 0) { retcode = ERR_PEER_ADDR; goto out; } else { } } else { } __mptr___0 = (struct list_head const *)tconn->all_tconn.next; tconn = (struct drbd_tconn *)__mptr___0 + 0xfffffffffffffff8UL; ldv_53590: ; if ((unsigned long )(& tconn->all_tconn) != (unsigned long )(& drbd_tconns)) { goto ldv_53589; } else { } tconn = adm_ctx.tconn; conn_reconfig_start(tconn); if ((unsigned int )tconn->cstate != 0U) { retcode = ERR_NET_CONFIGURED; goto fail; } else { } tmp___6 = kzalloc(420UL, 208U); new_conf = (struct net_conf *)tmp___6; if ((unsigned long )new_conf == (unsigned long )((struct net_conf *)0)) { retcode = ERR_NOMEM; goto fail; } else { } set_net_conf_defaults(new_conf); err = net_conf_from_attrs(new_conf, info); if (err != 0 && err != -42) { retcode = ERR_MANDATORY_TAG; tmp___7 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___7); goto fail; } else { } retcode = check_net_options(tconn, new_conf); if ((unsigned int )retcode != 101U) { goto fail; } else { } retcode = alloc_crypto(& crypto, new_conf); if ((unsigned int )retcode != 101U) { goto fail; } else { } *((char *)(& new_conf->shared_secret) + 63UL) = 0; conn_flush_workqueue(tconn); ldv_mutex_lock_299(& tconn->conf_update); old_conf = tconn->net_conf; if ((unsigned long )old_conf != (unsigned long )((struct net_conf *)0)) { retcode = ERR_NET_CONFIGURED; ldv_mutex_unlock_300(& tconn->conf_update); goto fail; } else { } __asm__ volatile ("": : : "memory"); tconn->net_conf = new_conf; conn_free_crypto(tconn); tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; tconn->integrity_tfm = crypto.integrity_tfm; tconn->csums_tfm = crypto.csums_tfm; tconn->verify_tfm = crypto.verify_tfm; tconn->my_addr_len = nla_len((struct nlattr const *)adm_ctx.my_addr); __len = (size_t )tconn->my_addr_len; tmp___9 = nla_data((struct nlattr const *)adm_ctx.my_addr); __ret = memcpy((void *)(& tconn->my_addr), (void const *)tmp___9, __len); tconn->peer_addr_len = nla_len((struct nlattr const *)adm_ctx.peer_addr); __len___0 = (size_t )tconn->peer_addr_len; tmp___11 = nla_data((struct nlattr const *)adm_ctx.peer_addr); __ret___0 = memcpy((void *)(& tconn->peer_addr), (void const *)tmp___11, __len___0); ldv_mutex_unlock_301(& tconn->conf_update); rcu_read_lock___6(); i = 0; tmp___12 = idr_get_next(& tconn->volumes, & i); mdev = (struct drbd_conf *)tmp___12; goto ldv_53600; ldv_53599: mdev->send_cnt = 0U; mdev->recv_cnt = 0U; i = i + 1; tmp___13 = idr_get_next(& tconn->volumes, & i); mdev = (struct drbd_conf *)tmp___13; ldv_53600: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto 
ldv_53599; } else { } rcu_read_unlock___6(); val.i = 0U; val.ldv_40024.conn = 2U; mask.i = 0U; mask.ldv_40024.conn = 31U; tmp___14 = conn_request_state(tconn, mask, val, CS_VERBOSE); retcode = (enum drbd_ret_code )tmp___14; conn_reconfig_done(tconn); drbd_adm_finish(info, (int )retcode); return (0); fail: free_crypto(& crypto); kfree((void const *)new_conf); conn_reconfig_done(tconn); out: drbd_adm_finish(info, (int )retcode); return (0); } } static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn , bool force ) { enum drbd_state_rv rv ; union drbd_state val ; union drbd_state mask ; union drbd_state val___0 ; union drbd_state mask___0 ; union drbd_state val___1 ; union drbd_state mask___1 ; union drbd_state val___2 ; union drbd_state mask___2 ; enum drbd_state_rv rv2 ; union drbd_state val___3 ; union drbd_state mask___3 ; { val.i = 0U; val.ldv_40024.conn = 1U; mask.i = 0U; mask.ldv_40024.conn = 31U; rv = conn_request_state(tconn, mask, val, (enum chg_state_flags )force); switch ((int )rv) { case 2: ; goto ldv_53616; case -9: ; return (SS_SUCCESS); case -7: val___0.i = 0U; val___0.ldv_40024.conn = 1U; val___0.ldv_40024.pdsk = 5U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; mask___0.ldv_40024.pdsk = 15U; rv = conn_request_state(tconn, mask___0, val___0, CS_VERBOSE); goto ldv_53616; case -10: val___1.i = 0U; val___1.ldv_40024.conn = 1U; val___1.ldv_40024.disk = 5U; mask___1.i = 0U; mask___1.ldv_40024.conn = 31U; mask___1.ldv_40024.disk = 15U; rv = conn_request_state(tconn, mask___1, val___1, 0); if ((int )rv == -11 || (int )rv == -16) { val___2.i = 0U; val___2.ldv_40024.conn = 1U; mask___2.i = 0U; mask___2.ldv_40024.conn = 31U; rv = conn_request_state(tconn, mask___2, val___2, CS_HARD); } else { } goto ldv_53616; default: ; } ldv_53616: ; if ((int )rv > 0) { drbd_thread_stop(& (adm_ctx.tconn)->receiver); val___3.i = 0U; val___3.ldv_40024.conn = 0U; mask___3.i = 0U; mask___3.ldv_40024.conn = 31U; rv2 = conn_request_state(tconn, mask___3, val___3, 3); if ((int )rv2 <= 0) { printk("\vd-con %s: unexpected rv2=%d in conn_try_disconnect()\n", tconn->name, (int )rv2); } else { } } else { } return (rv); } } int drbd_adm_disconnect(struct sk_buff *skb , struct genl_info *info ) { struct disconnect_parms parms ; struct drbd_tconn *tconn ; enum drbd_state_rv rv ; enum drbd_ret_code retcode ; int err ; int tmp ; char const *tmp___0 ; { tmp = drbd_adm_prepare(skb, info, 4U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto fail; } else { } tconn = adm_ctx.tconn; memset((void *)(& parms), 0, 1UL); if ((unsigned long )*(info->attrs + 12UL) != (unsigned long )((struct nlattr *)0)) { err = disconnect_parms_from_attrs(& parms, info); if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___0 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___0); goto fail; } else { } } else { } rv = conn_try_disconnect(tconn, (int )((signed char )parms.force_disconnect) != 0); if ((int )rv <= 0) { retcode = (enum drbd_ret_code )rv; } else { retcode = NO_ERROR; } fail: drbd_adm_finish(info, (int )retcode); return (0); } } void resync_after_online_grow(struct drbd_conf *mdev ) { int iass ; union drbd_state val ; union drbd_state mask ; { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Resync of new storage after online grow\n"); if ((int )mdev->state.ldv_49522.role != (int )mdev->state.ldv_49522.peer) { iass = (unsigned int )*((unsigned char *)mdev 
+ 748UL) == 1U; } else { iass = constant_test_bit(1U, (unsigned long const volatile *)(& (mdev->tconn)->flags)); } if (iass != 0) { drbd_start_resync(mdev, C_SYNC_SOURCE); } else { val.i = 0U; val.ldv_40024.conn = 15U; mask.i = 0U; mask.ldv_40024.conn = 31U; _drbd_request_state(mdev, mask, val, 10); } return; } } int drbd_adm_resize(struct sk_buff *skb , struct genl_info *info ) { struct disk_conf *old_disk_conf ; struct disk_conf *new_disk_conf ; struct resize_parms rs ; struct drbd_conf *mdev ; enum drbd_ret_code retcode ; enum determine_dev_size dd ; enum dds_flags ddsf ; sector_t u_size ; int err ; int tmp ; char const *tmp___0 ; int tmp___1 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___2 ; int tmp___3 ; void *tmp___4 ; sector_t tmp___5 ; { new_disk_conf = 0; tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto fail; } else { } memset((void *)(& rs), 0, 16UL); if ((unsigned long )*(info->attrs + 7UL) != (unsigned long )((struct nlattr *)0)) { err = resize_parms_from_attrs(& rs, info); if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___0 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___0); goto fail; } else { } } else { } mdev = adm_ctx.mdev; if ((int )mdev->state.ldv_49522.conn > 10) { retcode = ERR_RESIZE_RESYNC; goto fail; } else { } if ((unsigned int )*((unsigned char *)mdev + 748UL) == 2U && (unsigned int )*((unsigned char *)mdev + 748UL) == 8U) { retcode = ERR_NO_PRIMARY; goto fail; } else { } tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 == 0) { retcode = ERR_NO_DISK; goto fail; } else { } if ((int )((signed char )rs.no_resync) != 0 && (mdev->tconn)->agreed_pro_version <= 92) { retcode = ERR_NEED_APV_93; goto fail_ldev; } else { } rcu_read_lock___6(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! __warned) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 2384, "suspicious rcu_dereference_check() usage"); } else { } } else { } u_size = (sector_t )_________p1->disk_size; rcu_read_unlock___6(); if ((unsigned long )rs.resize_size != u_size) { tmp___4 = kmalloc(344UL, 208U); new_disk_conf = (struct disk_conf *)tmp___4; if ((unsigned long )new_disk_conf == (unsigned long )((struct disk_conf *)0)) { retcode = ERR_NOMEM; goto fail_ldev; } else { } } else { } tmp___5 = drbd_get_capacity((mdev->ldev)->backing_bdev); if ((mdev->ldev)->known_size != tmp___5) { (mdev->ldev)->known_size = drbd_get_capacity((mdev->ldev)->backing_bdev); } else { } if ((unsigned long )new_disk_conf != (unsigned long )((struct disk_conf *)0)) { ldv_mutex_lock_302(& (mdev->tconn)->conf_update); old_disk_conf = (mdev->ldev)->disk_conf; *new_disk_conf = *old_disk_conf; new_disk_conf->disk_size = rs.resize_size; __asm__ volatile ("": : : "memory"); (mdev->ldev)->disk_conf = new_disk_conf; ldv_mutex_unlock_303(& (mdev->tconn)->conf_update); synchronize_rcu(); kfree((void const *)old_disk_conf); } else { } ddsf = (enum dds_flags )(((int )((signed char )rs.resize_force) != 0) | ((int )((signed char )rs.no_resync) != 0 ? 
2 : 0)); dd = drbd_determine_dev_size(mdev, ddsf); drbd_md_sync(mdev); put_ldev(mdev); if ((int )dd == -1) { retcode = ERR_NOMEM_BITMAP; goto fail; } else { } if ((unsigned int )*((unsigned short *)mdev + 374UL) == 160U) { if ((int )dd == 2) { set_bit(16U, (unsigned long volatile *)(& mdev->flags)); } else { } drbd_send_uuids(mdev); drbd_send_sizes(mdev, 1, ddsf); } else { } fail: drbd_adm_finish(info, (int )retcode); return (0); fail_ldev: put_ldev(mdev); goto fail; } } int drbd_adm_resource_opts(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; struct drbd_tconn *tconn ; struct res_opts res_opts ; int err ; int tmp ; bool tmp___0 ; char const *tmp___1 ; { tmp = drbd_adm_prepare(skb, info, 2U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto fail; } else { } tconn = adm_ctx.tconn; res_opts = tconn->res_opts; tmp___0 = should_set_defaults(info); if ((int )tmp___0) { set_res_opts_defaults(& res_opts); } else { } err = res_opts_from_attrs(& res_opts, info); if (err != 0 && err != -42) { retcode = ERR_MANDATORY_TAG; tmp___1 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___1); goto fail; } else { } err = set_resource_options(tconn, & res_opts); if (err != 0) { retcode = ERR_INVALID_REQUEST; if (err == -12) { retcode = ERR_NOMEM; } else { } } else { } fail: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_invalidate(struct sk_buff *skb , struct genl_info *info ) { struct drbd_conf *mdev ; int retcode ; int tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; int tmp___1 ; union drbd_state val ; union drbd_state mask ; enum drbd_state_rv tmp___2 ; union drbd_state val___0 ; union drbd_state mask___0 ; union drbd_state __ns ; enum drbd_state_rv tmp___3 ; union drbd_state val___1 ; union drbd_state mask___1 ; { retcode = drbd_adm_prepare(skb, info, 1U); if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return (retcode); } else { } if (retcode != 101) { goto out; } else { } mdev = adm_ctx.mdev; drbd_suspend_io(mdev); tmp = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp == 0) { goto ldv_53690; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53693: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___1 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___1 == 0) { goto ldv_53692; } else { } schedule(); goto ldv_53693; ldv_53692: finish_wait(& mdev->misc_wait, & __wait); ldv_53690: drbd_flush_workqueue(mdev); val.i = 0U; val.ldv_40024.conn = 12U; mask.i = 0U; mask.ldv_40024.conn = 31U; tmp___2 = _drbd_request_state(mdev, mask, val, CS_ORDERED); retcode = (int )tmp___2; if (retcode <= 0 && retcode != -15) { val___0.i = 0U; val___0.ldv_40024.conn = 12U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; retcode = drbd_request_state(mdev, mask___0, val___0); } else { } goto ldv_53710; ldv_53709: spin_lock_irq(& (mdev->tconn)->req_lock); if ((int )mdev->state.ldv_49522.conn <= 9) { __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 4U; tmp___3 = _drbd_set_state(mdev, __ns, CS_VERBOSE, 0); retcode = (int )tmp___3; } else { } spin_unlock_irq(& (mdev->tconn)->req_lock); if (retcode != -15) { goto ldv_53704; } else { } val___1.i = 0U; 
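/* Descriptive note on the retry built here (enum names inferred, not
 * present in this file): this mask___1/val___1 pair requests conn = 12,
 * apparently C_STARTING_SYNC_T in the original driver; the goto loop
 * ldv_53709/ldv_53710 re-issues drbd_request_state() for as long as it
 * returns -15 (likely SS_NEED_CONNECTION), first forcing disk = 4
 * (D_INCONSISTENT) whenever the connection state is still below
 * connected (conn <= 9). */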
val___1.ldv_40024.conn = 12U; mask___1.i = 0U; mask___1.ldv_40024.conn = 31U; retcode = drbd_request_state(mdev, mask___1, val___1); ldv_53710: ; if (retcode == -15) { goto ldv_53709; } else { } ldv_53704: drbd_resume_io(mdev); out: drbd_adm_finish(info, retcode); return (0); } } static int drbd_adm_simple_request_state(struct sk_buff *skb , struct genl_info *info , union drbd_state mask , union drbd_state val ) { enum drbd_ret_code retcode ; int tmp ; int tmp___0 ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } tmp___0 = drbd_request_state(adm_ctx.mdev, mask, val); retcode = (enum drbd_ret_code )tmp___0; out: drbd_adm_finish(info, (int )retcode); return (0); } } static int drbd_bmio_set_susp_al(struct drbd_conf *mdev ) { int rv ; { rv = drbd_bmio_set_n_write(mdev); drbd_suspend_al(mdev); return (rv); } } int drbd_adm_invalidate_peer(struct sk_buff *skb , struct genl_info *info ) { int retcode ; struct drbd_conf *mdev ; int tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; int tmp___1 ; union drbd_state val ; union drbd_state mask ; enum drbd_state_rv tmp___2 ; union drbd_state val___0 ; union drbd_state mask___0 ; int tmp___3 ; union drbd_state val___1 ; union drbd_state mask___1 ; { retcode = drbd_adm_prepare(skb, info, 1U); if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return (retcode); } else { } if (retcode != 101) { goto out; } else { } mdev = adm_ctx.mdev; drbd_suspend_io(mdev); tmp = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp == 0) { goto ldv_53730; } else { } tmp___0 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___0; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53733: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___1 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___1 == 0) { goto ldv_53732; } else { } schedule(); goto ldv_53733; ldv_53732: finish_wait(& mdev->misc_wait, & __wait); ldv_53730: drbd_flush_workqueue(mdev); val.i = 0U; val.ldv_40024.conn = 11U; mask.i = 0U; mask.ldv_40024.conn = 31U; tmp___2 = _drbd_request_state(mdev, mask, val, CS_ORDERED); retcode = (int )tmp___2; if (retcode <= 0) { if (retcode == -15 && (unsigned int )*((unsigned char *)mdev + 748UL) == 1U) { val___0.i = 0U; val___0.ldv_40024.pdsk = 4U; mask___0.i = 0U; mask___0.ldv_40024.pdsk = 15U; retcode = drbd_request_state(mdev, mask___0, val___0); if (retcode > 0) { tmp___3 = drbd_bitmap_io(mdev, & drbd_bmio_set_susp_al, (char *)"set_n_write from invalidate_peer", BM_LOCKED_SET_ALLOWED); if (tmp___3 != 0) { retcode = 118; } else { } } else { } } else { val___1.i = 0U; val___1.ldv_40024.conn = 11U; mask___1.i = 0U; mask___1.ldv_40024.conn = 31U; retcode = drbd_request_state(mdev, mask___1, val___1); } } else { } drbd_resume_io(mdev); out: drbd_adm_finish(info, retcode); return (0); } } int drbd_adm_pause_sync(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; int tmp ; union drbd_state val ; union drbd_state mask ; int tmp___0 ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 
101U) { goto out; } else { } val.i = 0U; val.ldv_40024.user_isp = 1U; mask.i = 0U; mask.ldv_40024.user_isp = 1U; tmp___0 = drbd_request_state(adm_ctx.mdev, mask, val); if (tmp___0 == 2) { retcode = ERR_PAUSE_IS_SET; } else { } out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_resume_sync(struct sk_buff *skb , struct genl_info *info ) { union drbd_dev_state s ; enum drbd_ret_code retcode ; int tmp ; union drbd_state val ; union drbd_state mask ; int tmp___0 ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } val.i = 0U; val.ldv_40024.user_isp = 0U; mask.i = 0U; mask.ldv_40024.user_isp = 1U; tmp___0 = drbd_request_state(adm_ctx.mdev, mask, val); if (tmp___0 == 2) { s = (adm_ctx.mdev)->state; if ((unsigned int )*((unsigned short *)(& s) + 0UL) == 320U || (unsigned int )*((unsigned short *)(& s) + 0UL) == 336U) { retcode = (unsigned int )*((unsigned char *)(& s) + 2UL) == 0U ? ((unsigned int )*((unsigned char *)(& s) + 2UL) != 0U ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR) : ERR_PIC_AFTER_DEP; } else { retcode = ERR_PAUSE_IS_CLEAR; } } else { } out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_suspend_io(struct sk_buff *skb , struct genl_info *info ) { union drbd_state val ; union drbd_state mask ; int tmp ; { val.i = 0U; val.ldv_40024.susp = 1U; mask.i = 0U; mask.ldv_40024.susp = 1U; tmp = drbd_adm_simple_request_state(skb, info, mask, val); return (tmp); } } int drbd_adm_resume_io(struct sk_buff *skb , struct genl_info *info ) { struct drbd_conf *mdev ; int retcode ; int tmp ; union drbd_state val ; union drbd_state mask ; { retcode = drbd_adm_prepare(skb, info, 1U); if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return (retcode); } else { } if (retcode != 101) { goto out; } else { } mdev = adm_ctx.mdev; tmp = constant_test_bit(17U, (unsigned long const volatile *)(& mdev->flags)); if (tmp != 0) { drbd_uuid_new_current(mdev); clear_bit(17, (unsigned long volatile *)(& mdev->flags)); } else { } drbd_suspend_io(mdev); val.i = 0U; val.ldv_40024.susp = 0U; val.ldv_40024.susp_nod = 0U; val.ldv_40024.susp_fen = 0U; mask.i = 0U; mask.ldv_40024.susp = 1U; mask.ldv_40024.susp_nod = 1U; mask.ldv_40024.susp_fen = 1U; retcode = drbd_request_state(mdev, mask, val); if (retcode == 1) { if ((int )mdev->state.ldv_49522.conn <= 9) { tl_clear(mdev->tconn); } else { } if ((unsigned int )*((unsigned char *)mdev + 749UL) == 0U || (unsigned int )*((unsigned char *)mdev + 749UL) == 4U) { tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO); } else { } } else { } drbd_resume_io(mdev); out: drbd_adm_finish(info, retcode); return (0); } } int drbd_adm_outdate(struct sk_buff *skb , struct genl_info *info ) { union drbd_state val ; union drbd_state mask ; int tmp ; { val.i = 0U; val.ldv_40024.disk = 5U; mask.i = 0U; mask.ldv_40024.disk = 15U; tmp = drbd_adm_simple_request_state(skb, info, mask, val); return (tmp); } } int nla_put_drbd_cfg_context(struct sk_buff *skb , struct drbd_tconn *tconn , unsigned int vnr ) { struct nlattr *nla ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { nla = nla_nest_start(skb, 2); if ((unsigned long )nla == (unsigned long )((struct nlattr *)0)) { goto nla_put_failure; } else { } if (vnr != 4294967295U) { tmp = nla_put_u32(skb, 16385, vnr); if (tmp != 0) { goto nla_put_failure; } else { } } else { } tmp___0 = 
nla_put_string(skb, 16386, (char const *)tconn->name); if (tmp___0 != 0) { goto nla_put_failure; } else { } if (tconn->my_addr_len != 0) { tmp___1 = nla_put(skb, 16387, tconn->my_addr_len, (void const *)(& tconn->my_addr)); if (tmp___1 != 0) { goto nla_put_failure; } else { } } else { } if (tconn->peer_addr_len != 0) { tmp___2 = nla_put(skb, 16388, tconn->peer_addr_len, (void const *)(& tconn->peer_addr)); if (tmp___2 != 0) { goto nla_put_failure; } else { } } else { } nla_nest_end(skb, nla); return (0); nla_put_failure: ; if ((unsigned long )nla != (unsigned long )((struct nlattr *)0)) { nla_nest_cancel(skb, nla); } else { } return (-90); } } int nla_put_status_info(struct sk_buff *skb , struct drbd_conf *mdev , struct sib_info const *sib ) { struct state_info *si ; struct net_conf *nc ; struct nlattr *nla ; int got_ldev ; int err ; int exclude_sensitive ; bool tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; int tmp___3 ; struct disk_conf *_________p1 ; bool __warned ; int tmp___4 ; int tmp___5 ; int tmp___6 ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; int tmp___10 ; int tmp___11 ; sector_t tmp___12 ; int tmp___13 ; int tmp___14 ; int tmp___15 ; int tmp___16 ; int tmp___17 ; int tmp___18 ; int tmp___19 ; int tmp___20 ; int tmp___21 ; int tmp___22 ; int tmp___23 ; int tmp___24 ; int tmp___25 ; int err___0 ; int tmp___26 ; unsigned long tmp___27 ; int tmp___28 ; unsigned long tmp___29 ; int tmp___30 ; int tmp___31 ; int tmp___32 ; int tmp___33 ; int tmp___34 ; int tmp___35 ; int tmp___36 ; { si = 0; err = 0; if ((unsigned long )sib != (unsigned long )((struct sib_info const *)0)) { tmp___1 = 1; } else { tmp = capable(21); if (tmp) { tmp___0 = 0; } else { tmp___0 = 1; } if (tmp___0) { tmp___1 = 1; } else { tmp___1 = 0; } } exclude_sensitive = tmp___1; got_ldev = _get_ldev_if_state(mdev, D_INCONSISTENT); tmp___2 = nla_put_drbd_cfg_context(skb, mdev->tconn, (unsigned int )mdev->vnr); if (tmp___2 != 0) { goto nla_put_failure; } else { } tmp___3 = res_opts_to_skb(skb, & (mdev->tconn)->res_opts, exclude_sensitive != 0); if (tmp___3 != 0) { goto nla_put_failure; } else { } rcu_read_lock___6(); if (got_ldev != 0) { _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___4 = debug_lockdep_rcu_enabled(); if (tmp___4 != 0 && ! __warned) { tmp___5 = rcu_read_lock_held(); if (tmp___5 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 2727, "suspicious rcu_dereference_check() usage"); } else { } } else { } tmp___6 = disk_conf_to_skb(skb, _________p1, exclude_sensitive != 0); if (tmp___6 != 0) { goto nla_put_failure; } else { } } else { } _________p1___0 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___7 = debug_lockdep_rcu_enabled(); if (tmp___7 != 0 && ! 
__warned___0) { tmp___8 = rcu_read_lock_held(); if (tmp___8 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 2730, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1___0; if ((unsigned long )nc != (unsigned long )((struct net_conf *)0)) { err = net_conf_to_skb(skb, nc, exclude_sensitive != 0); } else { } rcu_read_unlock___6(); if (err != 0) { goto nla_put_failure; } else { } nla = nla_nest_start(skb, 8); if ((unsigned long )nla == (unsigned long )((struct nlattr *)0)) { goto nla_put_failure; } else { } tmp___9 = nla_put_u32(skb, 16385, (unsigned long )sib != (unsigned long )((struct sib_info const *)0) ? (unsigned int )sib->sib_reason : 1U); if (tmp___9 != 0) { goto nla_put_failure; } else { tmp___10 = nla_put_u32(skb, 2, mdev->state.i); if (tmp___10 != 0) { goto nla_put_failure; } else { tmp___11 = nla_put_u64(skb, 16388, mdev->ed_uuid); if (tmp___11 != 0) { goto nla_put_failure; } else { tmp___12 = drbd_get_capacity(mdev->this_bdev); tmp___13 = nla_put_u64(skb, 16387, (u64 )tmp___12); if (tmp___13 != 0) { goto nla_put_failure; } else { tmp___14 = nla_put_u64(skb, 15, (u64 )mdev->send_cnt); if (tmp___14 != 0) { goto nla_put_failure; } else { tmp___15 = nla_put_u64(skb, 16, (u64 )mdev->recv_cnt); if (tmp___15 != 0) { goto nla_put_failure; } else { tmp___16 = nla_put_u64(skb, 17, (u64 )mdev->read_cnt); if (tmp___16 != 0) { goto nla_put_failure; } else { tmp___17 = nla_put_u64(skb, 18, (u64 )mdev->writ_cnt); if (tmp___17 != 0) { goto nla_put_failure; } else { tmp___18 = nla_put_u64(skb, 19, (u64 )mdev->al_writ_cnt); if (tmp___18 != 0) { goto nla_put_failure; } else { tmp___19 = nla_put_u64(skb, 20, (u64 )mdev->bm_writ_cnt); if (tmp___19 != 0) { goto nla_put_failure; } else { tmp___20 = atomic_read((atomic_t const *)(& mdev->ap_bio_cnt)); tmp___21 = nla_put_u32(skb, 21, (u32 )tmp___20); if (tmp___21 != 0) { goto nla_put_failure; } else { tmp___22 = atomic_read((atomic_t const *)(& mdev->ap_pending_cnt)); tmp___23 = nla_put_u32(skb, 22, (u32 )tmp___22); if (tmp___23 != 0) { goto nla_put_failure; } else { tmp___24 = atomic_read((atomic_t const *)(& mdev->rs_pending_cnt)); tmp___25 = nla_put_u32(skb, 23, (u32 )tmp___24); if (tmp___25 != 0) { goto nla_put_failure; } else { } } } } } } } } } } } } } if (got_ldev != 0) { spin_lock_irq(& (mdev->ldev)->md.uuid_lock); err___0 = nla_put(skb, 16391, 32, (void const *)(& (mdev->ldev)->md.uuid)); spin_unlock_irq(& (mdev->ldev)->md.uuid_lock); if (err___0 != 0) { goto nla_put_failure; } else { } tmp___26 = nla_put_u32(skb, 16392, (mdev->ldev)->md.flags); if (tmp___26 != 0) { goto nla_put_failure; } else { tmp___27 = drbd_bm_bits(mdev); tmp___28 = nla_put_u64(skb, 16393, (u64 )tmp___27); if (tmp___28 != 0) { goto nla_put_failure; } else { tmp___29 = drbd_bm_total_weight(mdev); tmp___30 = nla_put_u64(skb, 16394, (u64 )tmp___29); if (tmp___30 != 0) { goto nla_put_failure; } else { } } } if ((int )mdev->state.ldv_49522.conn > 15 && (int )mdev->state.ldv_49522.conn <= 21) { tmp___31 = nla_put_u64(skb, 16395, (u64 )mdev->rs_total); if (tmp___31 != 0) { goto nla_put_failure; } else { tmp___32 = nla_put_u64(skb, 16396, (u64 )mdev->rs_failed); if (tmp___32 != 0) { goto nla_put_failure; } else { } } } else { } } else { } if ((unsigned long )sib != (unsigned long )((struct sib_info const *)0)) { switch ((unsigned int )sib->sib_reason) 
{ case 5U: ; case 1U: ; goto ldv_53822; case 2U: tmp___33 = nla_put_u32(skb, 16389, sib->ldv_50742.ldv_50741.os.i); if (tmp___33 != 0) { goto nla_put_failure; } else { tmp___34 = nla_put_u32(skb, 16390, sib->ldv_50742.ldv_50741.ns.i); if (tmp___34 != 0) { goto nla_put_failure; } else { } } goto ldv_53822; case 4U: tmp___35 = nla_put_u32(skb, 16398, sib->ldv_50742.ldv_50737.helper_exit_code); if (tmp___35 != 0) { goto nla_put_failure; } else { } case 3U: tmp___36 = nla_put_string(skb, 16397, (char const *)sib->ldv_50742.ldv_50737.helper_name); if (tmp___36 != 0) { goto nla_put_failure; } else { } goto ldv_53822; } ldv_53822: ; } else { } nla_nest_end(skb, nla); if (0) { nla_put_failure: err = -90; } else { } if (got_ldev != 0) { put_ldev(mdev); } else { } return (err); } } int drbd_adm_get_status(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; int err ; int tmp ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, 0); if (err != 0) { nlmsg_free(adm_ctx.reply_skb); return (err); } else { } out: drbd_adm_finish(info, (int )retcode); return (0); } } int get_one_status(struct sk_buff *skb , struct netlink_callback *cb ) { struct drbd_conf *mdev ; struct drbd_genlmsghdr *dh ; struct drbd_tconn *pos ; struct drbd_tconn *tconn ; struct drbd_tconn *tmp ; unsigned int volume ; struct list_head *__ptr ; struct list_head const *__mptr ; struct list_head *_________p1 ; bool __warned ; int tmp___0 ; struct list_head *__ptr___0 ; struct list_head const *__mptr___0 ; struct list_head *_________p1___0 ; bool __warned___0 ; int tmp___1 ; void *tmp___2 ; struct list_head *__ptr___1 ; struct list_head const *__mptr___1 ; struct list_head *_________p1___1 ; bool __warned___1 ; int tmp___3 ; void *tmp___4 ; struct net_conf *nc ; int tmp___5 ; struct net_conf *_________p1___2 ; bool __warned___2 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; int tmp___9 ; { pos = (struct drbd_tconn *)cb->args[0]; tconn = 0; volume = (unsigned int )cb->args[1]; rcu_read_lock___6(); __ptr = drbd_tconns.next; _________p1 = *((struct list_head * volatile *)(& __ptr)); tmp___0 = debug_lockdep_rcu_enabled(); if (tmp___0 != 0 && ! __warned) { rcu_read_lock_held(); } else { } __mptr = (struct list_head const *)_________p1; tmp = (struct drbd_tconn *)__mptr + 0xfffffffffffffff8UL; goto ldv_53859; ldv_53858: ; if ((unsigned long )pos == (unsigned long )((struct drbd_tconn *)0)) { pos = tmp; tconn = pos; goto ldv_53857; } else { } if ((unsigned long )tmp == (unsigned long )pos) { tconn = pos; goto ldv_53857; } else { } __ptr___0 = tmp->all_tconn.next; _________p1___0 = *((struct list_head * volatile *)(& __ptr___0)); tmp___1 = debug_lockdep_rcu_enabled(); if (tmp___1 != 0 && ! 
__warned___0) { rcu_read_lock_held(); } else { } __mptr___0 = (struct list_head const *)_________p1___0; tmp = (struct drbd_tconn *)__mptr___0 + 0xfffffffffffffff8UL; ldv_53859: ; if ((unsigned long )(& tmp->all_tconn) != (unsigned long )(& drbd_tconns)) { goto ldv_53858; } else { } ldv_53857: ; if ((unsigned long )tconn != (unsigned long )((struct drbd_tconn *)0)) { next_tconn: tmp___2 = idr_get_next(& tconn->volumes, (int *)(& volume)); mdev = (struct drbd_conf *)tmp___2; if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { __ptr___1 = tconn->all_tconn.next; _________p1___1 = *((struct list_head * volatile *)(& __ptr___1)); tmp___3 = debug_lockdep_rcu_enabled(); if (tmp___3 != 0 && ! __warned___1) { rcu_read_lock_held(); } else { } __mptr___1 = (struct list_head const *)_________p1___1; pos = (struct drbd_tconn *)__mptr___1 + 0xfffffffffffffff8UL; if (volume != 0U) { if ((unsigned long )(& pos->all_tconn) == (unsigned long )(& drbd_tconns) || cb->args[2] != 0L) { goto out; } else { } volume = 0U; tconn = pos; goto next_tconn; } else { } } else { } tmp___4 = genlmsg_put(skb, ((struct netlink_skb_parms *)(& (cb->skb)->cb))->portid, (cb->nlh)->nlmsg_seq, & drbd_genl_family, 2, 2); dh = (struct drbd_genlmsghdr *)tmp___4; if ((unsigned long )dh == (unsigned long )((struct drbd_genlmsghdr *)0)) { goto out; } else { } if ((unsigned long )mdev == (unsigned long )((struct drbd_conf *)0)) { dh->minor = 4294967295U; dh->ldv_49826.ret_code = 101; tmp___5 = nla_put_drbd_cfg_context(skb, tconn, 4294967295U); if (tmp___5 != 0) { goto cancel; } else { } _________p1___2 = *((struct net_conf * volatile *)(& tconn->net_conf)); tmp___6 = debug_lockdep_rcu_enabled(); if (tmp___6 != 0 && ! __warned___2) { tmp___7 = rcu_read_lock_held(); if (tmp___7 == 0 && 1) { __warned___2 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 2909, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1___2; if ((unsigned long )nc != (unsigned long )((struct net_conf *)0)) { tmp___8 = net_conf_to_skb(skb, nc, 1); if (tmp___8 != 0) { goto cancel; } else { } } else { } goto done; } else { } if ((unsigned int )mdev->vnr != volume) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->vnr == volume ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 2915); } else { } if ((unsigned long )mdev->tconn != (unsigned long )tconn) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( mdev->tconn == tconn ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_nl.c.prepared", 2916); } else { } dh->minor = mdev_to_minor(mdev); dh->ldv_49826.ret_code = 101; tmp___9 = nla_put_status_info(skb, mdev, 0); if (tmp___9 != 0) { cancel: genlmsg_cancel(skb, (void *)dh); goto out; } else { } done: genlmsg_end(skb, (void *)dh); } else { } out: rcu_read_unlock___6(); cb->args[0] = (long )pos; cb->args[1] = (unsigned long )pos == (unsigned long )tconn ? 
(long )(volume + 1U) : 0L; return ((int )skb->len); } } int drbd_adm_get_status_all(struct sk_buff *skb , struct netlink_callback *cb ) { unsigned int hdrlen ; struct nlattr *nla ; char const *resource_name ; struct drbd_tconn *tconn ; int maxtype ; int tmp ; struct nlattr *tmp___0 ; long tmp___1 ; long tmp___2 ; void *tmp___3 ; int tmp___4 ; { hdrlen = 12U; if (cb->args[0] != 0L) { if (cb->args[2] != 0L && cb->args[2] != cb->args[0]) { return (0); } else { } goto dump; } else { } tmp = nlmsg_attrlen(cb->nlh, (int )hdrlen); tmp___0 = nlmsg_attrdata(cb->nlh, (int )hdrlen); nla = nla_find((struct nlattr const *)tmp___0, tmp, 2); if ((unsigned long )nla == (unsigned long )((struct nlattr *)0)) { goto dump; } else { } maxtype = 4; nla = drbd_nla_find_nested(maxtype, nla, 2); tmp___2 = IS_ERR((void const *)nla); if (tmp___2 != 0L) { tmp___1 = PTR_ERR((void const *)nla); return ((int )tmp___1); } else { } if ((unsigned long )nla == (unsigned long )((struct nlattr *)0)) { return (-22); } else { } tmp___3 = nla_data((struct nlattr const *)nla); resource_name = (char const *)tmp___3; tconn = conn_get_by_name(resource_name); if ((unsigned long )tconn == (unsigned long )((struct drbd_tconn *)0)) { return (-19); } else { } kref_put(& tconn->kref, & conn_destroy); cb->args[0] = (long )tconn; cb->args[2] = (long )tconn; dump: tmp___4 = get_one_status(skb, cb); return (tmp___4); } } int drbd_adm_get_timeout_type(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; struct timeout_parms tp ; int err ; int tmp ; int tmp___0 ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } if (*((unsigned int *)adm_ctx.mdev + 187UL) != 40960U) { tmp___0 = constant_test_bit(2U, (unsigned long const volatile *)(& (adm_ctx.mdev)->flags)); tp.timeout_type = tmp___0 != 0; } else { tp.timeout_type = 2U; } err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, & tp); if (err != 0) { nlmsg_free(adm_ctx.reply_skb); return (err); } else { } out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_start_ov(struct sk_buff *skb , struct genl_info *info ) { struct drbd_conf *mdev ; enum drbd_ret_code retcode ; struct start_ov_parms parms ; int tmp ; int err ; int tmp___0 ; char const *tmp___1 ; int tmp___2 ; wait_queue_t __wait ; struct task_struct *tmp___3 ; int tmp___4 ; union drbd_state val ; union drbd_state mask ; int tmp___5 ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } mdev = adm_ctx.mdev; parms.ov_start_sector = (__u64 )mdev->ov_start_sector; parms.ov_stop_sector = 0xffffffffffffffffULL; if ((unsigned long )*(info->attrs + 9UL) != (unsigned long )((struct nlattr *)0)) { tmp___0 = start_ov_parms_from_attrs(& parms, info); err = tmp___0; if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___1 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___1); goto out; } else { } } else { } mdev->ov_start_sector = (sector_t )parms.ov_start_sector & 0xfffffffffffffff8UL; mdev->ov_stop_sector = (sector_t )parms.ov_stop_sector; drbd_suspend_io(mdev); tmp___2 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 == 0) { goto ldv_53904; } else { } tmp___3 = get_current(); 
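/* The __wait block that follows is the macro-expanded body of the kernel's
 * wait_event(); identical expansions appear in drbd_adm_invalidate and
 * drbd_adm_invalidate_peer above.  Assuming flag bit 9 is BITMAP_IO as in
 * the upstream driver, the source form is just:
 *
 *     wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
 *
 * i.e. sleep on misc_wait until pending bitmap I/O has drained, then
 * (below) request conn = 18, presumably C_VERIFY_S, to start online verify
 * at the range taken from start_ov_parms, with ov_start_sector rounded
 * down to a multiple of eight 512-byte sectors (apparently one 4 KiB
 * bitmap unit). */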
__wait.flags = 0U; __wait.private = (void *)tmp___3; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_53907: prepare_to_wait(& mdev->misc_wait, & __wait, 2); tmp___4 = constant_test_bit(9U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___4 == 0) { goto ldv_53906; } else { } schedule(); goto ldv_53907; ldv_53906: finish_wait(& mdev->misc_wait, & __wait); ldv_53904: val.i = 0U; val.ldv_40024.conn = 18U; mask.i = 0U; mask.ldv_40024.conn = 31U; tmp___5 = drbd_request_state(mdev, mask, val); retcode = (enum drbd_ret_code )tmp___5; drbd_resume_io(mdev); out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_new_c_uuid(struct sk_buff *skb , struct genl_info *info ) { struct drbd_conf *mdev ; enum drbd_ret_code retcode ; int skip_initial_sync ; int err ; struct new_c_uuid_parms args ; int tmp ; char const *tmp___0 ; int tmp___1 ; union drbd_state __ns ; { skip_initial_sync = 0; tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out_nolock; } else { } mdev = adm_ctx.mdev; memset((void *)(& args), 0, 1UL); if ((unsigned long )*(info->attrs + 10UL) != (unsigned long )((struct nlattr *)0)) { err = new_c_uuid_parms_from_attrs(& args, info); if (err != 0) { retcode = ERR_MANDATORY_TAG; tmp___0 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___0); goto out_nolock; } else { } } else { } ldv_mutex_lock_304(mdev->state_mutex); tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 == 0) { retcode = ERR_NO_DISK; goto out; } else { } if ((((unsigned int )*((unsigned short *)mdev + 374UL) == 160U && (mdev->tconn)->agreed_pro_version > 89) && (mdev->ldev)->md.uuid[0] == 4ULL) && (int )((signed char )args.clear_bm) != 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Preparing to skip initial sync\n"); skip_initial_sync = 1; } else if ((unsigned int )*((unsigned short *)mdev + 374UL) != 0U) { retcode = ERR_CONNECTED; goto out_dec; } else { } drbd_uuid_set(mdev, 1, 0ULL); drbd_uuid_new_current(mdev); if ((int )((signed char )args.clear_bm) != 0) { err = drbd_bitmap_io(mdev, & drbd_bmio_clear_n_write, (char *)"clear_n_write from new_c_uuid", BM_LOCKED_MASK); if (err != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Writing bitmap failed with %d\n", err); retcode = ERR_IO_MD_DISK; } else { } if (skip_initial_sync != 0) { drbd_send_uuids_skip_initial_sync(mdev); _drbd_uuid_set(mdev, 1, 0ULL); drbd_print_uuids(mdev, "cleared bitmap UUID"); spin_lock_irq(& (mdev->tconn)->req_lock); __ns = drbd_read_state(mdev); __ns.ldv_40024.disk = 8U; __ns.ldv_40024.pdsk = 8U; _drbd_set_state(mdev, __ns, CS_VERBOSE, 0); spin_unlock_irq(& (mdev->tconn)->req_lock); } else { } } else { } drbd_md_sync(mdev); out_dec: put_ldev(mdev); out: ldv_mutex_unlock_305(mdev->state_mutex); out_nolock: drbd_adm_finish(info, (int )retcode); return (0); } } static enum drbd_ret_code drbd_check_resource_name(char const *name ) { char *tmp ; { if ((unsigned long )name == (unsigned long )((char const *)0) || (int )((signed char )*name) == 0) { drbd_msg_put_info("resource name missing"); return (ERR_MANDATORY_TAG); } else { } tmp = strchr(name, 47); if ((unsigned long )tmp != (unsigned long )((char *)0)) { drbd_msg_put_info("invalid resource name"); return (ERR_INVALID_REQUEST); } else { } return (NO_ERROR); 
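/* drbd_check_resource_name: a resource name is valid iff it is non-NULL,
 * non-empty, and contains no '/' (character 47 in the strchr() call above);
 * names travel as netlink strings and may end up as path components, where
 * a slash would be unsafe.  NO_ERROR is 101 throughout this file, which is
 * why callers compare retcode against 101U. */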
} } int drbd_adm_new_resource(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; struct res_opts res_opts ; int err ; int tmp ; char const *tmp___0 ; struct drbd_tconn *tmp___1 ; { tmp = drbd_adm_prepare(skb, info, 0U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } set_res_opts_defaults(& res_opts); err = res_opts_from_attrs(& res_opts, info); if (err != 0 && err != -42) { retcode = ERR_MANDATORY_TAG; tmp___0 = from_attrs_err_to_txt(err); drbd_msg_put_info(tmp___0); goto out; } else { } retcode = drbd_check_resource_name((char const *)adm_ctx.resource_name); if ((unsigned int )retcode != 101U) { goto out; } else { } if ((unsigned long )adm_ctx.tconn != (unsigned long )((struct drbd_tconn *)0)) { if (((int )(info->nlhdr)->nlmsg_flags & 512) != 0) { retcode = ERR_INVALID_REQUEST; drbd_msg_put_info("resource exists"); } else { } goto out; } else { } tmp___1 = conn_create((char const *)adm_ctx.resource_name, & res_opts); if ((unsigned long )tmp___1 == (unsigned long )((struct drbd_tconn *)0)) { retcode = ERR_NOMEM; } else { } out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_add_minor(struct sk_buff *skb , struct genl_info *info ) { struct drbd_genlmsghdr *dh ; enum drbd_ret_code retcode ; int tmp ; { dh = (struct drbd_genlmsghdr *)info->userhdr; tmp = drbd_adm_prepare(skb, info, 2U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } if (dh->minor > 1048575U) { drbd_msg_put_info("requested minor out of range"); retcode = ERR_INVALID_REQUEST; goto out; } else { } if (adm_ctx.volume > 65535U) { drbd_msg_put_info("requested volume id out of range"); retcode = ERR_INVALID_REQUEST; goto out; } else { } if ((unsigned long )adm_ctx.mdev != (unsigned long )((struct drbd_conf *)0)) { if (((int )(info->nlhdr)->nlmsg_flags & 512) != 0) { retcode = ERR_MINOR_EXISTS; } else { } goto out; } else { } retcode = conn_new_minor(adm_ctx.tconn, dh->minor, (int )adm_ctx.volume); out: drbd_adm_finish(info, (int )retcode); return (0); } } static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev ) { union drbd_state val ; union drbd_state mask ; unsigned int tmp ; { if ((unsigned int )*((unsigned char *)mdev + 749UL) == 0U && (unsigned int )*((unsigned char *)mdev + 748UL) == 2U) { val.i = 0U; val.ldv_40024.conn = 9U; mask.i = 0U; mask.ldv_40024.conn = 31U; _drbd_request_state(mdev, mask, val, 6); idr_remove(& (mdev->tconn)->volumes, mdev->vnr); tmp = mdev_to_minor(mdev); idr_remove(& minors, (int )tmp); del_gendisk(mdev->vdisk); synchronize_rcu(); kref_put(& mdev->kref, & drbd_minor_destroy); return (NO_ERROR); } else { return (ERR_MINOR_CONFIGURED); } } } int drbd_adm_delete_minor(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; int tmp ; { tmp = drbd_adm_prepare(skb, info, 1U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } retcode = adm_delete_minor(adm_ctx.mdev); out: drbd_adm_finish(info, (int )retcode); return (0); } } int drbd_adm_down(struct sk_buff *skb , struct genl_info *info ) { int retcode ; struct drbd_conf *mdev ; unsigned int i ; 
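/* drbd_adm_down tears down a whole resource in a fixed order; each phase
 * below walks the tconn->volumes idr with idr_get_next():
 *   1. demote every volume to R_SECONDARY via drbd_set_role(),
 *   2. disconnect the peer via conn_try_disconnect(),
 *   3. detach every volume from its backing device via adm_detach(),
 *   4. stop the worker thread, then delete every minor (adm_delete_minor),
 *   5. if conn_lowest_minor() reports no minors left, unlink the
 *      connection from drbd_tconns and drop its last reference.
 * Each failing phase records an error text with drbd_msg_put_info() and
 * jumps to out, leaving the later phases undone. */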
void *tmp ; enum drbd_state_rv tmp___0 ; void *tmp___1 ; enum drbd_state_rv tmp___2 ; void *tmp___3 ; void *tmp___4 ; void *tmp___5 ; enum drbd_ret_code tmp___6 ; void *tmp___7 ; int tmp___8 ; { retcode = drbd_adm_prepare(skb, info, 0U); if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return (retcode); } else { } if (retcode != 101) { goto out; } else { } if ((unsigned long )adm_ctx.tconn == (unsigned long )((struct drbd_tconn *)0)) { retcode = 158; goto out; } else { } i = 0U; tmp = idr_get_next(& (adm_ctx.tconn)->volumes, (int *)(& i)); mdev = (struct drbd_conf *)tmp; goto ldv_53966; ldv_53965: tmp___0 = drbd_set_role(mdev, R_SECONDARY, 0); retcode = (int )tmp___0; if (retcode <= 0) { drbd_msg_put_info("failed to demote"); goto out; } else { } i = i + 1U; tmp___1 = idr_get_next(& (adm_ctx.tconn)->volumes, (int *)(& i)); mdev = (struct drbd_conf *)tmp___1; ldv_53966: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53965; } else { } tmp___2 = conn_try_disconnect(adm_ctx.tconn, 0); retcode = (int )tmp___2; if (retcode <= 0) { drbd_msg_put_info("failed to disconnect"); goto out; } else { } i = 0U; tmp___3 = idr_get_next(& (adm_ctx.tconn)->volumes, (int *)(& i)); mdev = (struct drbd_conf *)tmp___3; goto ldv_53969; ldv_53968: retcode = adm_detach(mdev, 0); if (retcode <= 0 || retcode > 101) { drbd_msg_put_info("failed to detach"); goto out; } else { } i = i + 1U; tmp___4 = idr_get_next(& (adm_ctx.tconn)->volumes, (int *)(& i)); mdev = (struct drbd_conf *)tmp___4; ldv_53969: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53968; } else { } drbd_thread_stop(& (adm_ctx.tconn)->worker); i = 0U; tmp___5 = idr_get_next(& (adm_ctx.tconn)->volumes, (int *)(& i)); mdev = (struct drbd_conf *)tmp___5; goto ldv_53972; ldv_53971: tmp___6 = adm_delete_minor(mdev); retcode = (int )tmp___6; if (retcode != 101) { drbd_msg_put_info("failed to delete volume"); goto out; } else { } i = i + 1U; tmp___7 = idr_get_next(& (adm_ctx.tconn)->volumes, (int *)(& i)); mdev = (struct drbd_conf *)tmp___7; ldv_53972: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_53971; } else { } tmp___8 = conn_lowest_minor(adm_ctx.tconn); if (tmp___8 < 0) { list_del_rcu(& (adm_ctx.tconn)->all_tconn); synchronize_rcu(); kref_put(& (adm_ctx.tconn)->kref, & conn_destroy); retcode = 101; } else { retcode = 159; drbd_msg_put_info("failed to delete connection"); } goto out; out: drbd_adm_finish(info, retcode); return (0); } } int drbd_adm_del_resource(struct sk_buff *skb , struct genl_info *info ) { enum drbd_ret_code retcode ; int tmp ; int tmp___0 ; { tmp = drbd_adm_prepare(skb, info, 2U); retcode = (enum drbd_ret_code )tmp; if ((unsigned long )adm_ctx.reply_skb == (unsigned long )((struct sk_buff *)0)) { return ((int )retcode); } else { } if ((unsigned int )retcode != 101U) { goto out; } else { } tmp___0 = conn_lowest_minor(adm_ctx.tconn); if (tmp___0 < 0) { list_del_rcu(& (adm_ctx.tconn)->all_tconn); synchronize_rcu(); kref_put(& (adm_ctx.tconn)->kref, & conn_destroy); retcode = NO_ERROR; } else { retcode = ERR_RES_IN_USE; } if ((unsigned int )retcode == 101U) { drbd_thread_stop(& (adm_ctx.tconn)->worker); } else { } out: drbd_adm_finish(info, (int )retcode); return (0); } } void drbd_bcast_event(struct drbd_conf *mdev , struct sib_info const *sib ) { atomic_t drbd_genl_seq ; struct sk_buff *msg ; struct drbd_genlmsghdr *d_out ; unsigned int seq ; int err ; int tmp ; void *tmp___0 ; int tmp___1 ; { 
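/* drbd_bcast_event multicasts a state_info packet to the drbd generic-
 * netlink event group.  Two quirks of this CIL rendering are worth
 * flagging.  First, in the upstream driver drbd_genl_seq is a function-
 * local static atomic_t initialized once to 2, so sequence numbers persist
 * across calls; here it is an ordinary local re-initialized on every call,
 * which looks like a harness artifact rather than driver behavior.
 * Second, as rendered, only sib_reason == 5 (presumably SIB_SYNC_PROGRESS)
 * events that pass the once-per-250-jiffies check (the signed subtraction
 * against jiffies is the expansion of time_after()) ever reach the
 * broadcast path; every other branch returns early. */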
drbd_genl_seq.counter = 2; err = -12; if ((unsigned int )sib->sib_reason == 5U && ((1 != 0 && 1 != 0) && (long )(mdev->rs_last_bcast + 250UL) - (long )jiffies < 0L)) { mdev->rs_last_bcast = jiffies; } else { return; } tmp = atomic_add_return(1, & drbd_genl_seq); seq = (unsigned int )tmp; msg = genlmsg_new(3776UL, 16U); if ((unsigned long )msg == (unsigned long )((struct sk_buff *)0)) { goto failed; } else { } err = -90; tmp___0 = genlmsg_put(msg, 0U, seq, & drbd_genl_family, 0, 1); d_out = (struct drbd_genlmsghdr *)tmp___0; if ((unsigned long )d_out == (unsigned long )((struct drbd_genlmsghdr *)0)) { goto nla_put_failure; } else { } d_out->minor = mdev_to_minor(mdev); d_out->ldv_49826.ret_code = 101; tmp___1 = nla_put_status_info(msg, mdev, sib); if (tmp___1 != 0) { goto nla_put_failure; } else { } genlmsg_end(msg, (void *)d_out); err = drbd_genl_multicast_events(msg, 0U); if (err != 0 && err != -3) { goto failed; } else { } return; nla_put_failure: nlmsg_free(msg); failed: dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Error %d while broadcasting event. Event seq:%u sib_reason:%u\n", err, seq, (unsigned int )sib->sib_reason); return; } } void ldv_mutex_lock_275(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_276(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_277(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_278(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___2 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_279(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_280(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_281(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_282(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_283(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_284(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_285(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_286(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_287(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_288(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_289(struct mutex *ldv_func_arg1 ) { { 
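/* The ldv_mutex_{lock,unlock,trylock}_NNN definitions from here on are
 * LDV (Linux Driver Verification) instrumentation: each mutex call site in
 * the driver was redirected to a numbered wrapper that first updates a
 * per-lock-class model (ldv_mutex_lock_state_mutex_of_drbd_conf and
 * friends), which the verifier checks for double-lock and double-unlock
 * errors, and then performs the real mutex operation.  In the trylock
 * wrappers (e.g. ldv_mutex_trylock_278 above) the model's verdict is
 * returned and the real return value is dead code by construction. */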
ldv_mutex_unlock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_290(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_291(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_292(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_293(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_drbd_socket(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_lock_294(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_295(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_296(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_297(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_298(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_drbd_socket(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_299(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_300(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_unlock_301(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_302(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_303(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_304(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_305(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } extern struct rb_node *rb_next(struct rb_node const * ) ; __inline static void rb_link_node(struct rb_node *node , struct rb_node *parent , struct rb_node **rb_link ) { struct rb_node *tmp ; { node->__rb_parent_color = (unsigned long )parent; tmp = 0; node->rb_right = tmp; node->rb_left = tmp; *rb_link = node; return; } } extern void __rb_insert_augmented(struct rb_node * , struct rb_root * , void (*)(struct rb_node * , struct rb_node * ) ) ; __inline static void rb_insert_augmented(struct rb_node *node , struct rb_root *root , struct rb_augment_callbacks const *augment ) { { __rb_insert_augmented(node, root, augment->rotate); return; } } __inline static void rb_set_parent(struct rb_node *rb , struct rb_node *p ) { { rb->__rb_parent_color = (rb->__rb_parent_color & 1UL) | (unsigned long )p; return; } } __inline static void rb_set_parent_color(struct rb_node *rb , struct rb_node *p , int color ) { { rb->__rb_parent_color = (unsigned 
long )color | (unsigned long )p; return; } } __inline static void __rb_change_child(struct rb_node *old , struct rb_node *new , struct rb_node *parent , struct rb_root *root ) { { if ((unsigned long )parent != (unsigned long )((struct rb_node *)0)) { if ((unsigned long )parent->rb_left == (unsigned long )old) { parent->rb_left = new; } else { parent->rb_right = new; } } else { root->rb_node = new; } return; } } extern void __rb_erase_color(struct rb_node * , struct rb_root * , void (*)(struct rb_node * , struct rb_node * ) ) ; __inline static void rb_erase_augmented(struct rb_node *node , struct rb_root *root , struct rb_augment_callbacks const *augment ) { struct rb_node *child ; struct rb_node *tmp ; struct rb_node *parent ; struct rb_node *rebalance ; unsigned long pc ; struct rb_node *successor ; struct rb_node *child2 ; unsigned long pc2 ; { child = node->rb_right; tmp = node->rb_left; if ((unsigned long )tmp == (unsigned long )((struct rb_node *)0)) { pc = node->__rb_parent_color; parent = (struct rb_node *)(pc & 0xfffffffffffffffcUL); __rb_change_child(node, child, parent, root); if ((unsigned long )child != (unsigned long )((struct rb_node *)0)) { child->__rb_parent_color = pc; rebalance = 0; } else { rebalance = (int )pc & 1 ? parent : 0; } tmp = parent; } else if ((unsigned long )child == (unsigned long )((struct rb_node *)0)) { pc = node->__rb_parent_color; tmp->__rb_parent_color = pc; parent = (struct rb_node *)(pc & 0xfffffffffffffffcUL); __rb_change_child(node, tmp, parent, root); rebalance = 0; tmp = parent; } else { successor = child; tmp = child->rb_left; if ((unsigned long )tmp == (unsigned long )((struct rb_node *)0)) { parent = successor; child2 = successor->rb_right; (*(augment->copy))(node, successor); } else { ldv_6275: parent = successor; successor = tmp; tmp = tmp->rb_left; if ((unsigned long )tmp != (unsigned long )((struct rb_node *)0)) { goto ldv_6275; } else { } child2 = successor->rb_right; parent->rb_left = child2; successor->rb_right = child; rb_set_parent(child, successor); (*(augment->copy))(node, successor); (*(augment->propagate))(parent, successor); } tmp = node->rb_left; successor->rb_left = tmp; rb_set_parent(tmp, successor); pc = node->__rb_parent_color; tmp = (struct rb_node *)(pc & 0xfffffffffffffffcUL); __rb_change_child(node, successor, tmp, root); if ((unsigned long )child2 != (unsigned long )((struct rb_node *)0)) { successor->__rb_parent_color = pc; rb_set_parent_color(child2, parent, 1); rebalance = 0; } else { pc2 = successor->__rb_parent_color; successor->__rb_parent_color = pc; rebalance = (int )pc2 & 1 ? 
parent : 0; } tmp = successor; } (*(augment->propagate))(tmp, 0); if ((unsigned long )rebalance != (unsigned long )((struct rb_node *)0)) { __rb_erase_color(rebalance, root, augment->rotate); } else { } return; } } __inline static sector_t interval_end(struct rb_node *node ) { struct drbd_interval *this ; struct rb_node const *__mptr ; { __mptr = (struct rb_node const *)node; this = (struct drbd_interval *)__mptr; return (this->end); } } __inline static sector_t compute_subtree_last(struct drbd_interval *node ) { sector_t max ; sector_t left ; sector_t tmp ; sector_t right ; sector_t tmp___0 ; { max = node->sector + (sector_t )(node->size >> 9); if ((unsigned long )node->rb.rb_left != (unsigned long )((struct rb_node *)0)) { tmp = interval_end(node->rb.rb_left); left = tmp; if (left > max) { max = left; } else { } } else { } if ((unsigned long )node->rb.rb_right != (unsigned long )((struct rb_node *)0)) { tmp___0 = interval_end(node->rb.rb_right); right = tmp___0; if (right > max) { max = right; } else { } } else { } return (max); } } static void augment_propagate(struct rb_node *rb , struct rb_node *stop ) { struct drbd_interval *node ; struct rb_node const *__mptr ; sector_t subtree_last ; sector_t tmp ; { goto ldv_6331; ldv_6330: __mptr = (struct rb_node const *)rb; node = (struct drbd_interval *)__mptr; tmp = compute_subtree_last(node); subtree_last = tmp; if (node->end == subtree_last) { goto ldv_6329; } else { } node->end = subtree_last; rb = (struct rb_node *)(node->rb.__rb_parent_color & 0xfffffffffffffffcUL); ldv_6331: ; if ((unsigned long )rb != (unsigned long )stop) { goto ldv_6330; } else { } ldv_6329: ; return; } } static void augment_copy(struct rb_node *rb_old , struct rb_node *rb_new ) { struct drbd_interval *old ; struct rb_node const *__mptr ; struct drbd_interval *new ; struct rb_node const *__mptr___0 ; { __mptr = (struct rb_node const *)rb_old; old = (struct drbd_interval *)__mptr; __mptr___0 = (struct rb_node const *)rb_new; new = (struct drbd_interval *)__mptr___0; new->end = old->end; return; } } static void augment_rotate(struct rb_node *rb_old , struct rb_node *rb_new ) { struct drbd_interval *old ; struct rb_node const *__mptr ; struct drbd_interval *new ; struct rb_node const *__mptr___0 ; { __mptr = (struct rb_node const *)rb_old; old = (struct drbd_interval *)__mptr; __mptr___0 = (struct rb_node const *)rb_new; new = (struct drbd_interval *)__mptr___0; new->end = old->end; old->end = compute_subtree_last(old); return; } } static struct rb_augment_callbacks const augment_callbacks = {& augment_propagate, & augment_copy, & augment_rotate}; bool drbd_insert_interval(struct rb_root *root , struct drbd_interval *this ) { struct rb_node **new ; struct rb_node *parent ; long tmp ; struct drbd_interval *here ; struct rb_node const *__mptr ; { new = & root->rb_node; parent = 0; tmp = ldv__builtin_expect((this->size & 511U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_interval.c.prepared"), "i" (170), "i" (12UL)); ldv_6359: ; goto ldv_6359; } else { } goto ldv_6364; ldv_6363: __mptr = (struct rb_node const *)*new; here = (struct drbd_interval *)__mptr; parent = *new; if (this->sector < here->sector) { new = & (*new)->rb_left; } else if (this->sector > 
here->sector) { new = & (*new)->rb_right; } else if ((unsigned long )this < (unsigned long )here) { new = & (*new)->rb_left; } else if ((unsigned long )this > (unsigned long )here) { new = & (*new)->rb_right; } else { return (0); } ldv_6364: ; if ((unsigned long )*new != (unsigned long )((struct rb_node *)0)) { goto ldv_6363; } else { } rb_link_node(& this->rb, parent, new); rb_insert_augmented(& this->rb, root, & augment_callbacks); return (1); } } bool drbd_contains_interval(struct rb_root *root , sector_t sector , struct drbd_interval *interval ) { struct rb_node *node ; struct drbd_interval *here ; struct rb_node const *__mptr ; { node = root->rb_node; goto ldv_6376; ldv_6375: __mptr = (struct rb_node const *)node; here = (struct drbd_interval *)__mptr; if (here->sector > sector) { node = node->rb_left; } else if (here->sector < sector) { node = node->rb_right; } else if ((unsigned long )interval < (unsigned long )here) { node = node->rb_left; } else if ((unsigned long )interval > (unsigned long )here) { node = node->rb_right; } else { return (1); } ldv_6376: ; if ((unsigned long )node != (unsigned long )((struct rb_node *)0)) { goto ldv_6375; } else { } return (0); } } void drbd_remove_interval(struct rb_root *root , struct drbd_interval *this ) { { rb_erase_augmented(& this->rb, root, & augment_callbacks); return; } } struct drbd_interval *drbd_find_overlap(struct rb_root *root , sector_t sector , unsigned int size ) { struct rb_node *node ; struct drbd_interval *overlap ; sector_t end ; long tmp ; struct drbd_interval *here ; struct rb_node const *__mptr ; sector_t tmp___0 ; { node = root->rb_node; overlap = 0; end = (sector_t )(size >> 9) + sector; tmp = ldv__builtin_expect((size & 511U) != 0U, 0L); if (tmp != 0L) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_interval.c.prepared"), "i" (255), "i" (12UL)); ldv_6390: ; goto ldv_6390; } else { } goto ldv_6396; ldv_6395: __mptr = (struct rb_node const *)node; here = (struct drbd_interval *)__mptr; if ((unsigned long )node->rb_left != (unsigned long )((struct rb_node *)0)) { tmp___0 = interval_end(node->rb_left); if (tmp___0 > sector) { node = node->rb_left; } else { goto _L; } } else _L: /* CIL Label */ if (here->sector < end && here->sector + (sector_t )(here->size >> 9) > sector) { overlap = here; goto ldv_6394; } else if (here->sector <= sector) { node = node->rb_right; } else { goto ldv_6394; } ldv_6396: ; if ((unsigned long )node != (unsigned long )((struct rb_node *)0)) { goto ldv_6395; } else { } ldv_6394: ; return (overlap); } } struct drbd_interval *drbd_next_overlap(struct drbd_interval *i , sector_t sector , unsigned int size ) { sector_t end ; struct rb_node *node ; struct rb_node const *__mptr ; { end = (sector_t )(size >> 9) + sector; ldv_6406: node = rb_next((struct rb_node const *)(& i->rb)); if ((unsigned long )node == (unsigned long )((struct rb_node *)0)) { return (0); } else { } __mptr = (struct rb_node const *)node; i = (struct drbd_interval *)__mptr; if (i->sector >= end) { return (0); } else { } if (i->sector + (sector_t )(i->size >> 9) > sector) { return (i); } else { } goto ldv_6406; } } int ldv_mutex_trylock_342(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_338(struct mutex *ldv_func_arg1 ) ; void 
ldv_mutex_unlock_340(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_343(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_345(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_347(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_349(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_351(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_353(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_337(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_339(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_341(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_344(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_346(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_348(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_350(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_352(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_cstate_mutex_of_drbd_tconn(struct mutex *lock ) ; void ldv_mutex_unlock_cstate_mutex_of_drbd_tconn(struct mutex *lock ) ; __inline static struct thread_info *current_thread_info___7(void) { struct thread_info *ti ; unsigned long pfo_ret__ ; { switch (8UL) { case 1UL: __asm__ ("movb %%gs:%P1,%0": "=q" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6444; case 2UL: __asm__ ("movw %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6444; case 4UL: __asm__ ("movl %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6444; case 8UL: __asm__ ("movq %%gs:%P1,%0": "=r" (pfo_ret__): "p" (& kernel_stack)); goto ldv_6444; default: __bad_percpu_size(); } ldv_6444: ti = (struct thread_info *)(pfo_ret__ - 8152UL); return (ti); } } extern int default_wake_function(wait_queue_t * , unsigned int , int , void * ) ; __inline static void init_waitqueue_entry(wait_queue_t *q , struct task_struct *p ) { { q->flags = 0U; q->private = (void *)p; q->func = & default_wake_function; return; } } extern void add_wait_queue(wait_queue_head_t * , wait_queue_t * ) ; extern void remove_wait_queue(wait_queue_head_t * , wait_queue_t * ) ; __inline static void __rcu_read_lock___7(void) { struct thread_info *tmp ; { tmp = current_thread_info___7(); tmp->preempt_count = tmp->preempt_count + 1; __asm__ volatile ("": : : "memory"); return; } } __inline static void __rcu_read_unlock___7(void) { struct thread_info *tmp ; { __asm__ volatile ("": : : "memory"); tmp = current_thread_info___7(); tmp->preempt_count = tmp->preempt_count + -1; __asm__ volatile ("": : : "memory"); return; } } __inline static void rcu_read_lock___7(void) { bool __warned ; int tmp ; int tmp___0 ; { __rcu_read_lock___7(); rcu_lock_acquire(& rcu_lock_map); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 738, "rcu_read_lock() used illegally while idle"); } else { } } else { } return; } } __inline static void rcu_read_unlock___7(void) { bool __warned ; int tmp ; int tmp___0 ; { tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! 
__warned) { tmp___0 = rcu_is_cpu_idle(); if (tmp___0 != 0) { __warned = 1; lockdep_rcu_suspicious("include/linux/rcupdate.h", 759, "rcu_read_unlock() used illegally while idle"); } else { } } else { } rcu_lock_release(& rcu_lock_map); __rcu_read_unlock___7(); return; } } void print_st_err(struct drbd_conf *mdev , union drbd_state os , union drbd_state ns , enum drbd_state_rv err ) ; enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn ) ; enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn ) ; __inline static void drbd_thread_stop_nowait(struct drbd_thread *thi ) { { _drbd_thread_stop(thi, 0, 0); return; } } __inline static void drbd_thread_restart_nowait(struct drbd_thread *thi ) { { _drbd_thread_stop(thi, 1, 0); return; } } static int w_after_state_ch(struct drbd_work *w , int unused ) ; static void after_state_ch(struct drbd_conf *mdev , union drbd_state os , union drbd_state ns , enum chg_state_flags flags ) ; static enum drbd_state_rv is_valid_state(struct drbd_conf *mdev , union drbd_state ns ) ; static enum drbd_state_rv is_valid_soft_transition(union drbd_state os , union drbd_state ns , struct drbd_tconn *tconn ) ; static enum drbd_state_rv is_valid_transition(union drbd_state os , union drbd_state ns ) ; static union drbd_state sanitize_state(struct drbd_conf *mdev , union drbd_state ns , enum sanitize_state_warnings *warn ) ; __inline static bool is_susp(union drbd_state s ) { { return ((bool )(((unsigned int )*((unsigned char *)(& s) + 2UL) != 0U || (unsigned int )*((unsigned char *)(& s) + 2UL) != 0U) || (unsigned int )*((unsigned char *)(& s) + 2UL) != 0U)); } } bool conn_all_vols_unconf(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; bool rv ; int vnr ; void *tmp ; void *tmp___0 ; { rv = 1; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52059; ldv_52058: ; if (((unsigned int )*((unsigned char *)mdev + 749UL) != 0U || (unsigned int )*((unsigned short *)mdev + 374UL) != 0U) || (unsigned int )*((unsigned char *)mdev + 748UL) != 2U) { rv = 0; goto ldv_52057; } else { } vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52059: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52058; } else { } ldv_52057: rcu_read_unlock___7(); return (rv); } } static enum drbd_role max_role(enum drbd_role role1 , enum drbd_role role2 ) { { if ((unsigned int )role1 == 1U || (unsigned int )role2 == 1U) { return (R_PRIMARY); } else { } if ((unsigned int )role1 == 2U || (unsigned int )role2 == 2U) { return (R_SECONDARY); } else { } return (R_UNKNOWN); } } static enum drbd_role min_role(enum drbd_role role1 , enum drbd_role role2 ) { { if ((unsigned int )role1 == 0U || (unsigned int )role2 == 0U) { return (R_UNKNOWN); } else { } if ((unsigned int )role1 == 2U || (unsigned int )role2 == 2U) { return (R_SECONDARY); } else { } return (R_PRIMARY); } } enum drbd_role conn_highest_role(struct drbd_tconn *tconn ) { enum drbd_role role ; struct drbd_conf *mdev ; int vnr ; void *tmp ; void *tmp___0 ; { role = R_UNKNOWN; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52075; ldv_52074: role = max_role(role, (enum drbd_role )mdev->state.ldv_49522.role); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52075: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52074; } else { } 
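/* The goto ldv_52074/ldv_52075 pair above is CIL's rendering of the
 * idr_for_each_entry() loop shared by all of the conn_* aggregation
 * helpers in this block (conn_highest_role, conn_highest_peer,
 * conn_highest_disk, conn_lowest_disk, conn_highest_pdsk,
 * conn_lowest_conn): under rcu_read_lock(), visit every volume of the
 * connection with idr_get_next(&tconn->volumes, &vnr), fold one field of
 * mdev->state into a running max or min, and return the aggregate.  A
 * sketch of the source form, assuming the upstream macro:
 *
 *     rcu_read_lock();
 *     idr_for_each_entry(&tconn->volumes, mdev, vnr)
 *         role = max_role(role, mdev->state.role);
 *     rcu_read_unlock();
 */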
rcu_read_unlock___7(); return (role); } } enum drbd_role conn_highest_peer(struct drbd_tconn *tconn ) { enum drbd_role peer ; struct drbd_conf *mdev ; int vnr ; void *tmp ; void *tmp___0 ; { peer = R_UNKNOWN; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52084; ldv_52083: peer = max_role(peer, (enum drbd_role )mdev->state.ldv_49522.peer); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52084: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52083; } else { } rcu_read_unlock___7(); return (peer); } } enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn ) { enum drbd_disk_state ds ; struct drbd_conf *mdev ; int vnr ; void *tmp ; enum drbd_disk_state __max1 ; enum drbd_disk_state __max2 ; void *tmp___0 ; { ds = D_DISKLESS; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52096; ldv_52095: __max1 = ds; __max2 = (enum drbd_disk_state )mdev->state.ldv_49522.disk; ds = (enum drbd_disk_state )((unsigned int )__max1 > (unsigned int )__max2 ? (unsigned int )__max1 : (unsigned int )__max2); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52096: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52095; } else { } rcu_read_unlock___7(); return (ds); } } enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn ) { enum drbd_disk_state ds ; struct drbd_conf *mdev ; int vnr ; void *tmp ; enum drbd_disk_state __min1 ; enum drbd_disk_state __min2 ; void *tmp___0 ; { ds = D_MASK; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52108; ldv_52107: __min1 = ds; __min2 = (enum drbd_disk_state )mdev->state.ldv_49522.disk; ds = (enum drbd_disk_state )((unsigned int )__min1 < (unsigned int )__min2 ? (unsigned int )__min1 : (unsigned int )__min2); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52108: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52107; } else { } rcu_read_unlock___7(); return (ds); } } enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn ) { enum drbd_disk_state ds ; struct drbd_conf *mdev ; int vnr ; void *tmp ; enum drbd_disk_state __max1 ; enum drbd_disk_state __max2 ; void *tmp___0 ; { ds = D_DISKLESS; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52120; ldv_52119: __max1 = ds; __max2 = (enum drbd_disk_state )mdev->state.ldv_49522.pdsk; ds = (enum drbd_disk_state )((unsigned int )__max1 > (unsigned int )__max2 ? 
(unsigned int )__max1 : (unsigned int )__max2); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52120: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52119; } else { } rcu_read_unlock___7(); return (ds); } } enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn ) { enum drbd_conns conn ; struct drbd_conf *mdev ; int vnr ; void *tmp ; enum drbd_conns __min1 ; enum drbd_conns __min2 ; void *tmp___0 ; { conn = C_MASK; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52132; ldv_52131: __min1 = conn; __min2 = (enum drbd_conns )mdev->state.ldv_49522.conn; conn = (enum drbd_conns )((unsigned int )__min1 < (unsigned int )__min2 ? (unsigned int )__min1 : (unsigned int )__min2); vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52132: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52131; } else { } rcu_read_unlock___7(); return (conn); } } static bool no_peer_wf_report_params(struct drbd_tconn *tconn ) { struct drbd_conf *mdev ; int vnr ; bool rv ; void *tmp ; void *tmp___0 ; { rv = 1; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52142; ldv_52141: ; if ((unsigned int )*((unsigned short *)mdev + 374UL) == 144U) { rv = 0; goto ldv_52140; } else { } vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52142: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52141; } else { } ldv_52140: rcu_read_unlock___7(); return (rv); } } static int cl_wide_st_chg(struct drbd_conf *mdev , union drbd_state os , union drbd_state ns ) { { return ((((((int )os.ldv_40024.conn > 9 && (int )ns.ldv_40024.conn > 9) && (((((unsigned int )*((unsigned char *)(& os) + 0UL) != 1U && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U) || ((unsigned int )*((unsigned short *)(& os) + 0UL) != 192U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 192U)) || ((unsigned int )*((unsigned short *)(& os) + 0UL) != 176U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 176U)) || ((unsigned int )*((unsigned char *)(& os) + 1UL) != 4U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 4U))) || ((int )os.ldv_40024.conn > 9 && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 16U)) || ((unsigned int )*((unsigned short *)(& os) + 0UL) == 160U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U)) || ((unsigned int )*((unsigned short *)(& os) + 0UL) == 160U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 144U)); } } static union drbd_state apply_mask_val(union drbd_state os , union drbd_state mask , union drbd_state val ) { union drbd_state ns ; { ns.i = (os.i & ~ mask.i) | val.i; return (ns); } } enum drbd_state_rv drbd_change_state(struct drbd_conf *mdev , enum chg_state_flags f , union drbd_state mask , union drbd_state val ) { unsigned long flags ; union drbd_state ns ; enum drbd_state_rv rv ; raw_spinlock_t *tmp ; union drbd_state tmp___0 ; { tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); tmp___0 = drbd_read_state(mdev); ns = apply_mask_val(tmp___0, mask, val); rv = _drbd_set_state(mdev, ns, f, 0); spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); return (rv); } } void drbd_force_state(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val ) { { 
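/* drbd_force_state is drbd_change_state() with CS_HARD, which in the
 * upstream driver means the new state is applied even where validation
 * would normally reject the transition.  All of these entry points share
 * the mask/val convention implemented by apply_mask_val() above:
 *
 *     ns.i = (os.i & ~mask.i) | val.i;
 *
 * mask selects which bitfields of the packed drbd_state union change and
 * val supplies their new contents.  cl_wide_st_chg() above decides whether
 * a transition must instead take the cluster-wide two-phase path in
 * drbd_req_state() below: send the request to the peer with
 * drbd_send_state_req(), sleep on mdev->state_wait until _req_st_cond()
 * sees the peer's verdict (or a concurrent local change), then commit with
 * _drbd_set_state(). */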
drbd_change_state(mdev, CS_HARD, mask, val); return; } } static enum drbd_state_rv _req_st_cond(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val ) { union drbd_state os ; union drbd_state ns ; unsigned long flags ; enum drbd_state_rv rv ; int tmp ; int tmp___0 ; raw_spinlock_t *tmp___1 ; union drbd_state tmp___2 ; int tmp___3 ; { tmp = test_and_clear_bit(3, (unsigned long volatile *)(& mdev->flags)); if (tmp != 0) { return (SS_CW_SUCCESS); } else { } tmp___0 = test_and_clear_bit(4, (unsigned long volatile *)(& mdev->flags)); if (tmp___0 != 0) { return (SS_CW_FAILED_BY_PEER); } else { } tmp___1 = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp___1); os = drbd_read_state(mdev); tmp___2 = apply_mask_val(os, mask, val); ns = sanitize_state(mdev, tmp___2, 0); rv = is_valid_transition(os, ns); if ((int )rv > 0) { rv = SS_UNKNOWN_ERROR; } else { } tmp___3 = cl_wide_st_chg(mdev, os, ns); if (tmp___3 == 0) { rv = SS_CW_NO_NEED; } else { } if ((int )rv == 0) { rv = is_valid_state(mdev, ns); if ((int )rv > 0) { rv = is_valid_soft_transition(os, ns, mdev->tconn); if ((int )rv > 0) { rv = SS_UNKNOWN_ERROR; } else { } } else { } } else { } spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); return (rv); } } static enum drbd_state_rv drbd_req_state(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val , enum chg_state_flags f ) { struct completion done ; unsigned long flags ; union drbd_state os ; union drbd_state ns ; enum drbd_state_rv rv ; raw_spinlock_t *tmp ; union drbd_state tmp___0 ; int tmp___1 ; wait_queue_t __wait ; struct task_struct *tmp___2 ; raw_spinlock_t *tmp___3 ; union drbd_state tmp___4 ; int tmp___5 ; struct task_struct *tmp___6 ; { init_completion(& done); if (((unsigned int )f & 8U) != 0U) { ldv_mutex_lock_348(mdev->state_mutex); } else { } tmp = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp); os = drbd_read_state(mdev); tmp___0 = apply_mask_val(os, mask, val); ns = sanitize_state(mdev, tmp___0, 0); rv = is_valid_transition(os, ns); if ((int )rv <= 0) { spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); goto abort; } else { } tmp___5 = cl_wide_st_chg(mdev, os, ns); if (tmp___5 != 0) { rv = is_valid_state(mdev, ns); if ((int )rv == 1) { rv = is_valid_soft_transition(os, ns, mdev->tconn); } else { } spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); if ((int )rv <= 0) { if (((unsigned int )f & 2U) != 0U) { print_st_err(mdev, os, ns, (int )rv); } else { } goto abort; } else { } tmp___1 = drbd_send_state_req(mdev, mask, val); if (tmp___1 != 0) { rv = SS_CW_FAILED_BY_PEER; if (((unsigned int )f & 2U) != 0U) { print_st_err(mdev, os, ns, (int )rv); } else { } goto abort; } else { } rv = _req_st_cond(mdev, mask, val); if ((int )rv != 0) { goto ldv_52198; } else { } tmp___2 = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp___2; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_52201: prepare_to_wait(& mdev->state_wait, & __wait, 2); rv = _req_st_cond(mdev, mask, val); if ((int )rv != 0) { goto ldv_52200; } else { } schedule(); goto ldv_52201; ldv_52200: finish_wait(& mdev->state_wait, & __wait); ldv_52198: ; if ((int )rv <= 0) { if (((unsigned int )f & 2U) != 0U) { print_st_err(mdev, os, ns, (int )rv); } else { } goto abort; } else { } tmp___3 = spinlock_check(& (mdev->tconn)->req_lock); flags = _raw_spin_lock_irqsave(tmp___3); tmp___4 = drbd_read_state(mdev); ns = 
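/* NOTE: the prepare_to_wait()/schedule()/finish_wait() loop above is the
 * preprocessed expansion of
 * wait_event(mdev->state_wait, _req_st_cond(mdev, mask, val)).
 * Once the peer has answered the cluster-wide request, req_lock is
 * retaken, the current state is re-read, and mask/val are applied again
 * before the change is committed with _drbd_set_state(). */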
apply_mask_val(tmp___4, mask, val); rv = _drbd_set_state(mdev, ns, f, & done); } else { rv = _drbd_set_state(mdev, ns, f, & done); } spin_unlock_irqrestore(& (mdev->tconn)->req_lock, flags); if (((unsigned int )f & 4U) != 0U && (int )rv == 1) { tmp___6 = get_current(); if ((unsigned long )tmp___6 == (unsigned long )(mdev->tconn)->worker.task) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( current != mdev->tconn->worker.task ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 455); } else { } wait_for_completion(& done); } else { } abort: ; if (((unsigned int )f & 8U) != 0U) { ldv_mutex_unlock_349(mdev->state_mutex); } else { } return (rv); } } enum drbd_state_rv _drbd_request_state(struct drbd_conf *mdev , union drbd_state mask , union drbd_state val , enum chg_state_flags f ) { enum drbd_state_rv rv ; wait_queue_t __wait ; struct task_struct *tmp ; { rv = drbd_req_state(mdev, mask, val, f); if ((int )rv != -18) { goto ldv_52212; } else { } tmp = get_current(); __wait.flags = 0U; __wait.private = (void *)tmp; __wait.func = & autoremove_wake_function; __wait.task_list.next = & __wait.task_list; __wait.task_list.prev = & __wait.task_list; ldv_52215: prepare_to_wait(& mdev->state_wait, & __wait, 2); rv = drbd_req_state(mdev, mask, val, f); if ((int )rv != -18) { goto ldv_52214; } else { } schedule(); goto ldv_52215; ldv_52214: finish_wait(& mdev->state_wait, & __wait); ldv_52212: ; return (rv); } } static void print_st(struct drbd_conf *mdev , char *name , union drbd_state ns ) { bool tmp ; char const *tmp___0 ; char const *tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; char const *tmp___4 ; { tmp = is_susp(ns); tmp___0 = drbd_disk_str((enum drbd_disk_state )ns.ldv_40024.pdsk); tmp___1 = drbd_disk_str((enum drbd_disk_state )ns.ldv_40024.disk); tmp___2 = drbd_role_str((enum drbd_role )ns.ldv_40024.peer); tmp___3 = drbd_role_str((enum drbd_role )ns.ldv_40024.role); tmp___4 = drbd_conn_str((enum drbd_conns )ns.ldv_40024.conn); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n", name, tmp___4, tmp___3, tmp___2, tmp___1, tmp___0, (int )tmp ? 115 : 114, (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U ? 97 : 45, (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U ? 112 : 45, (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U ? 117 : 45, (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U ? 70 : 45, (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U ? 
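/* NOTE: the %c arguments of print_st() decode as ASCII: 115='s'/114='r'
 * (suspended vs. running), 97='a', 112='p', 117='u', 70='F', 78='N',
 * 45='-'. In the driver these test five distinct bitfields (aftr_isp,
 * peer_isp, user_isp, susp_fen, susp_nod); the identical-looking byte-2
 * reads here are CIL's flattened bitfield accesses. */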
78 : 45); return; } } void print_st_err(struct drbd_conf *mdev , union drbd_state os , union drbd_state ns , enum drbd_state_rv err ) { char const *tmp ; { if ((int )err == -18) { return; } else { } tmp = drbd_set_st_err_str(err); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "State change failed: %s\n", tmp); print_st(mdev, (char *)" state", os); print_st(mdev, (char *)"wanted", ns); return; } } static long print_state_change(char *pb , union drbd_state os , union drbd_state ns , enum chg_state_flags flags ) { char *pbp ; char const *tmp ; char const *tmp___0 ; int tmp___1 ; char const *tmp___2 ; char const *tmp___3 ; int tmp___4 ; char const *tmp___5 ; char const *tmp___6 ; int tmp___7 ; char const *tmp___8 ; char const *tmp___9 ; int tmp___10 ; char const *tmp___11 ; char const *tmp___12 ; int tmp___13 ; { pbp = pb; *pbp = 0; if ((int )ns.ldv_40024.role != (int )os.ldv_40024.role && ((unsigned int )flags & 32U) != 0U) { tmp = drbd_role_str((enum drbd_role )ns.ldv_40024.role); tmp___0 = drbd_role_str((enum drbd_role )os.ldv_40024.role); tmp___1 = sprintf(pbp, "role( %s -> %s ) ", tmp___0, tmp); pbp = pbp + (unsigned long )tmp___1; } else { } if ((int )ns.ldv_40024.peer != (int )os.ldv_40024.peer && ((unsigned int )flags & 64U) != 0U) { tmp___2 = drbd_role_str((enum drbd_role )ns.ldv_40024.peer); tmp___3 = drbd_role_str((enum drbd_role )os.ldv_40024.peer); tmp___4 = sprintf(pbp, "peer( %s -> %s ) ", tmp___3, tmp___2); pbp = pbp + (unsigned long )tmp___4; } else { } if ((int )ns.ldv_40024.conn != (int )os.ldv_40024.conn && ((unsigned int )flags & 128U) != 0U) { tmp___5 = drbd_conn_str((enum drbd_conns )ns.ldv_40024.conn); tmp___6 = drbd_conn_str((enum drbd_conns )os.ldv_40024.conn); tmp___7 = sprintf(pbp, "conn( %s -> %s ) ", tmp___6, tmp___5); pbp = pbp + (unsigned long )tmp___7; } else { } if ((int )ns.ldv_40024.disk != (int )os.ldv_40024.disk && ((unsigned int )flags & 256U) != 0U) { tmp___8 = drbd_disk_str((enum drbd_disk_state )ns.ldv_40024.disk); tmp___9 = drbd_disk_str((enum drbd_disk_state )os.ldv_40024.disk); tmp___10 = sprintf(pbp, "disk( %s -> %s ) ", tmp___9, tmp___8); pbp = pbp + (unsigned long )tmp___10; } else { } if ((int )ns.ldv_40024.pdsk != (int )os.ldv_40024.pdsk && ((unsigned int )flags & 512U) != 0U) { tmp___11 = drbd_disk_str((enum drbd_disk_state )ns.ldv_40024.pdsk); tmp___12 = drbd_disk_str((enum drbd_disk_state )os.ldv_40024.pdsk); tmp___13 = sprintf(pbp, "pdsk( %s -> %s ) ", tmp___12, tmp___11); pbp = pbp + (unsigned long )tmp___13; } else { } return ((long )pbp - (long )pb); } } static void drbd_pr_state_change(struct drbd_conf *mdev , union drbd_state os , union drbd_state ns , enum chg_state_flags flags ) { char pb[300U] ; char *pbp ; long tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { pbp = (char *)(& pb); tmp = print_state_change(pbp, os, ns, (enum chg_state_flags )((unsigned int )flags ^ 992U)); pbp = pbp + (unsigned long )tmp; if ((int )ns.ldv_40024.aftr_isp != (int )os.ldv_40024.aftr_isp) { tmp___0 = sprintf(pbp, "aftr_isp( %d -> %d ) ", (int )os.ldv_40024.aftr_isp, (int )ns.ldv_40024.aftr_isp); pbp = pbp + (unsigned long )tmp___0; } else { } if ((int )ns.ldv_40024.peer_isp != (int )os.ldv_40024.peer_isp) { tmp___1 = sprintf(pbp, "peer_isp( %d -> %d ) ", (int )os.ldv_40024.peer_isp, (int )ns.ldv_40024.peer_isp); pbp = pbp + (unsigned long )tmp___1; } else { } if ((int )ns.ldv_40024.user_isp != (int )os.ldv_40024.user_isp) { tmp___2 = sprintf(pbp, "user_isp( %d -> %d ) ", (int )os.ldv_40024.user_isp, (int )ns.ldv_40024.user_isp); pbp = 
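/* NOTE: print_state_change() above appends one "field( old -> new ) "
 * segment per changed field into the caller's pb[300] buffer, advancing
 * pbp by each sprintf() return value. The 32U..512U guards are per-field
 * bits of enum chg_state_flags (names assumed: CS_DC_ROLE, CS_DC_PEER,
 * CS_DC_CONN, CS_DC_DISK, CS_DC_PDSK); drbd_pr_state_change() XORs flags
 * with 992U -- exactly those five bits -- before the call. */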
pbp + (unsigned long )tmp___2; } else { } if ((unsigned long )((char *)(& pb)) != (unsigned long )pbp) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s\n", (char *)(& pb)); } else { } return; } } static void conn_pr_state_change(struct drbd_tconn *tconn , union drbd_state os , union drbd_state ns , enum chg_state_flags flags ) { char pb[300U] ; char *pbp ; long tmp ; bool tmp___0 ; bool tmp___1 ; int tmp___2 ; bool tmp___3 ; bool tmp___4 ; { pbp = (char *)(& pb); tmp = print_state_change(pbp, os, ns, flags); pbp = pbp + (unsigned long )tmp; tmp___3 = is_susp(ns); tmp___4 = is_susp(os); if ((int )tmp___3 != (int )tmp___4 && ((unsigned int )flags & 1024U) != 0U) { tmp___0 = is_susp(ns); tmp___1 = is_susp(os); tmp___2 = sprintf(pbp, "susp( %d -> %d ) ", (int )tmp___1, (int )tmp___0); pbp = pbp + (unsigned long )tmp___2; } else { } if ((unsigned long )((char *)(& pb)) != (unsigned long )pbp) { printk("\016d-con %s: %s\n", tconn->name, (char *)(& pb)); } else { } return; } } static enum drbd_state_rv is_valid_state(struct drbd_conf *mdev , union drbd_state ns ) { enum drbd_fencing_p fp ; enum drbd_state_rv rv ; struct net_conf *nc ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; struct net_conf *_________p1___0 ; bool __warned___0 ; int tmp___2 ; int tmp___3 ; enum drbd_role tmp___4 ; { rv = 1; rcu_read_lock___7(); fp = FP_DONT_CARE; tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 != 0) { _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 607, "suspicious rcu_dereference_check() usage"); } else { } } else { } fp = (enum drbd_fencing_p )_________p1->fencing; put_ldev(mdev); } else { } _________p1___0 = *((struct net_conf * volatile *)(& (mdev->tconn)->net_conf)); tmp___2 = debug_lockdep_rcu_enabled(); if (tmp___2 != 0 && ! 
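/* NOTE: the _________p1/__warned/debug_lockdep_rcu_enabled()/
 * lockdep_rcu_suspicious() pattern here -- and throughout the rest of the
 * file -- is the expansion of rcu_dereference() with CONFIG_PROVE_RCU
 * enabled: it complains once per call site if the pointer is
 * dereferenced outside an RCU read-side critical section. */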
__warned___0) { tmp___3 = rcu_read_lock_held(); if (tmp___3 == 0 && 1) { __warned___0 = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 611, "suspicious rcu_dereference_check() usage"); } else { } } else { } nc = _________p1___0; if ((unsigned long )nc != (unsigned long )((struct net_conf *)0)) { if ((int )((signed char )nc->two_primaries) == 0 && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U) { if ((unsigned int )*((unsigned char *)(& ns) + 0UL) == 4U) { rv = SS_TWO_PRIMARIES; } else { tmp___4 = conn_highest_peer(mdev->tconn); if ((unsigned int )tmp___4 == 1U) { rv = SS_O_VOL_PEER_PRI; } else { } } } else { } } else { } if ((int )rv <= 0) { } else if ((unsigned int )*((unsigned char *)(& ns) + 0UL) == 2U && mdev->open_cnt != 0) { rv = SS_DEVICE_IN_USE; } else if (((unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U && (int )ns.ldv_40024.conn <= 9) && (int )ns.ldv_40024.disk <= 7) { rv = SS_NO_UP_TO_DATE_DISK; } else if ((((int )fp > 0 && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U) && (int )ns.ldv_40024.conn <= 9) && (int )ns.ldv_40024.pdsk > 5) { rv = SS_PRIMARY_NOP; } else if (((unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U && (int )ns.ldv_40024.disk <= 4) && (int )ns.ldv_40024.pdsk <= 4) { rv = SS_NO_UP_TO_DATE_DISK; } else if ((int )ns.ldv_40024.conn > 10 && (int )ns.ldv_40024.disk <= 3) { rv = SS_NO_LOCAL_DISK; } else if ((int )ns.ldv_40024.conn > 10 && (int )ns.ldv_40024.pdsk <= 3) { rv = SS_NO_REMOTE_DISK; } else if (((int )ns.ldv_40024.conn > 10 && (int )ns.ldv_40024.disk <= 7) && (int )ns.ldv_40024.pdsk <= 7) { rv = SS_NO_UP_TO_DATE_DISK; } else if (((((unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 208U) || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 256U) || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 320U) && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 10U) { rv = SS_CONNECTED_OUTDATES; } else if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 304U) && (int )((signed char )nc->verify_alg[0]) == 0) { rv = SS_NO_VERIFY_ALG; } else if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 304U) && (mdev->tconn)->agreed_pro_version <= 87) { rv = SS_NOT_SUPPORTED; } else if ((int )ns.ldv_40024.conn > 9 && *((unsigned int *)(& ns) + 0UL) == 49152U) { rv = SS_CONNECTED_OUTDATES; } else { } rcu_read_unlock___7(); return (rv); } } static enum drbd_state_rv is_valid_soft_transition(union drbd_state os , union drbd_state ns , struct drbd_tconn *tconn ) { enum drbd_state_rv rv ; int tmp ; { rv = 1; if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 192U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 176U) && (int )os.ldv_40024.conn > 10) { rv = SS_RESYNC_RUNNING; } else { } if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 16U && (unsigned int )*((unsigned short *)(& os) + 0UL) == 0U) { rv = SS_ALREADY_STANDALONE; } else { } if ((int )ns.ldv_40024.disk > 1 && (unsigned int )*((unsigned char *)(& os) + 1UL) == 0U) { rv = SS_IS_DISKLESS; } else { } if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 128U && (int )os.ldv_40024.conn <= 1) { rv = SS_NO_NET_CONFIG; } else { } if (((unsigned int )*((unsigned char *)(& ns) + 1UL) == 10U && (int )os.ldv_40024.disk 
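/* NOTE (assumption): the magic constants compared against the low word
 * of union drbd_state appear to encode "conn == <value>" with the
 * connection state left-shifted by 4 past the role/peer bits: 16 = 1<<4
 * (C_DISCONNECTING), 32 = C_UNCONNECTED, 128 = C_WF_CONNECTION,
 * 144 = C_WF_REPORT_PARAMS, 160 = C_CONNECTED, 176/192 =
 * C_STARTING_SYNC_S/T, 256/272 = C_SYNC_SOURCE/TARGET, and
 * 288/304 = C_VERIFY_S/T. */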
<= 4) && (unsigned int )*((unsigned char *)(& os) + 1UL) != 2U) { rv = SS_LOWER_THAN_OUTDATED; } else { } if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 16U && (unsigned int )*((unsigned short *)(& os) + 0UL) == 32U) { rv = SS_IN_TRANSIENT_STATE; } else { } tmp = constant_test_bit(10U, (unsigned long const volatile *)(& tconn->flags)); if (tmp != 0 && ((unsigned int )*((unsigned short *)(& os) + 0UL) != 144U && ((unsigned int )*((unsigned short *)(& ns) + 0UL) != 144U || (unsigned int )*((unsigned short *)(& os) + 0UL) != 128U))) { rv = SS_IN_TRANSIENT_STATE; } else { } if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 304U) && (int )os.ldv_40024.conn <= 9) { rv = SS_NEED_CONNECTION; } else { } if ((((unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 304U) && (int )ns.ldv_40024.conn != (int )os.ldv_40024.conn) && (int )os.ldv_40024.conn > 10) { rv = SS_RESYNC_RUNNING; } else { } if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 176U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 192U) && (int )os.ldv_40024.conn <= 9) { rv = SS_NEED_CONNECTION; } else { } if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 272U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 256U) && (int )os.ldv_40024.conn <= 8) { rv = SS_NEED_CONNECTION; } else { } return (rv); } } static enum drbd_state_rv is_valid_conn_transition(enum drbd_conns oc , enum drbd_conns nc ) { { if ((unsigned int )oc == (unsigned int )nc) { return (SS_NOTHING_TO_DO); } else { } if ((unsigned int )oc == 0U && (unsigned int )nc == 1U) { return (SS_ALREADY_STANDALONE); } else { } if ((unsigned int )oc == 0U && (unsigned int )nc != 2U) { return (SS_NEED_CONNECTION); } else { } if ((unsigned int )oc <= 8U && (unsigned int )nc > 9U) { return (SS_NEED_CONNECTION); } else { } if ((((unsigned int )oc > 2U && (unsigned int )oc <= 7U) && (unsigned int )nc != 2U) && (unsigned int )nc != 1U) { return (SS_IN_TRANSIENT_STATE); } else { } if ((unsigned int )oc == 1U && (unsigned int )nc != 0U) { return (SS_IN_TRANSIENT_STATE); } else { } return (SS_SUCCESS); } } static enum drbd_state_rv is_valid_transition(union drbd_state os , union drbd_state ns ) { enum drbd_state_rv rv ; { rv = is_valid_conn_transition((enum drbd_conns )os.ldv_40024.conn, (enum drbd_conns )ns.ldv_40024.conn); if ((unsigned int )*((unsigned char *)(& ns) + 1UL) == 4U && (unsigned int )*((unsigned char *)(& os) + 1UL) == 0U) { rv = SS_IS_DISKLESS; } else { } return (rv); } } static void print_sanitize_warnings(struct drbd_conf *mdev , enum sanitize_state_warnings warn ) { char const *msg_table[6U] ; { msg_table[0] = ""; msg_table[1] = "Online-verify aborted."; msg_table[2] = "Resync aborted."; msg_table[3] = "Connection lost while negotiating, no data!"; msg_table[4] = "Implicitly upgraded disk"; msg_table[5] = "Implicitly upgraded pdsk"; if ((unsigned int )warn != 0U) { dev_warn((struct device const *)(& (mdev->vdisk)->part0.__dev), "%s\n", msg_table[(unsigned int )warn]); } else { } return; } } static union drbd_state sanitize_state(struct drbd_conf *mdev , union drbd_state ns , enum sanitize_state_warnings *warn ) { enum drbd_fencing_p fp ; enum drbd_disk_state disk_min ; enum drbd_disk_state disk_max ; enum drbd_disk_state pdsk_min ; enum drbd_disk_state pdsk_max ; struct disk_conf *_________p1 ; bool __warned ; int tmp ; int tmp___0 ; int tmp___1 ; int tmp___2 ; { if ((unsigned long )warn != (unsigned long )((enum 
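/* NOTE: sanitize_state() coerces a requested state into a consistent
 * one: while disconnected (conn <= 9, C_WF_REPORT_PARAMS) the peer role
 * is cleared and pdsk forced to 6 (D_UNKNOWN); a running resync/verify
 * falls back to 10 (C_CONNECTED) when either disk fails; and the switch
 * below clamps disk/pdsk into a [min,max] window per connection state
 * (disk values assumed from the drbd enum order: 0=D_DISKLESS ...
 * 4=D_INCONSISTENT, 5=D_OUTDATED, 6=D_UNKNOWN, 7=D_CONSISTENT,
 * 8=D_UP_TO_DATE). */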
sanitize_state_warnings *)0)) { *warn = NO_WARNING; } else { } fp = FP_DONT_CARE; tmp___1 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___1 != 0) { rcu_read_lock___7(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp = debug_lockdep_rcu_enabled(); if (tmp != 0 && ! __warned) { tmp___0 = rcu_read_lock_held(); if (tmp___0 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 819, "suspicious rcu_dereference_check() usage"); } else { } } else { } fp = (enum drbd_fencing_p )_________p1->fencing; rcu_read_unlock___7(); put_ldev(mdev); } else { } if ((int )ns.ldv_40024.conn <= 9) { ns.ldv_40024.peer_isp = 0U; ns.ldv_40024.peer = 0U; if ((int )ns.ldv_40024.pdsk > 6 || (int )ns.ldv_40024.pdsk <= 3) { ns.ldv_40024.pdsk = 6U; } else { } } else { } if (((unsigned int )*((unsigned short *)(& ns) + 0UL) == 0U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 0U) && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 2U) { ns.ldv_40024.aftr_isp = 0U; } else { } if ((int )ns.ldv_40024.conn > 10 && ((int )ns.ldv_40024.disk <= 2 || (int )ns.ldv_40024.pdsk <= 2)) { if ((unsigned long )warn != (unsigned long )((enum sanitize_state_warnings *)0)) { *warn = (unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 304U ? ABORTED_ONLINE_VERIFY : ABORTED_RESYNC; } else { } ns.ldv_40024.conn = 10U; } else { } if ((int )ns.ldv_40024.conn <= 9 && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 6U) { tmp___2 = _get_ldev_if_state(mdev, D_NEGOTIATING); if (tmp___2 != 0) { if (mdev->ed_uuid == (mdev->ldev)->md.uuid[0]) { ns.ldv_40024.disk = mdev->new_state_tmp.ldv_40024.disk; ns.ldv_40024.pdsk = mdev->new_state_tmp.ldv_40024.pdsk; } else { if ((unsigned long )warn != (unsigned long )((enum sanitize_state_warnings *)0)) { *warn = CONNECTION_LOST_NEGOTIATING; } else { } ns.ldv_40024.disk = 0U; ns.ldv_40024.pdsk = 6U; } put_ldev(mdev); } else { } } else { } if ((int )ns.ldv_40024.conn > 9 && (int )ns.ldv_40024.conn <= 21) { if ((unsigned int )*((unsigned char *)(& ns) + 1UL) == 14U || (unsigned int )*((unsigned char *)(& ns) + 1UL) == 10U) { ns.ldv_40024.disk = 8U; } else { } if (*((unsigned int *)(& ns) + 0UL) == 57344U || *((unsigned int *)(& ns) + 0UL) == 40960U) { ns.ldv_40024.pdsk = 8U; } else { } } else { } disk_min = D_DISKLESS; disk_max = D_UP_TO_DATE; pdsk_min = D_INCONSISTENT; pdsk_max = D_UNKNOWN; switch ((unsigned int )ns.ldv_40024.conn) { case 14U: ; case 21U: ; case 12U: ; case 15U: ; case 23U: disk_min = D_INCONSISTENT; disk_max = D_OUTDATED; pdsk_min = D_UP_TO_DATE; pdsk_max = D_UP_TO_DATE; goto ldv_52301; case 18U: ; case 19U: disk_min = D_UP_TO_DATE; disk_max = D_UP_TO_DATE; pdsk_min = D_UP_TO_DATE; pdsk_max = D_UP_TO_DATE; goto ldv_52301; case 10U: disk_min = D_DISKLESS; disk_max = D_UP_TO_DATE; pdsk_min = D_DISKLESS; pdsk_max = D_UP_TO_DATE; goto ldv_52301; case 13U: ; case 20U: ; case 11U: ; case 22U: disk_min = D_UP_TO_DATE; disk_max = D_UP_TO_DATE; pdsk_min = D_INCONSISTENT; pdsk_max = D_CONSISTENT; goto ldv_52301; case 17U: disk_min = D_INCONSISTENT; disk_max = D_INCONSISTENT; pdsk_min = D_UP_TO_DATE; pdsk_max = D_UP_TO_DATE; goto ldv_52301; case 16U: disk_min = D_UP_TO_DATE; disk_max = D_UP_TO_DATE; pdsk_min = D_INCONSISTENT; pdsk_max = D_INCONSISTENT; goto ldv_52301; case 0U: ; case 1U: ; case 2U: ; case 
3U: ; case 4U: ; case 5U: ; case 6U: ; case 7U: ; case 8U: ; case 9U: ; case 31U: ; goto ldv_52301; } ldv_52301: ; if ((unsigned int )ns.ldv_40024.disk > (unsigned int )disk_max) { ns.ldv_40024.disk = (unsigned char )disk_max; } else { } if ((unsigned int )ns.ldv_40024.disk < (unsigned int )disk_min) { if ((unsigned long )warn != (unsigned long )((enum sanitize_state_warnings *)0)) { *warn = IMPLICITLY_UPGRADED_DISK; } else { } ns.ldv_40024.disk = (unsigned char )disk_min; } else { } if ((unsigned int )ns.ldv_40024.pdsk > (unsigned int )pdsk_max) { ns.ldv_40024.pdsk = (unsigned char )pdsk_max; } else { } if ((unsigned int )ns.ldv_40024.pdsk < (unsigned int )pdsk_min) { if ((unsigned long )warn != (unsigned long )((enum sanitize_state_warnings *)0)) { *warn = IMPLICITLY_UPGRADED_PDSK; } else { } ns.ldv_40024.pdsk = (unsigned char )pdsk_min; } else { } if ((int )fp == 2 && (((unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U && (int )ns.ldv_40024.conn <= 9) && (int )ns.ldv_40024.pdsk > 5)) { ns.ldv_40024.susp_fen = 1U; } else { } if ((mdev->tconn)->res_opts.on_no_data == 1U && (((unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U && (int )ns.ldv_40024.disk <= 7) && (int )ns.ldv_40024.pdsk <= 7)) { ns.ldv_40024.susp_nod = 1U; } else { } if (((unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U || (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U) || (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U) { if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 256U) { ns.ldv_40024.conn = 20U; } else { } if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 272U) { ns.ldv_40024.conn = 21U; } else { } } else { if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 320U) { ns.ldv_40024.conn = 16U; } else { } if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 336U) { ns.ldv_40024.conn = 17U; } else { } } return (ns); } } void drbd_resume_al(struct drbd_conf *mdev ) { int tmp ; { tmp = test_and_clear_bit(18, (unsigned long volatile *)(& mdev->flags)); if (tmp != 0) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Resumed AL updates\n"); } else { } return; } } static void set_ov_position(struct drbd_conf *mdev , enum drbd_conns cs ) { unsigned long bit ; { if ((mdev->tconn)->agreed_pro_version <= 89) { mdev->ov_start_sector = 0UL; } else { } mdev->rs_total = drbd_bm_bits(mdev); mdev->ov_position = 0UL; if ((unsigned int )cs == 19U) { mdev->ov_start_sector = 0xffffffffffffffffUL; } else { bit = mdev->ov_start_sector >> 3; if (mdev->rs_total <= bit) { mdev->ov_start_sector = (mdev->rs_total - 1UL) << 3; mdev->rs_total = 1UL; } else { mdev->rs_total = mdev->rs_total - bit; } mdev->ov_position = mdev->ov_start_sector; } mdev->ov_left = mdev->rs_total; return; } } enum drbd_state_rv __drbd_set_state(struct drbd_conf *mdev , union drbd_state ns , enum chg_state_flags flags , struct completion *done ) { union drbd_state os ; enum drbd_state_rv rv ; enum sanitize_state_warnings ssw ; struct after_state_chg_work *ascw ; enum drbd_state_rv tmp ; bool tmp___0 ; unsigned long tmp___1 ; unsigned long now ; int i ; u32 mdf ; int tmp___2 ; int tmp___3 ; void *tmp___4 ; { rv = 1; os = drbd_read_state(mdev); ns = sanitize_state(mdev, ns, & ssw); if (ns.i == os.i) { return (SS_NOTHING_TO_DO); } else { } rv = is_valid_transition(os, ns); if ((int )rv <= 0) { return (rv); } else { } if (((unsigned int )flags & 1U) == 0U) { rv = is_valid_state(mdev, ns); if ((int )rv <= 0) { tmp = is_valid_state(mdev, os); if ((int )tmp == (int )rv) { rv = is_valid_soft_transition(os, ns, 
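/* NOTE: __drbd_set_state() first short-circuits identical states
 * (SS_NOTHING_TO_DO), then -- unless CS_HARD (bit 0) is set -- validates
 * the target with is_valid_state() and the step itself with
 * is_valid_soft_transition(); the seemingly redundant double call here
 * appears only to decide which error code is reported when the old
 * state was itself already invalid. */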
mdev->tconn); } else { rv = is_valid_soft_transition(os, ns, mdev->tconn); } } else { } } else { } if ((int )rv <= 0) { if (((unsigned int )flags & 2U) != 0U) { print_st_err(mdev, os, ns, rv); } else { } return (rv); } else { } print_sanitize_warnings(mdev, ssw); drbd_pr_state_change(mdev, os, ns, flags); if (((unsigned int )flags & 1024U) == 0U) { conn_pr_state_change(mdev->tconn, os, ns, (enum chg_state_flags )(((unsigned int )flags & 4294965279U) | 1024U)); } else { } if (((unsigned int )*((unsigned char *)(& os) + 1UL) != 4U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 4U) || ((unsigned int )*((unsigned char *)(& os) + 1UL) != 0U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 0U)) { atomic_inc(& mdev->local_cnt); } else { } mdev->state.i = ns.i; (mdev->tconn)->susp = ns.ldv_40024.susp; (mdev->tconn)->susp_nod = ns.ldv_40024.susp_nod; (mdev->tconn)->susp_fen = ns.ldv_40024.susp_fen; if ((unsigned int )*((unsigned char *)(& os) + 1UL) == 2U && (int )ns.ldv_40024.disk > 2) { drbd_print_uuids(mdev, "attached to UUIDs"); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 144U && (unsigned int )*((unsigned short *)(& ns) + 0UL) != 144U) { tmp___0 = no_peer_wf_report_params(mdev->tconn); if ((int )tmp___0) { clear_bit(10, (unsigned long volatile *)(& (mdev->tconn)->flags)); } else { } } else { } __wake_up(& mdev->misc_wait, 3U, 1, 0); __wake_up(& mdev->state_wait, 3U, 1, 0); __wake_up(& (mdev->tconn)->ping_wait, 3U, 1, 0); if (((unsigned int )*((unsigned short *)(& os) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 304U) && (int )ns.ldv_40024.conn <= 10) { tmp___1 = drbd_bm_bits(mdev); mdev->ov_start_sector = (tmp___1 - mdev->ov_left) << 3; if (mdev->ov_left != 0UL) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Online Verify reached sector %llu\n", (unsigned long long )mdev->ov_start_sector); } else { } } else { } if (((unsigned int )*((unsigned short *)(& os) + 0UL) == 336U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 320U) && ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 272U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 256U)) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Syncer continues.\n"); mdev->rs_paused = mdev->rs_paused + (unsigned long )((long )jiffies - (long )mdev->rs_mark_time[mdev->rs_last_mark]); if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 272U) { mod_timer(& mdev->resync_timer, jiffies); } else { } } else { } if (((unsigned int )*((unsigned short *)(& os) + 0UL) == 272U || (unsigned int )*((unsigned short *)(& os) + 0UL) == 256U) && ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 336U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 320U)) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Resync suspended\n"); mdev->rs_mark_time[mdev->rs_last_mark] = jiffies; } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 160U && ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 288U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 304U)) { now = jiffies; set_ov_position(mdev, (enum drbd_conns )ns.ldv_40024.conn); mdev->rs_start = now; mdev->rs_last_events = 0; mdev->rs_last_sect_ev = 0; mdev->ov_last_oos_size = 0UL; mdev->ov_last_oos_start = 0UL; i = 0; goto ldv_52343; ldv_52342: mdev->rs_mark_left[i] = mdev->ov_left; mdev->rs_mark_time[i] = now; i = i + 1; ldv_52343: ; if (i <= 7) { goto ldv_52342; } else { } drbd_rs_controller_reset(mdev); if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 
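/* NOTE: the bookkeeping above converts bitmap bits to sectors with
 * "<< 3" (one bitmap bit presumably covers 4 KiB, i.e. eight 512-byte
 * sectors), keeps an 8-entry rs_mark_left/rs_mark_time ring for
 * progress and rate estimation, accumulates paused time in rs_paused,
 * and re-arms resync_timer whenever this node continues as verify
 * source or sync target. */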
288U) { _dev_info((struct device const *)(& (mdev->vdisk)->part0.__dev), "Starting Online Verify from sector %llu\n", (unsigned long long )mdev->ov_position); mod_timer(& mdev->resync_timer, jiffies); } else { } } else { } tmp___3 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___3 != 0) { mdf = (mdev->ldev)->md.flags & 4294967176U; mdf = mdf & 4294967167U; tmp___2 = constant_test_bit(5U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___2 != 0) { mdf = mdf | 64U; } else { } if ((unsigned int )*((unsigned char *)mdev + 748UL) == 1U || ((int )mdev->state.ldv_49522.pdsk <= 3 && (unsigned int )*((unsigned char *)mdev + 748UL) == 4U)) { mdf = mdf | 2U; } else { } if ((int )mdev->state.ldv_49522.conn > 9) { mdf = mdf | 4U; } else { } if ((int )mdev->state.ldv_49522.disk > 4) { mdf = mdf | 1U; } else { } if ((int )mdev->state.ldv_49522.disk > 5) { mdf = mdf | 16U; } else { } if ((int )mdev->state.ldv_49522.pdsk <= 5 && (int )mdev->state.ldv_49522.pdsk > 3) { mdf = mdf | 32U; } else { } if ((mdev->ldev)->md.flags != mdf) { (mdev->ldev)->md.flags = mdf; drbd_md_mark_dirty(mdev); } else { } if ((int )os.ldv_40024.disk <= 6 && (int )ns.ldv_40024.disk > 6) { drbd_set_ed_uuid(mdev, (mdev->ldev)->md.uuid[0]); } else { } put_ldev(mdev); } else { } if ((((unsigned int )*((unsigned char *)(& os) + 1UL) == 8U && *((unsigned int *)(& os) + 0UL) == 32768U) && (unsigned int )*((unsigned char *)(& os) + 0UL) == 8U) && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 4U) { set_bit(6U, (unsigned long volatile *)(& mdev->flags)); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) != 16U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 16U) { drbd_thread_stop_nowait(& (mdev->tconn)->receiver); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) != 0U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 0U) { drbd_thread_stop_nowait(& (mdev->tconn)->receiver); } else { } if (((int )os.ldv_40024.conn > 8 && (int )ns.ldv_40024.conn <= 7) && (int )ns.ldv_40024.conn > 2) { drbd_thread_restart_nowait(& (mdev->tconn)->receiver); } else { } if ((int )os.ldv_40024.conn <= 9 && (int )ns.ldv_40024.conn > 9) { drbd_resume_al(mdev); } else { } if (((unsigned int )*((unsigned char *)(& os) + 1UL) == 2U || (unsigned int )*((unsigned char *)(& os) + 1UL) == 6U) && (int )ns.ldv_40024.disk > 3) { mdev->last_reattach_jif = jiffies; } else { } tmp___4 = kmalloc(56UL, 32U); ascw = (struct after_state_chg_work *)tmp___4; if ((unsigned long )ascw != (unsigned long )((struct after_state_chg_work *)0)) { ascw->os = os; ascw->ns = ns; ascw->flags = flags; ascw->w.cb = & w_after_state_ch; ascw->w.ldv_49807.mdev = mdev; ascw->done = done; drbd_queue_work(& (mdev->tconn)->sender_work, & ascw->w); } else { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Could not kmalloc an ascw\n"); } return (rv); } } static int w_after_state_ch(struct drbd_work *w , int unused ) { struct after_state_chg_work *ascw ; struct drbd_work const *__mptr ; struct drbd_conf *mdev ; { __mptr = (struct drbd_work const *)w; ascw = (struct after_state_chg_work *)__mptr; mdev = w->ldv_49807.mdev; after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags); if (((unsigned int )ascw->flags & 4U) != 0U) { if ((unsigned long )ascw->done == (unsigned long )((struct completion *)0)) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( ascw->done != NULL ) in %s:%d\n", (char 
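/* NOTE (assumption): the md.flags recomputation above toggles what are
 * presumably the MDF_* on-disk markers (1=MDF_CONSISTENT,
 * 2=MDF_PRIMARY_IND, 4=MDF_CONNECTED_IND, 16=MDF_WAS_UP_TO_DATE,
 * 32=MDF_PEER_OUT_DATED, 64=MDF_CRASHED_PRIMARY); drbd_md_mark_dirty()
 * is only called when the recomputed mask actually differs from what is
 * stored. */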
*)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 1221); } else { } complete(ascw->done); } else { } kfree((void const *)ascw); return (0); } } static void abw_start_sync(struct drbd_conf *mdev , int rv ) { union drbd_state val ; union drbd_state mask ; union drbd_state val___0 ; union drbd_state mask___0 ; { if (rv != 0) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "Writing the bitmap failed not starting resync.\n"); val.i = 0U; val.ldv_40024.conn = 10U; mask.i = 0U; mask.ldv_40024.conn = 31U; _drbd_request_state(mdev, mask, val, CS_VERBOSE); return; } else { } switch ((int )mdev->state.ldv_49522.conn) { case 12: val___0.i = 0U; val___0.ldv_40024.conn = 15U; mask___0.i = 0U; mask___0.ldv_40024.conn = 31U; _drbd_request_state(mdev, mask___0, val___0, CS_VERBOSE); goto ldv_52367; case 11: drbd_start_resync(mdev, C_SYNC_SOURCE); goto ldv_52367; } ldv_52367: ; return; } } int drbd_bitmap_io_from_worker(struct drbd_conf *mdev , int (*io_fn)(struct drbd_conf * ) , char *why , enum bm_flag flags ) { int rv ; struct task_struct *tmp ; { tmp = get_current(); if ((unsigned long )tmp != (unsigned long )(mdev->tconn)->worker.task) { dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT( current == mdev->tconn->worker.task ) in %s:%d\n", (char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 1253); } else { } set_bit(8U, (unsigned long volatile *)(& mdev->flags)); drbd_bm_lock(mdev, why, flags); rv = (*io_fn)(mdev); drbd_bm_unlock(mdev); drbd_resume_io(mdev); return (rv); } } static void after_state_ch(struct drbd_conf *mdev , union drbd_state os , union drbd_state ns , enum chg_state_flags flags ) { struct sib_info sib ; struct drbd_tconn *tconn ; enum drbd_req_event what ; enum drbd_conns tmp ; enum drbd_disk_state tmp___0 ; union drbd_state __constr_expr_0 ; union drbd_state __constr_expr_1 ; struct drbd_tconn *tconn___0 ; struct drbd_conf *odev ; int vnr ; void *tmp___1 ; void *tmp___2 ; union drbd_state __constr_expr_2 ; union drbd_state __constr_expr_3 ; enum drbd_conns tmp___3 ; int tmp___4 ; int tmp___5 ; int tmp___6 ; int tmp___7 ; int tmp___8 ; enum drbd_io_error_p eh ; int was_io_error ; struct disk_conf *_________p1 ; bool __warned ; int tmp___9 ; int tmp___10 ; int tmp___11 ; char const *tmp___12 ; char const *tmp___13 ; int tmp___14 ; bool tmp___15 ; int tmp___16 ; { sib.sib_reason = SIB_STATE_CHANGE; sib.ldv_50742.ldv_50741.os = os; sib.ldv_50742.ldv_50741.ns = ns; if ((unsigned int )*((unsigned short *)(& os) + 0UL) != 160U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U) { clear_bit(5, (unsigned long volatile *)(& mdev->flags)); if ((unsigned long )mdev->p_uuid != (unsigned long )((u64 *)0)) { *(mdev->p_uuid + 5UL) = *(mdev->p_uuid + 5UL) & 0xfffffffffffffffdULL; } else { } } else { } drbd_bcast_event(mdev, (struct sib_info const *)(& sib)); if ((((unsigned int )*((unsigned char *)(& os) + 0UL) != 1U || (int )os.ldv_40024.disk > 7) || (int )os.ldv_40024.pdsk > 7) && (((unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U && (int )ns.ldv_40024.disk <= 7) && (int )ns.ldv_40024.pdsk <= 7)) { drbd_khelper(mdev, (char *)"pri-on-incon-degr"); } else { } if ((unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U) { tconn = mdev->tconn; what = 
NOTHING; spin_lock_irq(& tconn->req_lock); if ((int )os.ldv_40024.conn <= 9) { tmp = conn_lowest_conn(tconn); if ((unsigned int )tmp > 9U) { what = RESEND; } else { } } else { } if ((unsigned int )*((unsigned char *)(& os) + 1UL) == 2U || (unsigned int )*((unsigned char *)(& os) + 1UL) == 6U) { tmp___0 = conn_lowest_disk(tconn); if ((unsigned int )tmp___0 > 3U) { what = RESTART_FROZEN_DISK_IO; } else { } } else { } if ((unsigned int )*((unsigned char *)tconn + 132UL) != 0U && (unsigned int )what != 28U) { _tl_restart(tconn, what); __constr_expr_0.ldv_40024.role = (unsigned char)0; __constr_expr_0.ldv_40024.peer = (unsigned char)0; __constr_expr_0.ldv_40024.conn = (unsigned char)0; __constr_expr_0.ldv_40024.disk = (unsigned char)0; __constr_expr_0.ldv_40024.pdsk = (unsigned char)0; __constr_expr_0.ldv_40024.susp = (unsigned char)0; __constr_expr_0.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_0.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_0.ldv_40024.user_isp = (unsigned char)0; __constr_expr_0.ldv_40024.susp_nod = 0U; __constr_expr_0.ldv_40024.susp_fen = (unsigned char)0; __constr_expr_0.ldv_40024._pad = (unsigned short)0; __constr_expr_1.ldv_40024.role = (unsigned char)0; __constr_expr_1.ldv_40024.peer = (unsigned char)0; __constr_expr_1.ldv_40024.conn = (unsigned char)0; __constr_expr_1.ldv_40024.disk = (unsigned char)0; __constr_expr_1.ldv_40024.pdsk = (unsigned char)0; __constr_expr_1.ldv_40024.susp = (unsigned char)0; __constr_expr_1.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_1.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_1.ldv_40024.user_isp = (unsigned char)0; __constr_expr_1.ldv_40024.susp_nod = 1U; __constr_expr_1.ldv_40024.susp_fen = (unsigned char)0; __constr_expr_1.ldv_40024._pad = (unsigned short)0; _conn_request_state(tconn, __constr_expr_1, __constr_expr_0, CS_VERBOSE); } else { } spin_unlock_irq(& tconn->req_lock); } else { } if ((unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U) { tconn___0 = mdev->tconn; spin_lock_irq(& tconn___0->req_lock); if ((unsigned int )*((unsigned char *)tconn___0 + 132UL) != 0U) { tmp___3 = conn_lowest_conn(tconn___0); if ((unsigned int )tmp___3 > 9U) { rcu_read_lock___7(); vnr = 0; tmp___1 = idr_get_next(& tconn___0->volumes, & vnr); odev = (struct drbd_conf *)tmp___1; goto ldv_52392; ldv_52391: clear_bit(17, (unsigned long volatile *)(& odev->flags)); vnr = vnr + 1; tmp___2 = idr_get_next(& tconn___0->volumes, & vnr); odev = (struct drbd_conf *)tmp___2; ldv_52392: ; if ((unsigned long )odev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52391; } else { } rcu_read_unlock___7(); _tl_restart(tconn___0, RESEND); __constr_expr_2.ldv_40024.role = (unsigned char)0; __constr_expr_2.ldv_40024.peer = (unsigned char)0; __constr_expr_2.ldv_40024.conn = (unsigned char)0; __constr_expr_2.ldv_40024.disk = (unsigned char)0; __constr_expr_2.ldv_40024.pdsk = (unsigned char)0; __constr_expr_2.ldv_40024.susp = (unsigned char)0; __constr_expr_2.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_2.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_2.ldv_40024.user_isp = (unsigned char)0; __constr_expr_2.ldv_40024.susp_nod = (unsigned char)0; __constr_expr_2.ldv_40024.susp_fen = 0U; __constr_expr_2.ldv_40024._pad = (unsigned short)0; __constr_expr_3.ldv_40024.role = (unsigned char)0; __constr_expr_3.ldv_40024.peer = (unsigned char)0; __constr_expr_3.ldv_40024.conn = (unsigned char)0; __constr_expr_3.ldv_40024.disk = (unsigned char)0; __constr_expr_3.ldv_40024.pdsk = (unsigned char)0; __constr_expr_3.ldv_40024.susp = 
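/* NOTE: the __constr_expr_N blocks are CIL's field-by-field expansion of
 * compound literals -- presumably the driver's NS()-style mask/value
 * pairs for _conn_request_state(). Each pair sets exactly one suspend
 * bit in the mask and zero in the value: susp_nod above (resume after
 * no-data suspension), susp_fen here (clear the fencing suspension once
 * the peer is back, i.e. lowest conn > C_WF_REPORT_PARAMS, and the
 * transfer log has been resent). */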
(unsigned char)0; __constr_expr_3.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_3.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_3.ldv_40024.user_isp = (unsigned char)0; __constr_expr_3.ldv_40024.susp_nod = (unsigned char)0; __constr_expr_3.ldv_40024.susp_fen = 1U; __constr_expr_3.ldv_40024._pad = (unsigned short)0; _conn_request_state(tconn___0, __constr_expr_3, __constr_expr_2, CS_VERBOSE); } else { } } else { } spin_unlock_irq(& tconn___0->req_lock); } else { } if ((((unsigned int )*((unsigned short *)(& os) + 0UL) != 256U && (unsigned int )*((unsigned short *)(& os) + 0UL) != 320U) && ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 256U || (unsigned int )*((unsigned short *)(& ns) + 0UL) == 320U)) && (mdev->tconn)->agreed_pro_version > 95) { tmp___4 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___4 != 0) { drbd_gen_and_send_sync_uuid(mdev); put_ldev(mdev); } else { } } else { } if ((*((unsigned int *)(& os) + 0UL) == 0U && (int )ns.ldv_40024.pdsk > 0) && *((unsigned int *)(& ns) + 0UL) != 49152U) { mdev->rs_total = 0UL; mdev->rs_failed = 0UL; atomic_set(& mdev->rs_pending_cnt, 0); drbd_rs_cancel_all(mdev); drbd_send_uuids(mdev); drbd_send_state(mdev, ns); } else { } if (((unsigned int )*((unsigned short *)(& os) + 0UL) != 208U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 208U) && (unsigned int )*((unsigned short *)mdev + 374UL) == 208U) { drbd_queue_bitmap_io(mdev, & drbd_send_bitmap, 0, (char *)"send_bitmap (WFBitMapS)", BM_LOCKED_TEST_ALLOWED); } else { } if ((((int )os.ldv_40024.pdsk > 3 && *((unsigned int *)(& os) + 0UL) != 49152U) && *((unsigned int *)(& os) + 0UL) != 40960U) && (((int )ns.ldv_40024.pdsk <= 3 || *((unsigned int *)(& ns) + 0UL) == 49152U) || *((unsigned int *)(& ns) + 0UL) == 40960U)) { tmp___6 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___6 != 0) { if ((((unsigned int )*((unsigned char *)(& ns) + 0UL) == 1U || (unsigned int )*((unsigned char *)(& ns) + 0UL) == 4U) && (mdev->ldev)->md.uuid[1] == 0ULL) && (int )ns.ldv_40024.disk > 7) { tmp___5 = drbd_suspended(mdev); if (tmp___5 != 0) { set_bit(17U, (unsigned long volatile *)(& mdev->flags)); } else { drbd_uuid_new_current(mdev); drbd_send_uuids(mdev); } } else { } put_ldev(mdev); } else { } } else { } if ((int )ns.ldv_40024.pdsk <= 3) { tmp___7 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___7 != 0) { if ((((unsigned int )*((unsigned char *)(& os) + 0UL) == 8U && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 4U) && (mdev->ldev)->md.uuid[1] == 0ULL) && (int )ns.ldv_40024.disk > 7) { drbd_uuid_new_current(mdev); drbd_send_uuids(mdev); } else { } if ((unsigned int )*((unsigned char *)(& os) + 0UL) == 4U && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 8U) { drbd_bitmap_io_from_worker(mdev, & drbd_bm_write, (char *)"demote diskless peer", BM_LOCKED_SET_ALLOWED); } else { } put_ldev(mdev); } else { } } else { } if (((unsigned int )*((unsigned char *)(& os) + 0UL) == 1U && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 2U) && (int )mdev->state.ldv_49522.conn <= 10) { tmp___8 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___8 != 0) { drbd_bitmap_io_from_worker(mdev, & drbd_bm_write, (char *)"demote", BM_LOCKED_TEST_ALLOWED); put_ldev(mdev); } else { } } else { } if (((int )ns.ldv_40024.conn > 9 && (unsigned int )*((unsigned char *)(& os) + 1UL) == 2U) && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 6U) { drbd_send_sizes(mdev, 0, 0); drbd_send_uuids(mdev); drbd_send_state(mdev, ns); } else { } if ((int )ns.ldv_40024.conn > 9 && ((int 
)os.ldv_40024.aftr_isp != (int )ns.ldv_40024.aftr_isp || (int )os.ldv_40024.user_isp != (int )ns.ldv_40024.user_isp)) { drbd_send_state(mdev, ns); } else { } if ((((unsigned int )*((unsigned char *)(& os) + 2UL) == 0U && (unsigned int )*((unsigned char *)(& os) + 2UL) == 0U) && (unsigned int )*((unsigned char *)(& os) + 2UL) == 0U) && (((unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U || (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U) || (unsigned int )*((unsigned char *)(& ns) + 2UL) != 0U)) { suspend_other_sg(mdev); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 144U && (int )ns.ldv_40024.conn > 9) { drbd_send_state(mdev, ns); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) != 352U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 352U) { drbd_send_state(mdev, ns); } else { } if (((unsigned int )*((unsigned short *)(& os) + 0UL) != 192U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 192U) || ((unsigned int )*((unsigned short *)(& os) + 0UL) != 176U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 176U)) { drbd_queue_bitmap_io(mdev, & drbd_bmio_set_n_write, & abw_start_sync, (char *)"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED); } else { } if ((((int )os.ldv_40024.conn <= 9 && (int )ns.ldv_40024.conn <= 9) && (int )os.ldv_40024.disk > 4) && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 8U) { drbd_queue_bitmap_io(mdev, & drbd_bmio_set_n_write, 0, (char *)"set_n_write from invalidate", BM_LOCKED_MASK); } else { } if ((unsigned int )*((unsigned char *)(& os) + 1UL) != 4U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 4U) { eh = EP_PASS_ON; was_io_error = 0; if ((unsigned long )mdev->ldev != (unsigned long )((struct drbd_backing_dev *)0)) { rcu_read_lock___7(); _________p1 = *((struct disk_conf * volatile *)(& (mdev->ldev)->disk_conf)); tmp___9 = debug_lockdep_rcu_enabled(); if (tmp___9 != 0 && ! 
__warned) { tmp___10 = rcu_read_lock_held(); if (tmp___10 == 0 && 1) { __warned = 1; lockdep_rcu_suspicious("/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared", 1478, "suspicious rcu_dereference_check() usage"); } else { } } else { } eh = (enum drbd_io_error_p )_________p1->on_io_error; rcu_read_unlock___7(); was_io_error = test_and_clear_bit(12, (unsigned long volatile *)(& mdev->flags)); if (was_io_error != 0 && (unsigned int )eh == 1U) { drbd_khelper(mdev, (char *)"local-io-error"); } else { } tmp___11 = test_and_clear_bit(14, (unsigned long volatile *)(& mdev->flags)); if (tmp___11 != 0) { tl_abort_disk_io(mdev); } else { } if ((unsigned int )*((unsigned char *)mdev + 749UL) != 4U) { tmp___12 = drbd_disk_str((enum drbd_disk_state )mdev->state.ldv_49522.disk); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: disk is %s during detach\n", tmp___12); } else { } if ((int )ns.ldv_40024.conn > 9) { drbd_send_state(mdev, ns); } else { } drbd_rs_cancel_all(mdev); drbd_md_sync(mdev); } else { } put_ldev(mdev); } else { } if ((unsigned int )*((unsigned char *)(& os) + 1UL) != 0U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 0U) { if ((unsigned int )*((unsigned char *)mdev + 749UL) != 0U) { tmp___13 = drbd_disk_str((enum drbd_disk_state )mdev->state.ldv_49522.disk); dev_err((struct device const *)(& (mdev->vdisk)->part0.__dev), "ASSERT FAILED: disk is %s while going diskless\n", tmp___13); } else { } if ((int )ns.ldv_40024.conn > 9) { drbd_send_state(mdev, ns); } else { } put_ldev(mdev); } else { } if (((unsigned int )*((unsigned char *)(& os) + 1UL) == 16U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 8U) && (int )ns.ldv_40024.conn > 9) { drbd_send_state(mdev, ns); } else { } if ((int )ns.ldv_40024.disk > 3 && (int )ns.ldv_40024.pdsk > 3) { tmp___14 = test_and_clear_bit(15, (unsigned long volatile *)(& mdev->flags)); if (tmp___14 != 0) { if ((unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U) { resync_after_online_grow(mdev); } else { } } else { } } else { } if ((((int )os.ldv_40024.conn > 10 && (int )ns.ldv_40024.conn <= 10) || ((unsigned int )*((unsigned char *)(& os) + 2UL) != 0U && (unsigned int )*((unsigned char *)(& ns) + 2UL) == 0U)) || ((unsigned int )*((unsigned char *)(& os) + 2UL) != 0U && (unsigned int )*((unsigned char *)(& ns) + 2UL) == 0U)) { resume_next_sg(mdev); } else { } if (((int )os.ldv_40024.disk <= 7 && (int )os.ldv_40024.conn > 15) && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U) { drbd_send_state(mdev, ns); } else { } if ((unsigned int )*((unsigned short *)(& os) + 0UL) == 288U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 160U) { tmp___15 = verify_can_do_stop_sector(mdev); if ((int )tmp___15) { drbd_send_state(mdev, ns); } else { } } else { } if ((int )os.ldv_40024.conn > 10 && (int )ns.ldv_40024.conn <= 10) { tmp___16 = _get_ldev_if_state(mdev, D_INCONSISTENT); if (tmp___16 != 0) { drbd_queue_bitmap_io(mdev, & drbd_bm_write_copy_pages, 0, (char *)"write from resync_finished", BM_IS_LOCKED); put_ldev(mdev); } else { } } else { } if (((unsigned int )*((unsigned char *)(& ns) + 1UL) == 0U && (unsigned int )*((unsigned short *)(& ns) + 0UL) == 0U) && (unsigned int )*((unsigned char *)(& ns) + 0UL) == 2U) { if ((int )os.ldv_40024.aftr_isp != (int )ns.ldv_40024.aftr_isp) { resume_next_sg(mdev); } else { } } else { } drbd_md_sync(mdev); return; } } static int 
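/* NOTE: w_after_conn_state_ch() handles connection-level side effects:
 * it starts the receiver thread on C_STANDALONE -> C_UNCONNECTED, and on
 * final disconnect detaches tconn->net_conf under the conf_update mutex
 * (the bare memory barrier is the lowering of an
 * rcu_assign_pointer(..., NULL)-style publish), waits out a grace
 * period with synchronize_rcu(), and only then kfree()s the old
 * configuration -- the standard RCU retire pattern for a config object
 * that readers dereference locklessly. */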
w_after_conn_state_ch(struct drbd_work *w , int unused ) { struct after_conn_state_chg_work *acscw ; struct drbd_work const *__mptr ; struct drbd_tconn *tconn ; enum drbd_conns oc ; union drbd_state ns_max ; struct drbd_conf *mdev ; int vnr ; struct net_conf *old_conf ; void *tmp ; int tmp___0 ; void *tmp___1 ; union drbd_state __constr_expr_0 ; union drbd_state __constr_expr_1 ; { __mptr = (struct drbd_work const *)w; acscw = (struct after_conn_state_chg_work *)__mptr; tconn = w->ldv_49807.tconn; oc = acscw->oc; ns_max = acscw->ns_max; kfree((void const *)acscw); if ((unsigned int )oc == 0U && (unsigned int )*((unsigned short *)(& ns_max) + 0UL) == 32U) { drbd_thread_start(& tconn->receiver); } else { } if ((unsigned int )oc == 1U && (unsigned int )*((unsigned short *)(& ns_max) + 0UL) == 0U) { ldv_mutex_lock_350(& tconn->conf_update); old_conf = tconn->net_conf; tconn->my_addr_len = 0; tconn->peer_addr_len = 0; __asm__ volatile ("": : : "memory"); tconn->net_conf = 0; conn_free_crypto(tconn); ldv_mutex_unlock_351(& tconn->conf_update); synchronize_rcu(); kfree((void const *)old_conf); } else { } if ((unsigned int )*((unsigned char *)(& ns_max) + 2UL) != 0U) { if ((int )ns_max.ldv_40024.pdsk <= 5) { rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52421; ldv_52420: tmp___0 = constant_test_bit(17U, (unsigned long const volatile *)(& mdev->flags)); if (tmp___0 != 0) { drbd_uuid_new_current(mdev); clear_bit(17, (unsigned long volatile *)(& mdev->flags)); } else { } vnr = vnr + 1; tmp___1 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___1; ldv_52421: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52420; } else { } rcu_read_unlock___7(); spin_lock_irq(& tconn->req_lock); _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING); __constr_expr_0.ldv_40024.role = (unsigned char)0; __constr_expr_0.ldv_40024.peer = (unsigned char)0; __constr_expr_0.ldv_40024.conn = (unsigned char)0; __constr_expr_0.ldv_40024.disk = (unsigned char)0; __constr_expr_0.ldv_40024.pdsk = (unsigned char)0; __constr_expr_0.ldv_40024.susp = (unsigned char)0; __constr_expr_0.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_0.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_0.ldv_40024.user_isp = (unsigned char)0; __constr_expr_0.ldv_40024.susp_nod = (unsigned char)0; __constr_expr_0.ldv_40024.susp_fen = 0U; __constr_expr_0.ldv_40024._pad = (unsigned short)0; __constr_expr_1.ldv_40024.role = (unsigned char)0; __constr_expr_1.ldv_40024.peer = (unsigned char)0; __constr_expr_1.ldv_40024.conn = (unsigned char)0; __constr_expr_1.ldv_40024.disk = (unsigned char)0; __constr_expr_1.ldv_40024.pdsk = (unsigned char)0; __constr_expr_1.ldv_40024.susp = (unsigned char)0; __constr_expr_1.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_1.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_1.ldv_40024.user_isp = (unsigned char)0; __constr_expr_1.ldv_40024.susp_nod = (unsigned char)0; __constr_expr_1.ldv_40024.susp_fen = 1U; __constr_expr_1.ldv_40024._pad = (unsigned short)0; _conn_request_state(tconn, __constr_expr_1, __constr_expr_0, CS_VERBOSE); spin_unlock_irq(& tconn->req_lock); } else { } } else { } kref_put(& tconn->kref, & conn_destroy); conn_md_sync(tconn); return (0); } } void conn_old_common_state(struct drbd_tconn *tconn , union drbd_state *pcs , enum chg_state_flags *pf ) { enum chg_state_flags flags ; struct drbd_conf *mdev ; int vnr ; int first_vol ; union drbd_dev_state os ; union 
drbd_dev_state cs ; void *tmp ; void *tmp___0 ; { flags = 4294967295L; first_vol = 1; cs.ldv_49522.role = 2U; cs.ldv_49522.peer = 0U; cs.ldv_49522.conn = (unsigned char )tconn->cstate; cs.ldv_49522.disk = 0U; cs.ldv_49522.pdsk = 6U; cs.ldv_49522._unused = (unsigned char)0; cs.ldv_49522.aftr_isp = (unsigned char)0; cs.ldv_49522.peer_isp = (unsigned char)0; cs.ldv_49522.user_isp = (unsigned char)0; cs.ldv_49522._pad = (unsigned short)0; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52438; ldv_52437: os = mdev->state; if (first_vol != 0) { cs = os; first_vol = 0; goto ldv_52436; } else { } if ((int )cs.ldv_49522.role != (int )os.ldv_49522.role) { flags = (enum chg_state_flags )((unsigned int )flags & 4294967263U); } else { } if ((int )cs.ldv_49522.peer != (int )os.ldv_49522.peer) { flags = (enum chg_state_flags )((unsigned int )flags & 4294967231U); } else { } if ((int )cs.ldv_49522.conn != (int )os.ldv_49522.conn) { flags = (enum chg_state_flags )((unsigned int )flags & 4294967167U); } else { } if ((int )cs.ldv_49522.disk != (int )os.ldv_49522.disk) { flags = (enum chg_state_flags )((unsigned int )flags & 4294967039U); } else { } if ((int )cs.ldv_49522.pdsk != (int )os.ldv_49522.pdsk) { flags = (enum chg_state_flags )((unsigned int )flags & 4294966783U); } else { } ldv_52436: vnr = vnr + 1; tmp___0 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___0; ldv_52438: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52437; } else { } rcu_read_unlock___7(); *pf = (enum chg_state_flags )((unsigned int )*pf | 992U); *pf = (enum chg_state_flags )((unsigned int )*pf & (unsigned int )flags); pcs->i = cs.i; return; } } static enum drbd_state_rv conn_is_valid_transition(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val , enum chg_state_flags flags ) { enum drbd_state_rv rv ; union drbd_state ns ; union drbd_state os ; struct drbd_conf *mdev ; int vnr ; void *tmp ; union drbd_state tmp___0 ; enum drbd_state_rv tmp___1 ; void *tmp___2 ; { rv = 1; rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52454; ldv_52453: os = drbd_read_state(mdev); tmp___0 = apply_mask_val(os, mask, val); ns = sanitize_state(mdev, tmp___0, 0); if ((((unsigned int )flags & 2048U) != 0U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 10U) && (int )os.ldv_40024.disk <= 4) { ns.ldv_40024.disk = os.ldv_40024.disk; } else { } if (ns.i == os.i) { goto ldv_52451; } else { } rv = is_valid_transition(os, ns); if ((int )rv <= 0) { goto ldv_52452; } else { } if (((unsigned int )flags & 1U) == 0U) { rv = is_valid_state(mdev, ns); if ((int )rv <= 0) { tmp___1 = is_valid_state(mdev, os); if ((int )tmp___1 == (int )rv) { rv = is_valid_soft_transition(os, ns, tconn); } else { rv = is_valid_soft_transition(os, ns, tconn); } } else { } } else { } if ((int )rv <= 0) { goto ldv_52452; } else { } ldv_52451: vnr = vnr + 1; tmp___2 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___2; ldv_52454: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52453; } else { } ldv_52452: rcu_read_unlock___7(); if ((int )rv <= 0 && ((unsigned int )flags & 2U) != 0U) { print_st_err(mdev, os, ns, rv); } else { } return (rv); } } void conn_set_state(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val , union drbd_state *pns_min , union drbd_state *pns_max , enum chg_state_flags flags 
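/* NOTE: conn_set_state() applies mask/val to every volume and folds the
 * per-volume results into ns_min/ns_max (field-wise min/max across
 * volumes). ns_min is seeded with all-ones field values (role 3, disk
 * and pdsk 15) so the first volume overwrites it, and tconn->cstate is
 * updated when the caller masked the whole conn field (496U, presumably
 * C_MASK << 4). */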
) { union drbd_state ns ; union drbd_state os ; union drbd_state ns_max ; union drbd_state ns_min ; struct drbd_conf *mdev ; enum drbd_state_rv rv ; int vnr ; int number_of_volumes ; void *tmp ; enum drbd_role tmp___0 ; enum drbd_role tmp___1 ; enum drbd_conns __max1 ; enum drbd_conns __max2 ; enum drbd_disk_state __max1___0 ; enum drbd_disk_state __max2___0 ; enum drbd_disk_state __max1___1 ; enum drbd_disk_state __max2___1 ; enum drbd_role tmp___2 ; enum drbd_role tmp___3 ; enum drbd_conns __min1 ; enum drbd_conns __min2 ; enum drbd_disk_state __min1___0 ; enum drbd_disk_state __min2___0 ; enum drbd_disk_state __min1___1 ; enum drbd_disk_state __min2___1 ; void *tmp___4 ; union drbd_state __constr_expr_0 ; { ns_max.ldv_40024.role = (unsigned char)0; ns_max.ldv_40024.peer = (unsigned char)0; ns_max.ldv_40024.conn = (unsigned char)0; ns_max.ldv_40024.disk = (unsigned char)0; ns_max.ldv_40024.pdsk = (unsigned char)0; ns_max.ldv_40024.susp = (unsigned char)0; ns_max.ldv_40024.aftr_isp = (unsigned char)0; ns_max.ldv_40024.peer_isp = (unsigned char)0; ns_max.ldv_40024.user_isp = (unsigned char)0; ns_max.ldv_40024.susp_nod = (unsigned char)0; ns_max.ldv_40024.susp_fen = (unsigned char)0; ns_max.ldv_40024._pad = (unsigned short)0; ns_min.ldv_40024.role = 3U; ns_min.ldv_40024.peer = 3U; ns_min.ldv_40024.conn = val.ldv_40024.conn; ns_min.ldv_40024.disk = 15U; ns_min.ldv_40024.pdsk = 15U; ns_min.ldv_40024.susp = (unsigned char)0; ns_min.ldv_40024.aftr_isp = (unsigned char)0; ns_min.ldv_40024.peer_isp = (unsigned char)0; ns_min.ldv_40024.user_isp = (unsigned char)0; ns_min.ldv_40024.susp_nod = (unsigned char)0; ns_min.ldv_40024.susp_fen = (unsigned char)0; ns_min.ldv_40024._pad = (unsigned short)0; number_of_volumes = 0; if ((unsigned int )*((unsigned short *)(& mask) + 0UL) == 496U) { if ((unsigned int )tconn->cstate != 9U && (unsigned int )*((unsigned short *)(& val) + 0UL) == 144U) { tconn->last_reconnect_jif = jiffies; } else { } tconn->cstate = (enum drbd_conns )val.ldv_40024.conn; } else { } rcu_read_lock___7(); vnr = 0; tmp = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp; goto ldv_52491; ldv_52490: number_of_volumes = number_of_volumes + 1; os = drbd_read_state(mdev); ns = apply_mask_val(os, mask, val); ns = sanitize_state(mdev, ns, 0); if ((((unsigned int )flags & 2048U) != 0U && (unsigned int )*((unsigned char *)(& ns) + 1UL) == 10U) && (int )os.ldv_40024.disk <= 4) { ns.ldv_40024.disk = os.ldv_40024.disk; } else { } rv = __drbd_set_state(mdev, ns, flags, 0); if ((int )rv <= 0) { __asm__ volatile ("1:\tud2\n.pushsection __bug_table,\"a\"\n2:\t.long 1b - 2b, %c0 - 2b\n\t.word %c1, 0\n\t.org 2b+%c2\n.popsection": : "i" ((char *)"/work/ldvuser/novikov/work/current--X--drivers/block/drbd/drbd.ko--X--defaultlinux--X--32_7a--X--cpachecker/linux/csd_deg_dscv/23/dscv_tempdir/dscv/ri/32_7a/drivers/block/drbd/drbd_state.c.prepared"), "i" (1785), "i" (12UL)); ldv_52471: ; goto ldv_52471; } else { } ns.i = mdev->state.i; tmp___0 = max_role((enum drbd_role )ns.ldv_40024.role, (enum drbd_role )ns_max.ldv_40024.role); ns_max.ldv_40024.role = (unsigned char )tmp___0; tmp___1 = max_role((enum drbd_role )ns.ldv_40024.peer, (enum drbd_role )ns_max.ldv_40024.peer); ns_max.ldv_40024.peer = (unsigned char )tmp___1; __max1 = (enum drbd_conns )ns.ldv_40024.conn; __max2 = (enum drbd_conns )ns_max.ldv_40024.conn; ns_max.ldv_40024.conn = (unsigned char )((unsigned int )__max1 > (unsigned int )__max2 ? 
(unsigned int )__max1 : (unsigned int )__max2); __max1___0 = (enum drbd_disk_state )ns.ldv_40024.disk; __max2___0 = (enum drbd_disk_state )ns_max.ldv_40024.disk; ns_max.ldv_40024.disk = (unsigned char )((unsigned int )__max1___0 > (unsigned int )__max2___0 ? (unsigned int )__max1___0 : (unsigned int )__max2___0); __max1___1 = (enum drbd_disk_state )ns.ldv_40024.pdsk; __max2___1 = (enum drbd_disk_state )ns_max.ldv_40024.pdsk; ns_max.ldv_40024.pdsk = (unsigned char )((unsigned int )__max1___1 > (unsigned int )__max2___1 ? (unsigned int )__max1___1 : (unsigned int )__max2___1); tmp___2 = min_role((enum drbd_role )ns.ldv_40024.role, (enum drbd_role )ns_min.ldv_40024.role); ns_min.ldv_40024.role = (unsigned char )tmp___2; tmp___3 = min_role((enum drbd_role )ns.ldv_40024.peer, (enum drbd_role )ns_min.ldv_40024.peer); ns_min.ldv_40024.peer = (unsigned char )tmp___3; __min1 = (enum drbd_conns )ns.ldv_40024.conn; __min2 = (enum drbd_conns )ns_min.ldv_40024.conn; ns_min.ldv_40024.conn = (unsigned char )((unsigned int )__min1 < (unsigned int )__min2 ? (unsigned int )__min1 : (unsigned int )__min2); __min1___0 = (enum drbd_disk_state )ns.ldv_40024.disk; __min2___0 = (enum drbd_disk_state )ns_min.ldv_40024.disk; ns_min.ldv_40024.disk = (unsigned char )((unsigned int )__min1___0 < (unsigned int )__min2___0 ? (unsigned int )__min1___0 : (unsigned int )__min2___0); __min1___1 = (enum drbd_disk_state )ns.ldv_40024.pdsk; __min2___1 = (enum drbd_disk_state )ns_min.ldv_40024.pdsk; ns_min.ldv_40024.pdsk = (unsigned char )((unsigned int )__min1___1 < (unsigned int )__min2___1 ? (unsigned int )__min1___1 : (unsigned int )__min2___1); vnr = vnr + 1; tmp___4 = idr_get_next(& tconn->volumes, & vnr); mdev = (struct drbd_conf *)tmp___4; ldv_52491: ; if ((unsigned long )mdev != (unsigned long )((struct drbd_conf *)0)) { goto ldv_52490; } else { } rcu_read_unlock___7(); if (number_of_volumes == 0) { __constr_expr_0.ldv_40024.role = 2U; __constr_expr_0.ldv_40024.peer = 0U; __constr_expr_0.ldv_40024.conn = val.ldv_40024.conn; __constr_expr_0.ldv_40024.disk = 0U; __constr_expr_0.ldv_40024.pdsk = 6U; __constr_expr_0.ldv_40024.susp = (unsigned char)0; __constr_expr_0.ldv_40024.aftr_isp = (unsigned char)0; __constr_expr_0.ldv_40024.peer_isp = (unsigned char)0; __constr_expr_0.ldv_40024.user_isp = (unsigned char)0; __constr_expr_0.ldv_40024.susp_nod = (unsigned char)0; __constr_expr_0.ldv_40024.susp_fen = (unsigned char)0; __constr_expr_0.ldv_40024._pad = (unsigned short)0; ns_max = __constr_expr_0; ns_min = ns_max; } else { } ns_max.ldv_40024.susp = tconn->susp; ns_min.ldv_40024.susp = ns_max.ldv_40024.susp; ns_max.ldv_40024.susp_nod = tconn->susp_nod; ns_min.ldv_40024.susp_nod = ns_max.ldv_40024.susp_nod; ns_max.ldv_40024.susp_fen = tconn->susp_fen; ns_min.ldv_40024.susp_fen = ns_max.ldv_40024.susp_fen; *pns_min = ns_min; *pns_max = ns_max; return; } } static enum drbd_state_rv _conn_rq_cond(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val ) { enum drbd_state_rv rv ; int tmp ; int tmp___0 ; { tmp = test_and_clear_bit(6, (unsigned long volatile *)(& tconn->flags)); if (tmp != 0) { return (SS_CW_SUCCESS); } else { } tmp___0 = test_and_clear_bit(7, (unsigned long volatile *)(& tconn->flags)); if (tmp___0 != 0) { return (SS_CW_FAILED_BY_PEER); } else { } rv = (unsigned int )tconn->cstate != 9U ? 
SS_CW_NO_NEED : SS_UNKNOWN_ERROR; if ((int )rv == 0) { rv = conn_is_valid_transition(tconn, mask, val, 0); } else { } if ((int )rv == 1) { rv = SS_UNKNOWN_ERROR; } else { } return (rv); } } enum drbd_state_rv _conn_request_state(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val , enum chg_state_flags flags ) { enum drbd_state_rv rv ; struct after_conn_state_chg_work *acscw ; enum drbd_conns oc ; union drbd_state ns_max ; union drbd_state ns_min ; union drbd_state os ; bool have_mutex ; int tmp ; wait_queue_t __wait ; struct task_struct *tmp___0 ; long volatile __ret ; struct task_struct *tmp___1 ; struct task_struct *tmp___2 ; struct task_struct *tmp___3 ; struct task_struct *tmp___4 ; struct task_struct *tmp___5 ; void *tmp___6 ; char const *tmp___7 ; char const *tmp___8 ; char const *tmp___9 ; { rv = 1; oc = tconn->cstate; have_mutex = 0; if ((unsigned int )*((unsigned short *)(& mask) + 0UL) != 0U) { rv = is_valid_conn_transition(oc, (enum drbd_conns )val.ldv_40024.conn); if ((int )rv <= 0) { goto abort; } else { } } else { } rv = conn_is_valid_transition(tconn, mask, val, flags); if ((int )rv <= 0) { goto abort; } else { } if (((unsigned int )oc == 9U && (unsigned int )*((unsigned short *)(& val) + 0UL) == 16U) && ((unsigned int )flags & 17U) == 0U) { spin_unlock_irq(& tconn->req_lock); ldv_mutex_lock_352(& tconn->cstate_mutex); have_mutex = 1; set_bit(5U, (unsigned long volatile *)(& tconn->flags)); tmp = conn_send_state_req(tconn, mask, val); if (tmp != 0) { clear_bit(5, (unsigned long volatile *)(& tconn->flags)); rv = SS_CW_FAILED_BY_PEER; goto abort_unlocked; } else { } if ((unsigned int )*((unsigned short *)(& val) + 0UL) == 16U) { set_bit(12U, (unsigned long volatile *)(& tconn->flags)); } else { } spin_lock_irq(& tconn->req_lock); rv = _conn_rq_cond(tconn, mask, val); if ((int )rv != 0) { goto ldv_52515; } else { } tmp___0 = get_current(); init_waitqueue_entry(& __wait, tmp___0); add_wait_queue(& tconn->ping_wait, & __wait); ldv_52526: __ret = 2L; switch (8UL) { case 1UL: tmp___1 = get_current(); __asm__ volatile ("xchgb %b0, %1\n": "+q" (__ret), "+m" (tmp___1->state): : "memory", "cc"); goto ldv_52519; case 2UL: tmp___2 = get_current(); __asm__ volatile ("xchgw %w0, %1\n": "+r" (__ret), "+m" (tmp___2->state): : "memory", "cc"); goto ldv_52519; case 4UL: tmp___3 = get_current(); __asm__ volatile ("xchgl %0, %1\n": "+r" (__ret), "+m" (tmp___3->state): : "memory", "cc"); goto ldv_52519; case 8UL: tmp___4 = get_current(); __asm__ volatile ("xchgq %q0, %1\n": "+r" (__ret), "+m" (tmp___4->state): : "memory", "cc"); goto ldv_52519; default: __xchg_wrong_size(); } ldv_52519: rv = _conn_rq_cond(tconn, mask, val); if ((int )rv != 0) { goto ldv_52525; } else { } spin_unlock_irq(& tconn->req_lock); schedule(); spin_lock_irq(& tconn->req_lock); goto ldv_52526; ldv_52525: tmp___5 = get_current(); tmp___5->state = 0L; remove_wait_queue(& tconn->ping_wait, & __wait); ldv_52515: clear_bit(5, (unsigned long volatile *)(& tconn->flags)); if ((int )rv <= 0) { goto abort; } else { } } else { } conn_old_common_state(tconn, & os, & flags); flags = (enum chg_state_flags )((unsigned int )flags | 1024U); conn_set_state(tconn, mask, val, & ns_min, & ns_max, flags); conn_pr_state_change(tconn, os, ns_max, flags); tmp___6 = kmalloc(48UL, 32U); acscw = (struct after_conn_state_chg_work *)tmp___6; if ((unsigned long )acscw != (unsigned long )((struct after_conn_state_chg_work *)0)) { acscw->oc = (enum drbd_conns )os.ldv_40024.conn; acscw->ns_min = ns_min; acscw->ns_max = ns_max; 
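/* Defer the post-state-change side effects to DRBD's worker thread:
 * the after_conn_state_chg_work captures the old connection state and
 * the min/max aggregate states computed by conn_set_state(), takes a
 * reference on the tconn (kref_get) so it cannot be freed while the
 * work is pending, and is queued on sender_work with
 * w_after_conn_state_ch as its callback. */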
acscw->flags = flags; acscw->w.cb = & w_after_conn_state_ch; kref_get(& tconn->kref); acscw->w.ldv_49807.tconn = tconn; drbd_queue_work(& tconn->sender_work, & acscw->w); } else { printk("\vd-con %s: Could not kmalloc an acscw\n", tconn->name); } abort: ; if ((int )have_mutex) { spin_unlock_irq(& tconn->req_lock); abort_unlocked: ldv_mutex_unlock_353(& tconn->cstate_mutex); spin_lock_irq(& tconn->req_lock); } else { } if ((int )rv <= 0 && ((unsigned int )flags & 2U) != 0U) { tmp___7 = drbd_set_st_err_str(rv); printk("\vd-con %s: State change failed: %s\n", tconn->name, tmp___7); printk("\vd-con %s: mask = 0x%x val = 0x%x\n", tconn->name, mask.i, val.i); tmp___8 = drbd_conn_str((enum drbd_conns )val.ldv_40024.conn); tmp___9 = drbd_conn_str(oc); printk("\vd-con %s: old_conn:%s wanted_conn:%s\n", tconn->name, tmp___9, tmp___8); } else { } return (rv); } } enum drbd_state_rv conn_request_state(struct drbd_tconn *tconn , union drbd_state mask , union drbd_state val , enum chg_state_flags flags ) { enum drbd_state_rv rv ; { spin_lock_irq(& tconn->req_lock); rv = _conn_request_state(tconn, mask, val, flags); spin_unlock_irq(& tconn->req_lock); return (rv); } } void ldv_mutex_lock_337(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_338(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_339(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_340(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_341(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_342(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_343(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_344(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_345(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_346(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_347(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_348(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_349(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_state_mutex_of_drbd_conf(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_350(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_351(struct mutex *ldv_func_arg1 ) { { 
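/* Like every ldv_mutex_{lock,unlock,trylock}_NNN stub in this harness,
 * this wrapper first drives the LDV state model for the lock class
 * bound to its call site (here: conf_update of drbd_tconn) and then
 * performs the real mutex operation, so the verifier observes each
 * acquire/release without changing the original call sites. In the
 * trylock stubs the model's nondeterministic result is returned and
 * the trailing return of the real result is unreachable by
 * construction. */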
ldv_mutex_unlock_conf_update_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_352(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cstate_mutex_of_drbd_tconn(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_353(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cstate_mutex_of_drbd_tconn(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } int ldv_mutex_trylock_376(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_372(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_374(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_377(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_379(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_unlock_381(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_371(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_373(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_375(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_378(struct mutex *ldv_func_arg1 ) ; void ldv_mutex_lock_380(struct mutex *ldv_func_arg1 ) ; extern int nla_parse(struct nlattr ** , int , struct nlattr const * , int , struct nla_policy const * ) ; __inline static int nla_type(struct nlattr const *nla ) { { return ((int )nla->nla_type & -49153); } } __inline static int nla_ok(struct nlattr const *nla , int remaining ) { { return ((remaining > 3 && (unsigned int )((unsigned short )nla->nla_len) > 3U) && (int )nla->nla_len <= remaining); } } __inline static struct nlattr *nla_next(struct nlattr const *nla , int *remaining ) { int totlen ; { totlen = ((int )nla->nla_len + 3) & -4; *remaining = *remaining - totlen; return ((struct nlattr *)nla + (unsigned long )totlen); } } __inline static struct nlattr *nla_find_nested(struct nlattr const *nla , int attrtype ) { int tmp ; void *tmp___0 ; struct nlattr *tmp___1 ; { tmp = nla_len(nla); tmp___0 = nla_data(nla); tmp___1 = nla_find((struct nlattr const *)tmp___0, tmp, attrtype); return (tmp___1); } } __inline static int nla_parse_nested(struct nlattr **tb , int maxtype , struct nlattr const *nla , struct nla_policy const *policy ) { int tmp ; void *tmp___0 ; int tmp___1 ; { tmp = nla_len(nla); tmp___0 = nla_data(nla); tmp___1 = nla_parse(tb, maxtype, (struct nlattr const *)tmp___0, tmp, policy); return (tmp___1); } } static int drbd_nla_check_mandatory(int maxtype , struct nlattr *nla ) { struct nlattr *head ; void *tmp ; int len ; int tmp___0 ; int rem ; int tmp___1 ; int tmp___2 ; { tmp = nla_data((struct nlattr const *)nla); head = (struct nlattr *)tmp; tmp___0 = nla_len((struct nlattr const *)nla); len = tmp___0; nla = head; rem = len; goto ldv_51112; ldv_51111: ; if (((int )nla->nla_type & 16384) != 0) { nla->nla_type = (unsigned int )nla->nla_type & 49151U; tmp___1 = nla_type((struct nlattr const *)nla); if (tmp___1 > maxtype) { return (-95); } else { } } else { } nla = nla_next((struct nlattr const *)nla, & rem); ldv_51112: tmp___2 = nla_ok((struct nlattr const *)nla, rem); if (tmp___2 != 0) { goto ldv_51111; } else { } return (0); } } int drbd_nla_parse_nested(struct nlattr **tb , int maxtype , struct nlattr *nla , struct nla_policy const *policy ) { int err ; { err = drbd_nla_check_mandatory(maxtype, nla); if (err == 0) { err = nla_parse_nested(tb, maxtype, (struct nlattr const *)nla, policy); } else { } return (err); } } struct nlattr *drbd_nla_find_nested(int maxtype , struct nlattr *nla , int attrtype ) { int err ; void *tmp ; struct nlattr *tmp___0 ; { err = drbd_nla_check_mandatory(maxtype, nla); if (err != 0) { tmp = ERR_PTR((long )err); return 
((struct nlattr *)tmp); } else { } tmp___0 = nla_find_nested((struct nlattr const *)nla, attrtype); return (tmp___0); } } void ldv_mutex_lock_371(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_372(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_373(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_lock(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_374(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_lock(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_375(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mutex_of_device(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } int ldv_mutex_trylock_376(struct mutex *ldv_func_arg1 ) { ldv_func_ret_type___4 ldv_func_res ; int tmp ; int tmp___0 ; { tmp = mutex_trylock(ldv_func_arg1); ldv_func_res = tmp; tmp___0 = ldv_mutex_trylock_mutex_of_device(ldv_func_arg1); return (tmp___0); return (ldv_func_res); } } void ldv_mutex_unlock_377(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mutex_of_device(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_378(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_379(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } void ldv_mutex_lock_380(struct mutex *ldv_func_arg1 ) { { ldv_mutex_lock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_lock(ldv_func_arg1); return; } } void ldv_mutex_unlock_381(struct mutex *ldv_func_arg1 ) { { ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(ldv_func_arg1); mutex_unlock(ldv_func_arg1); return; } } __inline static void ldv_error(void) __attribute__((__no_instrument_function__)) ; __inline static void ldv_error(void) { { LDV_ERROR: {reach_error();abort();} } } extern int ldv_undef_int(void) ; long ldv__builtin_expect(long exp , long c ) { { return (exp); } } static int ldv_mutex_bm_change_of_drbd_bitmap ; int ldv_mutex_lock_interruptible_bm_change_of_drbd_bitmap(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_bm_change_of_drbd_bitmap == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_bm_change_of_drbd_bitmap = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_bm_change_of_drbd_bitmap(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_bm_change_of_drbd_bitmap == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_bm_change_of_drbd_bitmap = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_bm_change_of_drbd_bitmap(struct mutex *lock ) { { if (ldv_mutex_bm_change_of_drbd_bitmap == 1) { } else { ldv_error(); } ldv_mutex_bm_change_of_drbd_bitmap = 2; return; } } int ldv_mutex_trylock_bm_change_of_drbd_bitmap(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_bm_change_of_drbd_bitmap == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_bm_change_of_drbd_bitmap = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_bm_change_of_drbd_bitmap(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if 
(ldv_mutex_bm_change_of_drbd_bitmap == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_bm_change_of_drbd_bitmap = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_bm_change_of_drbd_bitmap(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_bm_change_of_drbd_bitmap == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_bm_change_of_drbd_bitmap(struct mutex *lock ) { { if (ldv_mutex_bm_change_of_drbd_bitmap == 2) { } else { ldv_error(); } ldv_mutex_bm_change_of_drbd_bitmap = 1; return; } } static int ldv_mutex_conf_update_of_drbd_tconn ; int ldv_mutex_lock_interruptible_conf_update_of_drbd_tconn(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_conf_update_of_drbd_tconn == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_conf_update_of_drbd_tconn = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_conf_update_of_drbd_tconn(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_conf_update_of_drbd_tconn == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_conf_update_of_drbd_tconn = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_conf_update_of_drbd_tconn(struct mutex *lock ) { { if (ldv_mutex_conf_update_of_drbd_tconn == 1) { } else { ldv_error(); } ldv_mutex_conf_update_of_drbd_tconn = 2; return; } } int ldv_mutex_trylock_conf_update_of_drbd_tconn(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_conf_update_of_drbd_tconn == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_conf_update_of_drbd_tconn = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_conf_update_of_drbd_tconn(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_conf_update_of_drbd_tconn == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_conf_update_of_drbd_tconn = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_conf_update_of_drbd_tconn(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_conf_update_of_drbd_tconn == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_conf_update_of_drbd_tconn(struct mutex *lock ) { { if (ldv_mutex_conf_update_of_drbd_tconn == 2) { } else { ldv_error(); } ldv_mutex_conf_update_of_drbd_tconn = 1; return; } } static int ldv_mutex_cred_guard_mutex_of_signal_struct ; int ldv_mutex_lock_interruptible_cred_guard_mutex_of_signal_struct(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_cred_guard_mutex_of_signal_struct = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_cred_guard_mutex_of_signal_struct(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_cred_guard_mutex_of_signal_struct = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_cred_guard_mutex_of_signal_struct(struct mutex *lock ) { { if 
(ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { } else { ldv_error(); } ldv_mutex_cred_guard_mutex_of_signal_struct = 2; return; } } int ldv_mutex_trylock_cred_guard_mutex_of_signal_struct(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_cred_guard_mutex_of_signal_struct = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_cred_guard_mutex_of_signal_struct(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_cred_guard_mutex_of_signal_struct = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_cred_guard_mutex_of_signal_struct(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_cred_guard_mutex_of_signal_struct(struct mutex *lock ) { { if (ldv_mutex_cred_guard_mutex_of_signal_struct == 2) { } else { ldv_error(); } ldv_mutex_cred_guard_mutex_of_signal_struct = 1; return; } } static int ldv_mutex_cstate_mutex_of_drbd_tconn ; int ldv_mutex_lock_interruptible_cstate_mutex_of_drbd_tconn(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_cstate_mutex_of_drbd_tconn = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_cstate_mutex_of_drbd_tconn(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_cstate_mutex_of_drbd_tconn = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_cstate_mutex_of_drbd_tconn(struct mutex *lock ) { { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { } else { ldv_error(); } ldv_mutex_cstate_mutex_of_drbd_tconn = 2; return; } } int ldv_mutex_trylock_cstate_mutex_of_drbd_tconn(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_cstate_mutex_of_drbd_tconn = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_cstate_mutex_of_drbd_tconn(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_cstate_mutex_of_drbd_tconn = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_cstate_mutex_of_drbd_tconn(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_cstate_mutex_of_drbd_tconn(struct mutex *lock ) { { if (ldv_mutex_cstate_mutex_of_drbd_tconn == 2) { } else { ldv_error(); } ldv_mutex_cstate_mutex_of_drbd_tconn = 1; return; } } static int ldv_mutex_drbd_main_mutex ; int ldv_mutex_lock_interruptible_drbd_main_mutex(struct mutex *lock 
) { int nondetermined ; { if (ldv_mutex_drbd_main_mutex == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_drbd_main_mutex = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_drbd_main_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_drbd_main_mutex == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_drbd_main_mutex = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_drbd_main_mutex(struct mutex *lock ) { { if (ldv_mutex_drbd_main_mutex == 1) { } else { ldv_error(); } ldv_mutex_drbd_main_mutex = 2; return; } } int ldv_mutex_trylock_drbd_main_mutex(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_drbd_main_mutex == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_drbd_main_mutex = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_drbd_main_mutex(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_drbd_main_mutex == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_drbd_main_mutex = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_drbd_main_mutex(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_drbd_main_mutex == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_drbd_main_mutex(struct mutex *lock ) { { if (ldv_mutex_drbd_main_mutex == 2) { } else { ldv_error(); } ldv_mutex_drbd_main_mutex = 1; return; } } static int ldv_mutex_lock ; int ldv_mutex_lock_interruptible_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_lock = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_lock = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_lock(struct mutex *lock ) { { if (ldv_mutex_lock == 1) { } else { ldv_error(); } ldv_mutex_lock = 2; return; } } int ldv_mutex_trylock_lock(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_lock = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_lock(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_lock == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_lock = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_lock(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_lock == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_lock(struct mutex *lock ) { { if (ldv_mutex_lock == 2) { } else { ldv_error(); } ldv_mutex_lock = 1; return; } } static int ldv_mutex_mtx_of_percpu_rw_semaphore ; int ldv_mutex_lock_interruptible_mtx_of_percpu_rw_semaphore(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { } else { ldv_error(); } nondetermined = 
ldv_undef_int(); if (nondetermined) { ldv_mutex_mtx_of_percpu_rw_semaphore = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_mtx_of_percpu_rw_semaphore(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_mtx_of_percpu_rw_semaphore = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_mtx_of_percpu_rw_semaphore(struct mutex *lock ) { { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { } else { ldv_error(); } ldv_mutex_mtx_of_percpu_rw_semaphore = 2; return; } } int ldv_mutex_trylock_mtx_of_percpu_rw_semaphore(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_mtx_of_percpu_rw_semaphore = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_mtx_of_percpu_rw_semaphore(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_mtx_of_percpu_rw_semaphore = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_mtx_of_percpu_rw_semaphore(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_mtx_of_percpu_rw_semaphore(struct mutex *lock ) { { if (ldv_mutex_mtx_of_percpu_rw_semaphore == 2) { } else { ldv_error(); } ldv_mutex_mtx_of_percpu_rw_semaphore = 1; return; } } static int ldv_mutex_mutex_of_device ; int ldv_mutex_lock_interruptible_mutex_of_device(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_device == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_mutex_of_device = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_mutex_of_device(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_device == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_mutex_of_device = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_mutex_of_device(struct mutex *lock ) { { if (ldv_mutex_mutex_of_device == 1) { } else { ldv_error(); } ldv_mutex_mutex_of_device = 2; return; } } int ldv_mutex_trylock_mutex_of_device(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_mutex_of_device == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_mutex_of_device = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_mutex_of_device(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_mutex_of_device == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_mutex_of_device = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_mutex_of_device(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_device == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_mutex_of_device(struct mutex *lock ) { { if (ldv_mutex_mutex_of_device 
== 2) { } else { ldv_error(); } ldv_mutex_mutex_of_device = 1; return; } } static int ldv_mutex_mutex_of_drbd_socket ; int ldv_mutex_lock_interruptible_mutex_of_drbd_socket(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_drbd_socket == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_mutex_of_drbd_socket = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_mutex_of_drbd_socket(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_drbd_socket == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_mutex_of_drbd_socket = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_mutex_of_drbd_socket(struct mutex *lock ) { { if (ldv_mutex_mutex_of_drbd_socket == 1) { } else { ldv_error(); } ldv_mutex_mutex_of_drbd_socket = 2; return; } } int ldv_mutex_trylock_mutex_of_drbd_socket(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_mutex_of_drbd_socket == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_mutex_of_drbd_socket = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_mutex_of_drbd_socket(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_mutex_of_drbd_socket == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_mutex_of_drbd_socket = 2; return (1); } else { } return (0); } } int ldv_mutex_is_locked_mutex_of_drbd_socket(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_mutex_of_drbd_socket == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_mutex_of_drbd_socket(struct mutex *lock ) { { if (ldv_mutex_mutex_of_drbd_socket == 2) { } else { ldv_error(); } ldv_mutex_mutex_of_drbd_socket = 1; return; } } static int ldv_mutex_state_mutex_of_drbd_conf ; int ldv_mutex_lock_interruptible_state_mutex_of_drbd_conf(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_state_mutex_of_drbd_conf == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_state_mutex_of_drbd_conf = 2; return (0); } else { return (-4); } } } int ldv_mutex_lock_killable_state_mutex_of_drbd_conf(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_state_mutex_of_drbd_conf == 1) { } else { ldv_error(); } nondetermined = ldv_undef_int(); if (nondetermined) { ldv_mutex_state_mutex_of_drbd_conf = 2; return (0); } else { return (-4); } } } void ldv_mutex_lock_state_mutex_of_drbd_conf(struct mutex *lock ) { { if (ldv_mutex_state_mutex_of_drbd_conf == 1) { } else { ldv_error(); } ldv_mutex_state_mutex_of_drbd_conf = 2; return; } } int ldv_mutex_trylock_state_mutex_of_drbd_conf(struct mutex *lock ) { int is_mutex_held_by_another_thread ; { if (ldv_mutex_state_mutex_of_drbd_conf == 1) { } else { ldv_error(); } is_mutex_held_by_another_thread = ldv_undef_int(); if (is_mutex_held_by_another_thread) { return (0); } else { ldv_mutex_state_mutex_of_drbd_conf = 2; return (1); } } } int ldv_atomic_dec_and_mutex_lock_state_mutex_of_drbd_conf(atomic_t *cnt , struct mutex *lock ) { int atomic_value_after_dec ; { if (ldv_mutex_state_mutex_of_drbd_conf == 1) { } else { ldv_error(); } atomic_value_after_dec = ldv_undef_int(); if (atomic_value_after_dec == 0) { ldv_mutex_state_mutex_of_drbd_conf = 2; return (1); } else { } return (0); 
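/* Model of atomic_dec_and_mutex_lock(): ldv_undef_int() stands in for
 * the post-decrement counter value, and only the branch where it is
 * observed as zero takes the model lock and returns 1, matching the
 * kernel contract that the mutex is held iff the call returns true. */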
} } int ldv_mutex_is_locked_state_mutex_of_drbd_conf(struct mutex *lock ) { int nondetermined ; { if (ldv_mutex_state_mutex_of_drbd_conf == 1) { nondetermined = ldv_undef_int(); if (nondetermined) { return (0); } else { return (1); } } else { return (1); } } } void ldv_mutex_unlock_state_mutex_of_drbd_conf(struct mutex *lock ) { { if (ldv_mutex_state_mutex_of_drbd_conf == 2) { } else { ldv_error(); } ldv_mutex_state_mutex_of_drbd_conf = 1; return; } } void ldv_initialize(void) { { ldv_mutex_bm_change_of_drbd_bitmap = 1; ldv_mutex_conf_update_of_drbd_tconn = 1; ldv_mutex_cred_guard_mutex_of_signal_struct = 1; ldv_mutex_cstate_mutex_of_drbd_tconn = 1; ldv_mutex_drbd_main_mutex = 1; ldv_mutex_lock = 1; ldv_mutex_mtx_of_percpu_rw_semaphore = 1; ldv_mutex_mutex_of_device = 1; ldv_mutex_mutex_of_drbd_socket = 1; ldv_mutex_state_mutex_of_drbd_conf = 1; return; } } void ldv_check_final_state(void) { { if (ldv_mutex_bm_change_of_drbd_bitmap == 1) { } else { ldv_error(); } if (ldv_mutex_conf_update_of_drbd_tconn == 1) { } else { ldv_error(); } if (ldv_mutex_cred_guard_mutex_of_signal_struct == 1) { } else { ldv_error(); } if (ldv_mutex_cstate_mutex_of_drbd_tconn == 1) { } else { ldv_error(); } if (ldv_mutex_drbd_main_mutex == 1) { } else { ldv_error(); } if (ldv_mutex_lock == 1) { } else { ldv_error(); } if (ldv_mutex_mtx_of_percpu_rw_semaphore == 1) { } else { ldv_error(); } if (ldv_mutex_mutex_of_device == 1) { } else { ldv_error(); } if (ldv_mutex_mutex_of_drbd_socket == 1) { } else { ldv_error(); } if (ldv_mutex_state_mutex_of_drbd_conf == 1) { } else { ldv_error(); } return; } }
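/* The lock model that closes this harness: each of the ten lock
 * classes above is tracked by a static int, 1 = unlocked, 2 = locked.
 * ldv_initialize() resets every class to 1 (in the usual LDV
 * environment model it runs before the driver entry points); each lock
 * stub requires state 1 and moves to 2, each unlock stub requires
 * state 2 and moves back to 1, and any violation (double lock, unlock
 * of an unlocked mutex) reaches ldv_error() -> reach_error(), the
 * property the verifier checks. ldv_check_final_state() re-checks all
 * classes at exit, so a path that leaks a held mutex is also flagged.
 *
 * A minimal sketch of the same pattern for a single hypothetical lock
 * class (the ldv_*_example names are illustrative and not part of this
 * harness):
 *
 *   static int ldv_mutex_example;        // 1 = unlocked, 2 = locked
 *   void ldv_lock_example(void)   { if (ldv_mutex_example != 1) ldv_error(); ldv_mutex_example = 2; }
 *   void ldv_unlock_example(void) { if (ldv_mutex_example != 2) ldv_error(); ldv_mutex_example = 1; }
 *   void ldv_init_example(void)   { ldv_mutex_example = 1; }
 *   void ldv_final_example(void)  { if (ldv_mutex_example != 1) ldv_error(); }
 */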