#ifndef _PXD_CORE_H_
#define _PXD_CORE_H_
#include <linux/types.h>
#include <linux/miscdevice.h>
#ifdef __PX_BLKMQ__
#include <linux/blk-mq.h>
#endif
#include "pxd.h"
#include "pxd_fastpath.h"
#include "fuse_i.h"
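
// Per control-channel context: holds the FUSE connection to the userspace
// daemon, the misc character device it attaches through, and the list of
// pxd devices registered on this channel (descriptive summary inferred from
// the fields below).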
struct pxd_context {
	spinlock_t lock;
	struct list_head list;
	size_t num_devices;
	struct fuse_conn fc;
	struct file_operations fops;
	char name[256];
	int id;
	struct miscdevice miscdev;
	struct delayed_work abort_work;
	uint64_t open_seq;
};
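
// Look up a registered pxd_context by its numeric id (assumption: 'ctx'
// indexes into the driver's context table).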
struct pxd_context* find_context(unsigned ctx);
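
// Per-volume block device state: identity (dev_id, major/minor), gendisk and
// device-model hooks, fastpath extension state, and congestion accounting.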
struct pxd_device {
#define PXD_DEV_MAGIC (0xcafec0de)
	unsigned int magic;
	uint64_t dev_id;
	int major;
	int minor;
	struct gendisk *disk;
	struct device dev;
	size_t size;
	spinlock_t lock;
	spinlock_t qlock;
	struct list_head node;
	int open_count;
	bool removing;
	struct pxd_fastpath_extension fp;
	struct pxd_context *ctx;
	bool connected;
	mode_t mode;
	bool fastpath; // persistent: how the block device was registered with the kernel
	unsigned int queue_depth; // sysfs attribute: bdev IO queue depth
	unsigned int discard_size;
#define PXD_ACTIVE(pxd_dev) (atomic_read(&pxd_dev->ncount))
	// congestion handling
	atomic_t ncount; // [global] total active requests; always modify while holding pxd_dev->lock
	unsigned int qdepth;
	atomic_t congested;
	bool exported;
	unsigned int nr_congestion_on;
	unsigned int nr_congestion_off;
	struct work_struct remove_work;
	wait_queue_head_t remove_wait;
	wait_queue_head_t suspend_wq;
#if defined(__PXD_BIO_BLKMQ__) && defined(__PX_BLKMQ__)
	struct blk_mq_tag_set tag_set;
#endif
};
// how the pxd_device was registered with the kernel during device add.
static inline
bool fastpath_enabled(struct pxd_device *pxd_dev) {
	return pxd_dev->fastpath;
}

// current IO status - fastpath vs native path
static inline
bool fastpath_active(struct pxd_device *pxd_dev) {
	return pxd_dev->fp.fastpath;
}
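
// Note: a device can be registered as fastpath (fastpath_enabled() == true)
// while IO is temporarily served over the native path (fastpath_active() ==
// false), e.g. after a fallback; see pxd_initiate_fallback() below.

// Congestion accounting hooks; presumably compare ncount against
// nr_congestion_on/nr_congestion_off (policy lives in the implementation).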
void pxd_check_q_congested(struct pxd_device *pxd_dev);
void pxd_check_q_decongested(struct pxd_device *pxd_dev);
#define pxd_printk(args...)
//#define pxd_printk(args, ...) printk(KERN_ERR args, ##__VA_ARGS__)
#define pxd_io_printk(args...)
//#define pxd_io_printk(args, ...) printk(KERN_ERR args, ##__VA_ARGS__)
//
#define pxd_mem_printk(args...)
//#define pxd_mem_printk(args, ...) printk(KERN_ERR args, ##__VA_ARGS__)
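
// The pxd_*printk macros above compile to no-ops; uncomment the printk()
// variants to enable verbose general, IO-path and memory tracing respectively.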
#ifndef SECTOR_SIZE
#define SECTOR_SIZE 512
#endif
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT (9)
#endif
#define SEGMENT_SIZE (1024 * 1024)
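
// Reroute an in-flight request from the fastpath to the slow (FUSE) path;
// the entry point differs between the make_request and blk-mq build variants.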
#ifdef __PXD_BIO_MAKEREQ__
void pxd_reroute_slowpath(struct request_queue *q, struct bio *bio);
#else
void pxdmq_reroute_slowpath(struct fuse_req*);
#endif
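
// Runtime path switching: fallback presumably moves IO off the fastpath onto
// the native path, failover re-arms the fastpath (naming inferred).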
int pxd_initiate_fallback(struct pxd_device *pxd_dev);
int pxd_initiate_failover(struct pxd_device *pxd_dev);
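
// Filter a requested open mode down to the flags actually passed through
// (presumably when opening the fastpath backing files): always adds
// O_LARGEFILE | O_NOATIME and carries over only O_RDWR, O_SYNC and O_DIRECT.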
static inline
mode_t open_mode(mode_t mode) {
	mode_t m = O_LARGEFILE | O_NOATIME; // default
	if (mode & O_RDWR) {
		m |= O_RDWR;
	}
	if (mode & O_SYNC) m |= O_SYNC;
	if (mode & O_DIRECT) m |= O_DIRECT;
	return m;
}
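
// e.g. open_mode(O_RDWR | O_SYNC | O_TRUNC) returns
// O_LARGEFILE | O_NOATIME | O_RDWR | O_SYNC; O_TRUNC is not carried over.

// Render a mode as a short flag string for logging, one letter per flag,
// e.g. "LARW" for O_LARGEFILE | O_NOATIME | O_RDWR. 'out' must have room for
// up to 10 bytes including the terminating NUL.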
static inline
void decode_mode(mode_t mode, char *out) {
	if (mode & O_LARGEFILE) *out++ = 'L';
	if (mode & O_NOATIME) *out++ = 'A';
	if (mode & O_DIRECT) *out++ = 'D';
	if (mode & O_WRONLY) *out++ = 'W';
	if (mode & O_RDWR) {
		*out++ = 'R';
		*out++ = 'W';
	} else { // O_RDONLY is defined as zero
		*out++ = 'R';
	}
	if (mode & O_SYNC) *out++ = 'S';
	if (mode & O_TRUNC) *out++ = 'T';
	if (mode & O_APPEND) *out++ = 'P';
	*out = '\0';
}
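
// Nonzero when the mode permits writes (O_RDWR or O_WRONLY set).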
static inline
int write_allowed(mode_t curr) {
	return (curr & (O_RDWR | O_WRONLY));
}
#endif /* _PXD_CORE_H_ */