#ifndef __CEPH_DECODE_H
#define __CEPH_DECODE_H

#include <linux/bug.h>
#include <linux/time.h>
#include <asm/unaligned.h>

#include "types.h"

/*
 * in all cases,
 *   void **p     pointer to position pointer
 *   void *end    pointer to end of buffer (last byte + 1)
 */

static inline u64 ceph_decode_64(void **p)
{
	u64 v = get_unaligned_le64(*p);
	*p += sizeof(u64);
	return v;
}
static inline u32 ceph_decode_32(void **p)
{
	u32 v = get_unaligned_le32(*p);
	*p += sizeof(u32);
	return v;
}
static inline u16 ceph_decode_16(void **p)
{
	u16 v = get_unaligned_le16(*p);
	*p += sizeof(u16);
	return v;
}
static inline u8 ceph_decode_8(void **p)
{
	u8 v = *(u8 *)*p;
	(*p)++;
	return v;
}
static inline void ceph_decode_copy(void **p, void *pv, size_t n)
{
	memcpy(pv, *p, n);
	*p += n;
}

/*
 * bounds check input.
 */
#define ceph_decode_need(p, end, n, bad)			\
	do {							\
		if (unlikely(*(p) + (n) > (end)))		\
			goto bad;				\
	} while (0)

#define ceph_decode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u64), bad);	\
		v = ceph_decode_64(p);				\
	} while (0)
#define ceph_decode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u32), bad);	\
		v = ceph_decode_32(p);				\
	} while (0)
#define ceph_decode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u16), bad);	\
		v = ceph_decode_16(p);				\
	} while (0)
#define ceph_decode_8_safe(p, end, v, bad)			\
	do {							\
		ceph_decode_need(p, end, sizeof(u8), bad);	\
		v = ceph_decode_8(p);				\
	} while (0)

#define ceph_decode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_decode_need(p, end, n, bad);		\
		ceph_decode_copy(p, pv, n);			\
	} while (0)
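
/*
 * Illustrative sketch only: the *_safe variants above are meant to be
 * used with a local error label so the decoder can bail out cleanly
 * when the buffer turns out to be truncated.  The record layout here
 * (a version byte, a 64-bit id, then a fixed-size blob) and the
 * function name are hypothetical.
 */
static inline bool ceph_decode_example_record(void **p, void *end,
					      u64 *id, void *blob,
					      size_t blob_len)
{
	u8 version;

	ceph_decode_8_safe(p, end, version, bad);
	if (version != 1)
		goto bad;
	ceph_decode_64_safe(p, end, *id, bad);
	ceph_decode_copy_safe(p, end, blob, blob_len, bad);
	return true;

bad:
	return false;
}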

/*
 * struct ceph_timespec <-> struct timespec
 */
static inline void ceph_decode_timespec(struct timespec *ts,
					const struct ceph_timespec *tv)
{
	ts->tv_sec = le32_to_cpu(tv->tv_sec);
	ts->tv_nsec = le32_to_cpu(tv->tv_nsec);
}
static inline void ceph_encode_timespec(struct ceph_timespec *tv,
					const struct timespec *ts)
{
	tv->tv_sec = cpu_to_le32(ts->tv_sec);
	tv->tv_nsec = cpu_to_le32(ts->tv_nsec);
}

/*
 * sockaddr_storage <-> ceph_sockaddr
 */
static inline void ceph_encode_addr(struct ceph_entity_addr *a)
{
	__be16 ss_family = htons(a->in_addr.ss_family);
	a->in_addr.ss_family = *(__u16 *)&ss_family;
}
static inline void ceph_decode_addr(struct ceph_entity_addr *a)
{
	__be16 ss_family = *(__be16 *)&a->in_addr.ss_family;
	a->in_addr.ss_family = ntohs(ss_family);
	/*
	 * 512 is AF_INET (2) with its bytes swapped; seeing it here
	 * means the address family was not in network byte order on
	 * the wire.
	 */
	WARN_ON(a->in_addr.ss_family == 512);
}

/*
 * encoders
 */
static inline void ceph_encode_64(void **p, u64 v)
{
	put_unaligned_le64(v, (__le64 *)*p);
	*p += sizeof(u64);
}
static inline void ceph_encode_32(void **p, u32 v)
{
	put_unaligned_le32(v, (__le32 *)*p);
	*p += sizeof(u32);
}
static inline void ceph_encode_16(void **p, u16 v)
{
	put_unaligned_le16(v, (__le16 *)*p);
	*p += sizeof(u16);
}
static inline void ceph_encode_8(void **p, u8 v)
{
	*(u8 *)*p = v;
	(*p)++;
}
static inline void ceph_encode_copy(void **p, const void *s, int len)
{
	memcpy(*p, s, len);
	*p += len;
}

/*
 * filepath, string encoders
 */
static inline void ceph_encode_filepath(void **p, void *end,
					u64 ino, const char *path)
{
	u32 len = path ? strlen(path) : 0;
	/* account for the 1-byte encoding version as well as the payload */
	BUG_ON(*p + 1 + sizeof(ino) + sizeof(len) + len > end);
	ceph_encode_8(p, 1);
	ceph_encode_64(p, ino);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, path, len);
	*p += len;
}

static inline void ceph_encode_string(void **p, void *end,
				      const char *s, u32 len)
{
	BUG_ON(*p + sizeof(len) + len > end);
	ceph_encode_32(p, len);
	if (len)
		memcpy(*p, s, len);
	*p += len;
}
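
/*
 * Illustrative sketch only: set up the usual position/end pointers over
 * a caller-supplied buffer and emit an (ino, path) pair followed by a
 * length-prefixed name.  Buffer, names and layout are invented for the
 * example; the encoders above BUG() if the buffer is too small, so the
 * caller is expected to have sized it correctly.  Returns the number of
 * bytes written.
 */
static inline size_t ceph_encode_example_path(void *buf, size_t buf_len,
					      u64 ino, const char *path,
					      const char *name)
{
	void *p = buf;
	void *end = buf + buf_len;

	ceph_encode_filepath(&p, end, ino, path);
	ceph_encode_string(&p, end, name, name ? strlen(name) : 0);
	return p - buf;
}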

#define ceph_encode_need(p, end, n, bad)			\
	do {							\
		if (unlikely(*(p) + (n) > (end)))		\
			goto bad;				\
	} while (0)

#define ceph_encode_64_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u64), bad);	\
		ceph_encode_64(p, v);				\
	} while (0)
#define ceph_encode_32_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u32), bad);	\
		ceph_encode_32(p, v);				\
	} while (0)
#define ceph_encode_16_safe(p, end, v, bad)			\
	do {							\
		ceph_encode_need(p, end, sizeof(u16), bad);	\
		ceph_encode_16(p, v);				\
	} while (0)

#define ceph_encode_copy_safe(p, end, pv, n, bad)		\
	do {							\
		ceph_encode_need(p, end, n, bad);		\
		ceph_encode_copy(p, pv, n);			\
	} while (0)
#define ceph_encode_string_safe(p, end, s, n, bad)		\
	do {							\
		/* the string is preceded by a u32 length */	\
		ceph_encode_need(p, end, sizeof(u32) + (n), bad); \
		ceph_encode_string(p, end, s, n);		\
	} while (0)
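
/*
 * Illustrative sketch only: the ceph_encode_*_safe() variants mirror the
 * decode side, bounds-checking the destination and jumping to a local
 * label when there is not enough room, rather than writing blindly with
 * the unchecked encoders.  The entry layout (a 32-bit type tag followed
 * by a 64-bit value) and the function name are hypothetical.
 */
static inline bool ceph_encode_example_entry(void **p, void *end,
					     u32 type, u64 value)
{
	ceph_encode_32_safe(p, end, type, bad);
	ceph_encode_64_safe(p, end, value, bad);
	return true;

bad:
	return false;
}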


#endif