/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_PCLUSTER_MAX_PAGES	(Z_EROFS_PCLUSTER_MAX_SIZE / PAGE_SIZE)
#define Z_EROFS_NR_INLINE_PAGEVECS      3

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1

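/*
 * Illustrative sketch, not part of the original header: given the two
 * definitions above and the ->length field described below, the known
 * decompressed length and the "full length" flag are packed into a single
 * word roughly as:
 *
 *	length = (llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
 *		 (full ? Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
 *	llen   = length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
 *	full   = length & Z_EROFS_PCLUSTER_FULL_LENGTH;
 */
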
/*
 * Let's leave a type here in case another tagged pointer is
 * introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by the pcluster lock;
 *
 * A: Field should be accessed / updated atomically by parallelized code.
 */
struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct mutex lock;

	/* A: points to the next chained pcluster or a TAIL marker */
	z_erofs_next_pcluster_t next;

	/* A: lower limit of the decompressed length and whether it is full */
	unsigned int length;

	/* I: page offset of start position of decompression */
	unsigned short pageofs_out;

	/* I: page offset of inline compressed data */
	unsigned short pageofs_in;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};

	union {
		/* I: physical cluster size in pages */
		unsigned short pclusterpages;

		/* I: tailpacking inline compressed size */
		unsigned short tailpacking_size;
	};

	/* I: compression algorithm format */
	unsigned char algorithmformat;

	/* A: compressed pages (can be cached or in-place I/O pages) */
	struct page *compressed_pages[];
};
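
/*
 * Illustrative sketch only (not part of the original header and not
 * necessarily how the allocator in zdata.c is organized): since
 * compressed_pages[] is a flexible array sized by the number of physical
 * pages, an allocation could look roughly like:
 *
 *	pcl = kzalloc(struct_size(pcl, compressed_pages, nrpages), GFP_NOFS);
 *	pcl->pclusterpages = nrpages;
 */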

/* let's avoid valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O yet (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)
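
/*
 * Illustrative sketch, not part of the original header: pclusters queued
 * for the same decompression pass are chained through ->next, and the
 * sentinels above mark the state of the chain's end, e.g.
 *
 *	pclA->next == pclB
 *	pclB->next == Z_EROFS_PCLUSTER_TAIL	(still open, may grow)
 *
 * after submission the tail is switched to Z_EROFS_PCLUSTER_TAIL_CLOSED,
 * and Z_EROFS_PCLUSTER_NIL means the pcluster isn't chained at all.
 */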

struct z_erofs_decompressqueue {
	struct super_block *sb;
	atomic_t pending_bios;
	z_erofs_next_pcluster_t head;

	union {
		struct completion done;
		struct work_struct work;
	} u;
};

static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
{
	return !pcl->obj.index;
}

static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
{
	if (z_erofs_is_inline_pcluster(pcl))
		return 1;
	return pcl->pclusterpages;
}

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (aka. ongoing_packs): # of references to drop before the page
 *                               can be unlocked;
 * sub-index: 0 - for a partial page, >= 1 - full-page sub-index
 */
typedef atomic_t z_erofs_onlinepage_t;
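
/*
 * Worked example (added for illustration): the atomic value stored in
 * page_private() keeps the waiter count in the low
 * Z_EROFS_ONLINEPAGE_COUNT_BITS bits and the sub-index above them:
 *
 *	value = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) | waiters;
 *
 * e.g. sub-index 3 with 2 outstanding waiters is (3 << 2) | 2 == 14;
 * see z_erofs_onlinepage_fixup() and z_erofs_onlinepage_endio() below.
 */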

/* type punning */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* keep from being unlocked in advance */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	union z_erofs_onlinepage_converter u = { .v = &page_private(page) };
	int orig, orig_index, val;

repeat:
	orig = atomic_read(u.o);
	orig_index = orig >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (orig_index) {
		if (!index)
			return;

		DBG_BUGON(orig_index != index);
	}

	val = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((orig & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	if (atomic_cmpxchg(u.o, orig, val) != orig)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		set_page_private(page, 0);
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
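
/*
 * Typical lifecycle of an online page, sketched for illustration (the
 * actual call sites live in zdata.c and may differ in detail):
 *
 *	z_erofs_onlinepage_init(page);			count = 1, keep the page locked
 *	z_erofs_onlinepage_fixup(page, index, true);	one more pending piece
 *	...
 *	z_erofs_onlinepage_endio(page);			called per finished piece;
 *	z_erofs_onlinepage_endio(page);			the last call unlocks the page
 */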

#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048

#endif