/* Copyright (C) 1993-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.

   As a special exception, if you link the code in this file with
   files compiled with a GNU compiler to produce an executable,
   that does not cause the resulting executable to be covered by
   the GNU Lesser General Public License.  This exception does not
   however invalidate any other reasons why the executable file
   might be covered by the GNU Lesser General Public License.
   This exception applies to code released by its copyright holders
   in files containing the exception.  */


#include "libioP.h"
#include <assert.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include "../wcsmbs/wcsmbsload.h"
#include "../iconv/gconv_charset.h"
#include "../iconv/gconv_int.h"
#include <shlib-compat.h>
#include <not-cancel.h>
#include <kernel-features.h>

extern struct __gconv_trans_data __libio_translit attribute_hidden;

/* An fstream can be in at most one of put mode, get mode, or putback mode.
   Putback mode is a variant of get mode.

   In a filebuf, there is only one current position, instead of two
   separate get and put pointers.  In get mode, the current position
   is that of gptr(); in put mode that of pptr().

   The position in the buffer that corresponds to the position
   in the external file system is normally _IO_read_end, except in putback
   mode, when it is _IO_save_end, and also when the file is in append mode,
   since switching from read to write mode automatically sends the position
   in the external file system to the end of file.
   If the field _fb._offset is >= 0, it gives the offset in
   the file as a whole corresponding to eGptr(). (?)

   PUT MODE:
   If a filebuf is in put mode, then all of _IO_read_ptr, _IO_read_end,
   and _IO_read_base are equal to each other.  These are usually equal
   to _IO_buf_base, though not necessarily if we have switched from
   get mode to put mode.  (The reason is to maintain the invariant
   that _IO_read_end corresponds to the external file position.)
   _IO_write_base is non-NULL and usually equal to _IO_buf_base.
   We also have _IO_write_end == _IO_buf_end, but only in fully buffered mode.
   The unflushed characters are those between _IO_write_base and
   _IO_write_ptr.

   GET MODE:
   If a filebuf is in get or putback mode, eback() != egptr().
   In get mode, the unread characters are between gptr() and egptr().
   The OS file position corresponds to that of egptr().

   PUTBACK MODE:
   Putback mode is used to remember "excess" characters that have
   been sputbackc'd in a separate putback buffer.
   In putback mode, the get buffer points to the special putback buffer.
   The unread characters are the characters between gptr() and egptr()
   in the putback buffer, as well as the area between save_gptr()
   and save_egptr(), which point into the original reserve buffer.
   (The pointers save_gptr() and save_egptr() are the values
   of gptr() and egptr() at the time putback mode was entered.)
   The OS position corresponds to that of save_egptr().

   LINE BUFFERED OUTPUT:
   During line buffered output, _IO_write_base==base() && epptr()==base().
   However, ptr() may be anywhere between base() and ebuf().
   This forces a call to filebuf::overflow(int C) on every put.
   If there is more space in the buffer, and C is not a '\n',
   then C is inserted, and pptr() incremented.

   UNBUFFERED STREAMS:
   If a filebuf is unbuffered(), then _shortbuf[1] is used as the buffer.
*/

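/* Flag set carried by a filebuf that is not attached to an open file: mark
   it as a file-backed stream, forbid both reading and writing until a
   descriptor is attached, and keep the get and put pointers tied
   together.  */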
#define CLOSED_FILEBUF_FLAGS \
  (_IO_IS_FILEBUF+_IO_NO_READS+_IO_NO_WRITES+_IO_TIED_PUT_GET)


void
_IO_new_file_init_internal (struct _IO_FILE_plus *fp)
{
  /* POSIX.1 allows another file handle to be used to change the position
     of our file descriptor.  Hence we actually don't know the actual
     position before we do the first fseek (and until a following fflush).  */
  fp->file._offset = _IO_pos_BAD;
  fp->file._flags |= CLOSED_FILEBUF_FLAGS;

  _IO_link_in (fp);
  fp->file._fileno = -1;
}

/* External version of _IO_new_file_init_internal which switches off
   vtable validation.  */
void
_IO_new_file_init (struct _IO_FILE_plus *fp)
{
  IO_set_accept_foreign_vtables (&_IO_vtable_check);
  _IO_new_file_init_internal (fp);
}

int
_IO_new_file_close_it (FILE *fp)
{
  int write_status;
  if (!_IO_file_is_open (fp))
    return EOF;

  if ((fp->_flags & _IO_NO_WRITES) == 0
      && (fp->_flags & _IO_CURRENTLY_PUTTING) != 0)
    write_status = _IO_do_flush (fp);
  else
    write_status = 0;

  _IO_unsave_markers (fp);

  int close_status = ((fp->_flags2 & _IO_FLAGS2_NOCLOSE) == 0
                      ? _IO_SYSCLOSE (fp) : 0);

  /* Free buffer.  */
  if (fp->_mode > 0)
    {
      if (_IO_have_wbackup (fp))
        _IO_free_wbackup_area (fp);
      _IO_wsetb (fp, NULL, NULL, 0);
      _IO_wsetg (fp, NULL, NULL, NULL);
      _IO_wsetp (fp, NULL, NULL);
    }
  _IO_setb (fp, NULL, NULL, 0);
  _IO_setg (fp, NULL, NULL, NULL);
  _IO_setp (fp, NULL, NULL);

  _IO_un_link ((struct _IO_FILE_plus *) fp);
  fp->_flags = _IO_MAGIC|CLOSED_FILEBUF_FLAGS;
  fp->_fileno = -1;
  fp->_offset = _IO_pos_BAD;

  return close_status ? close_status : write_status;
}
libc_hidden_ver (_IO_new_file_close_it, _IO_file_close_it)

void
_IO_new_file_finish (FILE *fp, int dummy)
{
  if (_IO_file_is_open (fp))
    {
      _IO_do_flush (fp);
      if (!(fp->_flags & _IO_DELETE_DONT_CLOSE))
        _IO_SYSCLOSE (fp);
    }
  _IO_default_finish (fp, 0);
}
libc_hidden_ver (_IO_new_file_finish, _IO_file_finish)

FILE *
_IO_file_open (FILE *fp, const char *filename, int posix_mode, int prot,
               int read_write, int is32not64)
{
  int fdesc;
  if (__glibc_unlikely (fp->_flags2 & _IO_FLAGS2_NOTCANCEL))
    fdesc = __open_nocancel (filename,
                             posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot);
  else
    fdesc = __open (filename, posix_mode | (is32not64 ? 0 : O_LARGEFILE), prot);
  if (fdesc < 0)
    return NULL;
  fp->_fileno = fdesc;
  _IO_mask_flags (fp, read_write, _IO_NO_READS+_IO_NO_WRITES+_IO_IS_APPENDING);
  /* For append mode, send the file offset to the end of the file.  Don't
     update the offset cache though, since the file handle is not active.  */
  if ((read_write & (_IO_IS_APPENDING | _IO_NO_READS))
      == (_IO_IS_APPENDING | _IO_NO_READS))
    {
      off64_t new_pos = _IO_SYSSEEK (fp, 0, _IO_seek_end);
      if (new_pos == _IO_pos_BAD && errno != ESPIPE)
        {
          __close_nocancel (fdesc);
          return NULL;
        }
    }
  _IO_link_in ((struct _IO_FILE_plus *) fp);
  return fp;
}
libc_hidden_def (_IO_file_open)

FILE *
_IO_new_file_fopen (FILE *fp, const char *filename, const char *mode,
                    int is32not64)
{
  int oflags = 0, omode;
  int read_write;
  int oprot = 0666;
  int i;
  FILE *result;
  const char *cs;
  const char *last_recognized;

  if (_IO_file_is_open (fp))
    return 0;
  switch (*mode)
    {
    case 'r':
      omode = O_RDONLY;
      read_write = _IO_NO_WRITES;
      break;
    case 'w':
      omode = O_WRONLY;
      oflags = O_CREAT|O_TRUNC;
      read_write = _IO_NO_READS;
      break;
    case 'a':
      omode = O_WRONLY;
      oflags = O_CREAT|O_APPEND;
      read_write = _IO_NO_READS|_IO_IS_APPENDING;
      break;
    default:
      __set_errno (EINVAL);
      return NULL;
    }
  last_recognized = mode;
  for (i = 1; i < 7; ++i)
    {
      switch (*++mode)
        {
        case '\0':
          break;
        case '+':
          omode = O_RDWR;
          read_write &= _IO_IS_APPENDING;
          last_recognized = mode;
          continue;
        case 'x':
          oflags |= O_EXCL;
          last_recognized = mode;
          continue;
        case 'b':
          last_recognized = mode;
          continue;
        case 'm':
          fp->_flags2 |= _IO_FLAGS2_MMAP;
          continue;
        case 'c':
          fp->_flags2 |= _IO_FLAGS2_NOTCANCEL;
          continue;
        case 'e':
          oflags |= O_CLOEXEC;
          fp->_flags2 |= _IO_FLAGS2_CLOEXEC;
          continue;
        default:
          /* Ignore.  */
          continue;
        }
      break;
    }
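
  /* At this point the mode string has been decoded.  For example, "a+e"
     yields omode == O_RDWR, oflags == O_CREAT|O_APPEND|O_CLOEXEC and
     read_write == _IO_IS_APPENDING, while a plain "r" leaves oflags at 0
     with read_write == _IO_NO_WRITES.  */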

  result = _IO_file_open (fp, filename, omode|oflags, oprot, read_write,
                          is32not64);

  if (result != NULL)
    {
      /* Test whether the mode string specifies the conversion.  */
      cs = strstr (last_recognized + 1, ",ccs=");
      if (cs != NULL)
        {
          /* Yep.  Load the appropriate conversions and set the orientation
             to wide.  */
          struct gconv_fcts fcts;
          struct _IO_codecvt *cc;
          char *endp = __strchrnul (cs + 5, ',');
          char *ccs = malloc (endp - (cs + 5) + 3);

          if (ccs == NULL)
            {
              int malloc_err = errno;  /* Whatever malloc failed with.  */
              (void) _IO_file_close_it (fp);
              __set_errno (malloc_err);
              return NULL;
            }

          *((char *) __mempcpy (ccs, cs + 5, endp - (cs + 5))) = '\0';
          strip (ccs, ccs);

          if (__wcsmbs_named_conv (&fcts, ccs[2] == '\0'
                                   ? upstr (ccs, cs + 5) : ccs) != 0)
            {
              /* Something went wrong, we cannot load the conversion modules.
                 This means we cannot proceed since the user explicitly asked
                 for these.  */
              (void) _IO_file_close_it (fp);
              free (ccs);
              __set_errno (EINVAL);
              return NULL;
            }

          free (ccs);

          assert (fcts.towc_nsteps == 1);
          assert (fcts.tomb_nsteps == 1);

          fp->_wide_data->_IO_read_ptr = fp->_wide_data->_IO_read_end;
          fp->_wide_data->_IO_write_ptr = fp->_wide_data->_IO_write_base;

          /* Clear the state.  We start all over again.  */
          memset (&fp->_wide_data->_IO_state, '\0', sizeof (__mbstate_t));
          memset (&fp->_wide_data->_IO_last_state, '\0', sizeof (__mbstate_t));

          cc = fp->_codecvt = &fp->_wide_data->_codecvt;

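          /* __cd_in converts external bytes into wide characters (the towc
             step) for input; __cd_out converts wide characters back into
             the external encoding (the tomb step) for output.  */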
          cc->__cd_in.step = fcts.towc;

          cc->__cd_in.step_data.__invocation_counter = 0;
          cc->__cd_in.step_data.__internal_use = 1;
          cc->__cd_in.step_data.__flags = __GCONV_IS_LAST;
          cc->__cd_in.step_data.__statep = &result->_wide_data->_IO_state;

          cc->__cd_out.step = fcts.tomb;

          cc->__cd_out.step_data.__invocation_counter = 0;
          cc->__cd_out.step_data.__internal_use = 1;
          cc->__cd_out.step_data.__flags = __GCONV_IS_LAST | __GCONV_TRANSLIT;
          cc->__cd_out.step_data.__statep = &result->_wide_data->_IO_state;

          /* From now on use the wide character callback functions.  */
          _IO_JUMPS_FILE_plus (fp) = fp->_wide_data->_wide_vtable;

          /* Set the mode now.  */
          result->_mode = 1;
        }
    }

  return result;
}
libc_hidden_ver (_IO_new_file_fopen, _IO_file_fopen)

FILE *
_IO_new_file_attach (FILE *fp, int fd)
{
  if (_IO_file_is_open (fp))
    return NULL;
  fp->_fileno = fd;
  fp->_flags &= ~(_IO_NO_READS+_IO_NO_WRITES);
  fp->_flags |= _IO_DELETE_DONT_CLOSE;
  /* Get the current position of the file.  */
  /* We have to do that since that may be junk.  */
  fp->_offset = _IO_pos_BAD;
  int save_errno = errno;
  if (_IO_SEEKOFF (fp, (off64_t) 0, _IO_seek_cur, _IOS_INPUT|_IOS_OUTPUT)
      == _IO_pos_BAD && errno != ESPIPE)
    return NULL;
  __set_errno (save_errno);
  return fp;
}
libc_hidden_ver (_IO_new_file_attach, _IO_file_attach)

FILE *
_IO_new_file_setbuf (FILE *fp, char *p, ssize_t len)
{
  if (_IO_default_setbuf (fp, p, len) == NULL)
    return NULL;

  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end
    = fp->_IO_buf_base;
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);

  return fp;
}
libc_hidden_ver (_IO_new_file_setbuf, _IO_file_setbuf)


FILE *
_IO_file_setbuf_mmap (FILE *fp, char *p, ssize_t len)
{
  FILE *result;

  /* Change the function table.  */
  _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
  fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;

  /* And perform the normal operation.  */
  result = _IO_new_file_setbuf (fp, p, len);

  /* If the call failed, restore to using mmap.  */
  if (result == NULL)
    {
      _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap;
      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap;
    }

  return result;
}

static size_t new_do_write (FILE *, const char *, size_t);

/* Write TO_DO bytes from DATA to FP.
   Then mark FP as having empty buffers.  */

int
_IO_new_do_write (FILE *fp, const char *data, size_t to_do)
{
  return (to_do == 0
          || (size_t) new_do_write (fp, data, to_do) == to_do) ? 0 : EOF;
}
libc_hidden_ver (_IO_new_do_write, _IO_do_write)

static size_t
new_do_write (FILE *fp, const char *data, size_t to_do)
{
  size_t count;
  if (fp->_flags & _IO_IS_APPENDING)
    /* On a system without a proper O_APPEND implementation,
       you would need to sys_seek(0, SEEK_END) here, but that is neither
       needed nor desirable on Unix- or POSIX-like systems.
       Instead, just indicate that the offset (before and after) is
       unpredictable.  */
    fp->_offset = _IO_pos_BAD;
  else if (fp->_IO_read_end != fp->_IO_write_base)
    {
      off64_t new_pos
        = _IO_SYSSEEK (fp, fp->_IO_write_base - fp->_IO_read_end, 1);
      if (new_pos == _IO_pos_BAD)
        return 0;
      fp->_offset = new_pos;
    }
  count = _IO_SYSWRITE (fp, data, to_do);
  if (fp->_cur_column && count)
    fp->_cur_column = _IO_adjust_column (fp->_cur_column - 1, data, count) + 1;
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_buf_base;
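  /* Leave no put area for line-buffered or unbuffered narrow streams so
     that every subsequent put goes through overflow; otherwise allow
     buffering up to the end of the reserve area.  */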
  fp->_IO_write_end = (fp->_mode <= 0
                       && (fp->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED))
                       ? fp->_IO_buf_base : fp->_IO_buf_end);
  return count;
}

int
_IO_new_file_underflow (FILE *fp)
{
  ssize_t count;

  /* C99 requires EOF to be "sticky".  */
  if (fp->_flags & _IO_EOF_SEEN)
    return EOF;

  if (fp->_flags & _IO_NO_READS)
    {
      fp->_flags |= _IO_ERR_SEEN;
      __set_errno (EBADF);
      return EOF;
    }
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;

  if (fp->_IO_buf_base == NULL)
    {
      /* Maybe we already have a push back pointer.  */
      if (fp->_IO_save_base != NULL)
        {
          free (fp->_IO_save_base);
          fp->_flags &= ~_IO_IN_BACKUP;
        }
      _IO_doallocbuf (fp);
    }

  /* FIXME This can/should be moved to genops ??  */
  if (fp->_flags & (_IO_LINE_BUF|_IO_UNBUFFERED))
    {
      /* We used to flush all line-buffered streams.  This really isn't
         required by any standard.  My recollection is that
         traditional Unix systems did this for stdout.  stderr better
         not be line buffered.  So we do just that here
         explicitly.  --drepper */
      _IO_acquire_lock (stdout);

      if ((stdout->_flags & (_IO_LINKED | _IO_NO_WRITES | _IO_LINE_BUF))
          == (_IO_LINKED | _IO_LINE_BUF))
        _IO_OVERFLOW (stdout, EOF);

      _IO_release_lock (stdout);
    }

  _IO_switch_to_get_mode (fp);

  /* This is very tricky.  We have to adjust those
     pointers before we call _IO_SYSREAD () since
     we may longjmp () out while waiting for
     input.  Those pointers may be screwed up.  H.J. */
  fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_buf_base;
  fp->_IO_read_end = fp->_IO_buf_base;
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end
    = fp->_IO_buf_base;

  count = _IO_SYSREAD (fp, fp->_IO_buf_base,
                       fp->_IO_buf_end - fp->_IO_buf_base);
  if (count <= 0)
    {
      if (count == 0)
        fp->_flags |= _IO_EOF_SEEN;
      else
        fp->_flags |= _IO_ERR_SEEN, count = 0;
    }
  fp->_IO_read_end += count;
  if (count == 0)
    {
      /* If a stream is read to EOF, the calling application may switch active
         handles.  As a result, our offset cache would no longer be valid, so
         unset it.  */
      fp->_offset = _IO_pos_BAD;
      return EOF;
    }
  if (fp->_offset != _IO_pos_BAD)
    _IO_pos_adjust (fp->_offset, count);
  return *(unsigned char *) fp->_IO_read_ptr;
}
libc_hidden_ver (_IO_new_file_underflow, _IO_file_underflow)

/* Guts of underflow callback if we mmap the file.  This stats the file and
   updates the stream state to match.  In the normal case we return zero.
   If the file is no longer eligible for mmap, its jump tables are reset to
   the vanilla ones and we return nonzero.  */
static int
mmap_remap_check (FILE *fp)
{
  struct __stat64_t64 st;

  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024))
    {
      const size_t pagesize = __getpagesize ();
# define ROUNDED(x) (((x) + pagesize - 1) & ~(pagesize - 1))
      if (ROUNDED (st.st_size) < ROUNDED (fp->_IO_buf_end
                                          - fp->_IO_buf_base))
        {
          /* We can trim off some pages past the end of the file.  */
          (void) __munmap (fp->_IO_buf_base + ROUNDED (st.st_size),
                           ROUNDED (fp->_IO_buf_end - fp->_IO_buf_base)
                           - ROUNDED (st.st_size));
          fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
        }
      else if (ROUNDED (st.st_size) > ROUNDED (fp->_IO_buf_end
                                               - fp->_IO_buf_base))
        {
          /* The file added some pages.  We need to remap it.  */
          void *p;
#if _G_HAVE_MREMAP
          p = __mremap (fp->_IO_buf_base, ROUNDED (fp->_IO_buf_end
                                                   - fp->_IO_buf_base),
                        ROUNDED (st.st_size), MREMAP_MAYMOVE);
          if (p == MAP_FAILED)
            {
              (void) __munmap (fp->_IO_buf_base,
                               fp->_IO_buf_end - fp->_IO_buf_base);
              goto punt;
            }
#else
          (void) __munmap (fp->_IO_buf_base,
                           fp->_IO_buf_end - fp->_IO_buf_base);
          p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED,
                        fp->_fileno, 0);
          if (p == MAP_FAILED)
            goto punt;
#endif
          fp->_IO_buf_base = p;
          fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
        }
      else
        {
          /* The number of pages didn't change.  */
          fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
        }
# undef ROUNDED

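      /* fp->_offset currently corresponds to _IO_read_end; convert it into
         the offset of the current read position before rebuilding the get
         area over the (possibly moved or resized) mapping.  */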
      fp->_offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      _IO_setg (fp, fp->_IO_buf_base,
                fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base
                ? fp->_IO_buf_base + fp->_offset : fp->_IO_buf_end,
                fp->_IO_buf_end);

      /* If we are already positioned at or past the end of the file, don't
         change the current offset.  If not, seek past what we have mapped,
         mimicking the position left by a normal underflow reading into its
         buffer until EOF.  */

      if (fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base)
        {
          if (__lseek64 (fp->_fileno, fp->_IO_buf_end - fp->_IO_buf_base,
                         SEEK_SET)
              != fp->_IO_buf_end - fp->_IO_buf_base)
            fp->_flags |= _IO_ERR_SEEN;
          else
            fp->_offset = fp->_IO_buf_end - fp->_IO_buf_base;
        }

      return 0;
    }
  else
    {
      /* Life is no longer good for mmap.  Punt it.  */
      (void) __munmap (fp->_IO_buf_base,
                       fp->_IO_buf_end - fp->_IO_buf_base);
    punt:
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
      _IO_setg (fp, NULL, NULL, NULL);
      if (fp->_mode <= 0)
        _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
      else
        _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;

      return 1;
    }
}

/* Special callback replacing the underflow callbacks if we mmap the file.  */
int
_IO_file_underflow_mmap (FILE *fp)
{
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;

  if (__glibc_unlikely (mmap_remap_check (fp)))
    /* We punted to the regular file functions.  */
    return _IO_UNDERFLOW (fp);

  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;

  fp->_flags |= _IO_EOF_SEEN;
  return EOF;
}

static void
decide_maybe_mmap (FILE *fp)
{
  /* We use the file in read-only mode.  This could mean we can
     mmap the file and use it without any copying.  But not all
     file descriptors are for mmap-able objects and on 32-bit
     machines we don't want to map files which are too large since
     this would require too much virtual memory.  */
  struct __stat64_t64 st;

  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024)
      /* Sanity check.  */
      && (fp->_offset == _IO_pos_BAD || fp->_offset <= st.st_size))
    {
      /* Try to map the file.  */
      void *p;

      p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED, fp->_fileno, 0);
      if (p != MAP_FAILED)
        {
          /* OK, we managed to map the file.  Set the buffer up and use a
             special jump table with simplified underflow functions which
             never try to read anything from the file.  */

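          /* Move the kernel file offset to the end of the mapping, mimicking
             a stream that has already read everything it has buffered; if
             that fails, fall back to the vanilla functions below.  */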
          if (__lseek64 (fp->_fileno, st.st_size, SEEK_SET) != st.st_size)
            {
              (void) __munmap (p, st.st_size);
              fp->_offset = _IO_pos_BAD;
            }
          else
            {
              _IO_setb (fp, p, (char *) p + st.st_size, 0);

              if (fp->_offset == _IO_pos_BAD)
                fp->_offset = 0;

              _IO_setg (fp, p, p + fp->_offset, p + st.st_size);
              fp->_offset = st.st_size;

              if (fp->_mode <= 0)
                _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap;
              else
                _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps_mmap;
              fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap;

              return;
            }
        }
    }

  /* We couldn't use mmap, so revert to the vanilla file operations.  */

  if (fp->_mode <= 0)
    _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
  else
    _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
  fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;
}

int
_IO_file_underflow_maybe_mmap (FILE *fp)
{
  /* This is the first read attempt.  Choose mmap or vanilla operations
     and then punt to the chosen underflow routine.  */
  decide_maybe_mmap (fp);
  return _IO_UNDERFLOW (fp);
}


int
_IO_new_file_overflow (FILE *f, int ch)
{
  if (f->_flags & _IO_NO_WRITES) /* SET ERROR */
    {
      f->_flags |= _IO_ERR_SEEN;
      __set_errno (EBADF);
      return EOF;
    }
  /* If currently reading or no buffer allocated.  */
  if ((f->_flags & _IO_CURRENTLY_PUTTING) == 0 || f->_IO_write_base == NULL)
    {
      /* Allocate a buffer if needed.  */
      if (f->_IO_write_base == NULL)
        {
          _IO_doallocbuf (f);
          _IO_setg (f, f->_IO_buf_base, f->_IO_buf_base, f->_IO_buf_base);
        }
      /* Otherwise must be currently reading.
         If _IO_read_ptr (and hence also _IO_read_end) is at the buffer end,
         logically slide the buffer forwards one block (by setting the
         read pointers to all point at the beginning of the block).  This
         makes room for subsequent output.
         Otherwise, set the read pointers to _IO_read_end (leaving that
         alone, so it can continue to correspond to the external position).  */
      if (__glibc_unlikely (_IO_in_backup (f)))
        {
          size_t nbackup = f->_IO_read_end - f->_IO_read_ptr;
          _IO_free_backup_area (f);
          f->_IO_read_base -= MIN (nbackup,
                                   f->_IO_read_base - f->_IO_buf_base);
          f->_IO_read_ptr = f->_IO_read_base;
        }

      if (f->_IO_read_ptr == f->_IO_buf_end)
        f->_IO_read_end = f->_IO_read_ptr = f->_IO_buf_base;
      f->_IO_write_ptr = f->_IO_read_ptr;
      f->_IO_write_base = f->_IO_write_ptr;
      f->_IO_write_end = f->_IO_buf_end;
      f->_IO_read_base = f->_IO_read_ptr = f->_IO_read_end;

      f->_flags |= _IO_CURRENTLY_PUTTING;
      if (f->_mode <= 0 && f->_flags & (_IO_LINE_BUF | _IO_UNBUFFERED))
        f->_IO_write_end = f->_IO_write_ptr;
    }
  if (ch == EOF)
    return _IO_do_write (f, f->_IO_write_base,
                         f->_IO_write_ptr - f->_IO_write_base);
  if (f->_IO_write_ptr == f->_IO_buf_end) /* Buffer is really full */
    if (_IO_do_flush (f) == EOF)
      return EOF;
  *f->_IO_write_ptr++ = ch;
  if ((f->_flags & _IO_UNBUFFERED)
      || ((f->_flags & _IO_LINE_BUF) && ch == '\n'))
    if (_IO_do_write (f, f->_IO_write_base,
                      f->_IO_write_ptr - f->_IO_write_base) == EOF)
      return EOF;
  return (unsigned char) ch;
}
libc_hidden_ver (_IO_new_file_overflow, _IO_file_overflow)

int
_IO_new_file_sync (FILE *fp)
{
  ssize_t delta;
  int retval = 0;

  /* char* ptr = cur_ptr(); */
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_do_flush (fp))
      return EOF;
  delta = fp->_IO_read_ptr - fp->_IO_read_end;
  if (delta != 0)
    {
      off64_t new_pos = _IO_SYSSEEK (fp, delta, 1);
      if (new_pos != (off64_t) EOF)
        fp->_IO_read_end = fp->_IO_read_ptr;
      else if (errno == ESPIPE)
        ; /* Ignore error from unseekable devices.  */
      else
        retval = EOF;
    }
  if (retval != EOF)
    fp->_offset = _IO_pos_BAD;
  /* FIXME: Cleanup - can this be shared?  */
  /* setg(base(), ptr, ptr); */
  return retval;
}
libc_hidden_ver (_IO_new_file_sync, _IO_file_sync)

static int
_IO_file_sync_mmap (FILE *fp)
{
  if (fp->_IO_read_ptr != fp->_IO_read_end)
    {
      if (__lseek64 (fp->_fileno, fp->_IO_read_ptr - fp->_IO_buf_base,
                     SEEK_SET)
          != fp->_IO_read_ptr - fp->_IO_buf_base)
        {
          fp->_flags |= _IO_ERR_SEEN;
          return EOF;
        }
    }
  fp->_offset = fp->_IO_read_ptr - fp->_IO_buf_base;
  fp->_IO_read_end = fp->_IO_read_ptr = fp->_IO_read_base;
  return 0;
}

/* ftell{,o} implementation.  The only time we modify the state of the stream
   is when we have unflushed writes.  In that case we seek to the end and
   record that offset in the stream object.  */
static off64_t
do_ftell (FILE *fp)
{
  off64_t result, offset = 0;

  /* No point looking at unflushed data if we haven't allocated buffers
     yet.  */
  if (fp->_IO_buf_base != NULL)
    {
      bool unflushed_writes = fp->_IO_write_ptr > fp->_IO_write_base;

      bool append_mode = (fp->_flags & _IO_IS_APPENDING) == _IO_IS_APPENDING;

      /* When we have unflushed writes in append mode, seek to the end of the
         file and record that offset.  This is the only time we change the
         file stream state and it is safe since the file handle is active.  */
      if (unflushed_writes && append_mode)
        {
          result = _IO_SYSSEEK (fp, 0, _IO_seek_end);
          if (result == _IO_pos_BAD)
            return EOF;
          else
            fp->_offset = result;
        }

      /* Adjust for unflushed data.  */
      if (!unflushed_writes)
        offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      /* We don't trust _IO_read_end to represent the current file offset
         when writing in append mode because the value would have to be
         shifted to the end of the file during a flush.  Use the write base
         instead, along with the new offset we got above when we did a seek
         to the end of the file.  */
      else if (append_mode)
        offset += fp->_IO_write_ptr - fp->_IO_write_base;
      /* For all other modes, _IO_read_end represents the file offset.  */
      else
        offset += fp->_IO_write_ptr - fp->_IO_read_end;
    }

  if (fp->_offset != _IO_pos_BAD)
    result = fp->_offset;
  else
    result = _IO_SYSSEEK (fp, 0, _IO_seek_cur);

  if (result == EOF)
    return result;

  result += offset;

  if (result < 0)
    {
      __set_errno (EINVAL);
      return EOF;
    }

  return result;
}

off64_t
_IO_new_file_seekoff (FILE *fp, off64_t offset, int dir, int mode)
{
  off64_t result;
  off64_t delta, new_offset;
  long count;

  /* Short-circuit into a separate function.  We don't want to mix any
     functionality and we don't want to touch anything inside the FILE
     object.  */
  if (mode == 0)
    return do_ftell (fp);

  /* POSIX.1 8.2.3.7 says that after a call to fflush() the file
     offset of the underlying file must be exact.  */
  int must_be_exact = (fp->_IO_read_base == fp->_IO_read_end
                       && fp->_IO_write_base == fp->_IO_write_ptr);

  bool was_writing = (fp->_IO_write_ptr > fp->_IO_write_base
                      || _IO_in_put_mode (fp));

  /* Flush unwritten characters.
     (This may do an unneeded write if we seek within the buffer.
     But to be able to switch to reading, we would need to set
     egptr to pptr.  That can't be done in the current design,
     which assumes file_ptr() is eGptr.  Anyway, since we probably
     end up flushing when we close(), it doesn't make much difference.)
     FIXME: simulate mem-mapped files.  */
  if (was_writing && _IO_switch_to_get_mode (fp))
    return EOF;

  if (fp->_IO_buf_base == NULL)
    {
      /* It could be that we already have a pushback buffer.  */
      if (fp->_IO_read_base != NULL)
        {
          free (fp->_IO_read_base);
          fp->_flags &= ~_IO_IN_BACKUP;
        }
      _IO_doallocbuf (fp);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
    }

  switch (dir)
    {
    case _IO_seek_cur:
      /* Adjust for read-ahead (bytes in buffer).  */
      offset -= fp->_IO_read_end - fp->_IO_read_ptr;

      if (fp->_offset == _IO_pos_BAD)
        goto dumb;
      /* Make offset absolute, assuming current pointer is file_ptr().  */
      offset += fp->_offset;
      if (offset < 0)
        {
          __set_errno (EINVAL);
          return EOF;
        }

      dir = _IO_seek_set;
      break;
    case _IO_seek_set:
      break;
    case _IO_seek_end:
      {
        struct __stat64_t64 st;
        if (_IO_SYSSTAT (fp, &st) == 0 && S_ISREG (st.st_mode))
          {
            offset += st.st_size;
            dir = _IO_seek_set;
          }
        else
          goto dumb;
      }
    }

  _IO_free_backup_area (fp);

  /* At this point, dir==_IO_seek_set.  */

  /* If destination is within current buffer, optimize: */
  if (fp->_offset != _IO_pos_BAD && fp->_IO_read_base != NULL
      && !_IO_in_backup (fp))
    {
      off64_t start_offset = (fp->_offset
                              - (fp->_IO_read_end - fp->_IO_buf_base));
      if (offset >= start_offset && offset < fp->_offset)
        {
          _IO_setg (fp, fp->_IO_buf_base,
                    fp->_IO_buf_base + (offset - start_offset),
                    fp->_IO_read_end);
          _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);

          _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
          goto resync;
        }
    }

  if (fp->_flags & _IO_NO_READS)
    goto dumb;

  /* Try to seek to a block boundary, to improve kernel page management.  */
  new_offset = offset & ~(fp->_IO_buf_end - fp->_IO_buf_base - 1);
  delta = offset - new_offset;
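  /* For example, with a 4096-byte buffer (a power of two, as usual) a target
     offset of 10000 yields new_offset == 8192 and delta == 1808, so the read
     below refills the buffer starting at that block boundary.  */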
  if (delta > fp->_IO_buf_end - fp->_IO_buf_base)
    {
      new_offset = offset;
      delta = 0;
    }
  result = _IO_SYSSEEK (fp, new_offset, 0);
  if (result < 0)
    return EOF;
  if (delta == 0)
    count = 0;
  else
    {
      count = _IO_SYSREAD (fp, fp->_IO_buf_base,
                           (must_be_exact
                            ? delta : fp->_IO_buf_end - fp->_IO_buf_base));
      if (count < delta)
        {
          /* We weren't allowed to read, but try to seek the remainder.  */
          offset = count == EOF ? delta : delta - count;
          dir = _IO_seek_cur;
          goto dumb;
        }
    }
  _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + delta,
            fp->_IO_buf_base + count);
  _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
  fp->_offset = result + count;
  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
  return offset;
dumb:

  _IO_unsave_markers (fp);
  result = _IO_SYSSEEK (fp, offset, dir);
  if (result != EOF)
    {
      _IO_mask_flags (fp, 0, _IO_EOF_SEEN);
      fp->_offset = result;
      _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
      _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);
    }
  return result;

resync:
  /* We need to do it since it is possible that the file offset in
     the kernel may be changed behind our back.  It may happen when
     we fopen a file and then do a fork.  One process may access the
     file and the kernel file offset will be changed.  */
  if (fp->_offset >= 0)
    _IO_SYSSEEK (fp, fp->_offset, 0);

  return offset;
}
libc_hidden_ver (_IO_new_file_seekoff, _IO_file_seekoff)

off64_t
_IO_file_seekoff_mmap (FILE *fp, off64_t offset, int dir, int mode)
{
  off64_t result;

  /* If we are only interested in the current position, calculate it and
     return right now.  This calculation does the right thing when we are
     using a pushback buffer, but in the usual case has the same value as
     (fp->_IO_read_ptr - fp->_IO_buf_base).  */
  if (mode == 0)
    return fp->_offset - (fp->_IO_read_end - fp->_IO_read_ptr);

  switch (dir)
    {
    case _IO_seek_cur:
      /* Adjust for read-ahead (bytes in buffer).  */
      offset += fp->_IO_read_ptr - fp->_IO_read_base;
      break;
    case _IO_seek_set:
      break;
    case _IO_seek_end:
      offset += fp->_IO_buf_end - fp->_IO_buf_base;
      break;
    }
  /* At this point the offset is absolute, as if dir == _IO_seek_set.  */

  if (offset < 0)
    {
      /* No negative offsets are valid.  */
      __set_errno (EINVAL);
      return EOF;
    }

  result = _IO_SYSSEEK (fp, offset, 0);
  if (result < 0)
    return EOF;

  if (offset > fp->_IO_buf_end - fp->_IO_buf_base)
    /* One can fseek arbitrarily past the end of the file
       and it is meaningless until one attempts to read.
       Leave the buffer pointers in EOF state until underflow.  */
    _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_end, fp->_IO_buf_end);
  else
    /* Adjust the read pointers to match the file position, but leave the
       get area empty so the next read attempt will call underflow.  */
    _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base + offset,
              fp->_IO_buf_base + offset);

  fp->_offset = result;

  _IO_mask_flags (fp, 0, _IO_EOF_SEEN);

  return offset;
}

static off64_t
_IO_file_seekoff_maybe_mmap (FILE *fp, off64_t offset, int dir, int mode)
{
  /* We only get here when we haven't tried to read anything yet.
     So there is nothing more useful for us to do here than just
     the underlying lseek call.  */

  off64_t result = _IO_SYSSEEK (fp, offset, dir);
  if (result < 0)
    return EOF;

  fp->_offset = result;
  return result;
}

ssize_t
_IO_file_read (FILE *fp, void *buf, ssize_t size)
{
  return (__builtin_expect (fp->_flags2 & _IO_FLAGS2_NOTCANCEL, 0)
          ? __read_nocancel (fp->_fileno, buf, size)
          : __read (fp->_fileno, buf, size));
}
libc_hidden_def (_IO_file_read)

off64_t
_IO_file_seek (FILE *fp, off64_t offset, int dir)
{
  return __lseek64 (fp->_fileno, offset, dir);
}
libc_hidden_def (_IO_file_seek)

int
_IO_file_stat (FILE *fp, void *st)
{
  return __fstat64_time64 (fp->_fileno, (struct __stat64_t64 *) st);
}
libc_hidden_def (_IO_file_stat)

int
_IO_file_close_mmap (FILE *fp)
{
  /* In addition to closing the file descriptor we have to unmap the
     file.  */
  (void) __munmap (fp->_IO_buf_base, fp->_IO_buf_end - fp->_IO_buf_base);
  fp->_IO_buf_base = fp->_IO_buf_end = NULL;
  /* Cancelling close should be avoided if possible since it leaves an
     unrecoverable state behind.  */
  return __close_nocancel (fp->_fileno);
}

int
_IO_file_close (FILE *fp)
{
  /* Cancelling close should be avoided if possible since it leaves an
     unrecoverable state behind.  */
  return __close_nocancel (fp->_fileno);
}
libc_hidden_def (_IO_file_close)

ssize_t
_IO_new_file_write (FILE *f, const void *data, ssize_t n)
{
  ssize_t to_do = n;
  while (to_do > 0)
    {
      ssize_t count = (__builtin_expect (f->_flags2
                                         & _IO_FLAGS2_NOTCANCEL, 0)
                       ? __write_nocancel (f->_fileno, data, to_do)
                       : __write (f->_fileno, data, to_do));
      if (count < 0)
        {
          f->_flags |= _IO_ERR_SEEN;
          break;
        }
      to_do -= count;
      data = (void *) ((char *) data + count);
    }
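  /* Report the number of bytes actually written and advance the cached
     offset by that amount; on a short write the error flag has already
     been set in the loop above.  */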
  n -= to_do;
  if (f->_offset >= 0)
    f->_offset += n;
  return n;
}

size_t
_IO_new_file_xsputn (FILE *f, const void *data, size_t n)
{
  const char *s = (const char *) data;
  size_t to_do = n;
  int must_flush = 0;
  size_t count = 0;

  if (n <= 0)
    return 0;
  /* This is an optimized implementation.
     If the amount to be written straddles a block boundary
     (or the filebuf is unbuffered), use sys_write directly.  */

  /* First figure out how much space is available in the buffer.  */
  if ((f->_flags & _IO_LINE_BUF) && (f->_flags & _IO_CURRENTLY_PUTTING))
    {
      count = f->_IO_buf_end - f->_IO_write_ptr;
      if (count >= n)
        {
          const char *p;
          for (p = s + n; p > s; )
            {
              if (*--p == '\n')
                {
                  count = p - s + 1;
                  must_flush = 1;
                  break;
                }
            }
        }
    }
  else if (f->_IO_write_end > f->_IO_write_ptr)
    count = f->_IO_write_end - f->_IO_write_ptr; /* Space available.  */

  /* Then fill the buffer.  */
  if (count > 0)
    {
      if (count > to_do)
        count = to_do;
      f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
      s += count;
      to_do -= count;
    }
  if (to_do + must_flush > 0)
    {
      size_t block_size, do_write;
      /* Next flush the (full) buffer.  */
      if (_IO_OVERFLOW (f, EOF) == EOF)
        /* If nothing else has to be written we must not signal the
           caller that everything has been written.  */
        return to_do == 0 ? EOF : n - to_do;

      /* Try to maintain alignment: write a whole number of blocks.  */
      block_size = f->_IO_buf_end - f->_IO_buf_base;
      do_write = to_do - (block_size >= 128 ? to_do % block_size : 0);
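      /* E.g. with a 4096-byte buffer and 10000 bytes left, do_write == 8192
         is written straight through below and the trailing 1808 bytes are
         handed to _IO_default_xsputn (normally just buffered).  */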

      if (do_write)
        {
          count = new_do_write (f, s, do_write);
          to_do -= count;
          if (count < do_write)
            return n - to_do;
        }

      /* Now write out the remainder.  Normally, this will fit in the
         buffer, but it's somewhat messier for line-buffered files,
         so we let _IO_default_xsputn handle the general case.  */
      if (to_do)
        to_do -= _IO_default_xsputn (f, s + do_write, to_do);
    }
  return n - to_do;
}
libc_hidden_ver (_IO_new_file_xsputn, _IO_file_xsputn)

size_t
_IO_file_xsgetn (FILE *fp, void *data, size_t n)
{
  size_t want, have;
  ssize_t count;
  char *s = data;

  want = n;

  if (fp->_IO_buf_base == NULL)
    {
      /* Maybe we already have a push back pointer.  */
      if (fp->_IO_save_base != NULL)
        {
          free (fp->_IO_save_base);
          fp->_flags &= ~_IO_IN_BACKUP;
        }
      _IO_doallocbuf (fp);
    }

  while (want > 0)
    {
      have = fp->_IO_read_end - fp->_IO_read_ptr;
      if (want <= have)
        {
          memcpy (s, fp->_IO_read_ptr, want);
          fp->_IO_read_ptr += want;
          want = 0;
        }
      else
        {
          if (have > 0)
            {
              s = __mempcpy (s, fp->_IO_read_ptr, have);
              want -= have;
              fp->_IO_read_ptr += have;
            }

          /* Check for backup and repeat.  */
          if (_IO_in_backup (fp))
            {
              _IO_switch_to_main_get_area (fp);
              continue;
            }

          /* If we now want less than a buffer, underflow and repeat
             the copy.  Otherwise, _IO_SYSREAD directly to
             the user buffer.  */
          if (fp->_IO_buf_base
              && want < (size_t) (fp->_IO_buf_end - fp->_IO_buf_base))
            {
              if (__underflow (fp) == EOF)
                break;

              continue;
            }

          /* These must be set before the sysread as we might longjmp out
             waiting for input.  */
          _IO_setg (fp, fp->_IO_buf_base, fp->_IO_buf_base, fp->_IO_buf_base);
          _IO_setp (fp, fp->_IO_buf_base, fp->_IO_buf_base);

          /* Try to maintain alignment: read a whole number of blocks.  */
          count = want;
          if (fp->_IO_buf_base)
            {
              size_t block_size = fp->_IO_buf_end - fp->_IO_buf_base;
              if (block_size >= 128)
                count -= want % block_size;
            }

          count = _IO_SYSREAD (fp, s, count);
          if (count <= 0)
            {
              if (count == 0)
                fp->_flags |= _IO_EOF_SEEN;
              else
                fp->_flags |= _IO_ERR_SEEN;

              break;
            }

          s += count;
          want -= count;
          if (fp->_offset != _IO_pos_BAD)
            _IO_pos_adjust (fp->_offset, count);
        }
    }

  return n - want;
}
libc_hidden_def (_IO_file_xsgetn)

static size_t
_IO_file_xsgetn_mmap (FILE *fp, void *data, size_t n)
{
  size_t have;
  char *read_ptr = fp->_IO_read_ptr;
  char *s = (char *) data;

  have = fp->_IO_read_end - fp->_IO_read_ptr;

  if (have < n)
    {
      if (__glibc_unlikely (_IO_in_backup (fp)))
        {
          s = __mempcpy (s, read_ptr, have);
          n -= have;
          _IO_switch_to_main_get_area (fp);
          read_ptr = fp->_IO_read_ptr;
          have = fp->_IO_read_end - fp->_IO_read_ptr;
        }

      if (have < n)
        {
          /* Check that we are mapping all of the file, in case it grew.  */
          if (__glibc_unlikely (mmap_remap_check (fp)))
            /* We punted mmap, so complete with the vanilla code.  */
            return s - (char *) data + _IO_XSGETN (fp, data, n);

          read_ptr = fp->_IO_read_ptr;
          have = fp->_IO_read_end - read_ptr;
        }
    }

  if (have < n)
    fp->_flags |= _IO_EOF_SEEN;

  if (have != 0)
    {
      have = MIN (have, n);
      s = __mempcpy (s, read_ptr, have);
      fp->_IO_read_ptr = read_ptr + have;
    }

  return s - (char *) data;
}

static size_t
_IO_file_xsgetn_maybe_mmap (FILE *fp, void *data, size_t n)
{
  /* We only get here if this is the first attempt to read something.
     Decide which operations to use and then punt to the chosen one.  */

  decide_maybe_mmap (fp);
  return _IO_XSGETN (fp, data, n);
}

versioned_symbol (libc, _IO_new_do_write, _IO_do_write, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_attach, _IO_file_attach, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_close_it, _IO_file_close_it, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_finish, _IO_file_finish, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_fopen, _IO_file_fopen, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_init, _IO_file_init, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_setbuf, _IO_file_setbuf, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_sync, _IO_file_sync, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_overflow, _IO_file_overflow, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_seekoff, _IO_file_seekoff, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_underflow, _IO_file_underflow, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_write, _IO_file_write, GLIBC_2_1);
versioned_symbol (libc, _IO_new_file_xsputn, _IO_file_xsputn, GLIBC_2_1);

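/* Default operations for narrow file streams.  The _IO_file_jumps_mmap and
   _IO_file_jumps_maybe_mmap tables below override the read-side callbacks
   for streams that map the file instead of reading it.  */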
const struct _IO_jump_t _IO_file_jumps libio_vtable =
{
  JUMP_INIT_DUMMY,
  JUMP_INIT(finish, _IO_file_finish),
  JUMP_INIT(overflow, _IO_file_overflow),
  JUMP_INIT(underflow, _IO_file_underflow),
  JUMP_INIT(uflow, _IO_default_uflow),
  JUMP_INIT(pbackfail, _IO_default_pbackfail),
  JUMP_INIT(xsputn, _IO_file_xsputn),
  JUMP_INIT(xsgetn, _IO_file_xsgetn),
  JUMP_INIT(seekoff, _IO_new_file_seekoff),
  JUMP_INIT(seekpos, _IO_default_seekpos),
  JUMP_INIT(setbuf, _IO_new_file_setbuf),
  JUMP_INIT(sync, _IO_new_file_sync),
  JUMP_INIT(doallocate, _IO_file_doallocate),
  JUMP_INIT(read, _IO_file_read),
  JUMP_INIT(write, _IO_new_file_write),
  JUMP_INIT(seek, _IO_file_seek),
  JUMP_INIT(close, _IO_file_close),
  JUMP_INIT(stat, _IO_file_stat),
  JUMP_INIT(showmanyc, _IO_default_showmanyc),
  JUMP_INIT(imbue, _IO_default_imbue)
};
libc_hidden_data_def (_IO_file_jumps)

const struct _IO_jump_t _IO_file_jumps_mmap libio_vtable =
{
  JUMP_INIT_DUMMY,
  JUMP_INIT(finish, _IO_file_finish),
  JUMP_INIT(overflow, _IO_file_overflow),
  JUMP_INIT(underflow, _IO_file_underflow_mmap),
  JUMP_INIT(uflow, _IO_default_uflow),
  JUMP_INIT(pbackfail, _IO_default_pbackfail),
  JUMP_INIT(xsputn, _IO_new_file_xsputn),
  JUMP_INIT(xsgetn, _IO_file_xsgetn_mmap),
  JUMP_INIT(seekoff, _IO_file_seekoff_mmap),
  JUMP_INIT(seekpos, _IO_default_seekpos),
  JUMP_INIT(setbuf, (_IO_setbuf_t) _IO_file_setbuf_mmap),
  JUMP_INIT(sync, _IO_file_sync_mmap),
  JUMP_INIT(doallocate, _IO_file_doallocate),
  JUMP_INIT(read, _IO_file_read),
  JUMP_INIT(write, _IO_new_file_write),
  JUMP_INIT(seek, _IO_file_seek),
  JUMP_INIT(close, _IO_file_close_mmap),
  JUMP_INIT(stat, _IO_file_stat),
  JUMP_INIT(showmanyc, _IO_default_showmanyc),
  JUMP_INIT(imbue, _IO_default_imbue)
};

const struct _IO_jump_t _IO_file_jumps_maybe_mmap libio_vtable =
{
  JUMP_INIT_DUMMY,
  JUMP_INIT(finish, _IO_file_finish),
  JUMP_INIT(overflow, _IO_file_overflow),
  JUMP_INIT(underflow, _IO_file_underflow_maybe_mmap),
  JUMP_INIT(uflow, _IO_default_uflow),
  JUMP_INIT(pbackfail, _IO_default_pbackfail),
  JUMP_INIT(xsputn, _IO_new_file_xsputn),
  JUMP_INIT(xsgetn, _IO_file_xsgetn_maybe_mmap),
  JUMP_INIT(seekoff, _IO_file_seekoff_maybe_mmap),
  JUMP_INIT(seekpos, _IO_default_seekpos),
  JUMP_INIT(setbuf, (_IO_setbuf_t) _IO_file_setbuf_mmap),
  JUMP_INIT(sync, _IO_new_file_sync),
  JUMP_INIT(doallocate, _IO_file_doallocate),
  JUMP_INIT(read, _IO_file_read),
  JUMP_INIT(write, _IO_new_file_write),
  JUMP_INIT(seek, _IO_file_seek),
  JUMP_INIT(close, _IO_file_close),
  JUMP_INIT(stat, _IO_file_stat),
  JUMP_INIT(showmanyc, _IO_default_showmanyc),
  JUMP_INIT(imbue, _IO_default_imbue)
};