/* Copyright (C) 1994-2022 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */

#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <hurd.h>
#include <hurd/fd.h>

/* Map addresses starting near ADDR and extending for LEN bytes from
   OFFSET into the file described by FD, according to PROT and FLAGS.
   If ADDR is nonzero, it is the desired mapping address.  If the
   MAP_FIXED bit is set in FLAGS, the mapping will be at ADDR exactly
   (which must be page-aligned); otherwise the system chooses a
   convenient nearby address.  The return value is the actual mapping
   address chosen or (void *) -1 for errors (in which case `errno' is
   set).  A successful `mmap' call deallocates any previous mapping
   for the affected region.  */
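/* For illustration only (not used in this file): a typical call maps one
   anonymous, private, read/write page and checks for failure:

     void *p = __mmap (0, 4096, PROT_READ | PROT_WRITE,
                       MAP_ANON | MAP_PRIVATE, -1, 0);
     if (p == MAP_FAILED)
       perror ("mmap");  */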

void *
__mmap (void *addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot, max_vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;
  boolean_t copy;

  mapaddr = (vm_address_t) addr;

  /* ADDR and OFFSET must be page-aligned.  */
  if ((mapaddr & (__vm_page_size - 1)) || (offset & (__vm_page_size - 1)))
    return (void *) (long int) __hurd_fail (EINVAL);

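  /* Translate the POSIX protection bits into Mach VM protection bits.  */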
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

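  /* Unless MAP_SHARED was requested, the mapping is private: we map a
     copy-on-write copy of the object rather than the object itself.  */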
  copy = ! (flags & MAP_SHARED);

  switch (flags & MAP_TYPE)
    {
    default:
      return (void *) (long int) __hurd_fail (EINVAL);

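    /* An anonymous mapping is not backed by any memory object;
       the kernel supplies zero-filled pages.  */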
    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      max_vmprot = VM_PROT_ALL;
      break;

    case MAP_FILE:
    case 0:     /* Allow, e.g., just MAP_SHARED.  */
      {
        mach_port_t robj, wobj;
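        /* Ask the file's I/O server for memory objects usable for reading
           and for writing; either port may be MACH_PORT_NULL if that kind
           of access is not available.  */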
        if ((err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj))))
          {
            if (err == MIG_BAD_ID || err == EOPNOTSUPP || err == ENOSYS)
              err = ENODEV;  /* File descriptor doesn't support mmap.  */
            return (void *) (long int) __hurd_dfail (fd, err);
          }
        switch (prot & (PROT_READ|PROT_WRITE))
          {
            /* Although it apparently doesn't make sense to map a file with
               protection set to PROT_NONE, it is actually sometimes done.
               In particular, that's how localedef reserves some space for
               the locale archive file, the rationale being that some
               implementations take into account whether the mapping is
               anonymous or not when selecting addresses.  */
          case PROT_NONE:
          case PROT_READ:
            max_vmprot = VM_PROT_READ|VM_PROT_EXECUTE;
            if (wobj == robj)
              max_vmprot |= VM_PROT_WRITE;
            memobj = robj;
            if (wobj != MACH_PORT_NULL)
              __mach_port_deallocate (__mach_task_self (), wobj);
            break;
          case PROT_WRITE:
            max_vmprot = VM_PROT_WRITE;
            if (robj == wobj)
              max_vmprot |= VM_PROT_READ|VM_PROT_EXECUTE;
            memobj = wobj;
            if (robj != MACH_PORT_NULL)
              __mach_port_deallocate (__mach_task_self (), robj);
            break;
          case PROT_READ|PROT_WRITE:
            max_vmprot = VM_PROT_ALL;
            if (robj == wobj)
              {
                memobj = wobj;
                /* Remove extra reference.  */
                __mach_port_deallocate (__mach_task_self (), memobj);
              }
            else if (wobj == MACH_PORT_NULL /* Not writable by mapping.  */
                     && copy)
              /* The file can only be mapped for reading.  Since we are
                 making a private mapping, we will never try to write the
                 object anyway, so we don't care.  */
              memobj = robj;
            else
              {
                __mach_port_deallocate (__mach_task_self (), wobj);
                return (void *) (long int) __hurd_fail (EACCES);
              }
            break;
          default:
            __builtin_unreachable ();
          }
        break;
        /* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

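  /* For a private mapping we will only ever touch our own copy of the
     pages, so any protection can be made available, regardless of how the
     underlying object may be accessed.  */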
  if (copy)
    max_vmprot = VM_PROT_ALL;

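  /* Try the mapping.  If no specific address was requested, let the kernel
     place it anywhere; otherwise try exactly MAPADDR first.  */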
  err = __vm_map (__mach_task_self (),
                  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
                  mapaddr == 0,
                  memobj, (vm_offset_t) offset,
                  copy, vmprot, max_vmprot,
                  copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);

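  /* The hinted or fixed address may not have been usable.  For MAP_FIXED,
     replace whatever is already mapped there; otherwise fall back to
     letting the kernel choose an address.  */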
  if (flags & MAP_FIXED)
    {
      if (err == KERN_NO_SPACE)
        {
          /* XXX this is not atomic as it is in unix! */
          /* The region is already allocated; deallocate it first.  */
          err = __vm_deallocate (__mach_task_self (), mapaddr, len);
          if (! err)
            err = __vm_map (__mach_task_self (),
                            &mapaddr, (vm_size_t) len, (vm_address_t) 0,
                            0, memobj, (vm_offset_t) offset,
                            copy, vmprot, max_vmprot,
                            copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
        }
    }
  else
    {
      if (mapaddr != 0 && (err == KERN_NO_SPACE || err == KERN_INVALID_ADDRESS))
        err = __vm_map (__mach_task_self (),
                        &mapaddr, (vm_size_t) len, (vm_address_t) 0,
                        1, memobj, (vm_offset_t) offset,
                        copy, vmprot, max_vmprot,
                        copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
    }

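  /* __vm_map keeps its own reference to the memory object, so release the
     send right we received from io_map.  */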
  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

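  /* The requested protection exceeds what the object allows; POSIX
     reports this as EACCES.  */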
  if (err == KERN_PROTECTION_FAILURE)
    err = EACCES;

  if (err)
    return (void *) (long int) __hurd_fail (err);

  return (void *) mapaddr;
}

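/* Internal libc references bind directly to __mmap; the public name mmap
   is provided as a weak alias.  */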
libc_hidden_def (__mmap)
weak_alias (__mmap, mmap)
182