@@ -1,849 +1,879 @@
 # dirstatemap.py
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.


+from typing import (
+    Optional,
+    TYPE_CHECKING,
+)
+
 from .i18n import _

 from . import (
     error,
     pathutil,
     policy,
     testing,
     txnutil,
+    typelib,
     util,
 )

 from .dirstateutils import (
     docket as docketmod,
     v2,
 )

+if TYPE_CHECKING:
+    from . import (
+        ui as uimod,
+    )
+
 parsers = policy.importmod('parsers')
 rustmod = policy.importrust('dirstate')

 propertycache = util.propertycache

 if rustmod is None:
     DirstateItem = parsers.DirstateItem
 else:
     DirstateItem = rustmod.DirstateItem

 rangemask = 0x7FFFFFFF

 WRITE_MODE_AUTO = 0
 WRITE_MODE_FORCE_NEW = 1
 WRITE_MODE_FORCE_APPEND = 2


 V2_MAX_READ_ATTEMPTS = 5


 class _dirstatemapcommon:
     """
     Methods that are identical for both implementations of the dirstatemap
     class, with and without Rust extensions enabled.
     """

+    _use_dirstate_v2: bool
+    _nodeconstants: typelib.NodeConstants
+    _ui: "uimod.ui"
+    _root: bytes
+    _filename: bytes
+    _nodelen: int
+    _dirtyparents: bool
+    _docket: Optional["docketmod.DirstateDocket"]
+    _write_mode: int
+    _pendingmode: Optional[bool]
+    identity: Optional[typelib.CacheStat]
+
     # please pytype

     _map = None
     copymap = None

-    def __init__(self, ui, opener, root, nodeconstants, use_dirstate_v2):
+    def __init__(
+        self,
+        ui: "uimod.ui",
+        opener,
+        root: bytes,
+        nodeconstants: typelib.NodeConstants,
+        use_dirstate_v2: bool,
+    ) -> None:
         self._use_dirstate_v2 = use_dirstate_v2
         self._nodeconstants = nodeconstants
         self._ui = ui
         self._opener = opener
         self._root = root
         self._filename = b'dirstate'
         self._nodelen = 20  # Also update Rust code when changing this!
         self._parents = None
         self._dirtyparents = False
         self._docket = None
         write_mode = ui.config(b"devel", b"dirstate.v2.data_update_mode")
         if write_mode == b"auto":
             self._write_mode = WRITE_MODE_AUTO
         elif write_mode == b"force-append":
             self._write_mode = WRITE_MODE_FORCE_APPEND
         elif write_mode == b"force-new":
             self._write_mode = WRITE_MODE_FORCE_NEW
         else:
             # unknown value, fallback to default
             self._write_mode = WRITE_MODE_AUTO

         # for consistent view between _pl() and _read() invocations
         self._pendingmode = None

-    def _set_identity(self):
+    def _set_identity(self) -> None:
         self.identity = self._get_current_identity()

-    def _get_current_identity(self):
+    def _get_current_identity(self) -> Optional[typelib.CacheStat]:
         try:
             return util.cachestat(self._opener.join(self._filename))
         except FileNotFoundError:
             return None

-    def may_need_refresh(self):
+    def may_need_refresh(self) -> bool:
         if 'identity' not in vars(self):
             # no existing identity, we need a refresh
             return True
         if self.identity is None:
             return True
         if not self.identity.cacheable():
             # We cannot trust the entry
             # XXX this is a problem on windows, NFS, or other inode less system
             return True
         current_identity = self._get_current_identity()
         if current_identity is None:
             return True
         if not current_identity.cacheable():
             # We cannot trust the entry
             # XXX this is a problem on windows, NFS, or other inode less system
             return True
         return current_identity != self.identity

-    def preload(self):
+    def preload(self) -> None:
         """Loads the underlying data, if it's not already loaded"""
         self._map

     def get(self, key, default=None):
         return self._map.get(key, default)

     def __len__(self):
         return len(self._map)

     def __iter__(self):
         return iter(self._map)

     def __contains__(self, key):
         return key in self._map

     def __getitem__(self, item):
         return self._map[item]

     ### disk interaction

     def _opendirstatefile(self):
         fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
         if self._pendingmode is not None and self._pendingmode != mode:
             fp.close()
             raise error.Abort(
                 _(b'working directory state may be changed parallelly')
             )
         self._pendingmode = mode
         return fp

-    def _readdirstatefile(self, size=-1):
+    def _readdirstatefile(self, size: int = -1) -> bytes:
         try:
             with self._opendirstatefile() as fp:
                 return fp.read(size)
         except FileNotFoundError:
             # File doesn't exist, so the current state is empty
             return b''

     @property
-    def docket(self):
+    def docket(self) -> "docketmod.DirstateDocket":
         if not self._docket:
             if not self._use_dirstate_v2:
                 raise error.ProgrammingError(
                     b'dirstate only has a docket in v2 format'
                 )
             self._set_identity()
             data = self._readdirstatefile()
             if data == b'' or data.startswith(docketmod.V2_FORMAT_MARKER):
                 self._docket = docketmod.DirstateDocket.parse(
                     data, self._nodeconstants
                 )
             else:
                 raise error.CorruptedDirstate(b"dirstate is not in v2 format")
         return self._docket

     def _read_v2_data(self):
         data = None
         attempts = 0
         while attempts < V2_MAX_READ_ATTEMPTS:
             attempts += 1
             try:
                 # TODO: use mmap when possible
                 data = self._opener.read(self.docket.data_filename())
             except FileNotFoundError:
                 # read race detected between docket and data file
                 # reload the docket and retry
                 self._docket = None
         if data is None:
             assert attempts >= V2_MAX_READ_ATTEMPTS
             msg = b"dirstate read race happened %d times in a row"
             msg %= attempts
             raise error.Abort(msg)
         return self._opener.read(self.docket.data_filename())

     def write_v2_no_append(self, tr, st, meta, packed):
         try:
             old_docket = self.docket
         except error.CorruptedDirstate:
             # This means we've identified a dirstate-v1 file on-disk when we
             # were expecting a dirstate-v2 docket. We've managed to recover
             # from that unexpected situation, and now we want to write back a
             # dirstate-v2 file to make the on-disk situation right again.
             #
             # This shouldn't be triggered since `self.docket` is cached and
             # we would have called parents() or read() first, but it's here
             # just in case.
             old_docket = None

         new_docket = docketmod.DirstateDocket.with_new_uuid(
             self.parents(), len(packed), meta
         )
         if old_docket is not None and old_docket.uuid == new_docket.uuid:
             raise error.ProgrammingError(b'dirstate docket name collision')
         data_filename = new_docket.data_filename()
         self._opener.write(data_filename, packed)
         # tell the transaction that we are adding a new file
         if tr is not None:
             tr.addbackup(data_filename, location=b'plain')
         # Write the new docket after the new data file has been
         # written. Because `st` was opened with `atomictemp=True`,
         # the actual `.hg/dirstate` file is only affected on close.
         st.write(new_docket.serialize())
         st.close()
         # Remove the old data file after the new docket pointing to
         # the new data file was written.
         if old_docket is not None and old_docket.uuid:
             data_filename = old_docket.data_filename()
             if tr is not None:
                 tr.addbackup(data_filename, location=b'plain')
             unlink = lambda _tr=None: self._opener.unlink(data_filename)
             if tr:
                 category = b"dirstate-v2-clean-" + old_docket.uuid
                 tr.addpostclose(category, unlink)
             else:
                 unlink()
         self._docket = new_docket

     ### reading/setting parents

     def parents(self):
         if not self._parents:
             if self._use_dirstate_v2:
                 try:
                     self.docket
                 except error.CorruptedDirstate as e:
                     # fall back to dirstate-v1 if we fail to read v2
                     self._v1_parents(e)
                 else:
                     self._parents = self.docket.parents
             else:
                 self._v1_parents()

         return self._parents

     def _v1_parents(self, from_v2_exception=None):
         read_len = self._nodelen * 2
         st = self._readdirstatefile(read_len)
         l = len(st)
         if l == read_len:
             self._parents = (
                 st[: self._nodelen],
                 st[self._nodelen : 2 * self._nodelen],
             )
         elif l == 0:
             self._parents = (
                 self._nodeconstants.nullid,
                 self._nodeconstants.nullid,
             )
         else:
             hint = None
             if from_v2_exception is not None:
                 hint = _(b"falling back to dirstate-v1 from v2 also failed")
             raise error.Abort(
                 _(b'working directory state appears damaged!'), hint
             )


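Aside (reviewer sketch, not part of this change): the `may_need_refresh` logic above amounts to comparing a cached stat snapshot of `.hg/dirstate` against a fresh one and refusing to trust the cache when either snapshot is unusable. A minimal standalone illustration of the same idea using plain `os.stat`; all names below are invented for the example and are not Mercurial APIs.

import os
from typing import NamedTuple, Optional


class FileIdentity(NamedTuple):
    # Fields that change whenever the file is rewritten on a POSIX filesystem.
    ino: int
    size: int
    mtime_ns: int


def current_identity(path: str) -> Optional[FileIdentity]:
    """Return the on-disk identity of `path`, or None if it is missing."""
    try:
        st = os.stat(path)
    except FileNotFoundError:
        return None
    return FileIdentity(st.st_ino, st.st_size, st.st_mtime_ns)


def may_need_refresh(cached: Optional[FileIdentity], path: str) -> bool:
    """True when a view built from `cached` can no longer be trusted."""
    if cached is None:
        return True  # nothing was cached yet
    return current_identity(path) != cached
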
 class dirstatemap(_dirstatemapcommon):
     """Map encapsulating the dirstate's contents.

     The dirstate contains the following state:

     - `identity` is the identity of the dirstate file, which can be used to
       detect when changes have occurred to the dirstate file.

     - `parents` is a pair containing the parents of the working copy. The
       parents are updated by calling `setparents`.

     - the state map maps filenames to tuples of (state, mode, size, mtime),
       where state is a single character representing 'normal', 'added',
       'removed', or 'merged'. It is read by treating the dirstate as a
       dict. File state is updated by calling various methods (see each
       documentation for details):

       - `reset_state`,
       - `set_tracked`
       - `set_untracked`
       - `set_clean`
       - `set_possibly_dirty`

     - `copymap` maps destination filenames to their source filename.

     The dirstate also provides the following views onto the state:

     - `filefoldmap` is a dict mapping normalized filenames to the denormalized
       form that they appear as in the dirstate.

     - `dirfoldmap` is a dict mapping normalized directory names to the
       denormalized form that they appear as in the dirstate.
     """

     ### Core data storage and access

     @propertycache
     def _map(self):
         self._map = {}
         self.read()
         return self._map

     @propertycache
     def copymap(self):
         self.copymap = {}
         self._map
         return self.copymap

     def clear(self):
         self._map.clear()
         self.copymap.clear()
         self.setparents(self._nodeconstants.nullid, self._nodeconstants.nullid)
         util.clearcachedproperty(self, b"_dirs")
         util.clearcachedproperty(self, b"_alldirs")
         util.clearcachedproperty(self, b"filefoldmap")
         util.clearcachedproperty(self, b"dirfoldmap")

     def items(self):
         return self._map.items()

     # forward for python2,3 compat
     iteritems = items

     def debug_iter(self, all):
         """
         Return an iterator of (filename, state, mode, size, mtime) tuples

         `all` is unused when Rust is not enabled
         """
         for filename, item in self.items():
             yield (filename, item.state, item.mode, item.size, item.mtime)

     def keys(self):
         return self._map.keys()

     ### reading/setting parents

     def setparents(self, p1, p2, fold_p2=False):
         self._parents = (p1, p2)
         self._dirtyparents = True
         copies = {}
         if fold_p2:
             for f, s in self._map.items():
                 # Discard "merged" markers when moving away from a merge state
                 if s.p2_info:
                     source = self.copymap.pop(f, None)
                     if source:
                         copies[f] = source
                     s.drop_merge_data()
         return copies

     ### disk interaction

     def read(self):
         testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file')
         if self._use_dirstate_v2:
             try:
                 self.docket
             except error.CorruptedDirstate:
                 # fall back to dirstate-v1 if we fail to read v2
                 self._set_identity()
                 st = self._readdirstatefile()
             else:
                 if not self.docket.uuid:
                     return
                 testing.wait_on_cfg(self._ui, b'dirstate.post-docket-read-file')
                 st = self._read_v2_data()
         else:
             self._set_identity()
             st = self._readdirstatefile()

         if not st:
             return

         # TODO: adjust this estimate for dirstate-v2
         if hasattr(parsers, 'dict_new_presized'):
             # Make an estimate of the number of files in the dirstate based on
             # its size. This trades wasting some memory for avoiding costly
             # resizes. Each entry have a prefix of 17 bytes followed by one or
             # two path names. Studies on various large-scale real-world repositories
             # found 54 bytes a reasonable upper limit for the average path names.
             # Copy entries are ignored for the sake of this estimate.
             self._map = parsers.dict_new_presized(len(st) // 71)

         # Python's garbage collector triggers a GC each time a certain number
         # of container objects (the number being defined by
         # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
         # for each file in the dirstate. The C version then immediately marks
         # them as not to be tracked by the collector. However, this has no
         # effect on when GCs are triggered, only on what objects the GC looks
         # into. This means that O(number of files) GCs are unavoidable.
         # Depending on when in the process's lifetime the dirstate is parsed,
         # this can get very expensive. As a workaround, disable GC while
         # parsing the dirstate.
         #
         # (we cannot decorate the function directly since it is in a C module)
         if self._use_dirstate_v2:
             try:
                 self.docket
             except error.CorruptedDirstate:
                 # fall back to dirstate-v1 if we fail to parse v2
                 parse_dirstate = util.nogc(parsers.parse_dirstate)
                 p = parse_dirstate(self._map, self.copymap, st)
             else:
                 p = self.docket.parents
                 meta = self.docket.tree_metadata
                 parse_dirstate = util.nogc(v2.parse_dirstate)
                 parse_dirstate(self._map, self.copymap, st, meta)
         else:
             parse_dirstate = util.nogc(parsers.parse_dirstate)
             p = parse_dirstate(self._map, self.copymap, st)
         if not self._dirtyparents:
             self.setparents(*p)

         # Avoid excess attribute lookups by fast pathing certain checks
         self.__contains__ = self._map.__contains__
         self.__getitem__ = self._map.__getitem__
         self.get = self._map.get

     def write(self, tr, st):
         if self._use_dirstate_v2:
             packed, meta = v2.pack_dirstate(self._map, self.copymap)
             self.write_v2_no_append(tr, st, meta, packed)
         else:
             packed = parsers.pack_dirstate(
                 self._map, self.copymap, self.parents()
             )
             st.write(packed)
             st.close()
         self._dirtyparents = False

     @propertycache
     def identity(self):
         self._map
         return self.identity

     ### code related to maintaining and accessing "extra" property
     # (e.g. "has_dir")

     def _dirs_incr(self, filename, old_entry=None):
         """increment the dirstate counter if applicable"""
         if (
             old_entry is None or old_entry.removed
         ) and "_dirs" in self.__dict__:
             self._dirs.addpath(filename)
         if old_entry is None and "_alldirs" in self.__dict__:
             self._alldirs.addpath(filename)

     def _dirs_decr(self, filename, old_entry=None, remove_variant=False):
         """decrement the dirstate counter if applicable"""
         if old_entry is not None:
             if "_dirs" in self.__dict__ and not old_entry.removed:
                 self._dirs.delpath(filename)
             if "_alldirs" in self.__dict__ and not remove_variant:
                 self._alldirs.delpath(filename)
         elif remove_variant and "_alldirs" in self.__dict__:
             self._alldirs.addpath(filename)
         if "filefoldmap" in self.__dict__:
             normed = util.normcase(filename)
             self.filefoldmap.pop(normed, None)

     @propertycache
     def filefoldmap(self):
         """Returns a dictionary mapping normalized case paths to their
         non-normalized versions.
         """
         try:
             makefilefoldmap = parsers.make_file_foldmap
         except AttributeError:
             pass
         else:
             return makefilefoldmap(
                 self._map, util.normcasespec, util.normcasefallback
             )

         f = {}
         normcase = util.normcase
         for name, s in self._map.items():
             if not s.removed:
                 f[normcase(name)] = name
         f[b'.'] = b'.'  # prevents useless util.fspath() invocation
         return f

     @propertycache
     def dirfoldmap(self):
         f = {}
         normcase = util.normcase
         for name in self._dirs:
             f[normcase(name)] = name
         return f

     def hastrackeddir(self, d):
         """
         Returns True if the dirstate contains a tracked (not removed) file
         in this directory.
         """
         return d in self._dirs

     def hasdir(self, d):
         """
         Returns True if the dirstate contains a file (tracked or removed)
         in this directory.
         """
         return d in self._alldirs

     @propertycache
     def _dirs(self):
         return pathutil.dirs(self._map, only_tracked=True)

     @propertycache
     def _alldirs(self):
         return pathutil.dirs(self._map)

     ### code related to manipulation of entries and copy-sources

     def reset_state(
         self,
         filename,
         wc_tracked=False,
         p1_tracked=False,
         p2_info=False,
         has_meaningful_mtime=True,
         parentfiledata=None,
     ):
         """Set a entry to a given state, diregarding all previous state

         This is to be used by the part of the dirstate API dedicated to
         adjusting the dirstate after a update/merge.

         note: calling this might result to no entry existing at all if the
         dirstate map does not see any point at having one for this file
         anymore.
         """
         # copy information are now outdated
         # (maybe new information should be in directly passed to this function)
         self.copymap.pop(filename, None)

         if not (p1_tracked or p2_info or wc_tracked):
             old_entry = self._map.get(filename)
             self._drop_entry(filename)
             self._dirs_decr(filename, old_entry=old_entry)
             return

         old_entry = self._map.get(filename)
         self._dirs_incr(filename, old_entry)
         entry = DirstateItem(
             wc_tracked=wc_tracked,
             p1_tracked=p1_tracked,
             p2_info=p2_info,
             has_meaningful_mtime=has_meaningful_mtime,
             parentfiledata=parentfiledata,
         )
         self._map[filename] = entry

     def set_tracked(self, filename):
         new = False
         entry = self.get(filename)
         if entry is None:
             self._dirs_incr(filename)
             entry = DirstateItem(
                 wc_tracked=True,
             )

             self._map[filename] = entry
             new = True
         elif not entry.tracked:
             self._dirs_incr(filename, entry)
             entry.set_tracked()
             self._refresh_entry(filename, entry)
             new = True
         else:
             # XXX This is probably overkill for more case, but we need this to
             # fully replace the `normallookup` call with `set_tracked` one.
             # Consider smoothing this in the future.
             entry.set_possibly_dirty()
             self._refresh_entry(filename, entry)
         return new

     def set_untracked(self, f):
         """Mark a file as no longer tracked in the dirstate map"""
         entry = self.get(f)
         if entry is None:
             return False
         else:
             self._dirs_decr(f, old_entry=entry, remove_variant=not entry.added)
             if not entry.p2_info:
                 self.copymap.pop(f, None)
             entry.set_untracked()
             self._refresh_entry(f, entry)
             return True

     def set_clean(self, filename, mode, size, mtime):
         """mark a file as back to a clean state"""
         entry = self[filename]
         size = size & rangemask
         entry.set_clean(mode, size, mtime)
         self._refresh_entry(filename, entry)
         self.copymap.pop(filename, None)

     def set_possibly_dirty(self, filename):
         """record that the current state of the file on disk is unknown"""
         entry = self[filename]
         entry.set_possibly_dirty()
         self._refresh_entry(filename, entry)

     def _refresh_entry(self, f, entry):
         """record updated state of an entry"""
         if not entry.any_tracked:
             self._map.pop(f, None)

     def _drop_entry(self, f):
         """remove any entry for file f

         This should also drop associated copy information

         The fact we actually need to drop it is the responsability of the caller
         """
         self._map.pop(f, None)
         self.copymap.pop(f, None)


|
656 | if rustmod is not None: | |
627 |
|
657 | |||
628 | class dirstatemap(_dirstatemapcommon): |
|
658 | class dirstatemap(_dirstatemapcommon): | |
629 | ### Core data storage and access |
|
659 | ### Core data storage and access | |
630 |
|
660 | |||
631 | @propertycache |
|
661 | @propertycache | |
632 | def _map(self): |
|
662 | def _map(self): | |
633 | """ |
|
663 | """ | |
634 | Fills the Dirstatemap when called. |
|
664 | Fills the Dirstatemap when called. | |
635 | """ |
|
665 | """ | |
636 | # ignore HG_PENDING because identity is used only for writing |
|
666 | # ignore HG_PENDING because identity is used only for writing | |
637 | self._set_identity() |
|
667 | self._set_identity() | |
638 |
|
668 | |||
639 | testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file') |
|
669 | testing.wait_on_cfg(self._ui, b'dirstate.pre-read-file') | |
640 | if self._use_dirstate_v2: |
|
670 | if self._use_dirstate_v2: | |
641 | try: |
|
671 | try: | |
642 | self.docket |
|
672 | self.docket | |
643 | except error.CorruptedDirstate as e: |
|
673 | except error.CorruptedDirstate as e: | |
644 | # fall back to dirstate-v1 if we fail to read v2 |
|
674 | # fall back to dirstate-v1 if we fail to read v2 | |
645 | parents = self._v1_map(e) |
|
675 | parents = self._v1_map(e) | |
646 | else: |
|
676 | else: | |
647 | parents = self.docket.parents |
|
677 | parents = self.docket.parents | |
648 | inode = ( |
|
678 | inode = ( | |
649 | self.identity.stat.st_ino |
|
679 | self.identity.stat.st_ino | |
650 | if self.identity is not None |
|
680 | if self.identity is not None | |
651 | and self.identity.stat is not None |
|
681 | and self.identity.stat is not None | |
652 | else None |
|
682 | else None | |
653 | ) |
|
683 | ) | |
654 | testing.wait_on_cfg( |
|
684 | testing.wait_on_cfg( | |
655 | self._ui, b'dirstate.post-docket-read-file' |
|
685 | self._ui, b'dirstate.post-docket-read-file' | |
656 | ) |
|
686 | ) | |
657 | if not self.docket.uuid: |
|
687 | if not self.docket.uuid: | |
658 | data = b'' |
|
688 | data = b'' | |
659 | self._map = rustmod.DirstateMap.new_empty() |
|
689 | self._map = rustmod.DirstateMap.new_empty() | |
660 | else: |
|
690 | else: | |
661 | data = self._read_v2_data() |
|
691 | data = self._read_v2_data() | |
662 | self._map = rustmod.DirstateMap.new_v2( |
|
692 | self._map = rustmod.DirstateMap.new_v2( | |
663 | data, |
|
693 | data, | |
664 | self.docket.data_size, |
|
694 | self.docket.data_size, | |
665 | self.docket.tree_metadata, |
|
695 | self.docket.tree_metadata, | |
666 | self.docket.uuid, |
|
696 | self.docket.uuid, | |
667 | inode, |
|
697 | inode, | |
668 | ) |
|
698 | ) | |
669 | parents = self.docket.parents |
|
699 | parents = self.docket.parents | |
670 | else: |
|
700 | else: | |
671 | parents = self._v1_map() |
|
701 | parents = self._v1_map() | |
672 |
|
702 | |||
673 | if parents and not self._dirtyparents: |
|
703 | if parents and not self._dirtyparents: | |
674 | self.setparents(*parents) |
|
704 | self.setparents(*parents) | |
675 |
|
705 | |||
676 | self.__contains__ = self._map.__contains__ |
|
706 | self.__contains__ = self._map.__contains__ | |
677 | self.__getitem__ = self._map.__getitem__ |
|
707 | self.__getitem__ = self._map.__getitem__ | |
678 | self.get = self._map.get |
|
708 | self.get = self._map.get | |
679 | return self._map |
|
709 | return self._map | |
680 |
|
710 | |||
681 | def _v1_map(self, from_v2_exception=None): |
|
711 | def _v1_map(self, from_v2_exception=None): | |
682 | self._set_identity() |
|
712 | self._set_identity() | |
683 | inode = ( |
|
713 | inode = ( | |
684 | self.identity.stat.st_ino |
|
714 | self.identity.stat.st_ino | |
685 | if self.identity is not None and self.identity.stat is not None |
|
715 | if self.identity is not None and self.identity.stat is not None | |
686 | else None |
|
716 | else None | |
687 | ) |
|
717 | ) | |
688 | try: |
|
718 | try: | |
689 | self._map, parents = rustmod.DirstateMap.new_v1( |
|
719 | self._map, parents = rustmod.DirstateMap.new_v1( | |
690 | self._readdirstatefile(), inode |
|
720 | self._readdirstatefile(), inode | |
691 | ) |
|
721 | ) | |
692 | except OSError as e: |
|
722 | except OSError as e: | |
693 | if from_v2_exception is not None: |
|
723 | if from_v2_exception is not None: | |
694 | raise e from from_v2_exception |
|
724 | raise e from from_v2_exception | |
695 | raise |
|
725 | raise | |
696 | return parents |
|
726 | return parents | |
697 |
|
727 | |||
698 | @property |
|
728 | @property | |
699 | def copymap(self): |
|
729 | def copymap(self): | |
700 | return self._map.copymap() |
|
730 | return self._map.copymap() | |
701 |
|
731 | |||
702 | def debug_iter(self, all): |
|
732 | def debug_iter(self, all): | |
703 | """ |
|
733 | """ | |
704 | Return an iterator of (filename, state, mode, size, mtime) tuples |
|
734 | Return an iterator of (filename, state, mode, size, mtime) tuples | |
705 |
|
735 | |||
706 | `all`: also include with `state == b' '` dirstate tree nodes that |
|
736 | `all`: also include with `state == b' '` dirstate tree nodes that | |
707 | don't have an associated `DirstateItem`. |
|
737 | don't have an associated `DirstateItem`. | |
708 |
|
738 | |||
709 | """ |
|
739 | """ | |
710 | return self._map.debug_iter(all) |
|
740 | return self._map.debug_iter(all) | |
711 |
|
741 | |||
712 | def clear(self): |
|
742 | def clear(self): | |
713 | self._map.clear() |
|
743 | self._map.clear() | |
714 | self.setparents( |
|
744 | self.setparents( | |
715 | self._nodeconstants.nullid, self._nodeconstants.nullid |
|
745 | self._nodeconstants.nullid, self._nodeconstants.nullid | |
716 | ) |
|
746 | ) | |
717 | util.clearcachedproperty(self, b"_dirs") |
|
747 | util.clearcachedproperty(self, b"_dirs") | |
718 | util.clearcachedproperty(self, b"_alldirs") |
|
748 | util.clearcachedproperty(self, b"_alldirs") | |
719 | util.clearcachedproperty(self, b"dirfoldmap") |
|
749 | util.clearcachedproperty(self, b"dirfoldmap") | |
720 |
|
750 | |||
721 | def items(self): |
|
751 | def items(self): | |
722 | return self._map.items() |
|
752 | return self._map.items() | |
723 |
|
753 | |||
724 | # forward for python2,3 compat |
|
754 | # forward for python2,3 compat | |
725 | iteritems = items |
|
755 | iteritems = items | |
726 |
|
756 | |||
727 | def keys(self): |
|
757 | def keys(self): | |
728 | return iter(self._map) |
|
758 | return iter(self._map) | |
729 |
|
759 | |||
730 | ### reading/setting parents |
|
760 | ### reading/setting parents | |
731 |
|
761 | |||
732 | def setparents(self, p1, p2, fold_p2=False): |
|
762 | def setparents(self, p1, p2, fold_p2=False): | |
733 | self._parents = (p1, p2) |
|
763 | self._parents = (p1, p2) | |
734 | self._dirtyparents = True |
|
764 | self._dirtyparents = True | |
735 | copies = {} |
|
765 | copies = {} | |
736 | if fold_p2: |
|
766 | if fold_p2: | |
737 | copies = self._map.setparents_fixup() |
|
767 | copies = self._map.setparents_fixup() | |
738 | return copies |
|
768 | return copies | |
739 |
|
769 | |||
740 | ### disk interaction |
|
770 | ### disk interaction | |
741 |
|
771 | |||
742 | @propertycache |
|
772 | @propertycache | |
743 | def identity(self): |
|
773 | def identity(self): | |
744 | self._map |
|
774 | self._map | |
745 | return self.identity |
|
775 | return self.identity | |
746 |
|
776 | |||
747 | def write(self, tr, st): |
|
777 | def write(self, tr, st): | |
748 | if not self._use_dirstate_v2: |
|
778 | if not self._use_dirstate_v2: | |
749 | p1, p2 = self.parents() |
|
779 | p1, p2 = self.parents() | |
750 | packed = self._map.write_v1(p1, p2) |
|
780 | packed = self._map.write_v1(p1, p2) | |
751 | st.write(packed) |
|
781 | st.write(packed) | |
752 | st.close() |
|
782 | st.close() | |
753 | self._dirtyparents = False |
|
783 | self._dirtyparents = False | |
754 | return |
|
784 | return | |
755 |
|
785 | |||
756 | write_mode = self._write_mode |
|
786 | write_mode = self._write_mode | |
757 | try: |
|
787 | try: | |
758 | docket = self.docket |
|
788 | docket = self.docket | |
759 | except error.CorruptedDirstate: |
|
789 | except error.CorruptedDirstate: | |
760 | # fall back to dirstate-v1 if we fail to parse v2 |
|
790 | # fall back to dirstate-v1 if we fail to parse v2 | |
761 | docket = None |
|
791 | docket = None | |
762 |
|
792 | |||
763 | # We can only append to an existing data file if there is one |
|
793 | # We can only append to an existing data file if there is one | |
764 | if docket is None or docket.uuid is None: |
|
794 | if docket is None or docket.uuid is None: | |
765 | write_mode = WRITE_MODE_FORCE_NEW |
|
795 | write_mode = WRITE_MODE_FORCE_NEW | |
766 | packed, meta, append = self._map.write_v2(write_mode) |
|
796 | packed, meta, append = self._map.write_v2(write_mode) | |
767 | if append: |
|
797 | if append: | |
768 | docket = self.docket |
|
798 | docket = self.docket | |
769 | data_filename = docket.data_filename() |
|
799 | data_filename = docket.data_filename() | |
770 | # We mark it for backup to make sure a future `hg rollback` (or |
|
800 | # We mark it for backup to make sure a future `hg rollback` (or | |
771 | # `hg recover`?) call find the data it needs to restore a |
|
801 | # `hg recover`?) call find the data it needs to restore a | |
772 | # working repository. |
|
802 | # working repository. | |
773 | # |
|
803 | # | |
774 | # The backup can use a hardlink because the format is resistant |
|
804 | # The backup can use a hardlink because the format is resistant | |
775 | # to trailing "dead" data. |
|
805 | # to trailing "dead" data. | |
776 | if tr is not None: |
|
806 | if tr is not None: | |
777 | tr.addbackup(data_filename, location=b'plain') |
|
807 | tr.addbackup(data_filename, location=b'plain') | |
778 | with self._opener(data_filename, b'r+b') as fp: |
|
808 | with self._opener(data_filename, b'r+b') as fp: | |
779 | fp.seek(docket.data_size) |
|
809 | fp.seek(docket.data_size) | |
780 | assert fp.tell() == docket.data_size |
|
810 | assert fp.tell() == docket.data_size | |
781 | written = fp.write(packed) |
|
811 | written = fp.write(packed) | |
782 | if written is not None: # py2 may return None |
|
812 | if written is not None: # py2 may return None | |
783 | assert written == len(packed), (written, len(packed)) |
|
813 | assert written == len(packed), (written, len(packed)) | |
784 | docket.data_size += len(packed) |
|
814 | docket.data_size += len(packed) | |
785 | docket.parents = self.parents() |
|
815 | docket.parents = self.parents() | |
786 | docket.tree_metadata = meta |
|
816 | docket.tree_metadata = meta | |
787 | st.write(docket.serialize()) |
|
817 | st.write(docket.serialize()) | |
788 | st.close() |
|
818 | st.close() | |
789 | else: |
|
819 | else: | |
790 | self.write_v2_no_append(tr, st, meta, packed) |
|
820 | self.write_v2_no_append(tr, st, meta, packed) | |
791 | # Reload from the newly-written file |
|
821 | # Reload from the newly-written file | |
792 | util.clearcachedproperty(self, b"_map") |
|
822 | util.clearcachedproperty(self, b"_map") | |
793 | self._dirtyparents = False |
|
823 | self._dirtyparents = False | |
794 |
|
824 | |||
795 | ### code related to maintaining and accessing "extra" property |
|
825 | ### code related to maintaining and accessing "extra" property | |
796 | # (e.g. "has_dir") |
|
826 | # (e.g. "has_dir") | |
797 |
|
827 | |||
798 | @propertycache |
|
828 | @propertycache | |
799 | def filefoldmap(self): |
|
829 | def filefoldmap(self): | |
800 | """Returns a dictionary mapping normalized case paths to their |
|
830 | """Returns a dictionary mapping normalized case paths to their | |
801 | non-normalized versions. |
|
831 | non-normalized versions. | |
802 | """ |
|
832 | """ | |
803 | return self._map.filefoldmapasdict() |
|
833 | return self._map.filefoldmapasdict() | |
804 |
|
834 | |||
805 | def hastrackeddir(self, d): |
|
835 | def hastrackeddir(self, d): | |
806 | return self._map.hastrackeddir(d) |
|
836 | return self._map.hastrackeddir(d) | |
807 |
|
837 | |||
808 | def hasdir(self, d): |
|
838 | def hasdir(self, d): | |
809 | return self._map.hasdir(d) |
|
839 | return self._map.hasdir(d) | |
810 |
|
840 | |||
811 | @propertycache |
|
841 | @propertycache | |
812 | def dirfoldmap(self): |
|
842 | def dirfoldmap(self): | |
813 | f = {} |
|
843 | f = {} | |
814 | normcase = util.normcase |
|
844 | normcase = util.normcase | |
815 | for name in self._map.tracked_dirs(): |
|
845 | for name in self._map.tracked_dirs(): | |
816 | f[normcase(name)] = name |
|
846 | f[normcase(name)] = name | |
817 | return f |
|
847 | return f | |
818 |
|
848 | |||
819 | ### code related to manipulation of entries and copy-sources |
|
849 | ### code related to manipulation of entries and copy-sources | |
820 |
|
850 | |||
821 | def set_tracked(self, f): |
|
851 | def set_tracked(self, f): | |
822 | return self._map.set_tracked(f) |
|
852 | return self._map.set_tracked(f) | |
823 |
|
853 | |||
824 | def set_untracked(self, f): |
|
854 | def set_untracked(self, f): | |
825 | return self._map.set_untracked(f) |
|
855 | return self._map.set_untracked(f) | |
826 |
|
856 | |||
827 | def set_clean(self, filename, mode, size, mtime): |
|
857 | def set_clean(self, filename, mode, size, mtime): | |
828 | self._map.set_clean(filename, mode, size, mtime) |
|
858 | self._map.set_clean(filename, mode, size, mtime) | |
829 |
|
859 | |||
830 | def set_possibly_dirty(self, f): |
|
860 | def set_possibly_dirty(self, f): | |
831 | self._map.set_possibly_dirty(f) |
|
861 | self._map.set_possibly_dirty(f) | |
832 |
|
862 | |||
833 | def reset_state( |
|
863 | def reset_state( | |
834 | self, |
|
864 | self, | |
835 | filename, |
|
865 | filename, | |
836 | wc_tracked=False, |
|
866 | wc_tracked=False, | |
837 | p1_tracked=False, |
|
867 | p1_tracked=False, | |
838 | p2_info=False, |
|
868 | p2_info=False, | |
839 | has_meaningful_mtime=True, |
|
869 | has_meaningful_mtime=True, | |
840 | parentfiledata=None, |
|
870 | parentfiledata=None, | |
841 | ): |
|
871 | ): | |
842 | return self._map.reset_state( |
|
872 | return self._map.reset_state( | |
843 | filename, |
|
873 | filename, | |
844 | wc_tracked, |
|
874 | wc_tracked, | |
845 | p1_tracked, |
|
875 | p1_tracked, | |
846 | p2_info, |
|
876 | p2_info, | |
847 | has_meaningful_mtime, |
|
877 | has_meaningful_mtime, | |
848 | parentfiledata, |
|
878 | parentfiledata, | |
849 | ) |
|
879 | ) |
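A self-contained toy sketch of the append-plus-docket scheme that `write()` above relies on (plain JSON and throwaway file names stand in for the real dirstate-v2 docket and data file; `append_and_update_docket` and `read_valid_data` are hypothetical helpers, not Mercurial APIs). New data is only ever appended and the docket records how many bytes of the data file are valid, so stale trailing bytes are harmless -- which is why the data-file backup above can safely be a hardlink.

    # Toy illustration only -- not Mercurial code.
    import json
    import os
    import tempfile

    def append_and_update_docket(data_path, docket_path, payload):
        with open(data_path, 'ab') as fp:      # new data is only ever appended
            fp.write(payload)
            size = fp.tell()
        with open(docket_path, 'w') as fp:     # docket points at the valid prefix
            json.dump({'data_size': size}, fp)

    def read_valid_data(data_path, docket_path):
        with open(docket_path) as fp:
            size = json.load(fp)['data_size']
        with open(data_path, 'rb') as fp:
            return fp.read(size)               # bytes past data_size are ignored

    d = tempfile.mkdtemp()
    data, docket = os.path.join(d, 'data'), os.path.join(d, 'docket')
    append_and_update_docket(data, docket, b'entry-1\n')
    append_and_update_docket(data, docket, b'entry-2\n')
    assert read_valid_data(data, docket) == b'entry-1\nentry-2\n'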
@@ -1,809 +1,811 | |||||
1 | # posix.py - Posix utility function implementations for Mercurial |
|
1 | # posix.py - Posix utility function implementations for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 |
|
8 | |||
9 | import errno |
|
9 | import errno | |
10 | import fcntl |
|
10 | import fcntl | |
11 | import getpass |
|
11 | import getpass | |
12 | import grp |
|
12 | import grp | |
13 | import os |
|
13 | import os | |
14 | import pwd |
|
14 | import pwd | |
15 | import re |
|
15 | import re | |
16 | import select |
|
16 | import select | |
17 | import stat |
|
17 | import stat | |
18 | import sys |
|
18 | import sys | |
19 | import tempfile |
|
19 | import tempfile | |
20 | import typing |
|
20 | import typing | |
21 | import unicodedata |
|
21 | import unicodedata | |
22 |
|
22 | |||
23 | from typing import ( |
|
23 | from typing import ( | |
24 | Any, |
|
24 | Any, | |
25 | AnyStr, |
|
25 | AnyStr, | |
26 | Iterable, |
|
26 | Iterable, | |
27 | Iterator, |
|
27 | Iterator, | |
28 | List, |
|
28 | List, | |
29 | Match, |
|
29 | Match, | |
30 | NoReturn, |
|
30 | NoReturn, | |
31 | Optional, |
|
31 | Optional, | |
32 | Sequence, |
|
32 | Sequence, | |
33 | Tuple, |
|
33 | Tuple, | |
34 | Union, |
|
34 | Union, | |
35 | ) |
|
35 | ) | |
36 |
|
36 | |||
37 | from .i18n import _ |
|
37 | from .i18n import _ | |
38 | from .pycompat import ( |
|
38 | from .pycompat import ( | |
39 | open, |
|
39 | open, | |
40 | ) |
|
40 | ) | |
41 | from . import ( |
|
41 | from . import ( | |
42 | encoding, |
|
42 | encoding, | |
43 | error, |
|
43 | error, | |
44 | policy, |
|
44 | policy, | |
45 | pycompat, |
|
45 | pycompat, | |
46 | ) |
|
46 | ) | |
47 |
|
47 | |||
48 | osutil = policy.importmod('osutil') |
|
48 | osutil = policy.importmod('osutil') | |
49 |
|
49 | |||
50 | normpath = os.path.normpath |
|
50 | normpath = os.path.normpath | |
51 | samestat = os.path.samestat |
|
51 | samestat = os.path.samestat | |
52 | abspath = os.path.abspath # re-exports |
|
52 | abspath = os.path.abspath # re-exports | |
53 |
|
53 | |||
54 | try: |
|
54 | try: | |
55 | oslink = os.link |
|
55 | oslink = os.link | |
56 | except AttributeError: |
|
56 | except AttributeError: | |
57 | # Some platforms build Python without os.link on systems that are |
|
57 | # Some platforms build Python without os.link on systems that are | |
58 | # vaguely unix-like but don't have hardlink support. For those |
|
58 | # vaguely unix-like but don't have hardlink support. For those | |
59 | # poor souls, just say we tried and that it failed so we fall back |
|
59 | # poor souls, just say we tried and that it failed so we fall back | |
60 | # to copies. |
|
60 | # to copies. | |
61 | def oslink(src: bytes, dst: bytes) -> NoReturn: |
|
61 | def oslink(src: bytes, dst: bytes) -> NoReturn: | |
62 | raise OSError( |
|
62 | raise OSError( | |
63 | errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst) |
|
63 | errno.EINVAL, b'hardlinks not supported: %s to %s' % (src, dst) | |
64 | ) |
|
64 | ) | |
65 |
|
65 | |||
66 |
|
66 | |||
67 | readlink = os.readlink |
|
67 | readlink = os.readlink | |
68 | unlink = os.unlink |
|
68 | unlink = os.unlink | |
69 | rename = os.rename |
|
69 | rename = os.rename | |
70 | removedirs = os.removedirs |
|
70 | removedirs = os.removedirs | |
71 |
|
71 | |||
72 | if typing.TYPE_CHECKING: |
|
72 | if typing.TYPE_CHECKING: | |
73 |
|
73 | |||
74 | def normpath(path: bytes) -> bytes: |
|
74 | def normpath(path: bytes) -> bytes: | |
75 | raise NotImplementedError |
|
75 | raise NotImplementedError | |
76 |
|
76 | |||
77 | def abspath(path: AnyStr) -> AnyStr: |
|
77 | def abspath(path: AnyStr) -> AnyStr: | |
78 | raise NotImplementedError |
|
78 | raise NotImplementedError | |
79 |
|
79 | |||
80 | def oslink(src: bytes, dst: bytes) -> None: |
|
80 | def oslink(src: bytes, dst: bytes) -> None: | |
81 | raise NotImplementedError |
|
81 | raise NotImplementedError | |
82 |
|
82 | |||
83 | def readlink(path: bytes) -> bytes: |
|
83 | def readlink(path: bytes) -> bytes: | |
84 | raise NotImplementedError |
|
84 | raise NotImplementedError | |
85 |
|
85 | |||
86 | def unlink(path: bytes) -> None: |
|
86 | def unlink(path: bytes) -> None: | |
87 | raise NotImplementedError |
|
87 | raise NotImplementedError | |
88 |
|
88 | |||
89 | def rename(src: bytes, dst: bytes) -> None: |
|
89 | def rename(src: bytes, dst: bytes) -> None: | |
90 | raise NotImplementedError |
|
90 | raise NotImplementedError | |
91 |
|
91 | |||
92 | def removedirs(name: bytes) -> None: |
|
92 | def removedirs(name: bytes) -> None: | |
93 | raise NotImplementedError |
|
93 | raise NotImplementedError | |
94 |
|
94 | |||
95 |
|
95 | |||
96 | expandglobs: bool = False |
|
96 | expandglobs: bool = False | |
97 |
|
97 | |||
98 | umask: int = os.umask(0) |
|
98 | umask: int = os.umask(0) | |
99 | os.umask(umask) |
|
99 | os.umask(umask) | |
100 |
|
100 | |||
101 | posixfile = open |
|
101 | posixfile = open | |
102 |
|
102 | |||
103 |
|
103 | |||
104 | def split(p: bytes) -> Tuple[bytes, bytes]: |
|
104 | def split(p: bytes) -> Tuple[bytes, bytes]: | |
105 | """Same as posixpath.split, but faster |
|
105 | """Same as posixpath.split, but faster | |
106 |
|
106 | |||
107 | >>> import posixpath |
|
107 | >>> import posixpath | |
108 | >>> for f in [b'/absolute/path/to/file', |
|
108 | >>> for f in [b'/absolute/path/to/file', | |
109 | ... b'relative/path/to/file', |
|
109 | ... b'relative/path/to/file', | |
110 | ... b'file_alone', |
|
110 | ... b'file_alone', | |
111 | ... b'path/to/directory/', |
|
111 | ... b'path/to/directory/', | |
112 | ... b'/multiple/path//separators', |
|
112 | ... b'/multiple/path//separators', | |
113 | ... b'/file_at_root', |
|
113 | ... b'/file_at_root', | |
114 | ... b'///multiple_leading_separators_at_root', |
|
114 | ... b'///multiple_leading_separators_at_root', | |
115 | ... b'']: |
|
115 | ... b'']: | |
116 | ... assert split(f) == posixpath.split(f), f |
|
116 | ... assert split(f) == posixpath.split(f), f | |
117 | """ |
|
117 | """ | |
118 | ht = p.rsplit(b'/', 1) |
|
118 | ht = p.rsplit(b'/', 1) | |
119 | if len(ht) == 1: |
|
119 | if len(ht) == 1: | |
120 | return b'', p |
|
120 | return b'', p | |
121 | nh = ht[0].rstrip(b'/') |
|
121 | nh = ht[0].rstrip(b'/') | |
122 | if nh: |
|
122 | if nh: | |
123 | return nh, ht[1] |
|
123 | return nh, ht[1] | |
124 | return ht[0] + b'/', ht[1] |
|
124 | return ht[0] + b'/', ht[1] | |
125 |
|
125 | |||
126 |
|
126 | |||
127 | def openhardlinks() -> bool: |
|
127 | def openhardlinks() -> bool: | |
128 | '''return true if it is safe to hold open file handles to hardlinks''' |
|
128 | '''return true if it is safe to hold open file handles to hardlinks''' | |
129 | return True |
|
129 | return True | |
130 |
|
130 | |||
131 |
|
131 | |||
132 | def nlinks(name: bytes) -> int: |
|
132 | def nlinks(name: bytes) -> int: | |
133 | '''return number of hardlinks for the given file''' |
|
133 | '''return number of hardlinks for the given file''' | |
134 | return os.lstat(name).st_nlink |
|
134 | return os.lstat(name).st_nlink | |
135 |
|
135 | |||
136 |
|
136 | |||
137 | def parsepatchoutput(output_line: bytes) -> bytes: |
|
137 | def parsepatchoutput(output_line: bytes) -> bytes: | |
138 | """parses the output produced by patch and returns the filename""" |
|
138 | """parses the output produced by patch and returns the filename""" | |
139 | pf = output_line[14:] |
|
139 | pf = output_line[14:] | |
140 | if pycompat.sysplatform == b'OpenVMS': |
|
140 | if pycompat.sysplatform == b'OpenVMS': | |
141 | if pf[0] == b'`': |
|
141 | if pf[0] == b'`': | |
142 | pf = pf[1:-1] # Remove the quotes |
|
142 | pf = pf[1:-1] # Remove the quotes | |
143 | else: |
|
143 | else: | |
144 | if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf: |
|
144 | if pf.startswith(b"'") and pf.endswith(b"'") and b" " in pf: | |
145 | pf = pf[1:-1] # Remove the quotes |
|
145 | pf = pf[1:-1] # Remove the quotes | |
146 | return pf |
|
146 | return pf | |
147 |
|
147 | |||
148 |
|
148 | |||
149 | def sshargs( |
|
149 | def sshargs( | |
150 | sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes] |
|
150 | sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes] | |
151 | ) -> bytes: |
|
151 | ) -> bytes: | |
152 | '''Build argument list for ssh''' |
|
152 | '''Build argument list for ssh''' | |
153 | args = user and (b"%s@%s" % (user, host)) or host |
|
153 | args = user and (b"%s@%s" % (user, host)) or host | |
154 | if b'-' in args[:1]: |
|
154 | if b'-' in args[:1]: | |
155 | raise error.Abort( |
|
155 | raise error.Abort( | |
156 | _(b'illegal ssh hostname or username starting with -: %s') % args |
|
156 | _(b'illegal ssh hostname or username starting with -: %s') % args | |
157 | ) |
|
157 | ) | |
158 | args = shellquote(args) |
|
158 | args = shellquote(args) | |
159 | if port: |
|
159 | if port: | |
160 | args = b'-p %s %s' % (shellquote(port), args) |
|
160 | args = b'-p %s %s' % (shellquote(port), args) | |
161 | return args |
|
161 | return args | |
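Illustrative expected values for `sshargs()` (worked out from the code above and from `shellquote()` defined later in this file; these asserts are not doctests shipped with the module). The port flag is prepended, and the user@host token is quoted because `@` is outside shellquote's safe character set:

    assert sshargs(b'ssh', b'example.com', b'user', b'2222') == b"-p 2222 'user@example.com'"
    assert sshargs(b'ssh', b'example.com', None, None) == b'example.com'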
162 |
|
162 | |||
163 |
|
163 | |||
164 | def isexec(f: bytes) -> bool: |
|
164 | def isexec(f: bytes) -> bool: | |
165 | """check whether a file is executable""" |
|
165 | """check whether a file is executable""" | |
166 | return os.lstat(f).st_mode & 0o100 != 0 |
|
166 | return os.lstat(f).st_mode & 0o100 != 0 | |
167 |
|
167 | |||
168 |
|
168 | |||
169 | def setflags(f: bytes, l: bool, x: bool) -> None: |
|
169 | def setflags(f: bytes, l: bool, x: bool) -> None: | |
170 | st = os.lstat(f) |
|
170 | st = os.lstat(f) | |
171 | s = st.st_mode |
|
171 | s = st.st_mode | |
172 | if l: |
|
172 | if l: | |
173 | if not stat.S_ISLNK(s): |
|
173 | if not stat.S_ISLNK(s): | |
174 | # switch file to link |
|
174 | # switch file to link | |
175 | with open(f, b'rb') as fp: |
|
175 | with open(f, b'rb') as fp: | |
176 | data = fp.read() |
|
176 | data = fp.read() | |
177 | unlink(f) |
|
177 | unlink(f) | |
178 | try: |
|
178 | try: | |
179 | os.symlink(data, f) |
|
179 | os.symlink(data, f) | |
180 | except OSError: |
|
180 | except OSError: | |
181 | # failed to make a link, rewrite file |
|
181 | # failed to make a link, rewrite file | |
182 | with open(f, b"wb") as fp: |
|
182 | with open(f, b"wb") as fp: | |
183 | fp.write(data) |
|
183 | fp.write(data) | |
184 |
|
184 | |||
185 | # no chmod needed at this point |
|
185 | # no chmod needed at this point | |
186 | return |
|
186 | return | |
187 | if stat.S_ISLNK(s): |
|
187 | if stat.S_ISLNK(s): | |
188 | # switch link to file |
|
188 | # switch link to file | |
189 | data = os.readlink(f) |
|
189 | data = os.readlink(f) | |
190 | unlink(f) |
|
190 | unlink(f) | |
191 | with open(f, b"wb") as fp: |
|
191 | with open(f, b"wb") as fp: | |
192 | fp.write(data) |
|
192 | fp.write(data) | |
193 | s = 0o666 & ~umask # avoid restatting for chmod |
|
193 | s = 0o666 & ~umask # avoid restatting for chmod | |
194 |
|
194 | |||
195 | sx = s & 0o100 |
|
195 | sx = s & 0o100 | |
196 | if st.st_nlink > 1 and bool(x) != bool(sx): |
|
196 | if st.st_nlink > 1 and bool(x) != bool(sx): | |
197 | # the file is a hardlink, break it |
|
197 | # the file is a hardlink, break it | |
198 | with open(f, b"rb") as fp: |
|
198 | with open(f, b"rb") as fp: | |
199 | data = fp.read() |
|
199 | data = fp.read() | |
200 | unlink(f) |
|
200 | unlink(f) | |
201 | with open(f, b"wb") as fp: |
|
201 | with open(f, b"wb") as fp: | |
202 | fp.write(data) |
|
202 | fp.write(data) | |
203 |
|
203 | |||
204 | if x and not sx: |
|
204 | if x and not sx: | |
205 | # Turn on +x for every +r bit when making a file executable |
|
205 | # Turn on +x for every +r bit when making a file executable | |
206 | # and obey umask. |
|
206 | # and obey umask. | |
207 | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) |
|
207 | os.chmod(f, s | (s & 0o444) >> 2 & ~umask) | |
208 | elif not x and sx: |
|
208 | elif not x and sx: | |
209 | # Turn off all +x bits |
|
209 | # Turn off all +x bits | |
210 | os.chmod(f, s & 0o666) |
|
210 | os.chmod(f, s & 0o666) | |
211 |
|
211 | |||
212 |
|
212 | |||
213 | def copymode( |
|
213 | def copymode( | |
214 | src: bytes, |
|
214 | src: bytes, | |
215 | dst: bytes, |
|
215 | dst: bytes, | |
216 | mode: Optional[bytes] = None, |
|
216 | mode: Optional[bytes] = None, | |
217 | enforcewritable: bool = False, |
|
217 | enforcewritable: bool = False, | |
218 | ) -> None: |
|
218 | ) -> None: | |
219 | """Copy the file mode from the file at path src to dst. |
|
219 | """Copy the file mode from the file at path src to dst. | |
220 | If src doesn't exist, we're using mode instead. If mode is None, we're |
|
220 | If src doesn't exist, we're using mode instead. If mode is None, we're | |
221 | using umask.""" |
|
221 | using umask.""" | |
222 | try: |
|
222 | try: | |
223 | st_mode = os.lstat(src).st_mode & 0o777 |
|
223 | st_mode = os.lstat(src).st_mode & 0o777 | |
224 | except FileNotFoundError: |
|
224 | except FileNotFoundError: | |
225 | st_mode = mode |
|
225 | st_mode = mode | |
226 | if st_mode is None: |
|
226 | if st_mode is None: | |
227 | st_mode = ~umask |
|
227 | st_mode = ~umask | |
228 | st_mode &= 0o666 |
|
228 | st_mode &= 0o666 | |
229 |
|
229 | |||
230 | new_mode = st_mode |
|
230 | new_mode = st_mode | |
231 |
|
231 | |||
232 | if enforcewritable: |
|
232 | if enforcewritable: | |
233 | new_mode |= stat.S_IWUSR |
|
233 | new_mode |= stat.S_IWUSR | |
234 |
|
234 | |||
235 | os.chmod(dst, new_mode) |
|
235 | os.chmod(dst, new_mode) | |
236 |
|
236 | |||
237 |
|
237 | |||
238 | def checkexec(path: bytes) -> bool: |
|
238 | def checkexec(path: bytes) -> bool: | |
239 | """ |
|
239 | """ | |
240 | Check whether the given path is on a filesystem with UNIX-like exec flags |
|
240 | Check whether the given path is on a filesystem with UNIX-like exec flags | |
241 |
|
241 | |||
242 | Requires a directory (like /foo/.hg) |
|
242 | Requires a directory (like /foo/.hg) | |
243 | """ |
|
243 | """ | |
244 |
|
244 | |||
245 | # VFAT on some Linux versions can flip mode but it doesn't persist across |
|
245 | # VFAT on some Linux versions can flip mode but it doesn't persist across | |
246 | # a FS remount. Frequently we can detect it if files are created |
|
246 | # a FS remount. Frequently we can detect it if files are created | |
247 | # with exec bit on. |
|
247 | # with exec bit on. | |
248 |
|
248 | |||
249 | try: |
|
249 | try: | |
250 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH |
|
250 | EXECFLAGS = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | |
251 | basedir = os.path.join(path, b'.hg') |
|
251 | basedir = os.path.join(path, b'.hg') | |
252 | cachedir = os.path.join(basedir, b'wcache') |
|
252 | cachedir = os.path.join(basedir, b'wcache') | |
253 | storedir = os.path.join(basedir, b'store') |
|
253 | storedir = os.path.join(basedir, b'store') | |
254 | if not os.path.exists(cachedir): |
|
254 | if not os.path.exists(cachedir): | |
255 | try: |
|
255 | try: | |
256 | # we want to create the 'cache' directory, not the '.hg' one. |
|
256 | # we want to create the 'cache' directory, not the '.hg' one. | |
257 | # Automatically creating '.hg' directory could silently spawn |
|
257 | # Automatically creating '.hg' directory could silently spawn | |
258 | # invalid Mercurial repositories. That seems like a bad idea. |
|
258 | # invalid Mercurial repositories. That seems like a bad idea. | |
259 | os.mkdir(cachedir) |
|
259 | os.mkdir(cachedir) | |
260 | if os.path.exists(storedir): |
|
260 | if os.path.exists(storedir): | |
261 | copymode(storedir, cachedir) |
|
261 | copymode(storedir, cachedir) | |
262 | else: |
|
262 | else: | |
263 | copymode(basedir, cachedir) |
|
263 | copymode(basedir, cachedir) | |
264 | except (IOError, OSError): |
|
264 | except (IOError, OSError): | |
265 | # we let other fallback logic trigger |
|
265 | # we let other fallback logic trigger | |
266 | pass |
|
266 | pass | |
267 | if os.path.isdir(cachedir): |
|
267 | if os.path.isdir(cachedir): | |
268 | checkisexec = os.path.join(cachedir, b'checkisexec') |
|
268 | checkisexec = os.path.join(cachedir, b'checkisexec') | |
269 | checknoexec = os.path.join(cachedir, b'checknoexec') |
|
269 | checknoexec = os.path.join(cachedir, b'checknoexec') | |
270 |
|
270 | |||
271 | try: |
|
271 | try: | |
272 | m = os.stat(checkisexec).st_mode |
|
272 | m = os.stat(checkisexec).st_mode | |
273 | except FileNotFoundError: |
|
273 | except FileNotFoundError: | |
274 | # checkisexec does not exist - fall through ... |
|
274 | # checkisexec does not exist - fall through ... | |
275 | pass |
|
275 | pass | |
276 | else: |
|
276 | else: | |
277 | # checkisexec exists, check if it actually is exec |
|
277 | # checkisexec exists, check if it actually is exec | |
278 | if m & EXECFLAGS != 0: |
|
278 | if m & EXECFLAGS != 0: | |
279 | # ensure checknoexec exists, check it isn't exec |
|
279 | # ensure checknoexec exists, check it isn't exec | |
280 | try: |
|
280 | try: | |
281 | m = os.stat(checknoexec).st_mode |
|
281 | m = os.stat(checknoexec).st_mode | |
282 | except FileNotFoundError: |
|
282 | except FileNotFoundError: | |
283 | open(checknoexec, b'w').close() # might fail |
|
283 | open(checknoexec, b'w').close() # might fail | |
284 | m = os.stat(checknoexec).st_mode |
|
284 | m = os.stat(checknoexec).st_mode | |
285 | if m & EXECFLAGS == 0: |
|
285 | if m & EXECFLAGS == 0: | |
286 | # check-exec is exec and check-no-exec is not exec |
|
286 | # check-exec is exec and check-no-exec is not exec | |
287 | return True |
|
287 | return True | |
288 | # checknoexec exists but is exec - delete it |
|
288 | # checknoexec exists but is exec - delete it | |
289 | unlink(checknoexec) |
|
289 | unlink(checknoexec) | |
290 | # checkisexec exists but is not exec - delete it |
|
290 | # checkisexec exists but is not exec - delete it | |
291 | unlink(checkisexec) |
|
291 | unlink(checkisexec) | |
292 |
|
292 | |||
293 | # check using one file, leave it as checkisexec |
|
293 | # check using one file, leave it as checkisexec | |
294 | checkdir = cachedir |
|
294 | checkdir = cachedir | |
295 | else: |
|
295 | else: | |
296 | # check directly in path and don't leave checkisexec behind |
|
296 | # check directly in path and don't leave checkisexec behind | |
297 | checkdir = path |
|
297 | checkdir = path | |
298 | checkisexec = None |
|
298 | checkisexec = None | |
299 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') |
|
299 | fh, fn = pycompat.mkstemp(dir=checkdir, prefix=b'hg-checkexec-') | |
300 | try: |
|
300 | try: | |
301 | os.close(fh) |
|
301 | os.close(fh) | |
302 | m = os.stat(fn).st_mode |
|
302 | m = os.stat(fn).st_mode | |
303 | if m & EXECFLAGS == 0: |
|
303 | if m & EXECFLAGS == 0: | |
304 | os.chmod(fn, m & 0o777 | EXECFLAGS) |
|
304 | os.chmod(fn, m & 0o777 | EXECFLAGS) | |
305 | if os.stat(fn).st_mode & EXECFLAGS != 0: |
|
305 | if os.stat(fn).st_mode & EXECFLAGS != 0: | |
306 | if checkisexec is not None: |
|
306 | if checkisexec is not None: | |
307 | os.rename(fn, checkisexec) |
|
307 | os.rename(fn, checkisexec) | |
308 | fn = None |
|
308 | fn = None | |
309 | return True |
|
309 | return True | |
310 | finally: |
|
310 | finally: | |
311 | if fn is not None: |
|
311 | if fn is not None: | |
312 | unlink(fn) |
|
312 | unlink(fn) | |
313 | except (IOError, OSError): |
|
313 | except (IOError, OSError): | |
314 | # we don't care, the user probably won't be able to commit anyway |
|
314 | # we don't care, the user probably won't be able to commit anyway | |
315 | return False |
|
315 | return False | |
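A stripped-down sketch of the probe `checkexec()` performs, with none of the wcache bookkeeping above; `exec_bit_supported` is a hypothetical helper that only asks whether the exec bit can be set on a fresh file and read back:

    import os
    import stat
    import tempfile

    def exec_bit_supported(directory: bytes) -> bool:
        # Create a throwaway file, flip the user exec bit, and check whether
        # the filesystem actually reports it afterwards.
        fh, fn = tempfile.mkstemp(dir=directory, prefix=b'hg-exec-probe-')
        try:
            os.close(fh)
            mode = os.stat(fn).st_mode
            os.chmod(fn, mode | stat.S_IXUSR)
            return bool(os.stat(fn).st_mode & stat.S_IXUSR)
        finally:
            os.unlink(fn)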
316 |
|
316 | |||
317 |
|
317 | |||
318 | def checklink(path: bytes) -> bool: |
|
318 | def checklink(path: bytes) -> bool: | |
319 | """check whether the given path is on a symlink-capable filesystem""" |
|
319 | """check whether the given path is on a symlink-capable filesystem""" | |
320 | # mktemp is not racy because symlink creation will fail if the |
|
320 | # mktemp is not racy because symlink creation will fail if the | |
321 | # file already exists |
|
321 | # file already exists | |
322 | while True: |
|
322 | while True: | |
323 | cachedir = os.path.join(path, b'.hg', b'wcache') |
|
323 | cachedir = os.path.join(path, b'.hg', b'wcache') | |
324 | checklink = os.path.join(cachedir, b'checklink') |
|
324 | checklink = os.path.join(cachedir, b'checklink') | |
325 | # try fast path, read only |
|
325 | # try fast path, read only | |
326 | if os.path.islink(checklink): |
|
326 | if os.path.islink(checklink): | |
327 | return True |
|
327 | return True | |
328 | if os.path.isdir(cachedir): |
|
328 | if os.path.isdir(cachedir): | |
329 | checkdir = cachedir |
|
329 | checkdir = cachedir | |
330 | else: |
|
330 | else: | |
331 | checkdir = path |
|
331 | checkdir = path | |
332 | cachedir = None |
|
332 | cachedir = None | |
333 | name = tempfile.mktemp( |
|
333 | name = tempfile.mktemp( | |
334 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' |
|
334 | dir=pycompat.fsdecode(checkdir), prefix=r'checklink-' | |
335 | ) |
|
335 | ) | |
336 | name = pycompat.fsencode(name) |
|
336 | name = pycompat.fsencode(name) | |
337 | try: |
|
337 | try: | |
338 | fd = None |
|
338 | fd = None | |
339 | if cachedir is None: |
|
339 | if cachedir is None: | |
340 | fd = pycompat.namedtempfile( |
|
340 | fd = pycompat.namedtempfile( | |
341 | dir=checkdir, prefix=b'hg-checklink-' |
|
341 | dir=checkdir, prefix=b'hg-checklink-' | |
342 | ) |
|
342 | ) | |
343 | target = os.path.basename(fd.name) |
|
343 | target = os.path.basename(fd.name) | |
344 | else: |
|
344 | else: | |
345 | # create a fixed file to link to; doesn't matter if it |
|
345 | # create a fixed file to link to; doesn't matter if it | |
346 | # already exists. |
|
346 | # already exists. | |
347 | target = b'checklink-target' |
|
347 | target = b'checklink-target' | |
348 | try: |
|
348 | try: | |
349 | fullpath = os.path.join(cachedir, target) |
|
349 | fullpath = os.path.join(cachedir, target) | |
350 | open(fullpath, b'w').close() |
|
350 | open(fullpath, b'w').close() | |
351 | except PermissionError: |
|
351 | except PermissionError: | |
352 | # If we can't write to cachedir, just pretend |
|
352 | # If we can't write to cachedir, just pretend | |
353 | # that the fs is readonly and by association |
|
353 | # that the fs is readonly and by association | |
354 | # that the fs won't support symlinks. This |
|
354 | # that the fs won't support symlinks. This | |
355 | # seems like the least dangerous way to avoid |
|
355 | # seems like the least dangerous way to avoid | |
356 | # data loss. |
|
356 | # data loss. | |
357 | return False |
|
357 | return False | |
358 | try: |
|
358 | try: | |
359 | os.symlink(target, name) |
|
359 | os.symlink(target, name) | |
360 | if cachedir is None: |
|
360 | if cachedir is None: | |
361 | unlink(name) |
|
361 | unlink(name) | |
362 | else: |
|
362 | else: | |
363 | try: |
|
363 | try: | |
364 | os.rename(name, checklink) |
|
364 | os.rename(name, checklink) | |
365 | except OSError: |
|
365 | except OSError: | |
366 | unlink(name) |
|
366 | unlink(name) | |
367 | return True |
|
367 | return True | |
368 | except FileExistsError: |
|
368 | except FileExistsError: | |
369 | # link creation might race, try again |
|
369 | # link creation might race, try again | |
370 | continue |
|
370 | continue | |
371 | finally: |
|
371 | finally: | |
372 | if fd is not None: |
|
372 | if fd is not None: | |
373 | fd.close() |
|
373 | fd.close() | |
374 | except AttributeError: |
|
374 | except AttributeError: | |
375 | return False |
|
375 | return False | |
376 | except OSError as inst: |
|
376 | except OSError as inst: | |
377 | # sshfs might report failure while successfully creating the link |
|
377 | # sshfs might report failure while successfully creating the link | |
378 | if inst.errno == errno.EIO and os.path.exists(name): |
|
378 | if inst.errno == errno.EIO and os.path.exists(name): | |
379 | unlink(name) |
|
379 | unlink(name) | |
380 | return False |
|
380 | return False | |
381 |
|
381 | |||
382 |
|
382 | |||
383 | def checkosfilename(path: bytes) -> Optional[bytes]: |
|
383 | def checkosfilename(path: bytes) -> Optional[bytes]: | |
384 | """Check that the base-relative path is a valid filename on this platform. |
|
384 | """Check that the base-relative path is a valid filename on this platform. | |
385 | Returns None if the path is ok, or a UI string describing the problem.""" |
|
385 | Returns None if the path is ok, or a UI string describing the problem.""" | |
386 | return None # on posix platforms, every path is ok |
|
386 | return None # on posix platforms, every path is ok | |
387 |
|
387 | |||
388 |
|
388 | |||
389 | def getfsmountpoint(dirpath: bytes) -> Optional[bytes]: |
|
389 | def getfsmountpoint(dirpath: bytes) -> Optional[bytes]: | |
390 | """Get the filesystem mount point from a directory (best-effort) |
|
390 | """Get the filesystem mount point from a directory (best-effort) | |
391 |
|
391 | |||
392 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
392 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | |
393 | """ |
|
393 | """ | |
394 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) |
|
394 | return getattr(osutil, 'getfsmountpoint', lambda x: None)(dirpath) | |
395 |
|
395 | |||
396 |
|
396 | |||
397 | def getfstype(dirpath: bytes) -> Optional[bytes]: |
|
397 | def getfstype(dirpath: bytes) -> Optional[bytes]: | |
398 | """Get the filesystem type name from a directory (best-effort) |
|
398 | """Get the filesystem type name from a directory (best-effort) | |
399 |
|
399 | |||
400 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. |
|
400 | Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc. | |
401 | """ |
|
401 | """ | |
402 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) |
|
402 | return getattr(osutil, 'getfstype', lambda x: None)(dirpath) | |
403 |
|
403 | |||
404 |
|
404 | |||
405 | def get_password() -> bytes: |
|
405 | def get_password() -> bytes: | |
406 | return encoding.strtolocal(getpass.getpass('')) |
|
406 | return encoding.strtolocal(getpass.getpass('')) | |
407 |
|
407 | |||
408 |
|
408 | |||
409 | def setbinary(fd) -> None: |
|
409 | def setbinary(fd) -> None: | |
410 | pass |
|
410 | pass | |
411 |
|
411 | |||
412 |
|
412 | |||
413 | def pconvert(path: bytes) -> bytes: |
|
413 | def pconvert(path: bytes) -> bytes: | |
414 | return path |
|
414 | return path | |
415 |
|
415 | |||
416 |
|
416 | |||
417 | def localpath(path: bytes) -> bytes: |
|
417 | def localpath(path: bytes) -> bytes: | |
418 | return path |
|
418 | return path | |
419 |
|
419 | |||
420 |
|
420 | |||
421 | def samefile(fpath1: bytes, fpath2: bytes) -> bool: |
|
421 | def samefile(fpath1: bytes, fpath2: bytes) -> bool: | |
422 | """Returns whether path1 and path2 refer to the same file. This is only |
|
422 | """Returns whether path1 and path2 refer to the same file. This is only | |
423 | guaranteed to work for files, not directories.""" |
|
423 | guaranteed to work for files, not directories.""" | |
424 | return os.path.samefile(fpath1, fpath2) |
|
424 | return os.path.samefile(fpath1, fpath2) | |
425 |
|
425 | |||
426 |
|
426 | |||
427 | def samedevice(fpath1: bytes, fpath2: bytes) -> bool: |
|
427 | def samedevice(fpath1: bytes, fpath2: bytes) -> bool: | |
428 | """Returns whether fpath1 and fpath2 are on the same device. This is only |
|
428 | """Returns whether fpath1 and fpath2 are on the same device. This is only | |
429 | guaranteed to work for files, not directories.""" |
|
429 | guaranteed to work for files, not directories.""" | |
430 | st1 = os.lstat(fpath1) |
|
430 | st1 = os.lstat(fpath1) | |
431 | st2 = os.lstat(fpath2) |
|
431 | st2 = os.lstat(fpath2) | |
432 | return st1.st_dev == st2.st_dev |
|
432 | return st1.st_dev == st2.st_dev | |
433 |
|
433 | |||
434 |
|
434 | |||
435 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems |
|
435 | # os.path.normcase is a no-op, which doesn't help us on non-native filesystems | |
436 | def normcase(path: bytes) -> bytes: |
|
436 | def normcase(path: bytes) -> bytes: | |
437 | return path.lower() |
|
437 | return path.lower() | |
438 |
|
438 | |||
439 |
|
439 | |||
440 | # what normcase does to ASCII strings |
|
440 | # what normcase does to ASCII strings | |
441 | normcasespec: int = encoding.normcasespecs.lower |
|
441 | normcasespec: int = encoding.normcasespecs.lower | |
442 | # fallback normcase function for non-ASCII strings |
|
442 | # fallback normcase function for non-ASCII strings | |
443 | normcasefallback = normcase |
|
443 | normcasefallback = normcase | |
444 |
|
444 | |||
445 | if pycompat.isdarwin: |
|
445 | if pycompat.isdarwin: | |
446 |
|
446 | |||
447 | def normcase(path: bytes) -> bytes: |
|
447 | def normcase(path: bytes) -> bytes: | |
448 | """ |
|
448 | """ | |
449 | Normalize a filename for OS X-compatible comparison: |
|
449 | Normalize a filename for OS X-compatible comparison: | |
450 | - escape-encode invalid characters |
|
450 | - escape-encode invalid characters | |
451 | - decompose to NFD |
|
451 | - decompose to NFD | |
452 | - lowercase |
|
452 | - lowercase | |
453 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff] |
|
453 | - omit ignored characters [200c-200f, 202a-202e, 206a-206f,feff] | |
454 |
|
454 | |||
455 | >>> normcase(b'UPPER') |
|
455 | >>> normcase(b'UPPER') | |
456 | 'upper' |
|
456 | 'upper' | |
457 | >>> normcase(b'Caf\\xc3\\xa9') |
|
457 | >>> normcase(b'Caf\\xc3\\xa9') | |
458 | 'cafe\\xcc\\x81' |
|
458 | 'cafe\\xcc\\x81' | |
459 | >>> normcase(b'\\xc3\\x89') |
|
459 | >>> normcase(b'\\xc3\\x89') | |
460 | 'e\\xcc\\x81' |
|
460 | 'e\\xcc\\x81' | |
461 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 |
|
461 | >>> normcase(b'\\xb8\\xca\\xc3\\xca\\xbe\\xc8.JPG') # issue3918 | |
462 | '%b8%ca%c3\\xca\\xbe%c8.jpg' |
|
462 | '%b8%ca%c3\\xca\\xbe%c8.jpg' | |
463 | """ |
|
463 | """ | |
464 |
|
464 | |||
465 | try: |
|
465 | try: | |
466 | return encoding.asciilower(path) # exception for non-ASCII |
|
466 | return encoding.asciilower(path) # exception for non-ASCII | |
467 | except UnicodeDecodeError: |
|
467 | except UnicodeDecodeError: | |
468 | return normcasefallback(path) |
|
468 | return normcasefallback(path) | |
469 |
|
469 | |||
470 | normcasespec = encoding.normcasespecs.lower |
|
470 | normcasespec = encoding.normcasespecs.lower | |
471 |
|
471 | |||
472 | def normcasefallback(path: bytes) -> bytes: |
|
472 | def normcasefallback(path: bytes) -> bytes: | |
473 | try: |
|
473 | try: | |
474 | u = path.decode('utf-8') |
|
474 | u = path.decode('utf-8') | |
475 | except UnicodeDecodeError: |
|
475 | except UnicodeDecodeError: | |
476 | # OS X percent-encodes any bytes that aren't valid utf-8 |
|
476 | # OS X percent-encodes any bytes that aren't valid utf-8 | |
477 | s = b'' |
|
477 | s = b'' | |
478 | pos = 0 |
|
478 | pos = 0 | |
479 | l = len(path) |
|
479 | l = len(path) | |
480 | while pos < l: |
|
480 | while pos < l: | |
481 | try: |
|
481 | try: | |
482 | c = encoding.getutf8char(path, pos) |
|
482 | c = encoding.getutf8char(path, pos) | |
483 | pos += len(c) |
|
483 | pos += len(c) | |
484 | except ValueError: |
|
484 | except ValueError: | |
485 | c = b'%%%02X' % ord(path[pos : pos + 1]) |
|
485 | c = b'%%%02X' % ord(path[pos : pos + 1]) | |
486 | pos += 1 |
|
486 | pos += 1 | |
487 | s += c |
|
487 | s += c | |
488 |
|
488 | |||
489 | u = s.decode('utf-8') |
|
489 | u = s.decode('utf-8') | |
490 |
|
490 | |||
491 | # Decompose then lowercase (HFS+ technote specifies lower) |
|
491 | # Decompose then lowercase (HFS+ technote specifies lower) | |
492 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') |
|
492 | enc = unicodedata.normalize('NFD', u).lower().encode('utf-8') | |
493 | # drop HFS+ ignored characters |
|
493 | # drop HFS+ ignored characters | |
494 | return encoding.hfsignoreclean(enc) |
|
494 | return encoding.hfsignoreclean(enc) | |
495 |
|
495 | |||
496 |
|
496 | |||
497 | if pycompat.sysplatform == b'cygwin': |
|
497 | if pycompat.sysplatform == b'cygwin': | |
498 | # workaround for cygwin, in which mount point part of path is |
|
498 | # workaround for cygwin, in which mount point part of path is | |
499 | # treated as case sensitive, even though underlying NTFS is case |
|
499 | # treated as case sensitive, even though underlying NTFS is case | |
500 | # insensitive. |
|
500 | # insensitive. | |
501 |
|
501 | |||
502 | # default mount points |
|
502 | # default mount points | |
503 | cygwinmountpoints = sorted( |
|
503 | cygwinmountpoints = sorted( | |
504 | [ |
|
504 | [ | |
505 | b"/usr/bin", |
|
505 | b"/usr/bin", | |
506 | b"/usr/lib", |
|
506 | b"/usr/lib", | |
507 | b"/cygdrive", |
|
507 | b"/cygdrive", | |
508 | ], |
|
508 | ], | |
509 | reverse=True, |
|
509 | reverse=True, | |
510 | ) |
|
510 | ) | |
511 |
|
511 | |||
512 | # use uppercasing for normcase, the same as the NTFS workaround |
|
512 | # use uppercasing for normcase, the same as the NTFS workaround | |
513 | def normcase(path: bytes) -> bytes: |
|
513 | def normcase(path: bytes) -> bytes: | |
514 | pathlen = len(path) |
|
514 | pathlen = len(path) | |
515 | if (pathlen == 0) or (path[0] != pycompat.ossep): |
|
515 | if (pathlen == 0) or (path[0] != pycompat.ossep): | |
516 | # treat as relative |
|
516 | # treat as relative | |
517 | return encoding.upper(path) |
|
517 | return encoding.upper(path) | |
518 |
|
518 | |||
519 | # to preserve case of mountpoint part |
|
519 | # to preserve case of mountpoint part | |
520 | for mp in cygwinmountpoints: |
|
520 | for mp in cygwinmountpoints: | |
521 | if not path.startswith(mp): |
|
521 | if not path.startswith(mp): | |
522 | continue |
|
522 | continue | |
523 |
|
523 | |||
524 | mplen = len(mp) |
|
524 | mplen = len(mp) | |
525 | if mplen == pathlen: # mount point itself |
|
525 | if mplen == pathlen: # mount point itself | |
526 | return mp |
|
526 | return mp | |
527 | if path[mplen] == pycompat.ossep: |
|
527 | if path[mplen] == pycompat.ossep: | |
528 | return mp + encoding.upper(path[mplen:]) |
|
528 | return mp + encoding.upper(path[mplen:]) | |
529 |
|
529 | |||
530 | return encoding.upper(path) |
|
530 | return encoding.upper(path) | |
531 |
|
531 | |||
532 | normcasespec = encoding.normcasespecs.other |
|
532 | normcasespec = encoding.normcasespecs.other | |
533 | normcasefallback = normcase |
|
533 | normcasefallback = normcase | |
534 |
|
534 | |||
535 | # Cygwin translates native ACLs to POSIX permissions, |
|
535 | # Cygwin translates native ACLs to POSIX permissions, | |
536 | # but these translations are not supported by native |
|
536 | # but these translations are not supported by native | |
537 | # tools, so the exec bit tends to be set erroneously. |
|
537 | # tools, so the exec bit tends to be set erroneously. | |
538 | # Therefore, disable executable bit access on Cygwin. |
|
538 | # Therefore, disable executable bit access on Cygwin. | |
539 | def checkexec(path: bytes) -> bool: |
|
539 | def checkexec(path: bytes) -> bool: | |
540 | return False |
|
540 | return False | |
541 |
|
541 | |||
542 | # Similarly, Cygwin's symlink emulation is likely to create |
|
542 | # Similarly, Cygwin's symlink emulation is likely to create | |
543 | # problems when Mercurial is used from both Cygwin and native |
|
543 | # problems when Mercurial is used from both Cygwin and native | |
544 | # Windows, with other native tools, or on shared volumes |
|
544 | # Windows, with other native tools, or on shared volumes | |
545 | def checklink(path: bytes) -> bool: |
|
545 | def checklink(path: bytes) -> bool: | |
546 | return False |
|
546 | return False | |
547 |
|
547 | |||
548 |
|
548 | |||
549 | if pycompat.sysplatform == b'OpenVMS': |
|
549 | if pycompat.sysplatform == b'OpenVMS': | |
550 | # OpenVMS's symlink emulation is broken on some OpenVMS versions. |
|
550 | # OpenVMS's symlink emulation is broken on some OpenVMS versions. | |
551 | def checklink(path: bytes) -> bool: |
|
551 | def checklink(path: bytes) -> bool: | |
552 | return False |
|
552 | return False | |
553 |
|
553 | |||
554 |
|
554 | |||
555 | _needsshellquote: Optional[Match[bytes]] = None |
|
555 | _needsshellquote: Optional[Match[bytes]] = None | |
556 |
|
556 | |||
557 |
|
557 | |||
558 | def shellquote(s: bytes) -> bytes: |
|
558 | def shellquote(s: bytes) -> bytes: | |
559 | if pycompat.sysplatform == b'OpenVMS': |
|
559 | if pycompat.sysplatform == b'OpenVMS': | |
560 | return b'"%s"' % s |
|
560 | return b'"%s"' % s | |
561 | global _needsshellquote |
|
561 | global _needsshellquote | |
562 | if _needsshellquote is None: |
|
562 | if _needsshellquote is None: | |
563 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search |
|
563 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._/+-]').search | |
564 | if s and not _needsshellquote(s): |
|
564 | if s and not _needsshellquote(s): | |
565 | # "s" shouldn't have to be quoted |
|
565 | # "s" shouldn't have to be quoted | |
566 | return s |
|
566 | return s | |
567 | else: |
|
567 | else: | |
568 | return b"'%s'" % s.replace(b"'", b"'\\''") |
|
568 | return b"'%s'" % s.replace(b"'", b"'\\''") | |
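Illustrative behaviour of `shellquote()` (examples derived from the code above, not doctests present in the module): input made only of the safe characters `[a-zA-Z0-9._/+-]` passes through untouched, and anything else is wrapped in single quotes with embedded quotes escaped the POSIX way:

    assert shellquote(b'simple/path+name-1.0') == b'simple/path+name-1.0'
    assert shellquote(b'has space') == b"'has space'"
    assert shellquote(b"it's") == b"'it'\\''s'"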
569 |
|
569 | |||
570 |
|
570 | |||
571 | def shellsplit(s: bytes) -> List[bytes]: |
|
571 | def shellsplit(s: bytes) -> List[bytes]: | |
572 | """Parse a command string in POSIX shell way (best-effort)""" |
|
572 | """Parse a command string in POSIX shell way (best-effort)""" | |
573 | return pycompat.shlexsplit(s, posix=True) |
|
573 | return pycompat.shlexsplit(s, posix=True) | |
574 |
|
574 | |||
575 |
|
575 | |||
576 | def testpid(pid: int) -> bool: |
|
576 | def testpid(pid: int) -> bool: | |
577 | '''return False if pid dead, True if running or not sure''' |
|
577 | '''return False if pid dead, True if running or not sure''' | |
578 | if pycompat.sysplatform == b'OpenVMS': |
|
578 | if pycompat.sysplatform == b'OpenVMS': | |
579 | return True |
|
579 | return True | |
580 | try: |
|
580 | try: | |
581 | os.kill(pid, 0) |
|
581 | os.kill(pid, 0) | |
582 | return True |
|
582 | return True | |
583 | except OSError as inst: |
|
583 | except OSError as inst: | |
584 | return inst.errno != errno.ESRCH |
|
584 | return inst.errno != errno.ESRCH | |
585 |
|
585 | |||
586 |
|
586 | |||
587 | def isowner(st: os.stat_result) -> bool: |
|
587 | def isowner(st: os.stat_result) -> bool: | |
588 | """Return True if the stat object st is from the current user.""" |
|
588 | """Return True if the stat object st is from the current user.""" | |
589 | return st.st_uid == os.getuid() |
|
589 | return st.st_uid == os.getuid() | |
590 |
|
590 | |||
591 |
|
591 | |||
592 | def findexe(command: bytes) -> Optional[bytes]: |
|
592 | def findexe(command: bytes) -> Optional[bytes]: | |
593 | """Find executable for command searching like which does. |
|
593 | """Find executable for command searching like which does. | |
594 | If command is a basename then PATH is searched for command. |
|
594 | If command is a basename then PATH is searched for command. | |
595 | PATH isn't searched if command is an absolute or relative path. |
|
595 | PATH isn't searched if command is an absolute or relative path. | |
596 | If command isn't found None is returned.""" |
|
596 | If command isn't found None is returned.""" | |
597 | if pycompat.sysplatform == b'OpenVMS': |
|
597 | if pycompat.sysplatform == b'OpenVMS': | |
598 | return command |
|
598 | return command | |
599 |
|
599 | |||
600 | def findexisting(executable: bytes) -> Optional[bytes]: |
|
600 | def findexisting(executable: bytes) -> Optional[bytes]: | |
601 | b'Return the executable if it is an existing file' |
|
601 | b'Return the executable if it is an existing file' | |
602 | if os.path.isfile(executable) and os.access(executable, os.X_OK): |
|
602 | if os.path.isfile(executable) and os.access(executable, os.X_OK): | |
603 | return executable |
|
603 | return executable | |
604 | return None |
|
604 | return None | |
605 |
|
605 | |||
606 | if pycompat.ossep in command: |
|
606 | if pycompat.ossep in command: | |
607 | return findexisting(command) |
|
607 | return findexisting(command) | |
608 |
|
608 | |||
609 | if pycompat.sysplatform == b'plan9': |
|
609 | if pycompat.sysplatform == b'plan9': | |
610 | return findexisting(os.path.join(b'/bin', command)) |
|
610 | return findexisting(os.path.join(b'/bin', command)) | |
611 |
|
611 | |||
612 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
612 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): | |
613 | executable = findexisting(os.path.join(path, command)) |
|
613 | executable = findexisting(os.path.join(path, command)) | |
614 | if executable is not None: |
|
614 | if executable is not None: | |
615 | return executable |
|
615 | return executable | |
616 | return None |
|
616 | return None | |
617 |
|
617 | |||
618 |
|
618 | |||
619 | def setsignalhandler() -> None: |
|
619 | def setsignalhandler() -> None: | |
620 | pass |
|
620 | pass | |
621 |
|
621 | |||
622 |
|
622 | |||
623 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
623 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | |
624 |
|
624 | |||
625 |
|
625 | |||
626 | def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]: |
|
626 | def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]: | |
627 | """Stat each file in files. Yield each stat, or None if a file does not |
|
627 | """Stat each file in files. Yield each stat, or None if a file does not | |
628 | exist or has a type we don't care about.""" |
|
628 | exist or has a type we don't care about.""" | |
629 | lstat = os.lstat |
|
629 | lstat = os.lstat | |
630 | getkind = stat.S_IFMT |
|
630 | getkind = stat.S_IFMT | |
631 | for nf in files: |
|
631 | for nf in files: | |
632 | try: |
|
632 | try: | |
633 | st = lstat(nf) |
|
633 | st = lstat(nf) | |
634 | if getkind(st.st_mode) not in _wantedkinds: |
|
634 | if getkind(st.st_mode) not in _wantedkinds: | |
635 | st = None |
|
635 | st = None | |
636 | except (FileNotFoundError, NotADirectoryError): |
|
636 | except (FileNotFoundError, NotADirectoryError): | |
637 | st = None |
|
637 | st = None | |
638 | yield st |
|
638 | yield st | |
639 |
|
639 | |||
640 |
|
640 | |||
641 | def getuser() -> bytes: |
|
641 | def getuser() -> bytes: | |
642 | '''return name of current user''' |
|
642 | '''return name of current user''' | |
643 | return pycompat.fsencode(getpass.getuser()) |
|
643 | return pycompat.fsencode(getpass.getuser()) | |
644 |
|
644 | |||
645 |
|
645 | |||
646 | def username(uid: Optional[int] = None) -> Optional[bytes]: |
|
646 | def username(uid: Optional[int] = None) -> Optional[bytes]: | |
647 | """Return the name of the user with the given uid. |
|
647 | """Return the name of the user with the given uid. | |
648 |
|
648 | |||
649 | If uid is None, return the name of the current user.""" |
|
649 | If uid is None, return the name of the current user.""" | |
650 |
|
650 | |||
651 | if uid is None: |
|
651 | if uid is None: | |
652 | uid = os.getuid() |
|
652 | uid = os.getuid() | |
653 | try: |
|
653 | try: | |
654 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) |
|
654 | return pycompat.fsencode(pwd.getpwuid(uid)[0]) | |
655 | except KeyError: |
|
655 | except KeyError: | |
656 | return b'%d' % uid |
|
656 | return b'%d' % uid | |
657 |
|
657 | |||
658 |
|
658 | |||
659 | def groupname(gid: Optional[int] = None) -> Optional[bytes]: |
|
659 | def groupname(gid: Optional[int] = None) -> Optional[bytes]: | |
660 | """Return the name of the group with the given gid. |
|
660 | """Return the name of the group with the given gid. | |
661 |
|
661 | |||
662 | If gid is None, return the name of the current group.""" |
|
662 | If gid is None, return the name of the current group.""" | |
663 |
|
663 | |||
664 | if gid is None: |
|
664 | if gid is None: | |
665 | gid = os.getgid() |
|
665 | gid = os.getgid() | |
666 | try: |
|
666 | try: | |
667 | return pycompat.fsencode(grp.getgrgid(gid)[0]) |
|
667 | return pycompat.fsencode(grp.getgrgid(gid)[0]) | |
668 | except KeyError: |
|
668 | except KeyError: | |
669 | return pycompat.bytestr(gid) |
|
669 | return pycompat.bytestr(gid) | |
670 |
|
670 | |||
671 |
|
671 | |||
672 | def groupmembers(name: bytes) -> List[bytes]: |
|
672 | def groupmembers(name: bytes) -> List[bytes]: | |
673 | """Return the list of members of the group with the given |
|
673 | """Return the list of members of the group with the given | |
674 | name, KeyError if the group does not exist. |
|
674 | name, KeyError if the group does not exist. | |
675 | """ |
|
675 | """ | |
676 | name = pycompat.fsdecode(name) |
|
676 | name = pycompat.fsdecode(name) | |
677 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) |
|
677 | return pycompat.rapply(pycompat.fsencode, list(grp.getgrnam(name).gr_mem)) | |
678 |
|
678 | |||
679 |
|
679 | |||
680 | def spawndetached(args: List[bytes]) -> int: |
|
680 | def spawndetached(args: List[bytes]) -> int: | |
681 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) |
|
681 | return os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0), args[0], args) | |
682 |
|
682 | |||
683 |
|
683 | |||
684 | def gethgcmd(): # TODO: convert to bytes, like on Windows? |
|
684 | def gethgcmd(): # TODO: convert to bytes, like on Windows? | |
685 | return sys.argv[:1] |
|
685 | return sys.argv[:1] | |
686 |
|
686 | |||
687 |
|
687 | |||
688 | def makedir(path: bytes, notindexed: bool) -> None: |
|
688 | def makedir(path: bytes, notindexed: bool) -> None: | |
689 | os.mkdir(path) |
|
689 | os.mkdir(path) | |
690 |
|
690 | |||
691 |
|
691 | |||
692 | def lookupreg( |
|
692 | def lookupreg( | |
693 | key: bytes, |
|
693 | key: bytes, | |
694 | name: Optional[bytes] = None, |
|
694 | name: Optional[bytes] = None, | |
695 | scope: Optional[Union[int, Iterable[int]]] = None, |
|
695 | scope: Optional[Union[int, Iterable[int]]] = None, | |
696 | ) -> Optional[bytes]: |
|
696 | ) -> Optional[bytes]: | |
697 | return None |
|
697 | return None | |
698 |
|
698 | |||
699 |
|
699 | |||
700 | def hidewindow() -> None: |
|
700 | def hidewindow() -> None: | |
701 | """Hide current shell window. |
|
701 | """Hide current shell window. | |
702 |
|
702 | |||
703 | Used to hide the window opened when starting asynchronous |
|
703 | Used to hide the window opened when starting asynchronous | |
704 | child process under Windows, unneeded on other systems. |
|
704 | child process under Windows, unneeded on other systems. | |
705 | """ |
|
705 | """ | |
706 | pass |
|
706 | pass | |
707 |
|
707 | |||
708 |
|
708 | |||
709 | class cachestat: |
|
709 | class cachestat: | |
|
710 | stat: os.stat_result | |||
|
711 | ||||
710 | def __init__(self, path: bytes) -> None: |
|
712 | def __init__(self, path: bytes) -> None: | |
711 | self.stat = os.stat(path) |
|
713 | self.stat = os.stat(path) | |
712 |
|
714 | |||
713 | def cacheable(self) -> bool: |
|
715 | def cacheable(self) -> bool: | |
714 | return bool(self.stat.st_ino) |
|
716 | return bool(self.stat.st_ino) | |
715 |
|
717 | |||
716 | __hash__ = object.__hash__ |
|
718 | __hash__ = object.__hash__ | |
717 |
|
719 | |||
718 | def __eq__(self, other: Any) -> bool: |
|
720 | def __eq__(self, other: Any) -> bool: | |
719 | try: |
|
721 | try: | |
720 | # Only dev, ino, size, mtime and atime are likely to change. Out |
|
722 | # Only dev, ino, size, mtime and atime are likely to change. Out | |
721 | # of these, we shouldn't compare atime but should compare the |
|
723 | # of these, we shouldn't compare atime but should compare the | |
722 | # rest. However, one of the other fields changing indicates |
|
724 | # rest. However, one of the other fields changing indicates | |
723 | # something fishy going on, so return False if anything but atime |
|
725 | # something fishy going on, so return False if anything but atime | |
724 | # changes. |
|
726 | # changes. | |
725 | return ( |
|
727 | return ( | |
726 | self.stat.st_mode == other.stat.st_mode |
|
728 | self.stat.st_mode == other.stat.st_mode | |
727 | and self.stat.st_ino == other.stat.st_ino |
|
729 | and self.stat.st_ino == other.stat.st_ino | |
728 | and self.stat.st_dev == other.stat.st_dev |
|
730 | and self.stat.st_dev == other.stat.st_dev | |
729 | and self.stat.st_nlink == other.stat.st_nlink |
|
731 | and self.stat.st_nlink == other.stat.st_nlink | |
730 | and self.stat.st_uid == other.stat.st_uid |
|
732 | and self.stat.st_uid == other.stat.st_uid | |
731 | and self.stat.st_gid == other.stat.st_gid |
|
733 | and self.stat.st_gid == other.stat.st_gid | |
732 | and self.stat.st_size == other.stat.st_size |
|
734 | and self.stat.st_size == other.stat.st_size | |
733 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] |
|
735 | and self.stat[stat.ST_MTIME] == other.stat[stat.ST_MTIME] | |
734 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] |
|
736 | and self.stat[stat.ST_CTIME] == other.stat[stat.ST_CTIME] | |
735 | ) |
|
737 | ) | |
736 | except AttributeError: |
|
738 | except AttributeError: | |
737 | return False |
|
739 | return False | |
738 |
|
740 | |||
739 | def __ne__(self, other: Any) -> bool: |
|
741 | def __ne__(self, other: Any) -> bool: | |
740 | return not self == other |
|
742 | return not self == other | |
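A small usage sketch for `cachestat` (an assumed calling pattern, not taken from this changeset): snapshot a file, mutate it, and compare snapshots to decide whether state cached from that file must be rebuilt:

    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        path = tmp.name.encode()

    before = cachestat(path)           # snapshot of dev/ino/size/mtime/...
    with open(path, 'wb') as fp:
        fp.write(b'new contents')      # size and mtime change
    after = cachestat(path)
    assert before != after             # any cache keyed on `before` is stale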
741 |
|
743 | |||
742 |
|
744 | |||
743 | def statislink(st: Optional[os.stat_result]) -> bool: |
|
745 | def statislink(st: Optional[os.stat_result]) -> bool: | |
744 | '''check whether a stat result is a symlink''' |
|
746 | '''check whether a stat result is a symlink''' | |
745 | return stat.S_ISLNK(st.st_mode) if st else False |
|
747 | return stat.S_ISLNK(st.st_mode) if st else False | |
746 |
|
748 | |||
747 |
|
749 | |||
748 | def statisexec(st: Optional[os.stat_result]) -> bool: |
|
750 | def statisexec(st: Optional[os.stat_result]) -> bool: | |
749 | '''check whether a stat result is an executable file''' |
|
751 | '''check whether a stat result is an executable file''' | |
750 | return (st.st_mode & 0o100 != 0) if st else False |
|
752 | return (st.st_mode & 0o100 != 0) if st else False | |
751 |
|
753 | |||
752 |
|
754 | |||
753 | def poll(fds): |
|
755 | def poll(fds): | |
754 | """block until something happens on any file descriptor |
|
756 | """block until something happens on any file descriptor | |
755 |
|
757 | |||
756 | This is a generic helper that will check for any activity |
|
758 | This is a generic helper that will check for any activity | |
757 | (read, write, exception) and return the list of touched files. |
|
759 | (read, write, exception) and return the list of touched files. | |
758 |
|
760 | |||
759 | In unsupported cases, it will raise a NotImplementedError""" |
|
761 | In unsupported cases, it will raise a NotImplementedError""" | |
760 | try: |
|
762 | try: | |
761 | res = select.select(fds, fds, fds) |
|
763 | res = select.select(fds, fds, fds) | |
762 | except ValueError: # out of range file descriptor |
|
764 | except ValueError: # out of range file descriptor | |
763 | raise NotImplementedError() |
|
765 | raise NotImplementedError() | |
764 | return sorted(list(set(sum(res, [])))) |
|
766 | return sorted(list(set(sum(res, [])))) | |
765 |
|
767 | |||
766 |
|
768 | |||
767 | def readpipe(pipe) -> bytes: |
|
769 | def readpipe(pipe) -> bytes: | |
768 | """Read all available data from a pipe.""" |
|
770 | """Read all available data from a pipe.""" | |
769 | # We can't fstat() a pipe because Linux will always report 0. |
|
771 | # We can't fstat() a pipe because Linux will always report 0. | |
770 | # So, we set the pipe to non-blocking mode and read everything |
|
772 | # So, we set the pipe to non-blocking mode and read everything | |
771 | # that's available. |
|
773 | # that's available. | |
772 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) |
|
774 | flags = fcntl.fcntl(pipe, fcntl.F_GETFL) | |
773 | flags |= os.O_NONBLOCK |
|
775 | flags |= os.O_NONBLOCK | |
774 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) |
|
776 | oldflags = fcntl.fcntl(pipe, fcntl.F_SETFL, flags) | |
775 |
|
777 | |||
776 | try: |
|
778 | try: | |
777 | chunks = [] |
|
779 | chunks = [] | |
778 | while True: |
|
780 | while True: | |
779 | try: |
|
781 | try: | |
780 | s = pipe.read() |
|
782 | s = pipe.read() | |
781 | if not s: |
|
783 | if not s: | |
782 | break |
|
784 | break | |
783 | chunks.append(s) |
|
785 | chunks.append(s) | |
784 | except IOError: |
|
786 | except IOError: | |
785 | break |
|
787 | break | |
786 |
|
788 | |||
787 | return b''.join(chunks) |
|
789 | return b''.join(chunks) | |
788 | finally: |
|
790 | finally: | |
789 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) |
|
791 | fcntl.fcntl(pipe, fcntl.F_SETFL, oldflags) | |
790 |
|
792 | |||
791 |
|
793 | |||
792 | def bindunixsocket(sock, path: bytes) -> None: |
|
794 | def bindunixsocket(sock, path: bytes) -> None: | |
793 | """Bind the UNIX domain socket to the specified path""" |
|
795 | """Bind the UNIX domain socket to the specified path""" | |
794 | # use relative path instead of full path at bind() if possible, since |
|
796 | # use relative path instead of full path at bind() if possible, since | |
795 | # AF_UNIX path has very small length limit (107 chars) on common |
|
797 | # AF_UNIX path has very small length limit (107 chars) on common | |
796 | # platforms (see sys/un.h) |
|
798 | # platforms (see sys/un.h) | |
797 | dirname, basename = os.path.split(path) |
|
799 | dirname, basename = os.path.split(path) | |
798 | bakwdfd = None |
|
800 | bakwdfd = None | |
799 |
|
801 | |||
800 | try: |
|
802 | try: | |
801 | if dirname: |
|
803 | if dirname: | |
802 | bakwdfd = os.open(b'.', os.O_DIRECTORY) |
|
804 | bakwdfd = os.open(b'.', os.O_DIRECTORY) | |
803 | os.chdir(dirname) |
|
805 | os.chdir(dirname) | |
804 | sock.bind(basename) |
|
806 | sock.bind(basename) | |
805 | if bakwdfd: |
|
807 | if bakwdfd: | |
806 | os.fchdir(bakwdfd) |
|
808 | os.fchdir(bakwdfd) | |
807 | finally: |
|
809 | finally: | |
808 | if bakwdfd: |
|
810 | if bakwdfd: | |
809 | os.close(bakwdfd) |
|
811 | os.close(bakwdfd) |
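For context, a usage sketch of the chdir trick above (the socket path is hypothetical and its directory must already exist); only the basename has to fit within the AF_UNIX path length limit:

    import socket
    from mercurial.posix import bindunixsocket

    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    bindunixsocket(sock, b'/some/very/deep/repository/.hg/cmdserver.sock')
    sock.listen(1)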
@@ -1,28 +1,41 | |||||
1 | # typelib.py - type hint aliases and support |
|
1 | # typelib.py - type hint aliases and support | |
2 | # |
|
2 | # | |
3 | # Copyright 2022 Matt Harbison <matt_harbison@yahoo.com> |
|
3 | # Copyright 2022 Matt Harbison <matt_harbison@yahoo.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | import typing |
|
8 | import typing | |
9 |
|
9 | |||
10 | # Note: this is slightly different from pycompat.TYPE_CHECKING, as using |
|
10 | # Note: this is slightly different from pycompat.TYPE_CHECKING, as using | |
11 | # pycompat causes the BinaryIO_Proxy type to be resolved to ``object`` when |
|
11 | # pycompat causes the BinaryIO_Proxy type to be resolved to ``object`` when | |
12 | # used as the base class during a pytype run. |
|
12 | # used as the base class during a pytype run. | |
13 | TYPE_CHECKING = typing.TYPE_CHECKING |
|
13 | TYPE_CHECKING = typing.TYPE_CHECKING | |
14 |
|
14 | |||
15 |
|
15 | |||
16 | # The BinaryIO class provides empty methods, which at runtime means that |
|
16 | # The BinaryIO class provides empty methods, which at runtime means that | |
17 | # ``__getattr__`` on the proxy classes won't get called for the methods that |
|
17 | # ``__getattr__`` on the proxy classes won't get called for the methods that | |
18 | # should delegate to the internal object. So to avoid runtime changes because |
|
18 | # should delegate to the internal object. So to avoid runtime changes because | |
19 | # of the required typing inheritance, just use BinaryIO when typechecking, and |
|
19 | # of the required typing inheritance, just use BinaryIO when typechecking, and | |
20 | # ``object`` otherwise. |
|
20 | # ``object`` otherwise. | |
21 | if TYPE_CHECKING: |
|
21 | if TYPE_CHECKING: | |
22 | from typing import ( |
|
22 | from typing import ( | |
23 | BinaryIO, |
|
23 | BinaryIO, | |
|
24 | Union, | |||
|
25 | ) | |||
|
26 | ||||
|
27 | from . import ( | |||
|
28 | node, | |||
|
29 | posix, | |||
|
30 | windows, | |||
24 | ) |
|
31 | ) | |
25 |
|
32 | |||
26 | BinaryIO_Proxy = BinaryIO |
|
33 | BinaryIO_Proxy = BinaryIO | |
|
34 | CacheStat = Union[posix.cachestat, windows.cachestat] | |||
|
35 | NodeConstants = node.sha1nodeconstants | |||
27 | else: |
|
36 | else: | |
|
37 | from typing import Any | |||
|
38 | ||||
28 | BinaryIO_Proxy = object |
|
39 | BinaryIO_Proxy = object | |
|
40 | CacheStat = Any | |||
|
41 | NodeConstants = Any |
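To illustrate the aliases above, a minimal sketch of the proxy pattern that BinaryIO_Proxy supports; ``countingfile`` is an invented example, not part of the patch. The class type-checks as a BinaryIO, but at runtime it only inherits object, so ``__getattr__`` keeps delegating the methods it does not define to the wrapped file:

    from mercurial import typelib

    class countingfile(typelib.BinaryIO_Proxy):
        """Forward everything to ``fp`` while counting the bytes written."""

        def __init__(self, fp):
            self.fp = fp
            self.written = 0

        def write(self, data):
            self.written += len(data)
            return self.fp.write(data)

        def __getattr__(self, name):
            return getattr(self.fp, name)   # read(), flush(), close(), ...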
@@ -1,757 +1,759 | |||||
1 | # windows.py - Windows utility function implementations for Mercurial |
|
1 | # windows.py - Windows utility function implementations for Mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others |
|
3 | # Copyright 2005-2009 Olivia Mackall <olivia@selenic.com> and others | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 |
|
8 | |||
9 | import errno |
|
9 | import errno | |
10 | import getpass |
|
10 | import getpass | |
11 | import msvcrt # pytype: disable=import-error |
|
11 | import msvcrt # pytype: disable=import-error | |
12 | import os |
|
12 | import os | |
13 | import re |
|
13 | import re | |
14 | import stat |
|
14 | import stat | |
15 | import string |
|
15 | import string | |
16 | import sys |
|
16 | import sys | |
17 | import typing |
|
17 | import typing | |
18 | import winreg # pytype: disable=import-error |
|
18 | import winreg # pytype: disable=import-error | |
19 |
|
19 | |||
20 | from typing import ( |
|
20 | from typing import ( | |
21 | AnyStr, |
|
21 | AnyStr, | |
22 | BinaryIO, |
|
22 | BinaryIO, | |
23 | Iterable, |
|
23 | Iterable, | |
24 | Iterator, |
|
24 | Iterator, | |
25 | List, |
|
25 | List, | |
26 | Mapping, |
|
26 | Mapping, | |
27 | NoReturn, |
|
27 | NoReturn, | |
28 | Optional, |
|
28 | Optional, | |
29 | Pattern, |
|
29 | Pattern, | |
30 | Sequence, |
|
30 | Sequence, | |
31 | Tuple, |
|
31 | Tuple, | |
32 | Union, |
|
32 | Union, | |
33 | ) |
|
33 | ) | |
34 |
|
34 | |||
35 | from .i18n import _ |
|
35 | from .i18n import _ | |
36 | from . import ( |
|
36 | from . import ( | |
37 | encoding, |
|
37 | encoding, | |
38 | error, |
|
38 | error, | |
39 | policy, |
|
39 | policy, | |
40 | pycompat, |
|
40 | pycompat, | |
41 | typelib, |
|
41 | typelib, | |
42 | win32, |
|
42 | win32, | |
43 | ) |
|
43 | ) | |
44 |
|
44 | |||
45 |
|
45 | |||
46 | osutil = policy.importmod('osutil') |
|
46 | osutil = policy.importmod('osutil') | |
47 |
|
47 | |||
48 | getfsmountpoint = win32.getvolumename |
|
48 | getfsmountpoint = win32.getvolumename | |
49 | getfstype = win32.getfstype |
|
49 | getfstype = win32.getfstype | |
50 | getuser = win32.getuser |
|
50 | getuser = win32.getuser | |
51 | hidewindow = win32.hidewindow |
|
51 | hidewindow = win32.hidewindow | |
52 | makedir = win32.makedir |
|
52 | makedir = win32.makedir | |
53 | nlinks = win32.nlinks |
|
53 | nlinks = win32.nlinks | |
54 | oslink = win32.oslink |
|
54 | oslink = win32.oslink | |
55 | samedevice = win32.samedevice |
|
55 | samedevice = win32.samedevice | |
56 | samefile = win32.samefile |
|
56 | samefile = win32.samefile | |
57 | setsignalhandler = win32.setsignalhandler |
|
57 | setsignalhandler = win32.setsignalhandler | |
58 | spawndetached = win32.spawndetached |
|
58 | spawndetached = win32.spawndetached | |
59 | split = os.path.split |
|
59 | split = os.path.split | |
60 | testpid = win32.testpid |
|
60 | testpid = win32.testpid | |
61 | unlink = win32.unlink |
|
61 | unlink = win32.unlink | |
62 |
|
62 | |||
63 | if typing.TYPE_CHECKING: |
|
63 | if typing.TYPE_CHECKING: | |
64 |
|
64 | |||
65 | def split(p: bytes) -> Tuple[bytes, bytes]: |
|
65 | def split(p: bytes) -> Tuple[bytes, bytes]: | |
66 | raise NotImplementedError |
|
66 | raise NotImplementedError | |
67 |
|
67 | |||
68 |
|
68 | |||
69 | umask: int = 0o022 |
|
69 | umask: int = 0o022 | |
70 |
|
70 | |||
71 |
|
71 | |||
72 | class mixedfilemodewrapper: |
|
72 | class mixedfilemodewrapper: | |
73 | """Wraps a file handle when it is opened in read/write mode. |
|
73 | """Wraps a file handle when it is opened in read/write mode. | |
74 |
|
74 | |||
75 | fopen() and fdopen() on Windows have a specific-to-Windows requirement |
|
75 | fopen() and fdopen() on Windows have a specific-to-Windows requirement | |
76 | that files opened with mode r+, w+, or a+ make a call to a file positioning |
|
76 | that files opened with mode r+, w+, or a+ make a call to a file positioning | |
77 | function when switching between reads and writes. Without this extra call, |
|
77 | function when switching between reads and writes. Without this extra call, | |
78 | Python will raise a not very intuitive "IOError: [Errno 0] Error." |
|
78 | Python will raise a not very intuitive "IOError: [Errno 0] Error." | |
79 |
|
79 | |||
80 | This class wraps posixfile instances when the file is opened in read/write |
|
80 | This class wraps posixfile instances when the file is opened in read/write | |
81 | mode and automatically adds checks or inserts appropriate file positioning |
|
81 | mode and automatically adds checks or inserts appropriate file positioning | |
82 | calls when necessary. |
|
82 | calls when necessary. | |
83 | """ |
|
83 | """ | |
84 |
|
84 | |||
85 | OPNONE = 0 |
|
85 | OPNONE = 0 | |
86 | OPREAD = 1 |
|
86 | OPREAD = 1 | |
87 | OPWRITE = 2 |
|
87 | OPWRITE = 2 | |
88 |
|
88 | |||
89 | def __init__(self, fp): |
|
89 | def __init__(self, fp): | |
90 | object.__setattr__(self, '_fp', fp) |
|
90 | object.__setattr__(self, '_fp', fp) | |
91 | object.__setattr__(self, '_lastop', 0) |
|
91 | object.__setattr__(self, '_lastop', 0) | |
92 |
|
92 | |||
93 | def __enter__(self): |
|
93 | def __enter__(self): | |
94 | self._fp.__enter__() |
|
94 | self._fp.__enter__() | |
95 | return self |
|
95 | return self | |
96 |
|
96 | |||
97 | def __exit__(self, exc_type, exc_val, exc_tb): |
|
97 | def __exit__(self, exc_type, exc_val, exc_tb): | |
98 | self._fp.__exit__(exc_type, exc_val, exc_tb) |
|
98 | self._fp.__exit__(exc_type, exc_val, exc_tb) | |
99 |
|
99 | |||
100 | def __getattr__(self, name): |
|
100 | def __getattr__(self, name): | |
101 | return getattr(self._fp, name) |
|
101 | return getattr(self._fp, name) | |
102 |
|
102 | |||
103 | def __setattr__(self, name, value): |
|
103 | def __setattr__(self, name, value): | |
104 | return self._fp.__setattr__(name, value) |
|
104 | return self._fp.__setattr__(name, value) | |
105 |
|
105 | |||
106 | def _noopseek(self): |
|
106 | def _noopseek(self): | |
107 | self._fp.seek(0, os.SEEK_CUR) |
|
107 | self._fp.seek(0, os.SEEK_CUR) | |
108 |
|
108 | |||
109 | def seek(self, *args, **kwargs): |
|
109 | def seek(self, *args, **kwargs): | |
110 | object.__setattr__(self, '_lastop', self.OPNONE) |
|
110 | object.__setattr__(self, '_lastop', self.OPNONE) | |
111 | return self._fp.seek(*args, **kwargs) |
|
111 | return self._fp.seek(*args, **kwargs) | |
112 |
|
112 | |||
113 | def write(self, d): |
|
113 | def write(self, d): | |
114 | if self._lastop == self.OPREAD: |
|
114 | if self._lastop == self.OPREAD: | |
115 | self._noopseek() |
|
115 | self._noopseek() | |
116 |
|
116 | |||
117 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
117 | object.__setattr__(self, '_lastop', self.OPWRITE) | |
118 | return self._fp.write(d) |
|
118 | return self._fp.write(d) | |
119 |
|
119 | |||
120 | def writelines(self, *args, **kwargs): |
|
120 | def writelines(self, *args, **kwargs): | |
121 | if self._lastop == self.OPREAD: |
|
121 | if self._lastop == self.OPREAD: | |
122 | self._noopseek() |
|
122 | self._noopseek() | |
123 |
|
123 | |||
124 | object.__setattr__(self, '_lastop', self.OPWRITE) |
|
124 | object.__setattr__(self, '_lastop', self.OPWRITE) | |
125 | return self._fp.writelines(*args, **kwargs) |
|
125 | return self._fp.writelines(*args, **kwargs) | |
126 |
|
126 | |||
127 | def read(self, *args, **kwargs): |
|
127 | def read(self, *args, **kwargs): | |
128 | if self._lastop == self.OPWRITE: |
|
128 | if self._lastop == self.OPWRITE: | |
129 | self._noopseek() |
|
129 | self._noopseek() | |
130 |
|
130 | |||
131 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
131 | object.__setattr__(self, '_lastop', self.OPREAD) | |
132 | return self._fp.read(*args, **kwargs) |
|
132 | return self._fp.read(*args, **kwargs) | |
133 |
|
133 | |||
134 | def readline(self, *args, **kwargs): |
|
134 | def readline(self, *args, **kwargs): | |
135 | if self._lastop == self.OPWRITE: |
|
135 | if self._lastop == self.OPWRITE: | |
136 | self._noopseek() |
|
136 | self._noopseek() | |
137 |
|
137 | |||
138 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
138 | object.__setattr__(self, '_lastop', self.OPREAD) | |
139 | return self._fp.readline(*args, **kwargs) |
|
139 | return self._fp.readline(*args, **kwargs) | |
140 |
|
140 | |||
141 | def readlines(self, *args, **kwargs): |
|
141 | def readlines(self, *args, **kwargs): | |
142 | if self._lastop == self.OPWRITE: |
|
142 | if self._lastop == self.OPWRITE: | |
143 | self._noopseek() |
|
143 | self._noopseek() | |
144 |
|
144 | |||
145 | object.__setattr__(self, '_lastop', self.OPREAD) |
|
145 | object.__setattr__(self, '_lastop', self.OPREAD) | |
146 | return self._fp.readlines(*args, **kwargs) |
|
146 | return self._fp.readlines(*args, **kwargs) | |
147 |
|
147 | |||
148 |
|
148 | |||
149 | class fdproxy: |
|
149 | class fdproxy: | |
150 | """Wraps osutil.posixfile() to override the name attribute to reflect the |
|
150 | """Wraps osutil.posixfile() to override the name attribute to reflect the | |
151 | underlying file name. |
|
151 | underlying file name. | |
152 | """ |
|
152 | """ | |
153 |
|
153 | |||
154 | def __init__(self, name, fp): |
|
154 | def __init__(self, name, fp): | |
155 | self.name = name |
|
155 | self.name = name | |
156 | self._fp = fp |
|
156 | self._fp = fp | |
157 |
|
157 | |||
158 | def __enter__(self): |
|
158 | def __enter__(self): | |
159 | self._fp.__enter__() |
|
159 | self._fp.__enter__() | |
160 | # Return this wrapper for the context manager so that the name is |
|
160 | # Return this wrapper for the context manager so that the name is | |
161 | # still available. |
|
161 | # still available. | |
162 | return self |
|
162 | return self | |
163 |
|
163 | |||
164 | def __exit__(self, exc_type, exc_value, traceback): |
|
164 | def __exit__(self, exc_type, exc_value, traceback): | |
165 | self._fp.__exit__(exc_type, exc_value, traceback) |
|
165 | self._fp.__exit__(exc_type, exc_value, traceback) | |
166 |
|
166 | |||
167 | def __iter__(self): |
|
167 | def __iter__(self): | |
168 | return iter(self._fp) |
|
168 | return iter(self._fp) | |
169 |
|
169 | |||
170 | def __getattr__(self, name): |
|
170 | def __getattr__(self, name): | |
171 | return getattr(self._fp, name) |
|
171 | return getattr(self._fp, name) | |
172 |
|
172 | |||
173 |
|
173 | |||
174 | def posixfile(name, mode=b'r', buffering=-1): |
|
174 | def posixfile(name, mode=b'r', buffering=-1): | |
175 | '''Open a file with even more POSIX-like semantics''' |
|
175 | '''Open a file with even more POSIX-like semantics''' | |
176 | try: |
|
176 | try: | |
177 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError |
|
177 | fp = osutil.posixfile(name, mode, buffering) # may raise WindowsError | |
178 |
|
178 | |||
179 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the |
|
179 | # PyFile_FromFd() ignores the name, and seems to report fp.name as the | |
180 | # underlying file descriptor. |
|
180 | # underlying file descriptor. | |
181 | fp = fdproxy(name, fp) |
|
181 | fp = fdproxy(name, fp) | |
182 |
|
182 | |||
183 | # The position when opening in append mode is implementation defined, so |
|
183 | # The position when opening in append mode is implementation defined, so | |
184 | # make it consistent with other platforms, which position at EOF. |
|
184 | # make it consistent with other platforms, which position at EOF. | |
185 | if b'a' in mode: |
|
185 | if b'a' in mode: | |
186 | fp.seek(0, os.SEEK_END) |
|
186 | fp.seek(0, os.SEEK_END) | |
187 |
|
187 | |||
188 | if b'+' in mode: |
|
188 | if b'+' in mode: | |
189 | return mixedfilemodewrapper(fp) |
|
189 | return mixedfilemodewrapper(fp) | |
190 |
|
190 | |||
191 | return fp |
|
191 | return fp | |
192 | except WindowsError as err: # pytype: disable=name-error |
|
192 | except WindowsError as err: # pytype: disable=name-error | |
193 | # convert to a friendlier exception |
|
193 | # convert to a friendlier exception | |
194 | raise IOError( |
|
194 | raise IOError( | |
195 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) |
|
195 | err.errno, '%s: %s' % (encoding.strfromlocal(name), err.strerror) | |
196 | ) |
|
196 | ) | |
197 |
|
197 | |||
198 |
|
198 | |||
199 | # may be wrapped by win32mbcs extension |
|
199 | # may be wrapped by win32mbcs extension | |
200 | listdir = osutil.listdir |
|
200 | listdir = osutil.listdir | |
201 |
|
201 | |||
202 |
|
202 | |||
203 | def get_password() -> bytes: |
|
203 | def get_password() -> bytes: | |
204 | """Prompt for password with echo off, using Windows getch(). |
|
204 | """Prompt for password with echo off, using Windows getch(). | |
205 |
|
205 | |||
206 | This shouldn't be called directly; use ``ui.getpass()`` instead, which |
|
206 | This shouldn't be called directly; use ``ui.getpass()`` instead, which | |
207 | checks if the session is interactive first. |
|
207 | checks if the session is interactive first. | |
208 | """ |
|
208 | """ | |
209 | pw = u"" |
|
209 | pw = u"" | |
210 | while True: |
|
210 | while True: | |
211 | c = msvcrt.getwch() # pytype: disable=module-attr |
|
211 | c = msvcrt.getwch() # pytype: disable=module-attr | |
212 | if c == u'\r' or c == u'\n': |
|
212 | if c == u'\r' or c == u'\n': | |
213 | break |
|
213 | break | |
214 | if c == u'\003': |
|
214 | if c == u'\003': | |
215 | raise KeyboardInterrupt |
|
215 | raise KeyboardInterrupt | |
216 | if c == u'\b': |
|
216 | if c == u'\b': | |
217 | pw = pw[:-1] |
|
217 | pw = pw[:-1] | |
218 | else: |
|
218 | else: | |
219 | pw = pw + c |
|
219 | pw = pw + c | |
220 | msvcrt.putwch(u'\r') # pytype: disable=module-attr |
|
220 | msvcrt.putwch(u'\r') # pytype: disable=module-attr | |
221 | msvcrt.putwch(u'\n') # pytype: disable=module-attr |
|
221 | msvcrt.putwch(u'\n') # pytype: disable=module-attr | |
222 | return encoding.unitolocal(pw) |
|
222 | return encoding.unitolocal(pw) | |
223 |
|
223 | |||
224 |
|
224 | |||
225 | class winstdout(typelib.BinaryIO_Proxy): |
|
225 | class winstdout(typelib.BinaryIO_Proxy): | |
226 | """Some files on Windows misbehave. |
|
226 | """Some files on Windows misbehave. | |
227 |
|
227 | |||
228 | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. |
|
228 | When writing to a broken pipe, EINVAL instead of EPIPE may be raised. | |
229 |
|
229 | |||
230 | When writing too many bytes to a console at the same time, a "Not enough space" |
|
230 | When writing too many bytes to a console at the same time, a "Not enough space" | |
231 | error may happen. Python 3 already works around that. |
|
231 | error may happen. Python 3 already works around that. | |
232 | """ |
|
232 | """ | |
233 |
|
233 | |||
234 | def __init__(self, fp: BinaryIO): |
|
234 | def __init__(self, fp: BinaryIO): | |
235 | self.fp = fp |
|
235 | self.fp = fp | |
236 |
|
236 | |||
237 | def __getattr__(self, key): |
|
237 | def __getattr__(self, key): | |
238 | return getattr(self.fp, key) |
|
238 | return getattr(self.fp, key) | |
239 |
|
239 | |||
240 | def close(self): |
|
240 | def close(self): | |
241 | try: |
|
241 | try: | |
242 | self.fp.close() |
|
242 | self.fp.close() | |
243 | except IOError: |
|
243 | except IOError: | |
244 | pass |
|
244 | pass | |
245 |
|
245 | |||
246 | def write(self, s): |
|
246 | def write(self, s): | |
247 | try: |
|
247 | try: | |
248 | return self.fp.write(s) |
|
248 | return self.fp.write(s) | |
249 | except IOError as inst: |
|
249 | except IOError as inst: | |
250 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): |
|
250 | if inst.errno != 0 and not win32.lasterrorwaspipeerror(inst): | |
251 | raise |
|
251 | raise | |
252 | self.close() |
|
252 | self.close() | |
253 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
253 | raise IOError(errno.EPIPE, 'Broken pipe') | |
254 |
|
254 | |||
255 | def flush(self): |
|
255 | def flush(self): | |
256 | try: |
|
256 | try: | |
257 | return self.fp.flush() |
|
257 | return self.fp.flush() | |
258 | except IOError as inst: |
|
258 | except IOError as inst: | |
259 | if not win32.lasterrorwaspipeerror(inst): |
|
259 | if not win32.lasterrorwaspipeerror(inst): | |
260 | raise |
|
260 | raise | |
261 | raise IOError(errno.EPIPE, 'Broken pipe') |
|
261 | raise IOError(errno.EPIPE, 'Broken pipe') | |
262 |
|
262 | |||
263 |
|
263 | |||
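For reviewers, a small usage sketch of the wrapper (``sys.stdout.buffer`` stands in for whichever stream Mercurial actually wraps, and a Windows console is assumed):

    import sys
    from mercurial.windows import winstdout

    out = winstdout(sys.stdout.buffer)
    out.write(b'hello\n')   # EINVAL from a broken pipe surfaces as EPIPE
    out.flush()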
264 | def openhardlinks() -> bool: |
|
264 | def openhardlinks() -> bool: | |
265 | return True |
|
265 | return True | |
266 |
|
266 | |||
267 |
|
267 | |||
268 | def parsepatchoutput(output_line: bytes) -> bytes: |
|
268 | def parsepatchoutput(output_line: bytes) -> bytes: | |
269 | """parses the output produced by patch and returns the filename""" |
|
269 | """parses the output produced by patch and returns the filename""" | |
270 | pf = output_line[14:] |
|
270 | pf = output_line[14:] | |
271 | if pf[0] == b'`': |
|
271 | if pf[0] == b'`': | |
272 | pf = pf[1:-1] # Remove the quotes |
|
272 | pf = pf[1:-1] # Remove the quotes | |
273 | return pf |
|
273 | return pf | |
274 |
|
274 | |||
275 |
|
275 | |||
276 | def sshargs( |
|
276 | def sshargs( | |
277 | sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes] |
|
277 | sshcmd: bytes, host: bytes, user: Optional[bytes], port: Optional[bytes] | |
278 | ) -> bytes: |
|
278 | ) -> bytes: | |
279 | '''Build argument list for ssh or Plink''' |
|
279 | '''Build argument list for ssh or Plink''' | |
280 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' |
|
280 | pflag = b'plink' in sshcmd.lower() and b'-P' or b'-p' | |
281 | args = user and (b"%s@%s" % (user, host)) or host |
|
281 | args = user and (b"%s@%s" % (user, host)) or host | |
282 | if args.startswith(b'-') or args.startswith(b'/'): |
|
282 | if args.startswith(b'-') or args.startswith(b'/'): | |
283 | raise error.Abort( |
|
283 | raise error.Abort( | |
284 | _(b'illegal ssh hostname or username starting with - or /: %s') |
|
284 | _(b'illegal ssh hostname or username starting with - or /: %s') | |
285 | % args |
|
285 | % args | |
286 | ) |
|
286 | ) | |
287 | args = shellquote(args) |
|
287 | args = shellquote(args) | |
288 | if port: |
|
288 | if port: | |
289 | args = b'%s %s %s' % (pflag, shellquote(port), args) |
|
289 | args = b'%s %s %s' % (pflag, shellquote(port), args) | |
290 | return args |
|
290 | return args | |
291 |
|
291 | |||
292 |
|
292 | |||
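The port-flag selection is terse, so a hedged worked example (host, user, and port are invented); with a Plink command the flag would be -P instead of -p:

    from mercurial.windows import sshargs   # assumes a Windows environment

    sshargs(b'ssh', b'example.com', b'alice', b'2222')
    # -> b'-p 2222 "alice@example.com"' (user@host gets cmd.exe quoting)
    sshargs(b'plink.exe', b'example.com', None, None)
    # -> b'example.com'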
293 | def setflags(f: bytes, l: bool, x: bool) -> None: |
|
293 | def setflags(f: bytes, l: bool, x: bool) -> None: | |
294 | pass |
|
294 | pass | |
295 |
|
295 | |||
296 |
|
296 | |||
297 | def copymode( |
|
297 | def copymode( | |
298 | src: bytes, |
|
298 | src: bytes, | |
299 | dst: bytes, |
|
299 | dst: bytes, | |
300 | mode: Optional[bytes] = None, |
|
300 | mode: Optional[bytes] = None, | |
301 | enforcewritable: bool = False, |
|
301 | enforcewritable: bool = False, | |
302 | ) -> None: |
|
302 | ) -> None: | |
303 | pass |
|
303 | pass | |
304 |
|
304 | |||
305 |
|
305 | |||
306 | def checkexec(path: bytes) -> bool: |
|
306 | def checkexec(path: bytes) -> bool: | |
307 | return False |
|
307 | return False | |
308 |
|
308 | |||
309 |
|
309 | |||
310 | def checklink(path: bytes) -> bool: |
|
310 | def checklink(path: bytes) -> bool: | |
311 | return False |
|
311 | return False | |
312 |
|
312 | |||
313 |
|
313 | |||
314 | def setbinary(fd) -> None: |
|
314 | def setbinary(fd) -> None: | |
315 | # When run without a console, pipes may expose invalid |
|
315 | # When run without a console, pipes may expose invalid | |
316 | # fileno(), usually set to -1. |
|
316 | # fileno(), usually set to -1. | |
317 | fno = getattr(fd, 'fileno', None) |
|
317 | fno = getattr(fd, 'fileno', None) | |
318 | if fno is not None and fno() >= 0: |
|
318 | if fno is not None and fno() >= 0: | |
319 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr |
|
319 | msvcrt.setmode(fno(), os.O_BINARY) # pytype: disable=module-attr | |
320 |
|
320 | |||
321 |
|
321 | |||
322 | def pconvert(path: bytes) -> bytes: |
|
322 | def pconvert(path: bytes) -> bytes: | |
323 | return path.replace(pycompat.ossep, b'/') |
|
323 | return path.replace(pycompat.ossep, b'/') | |
324 |
|
324 | |||
325 |
|
325 | |||
326 | def localpath(path: bytes) -> bytes: |
|
326 | def localpath(path: bytes) -> bytes: | |
327 | return path.replace(b'/', b'\\') |
|
327 | return path.replace(b'/', b'\\') | |
328 |
|
328 | |||
329 |
|
329 | |||
330 | def normpath(path: bytes) -> bytes: |
|
330 | def normpath(path: bytes) -> bytes: | |
331 | return pconvert(os.path.normpath(path)) |
|
331 | return pconvert(os.path.normpath(path)) | |
332 |
|
332 | |||
333 |
|
333 | |||
334 | def normcase(path: bytes) -> bytes: |
|
334 | def normcase(path: bytes) -> bytes: | |
335 | return encoding.upper(path) # NTFS compares via upper() |
|
335 | return encoding.upper(path) # NTFS compares via upper() | |
336 |
|
336 | |||
337 |
|
337 | |||
338 | DRIVE_RE_B: Pattern[bytes] = re.compile(b'^[a-z]:') |
|
338 | DRIVE_RE_B: Pattern[bytes] = re.compile(b'^[a-z]:') | |
339 | DRIVE_RE_S: Pattern[str] = re.compile('^[a-z]:') |
|
339 | DRIVE_RE_S: Pattern[str] = re.compile('^[a-z]:') | |
340 |
|
340 | |||
341 |
|
341 | |||
342 | # TODO: why is this accepting str? |
|
342 | # TODO: why is this accepting str? | |
343 | def abspath(path: AnyStr) -> AnyStr: |
|
343 | def abspath(path: AnyStr) -> AnyStr: | |
344 | abs_path = os.path.abspath(path) # re-exports |
|
344 | abs_path = os.path.abspath(path) # re-exports | |
345 | # Python on Windows is inconsistent regarding the capitalization of drive |
|
345 | # Python on Windows is inconsistent regarding the capitalization of drive | |
346 | # letter and this causes issues with various path comparisons along the way. |
|
346 | # letter and this causes issues with various path comparisons along the way. | |
347 | # So we normalize the drive letter to upper case here. |
|
347 | # So we normalize the drive letter to upper case here. | |
348 | # |
|
348 | # | |
349 | # See https://bugs.python.org/issue40368 for an example of this hell. |
|
349 | # See https://bugs.python.org/issue40368 for an example of this hell. | |
350 | if isinstance(abs_path, bytes): |
|
350 | if isinstance(abs_path, bytes): | |
351 | if DRIVE_RE_B.match(abs_path): |
|
351 | if DRIVE_RE_B.match(abs_path): | |
352 | abs_path = abs_path[0:1].upper() + abs_path[1:] |
|
352 | abs_path = abs_path[0:1].upper() + abs_path[1:] | |
353 | elif DRIVE_RE_S.match(abs_path): |
|
353 | elif DRIVE_RE_S.match(abs_path): | |
354 | abs_path = abs_path[0:1].upper() + abs_path[1:] |
|
354 | abs_path = abs_path[0:1].upper() + abs_path[1:] | |
355 | return abs_path |
|
355 | return abs_path | |
356 |
|
356 | |||
357 |
|
357 | |||
358 | # see posix.py for definitions |
|
358 | # see posix.py for definitions | |
359 | normcasespec: int = encoding.normcasespecs.upper |
|
359 | normcasespec: int = encoding.normcasespecs.upper | |
360 | normcasefallback = encoding.upperfallback |
|
360 | normcasefallback = encoding.upperfallback | |
361 |
|
361 | |||
362 |
|
362 | |||
363 | def samestat(s1: os.stat_result, s2: os.stat_result) -> bool: |
|
363 | def samestat(s1: os.stat_result, s2: os.stat_result) -> bool: | |
364 | return False |
|
364 | return False | |
365 |
|
365 | |||
366 |
|
366 | |||
367 | def shelltocmdexe(path: bytes, env: Mapping[bytes, bytes]) -> bytes: |
|
367 | def shelltocmdexe(path: bytes, env: Mapping[bytes, bytes]) -> bytes: | |
368 | r"""Convert shell variables in the form $var and ${var} inside ``path`` |
|
368 | r"""Convert shell variables in the form $var and ${var} inside ``path`` | |
369 | to %var% form. Existing Windows style variables are left unchanged. |
|
369 | to %var% form. Existing Windows style variables are left unchanged. | |
370 |
|
370 | |||
371 | The variables are limited to the given environment. Unknown variables are |
|
371 | The variables are limited to the given environment. Unknown variables are | |
372 | left unchanged. |
|
372 | left unchanged. | |
373 |
|
373 | |||
374 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} |
|
374 | >>> e = {b'var1': b'v1', b'var2': b'v2', b'var3': b'v3'} | |
375 | >>> # Only valid values are expanded |
|
375 | >>> # Only valid values are expanded | |
376 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', |
|
376 | >>> shelltocmdexe(b'cmd $var1 ${var2} %var3% $missing ${missing} %missing%', | |
377 | ... e) |
|
377 | ... e) | |
378 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' |
|
378 | 'cmd %var1% %var2% %var3% $missing ${missing} %missing%' | |
379 | >>> # Single quote prevents expansion, as does \$ escaping |
|
379 | >>> # Single quote prevents expansion, as does \$ escaping | |
380 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) |
|
380 | >>> shelltocmdexe(b"cmd '$var1 ${var2} %var3%' \$var1 \${var2} \\", e) | |
381 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' |
|
381 | 'cmd "$var1 ${var2} %var3%" $var1 ${var2} \\' | |
382 | >>> # $$ is not special. %% is not special either, but can be the end and |
|
382 | >>> # $$ is not special. %% is not special either, but can be the end and | |
383 | >>> # start of consecutive variables |
|
383 | >>> # start of consecutive variables | |
384 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) |
|
384 | >>> shelltocmdexe(b"cmd $$ %% %var1%%var2%", e) | |
385 | 'cmd $$ %% %var1%%var2%' |
|
385 | 'cmd $$ %% %var1%%var2%' | |
386 | >>> # No double substitution |
|
386 | >>> # No double substitution | |
387 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) |
|
387 | >>> shelltocmdexe(b"$var1 %var1%", {b'var1': b'%var2%', b'var2': b'boom'}) | |
388 | '%var1% %var1%' |
|
388 | '%var1% %var1%' | |
389 | >>> # Tilde expansion |
|
389 | >>> # Tilde expansion | |
390 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) |
|
390 | >>> shelltocmdexe(b"~/dir ~\dir2 ~tmpfile \~/", {}) | |
391 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' |
|
391 | '%USERPROFILE%/dir %USERPROFILE%\\dir2 ~tmpfile ~/' | |
392 | """ |
|
392 | """ | |
393 | if not any(c in path for c in b"$'~"): |
|
393 | if not any(c in path for c in b"$'~"): | |
394 | return path |
|
394 | return path | |
395 |
|
395 | |||
396 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' |
|
396 | varchars = pycompat.sysbytes(string.ascii_letters + string.digits) + b'_-' | |
397 |
|
397 | |||
398 | res = b'' |
|
398 | res = b'' | |
399 | index = 0 |
|
399 | index = 0 | |
400 | pathlen = len(path) |
|
400 | pathlen = len(path) | |
401 | while index < pathlen: |
|
401 | while index < pathlen: | |
402 | c = path[index : index + 1] |
|
402 | c = path[index : index + 1] | |
403 | if c == b'\'': # no expansion within single quotes |
|
403 | if c == b'\'': # no expansion within single quotes | |
404 | path = path[index + 1 :] |
|
404 | path = path[index + 1 :] | |
405 | pathlen = len(path) |
|
405 | pathlen = len(path) | |
406 | try: |
|
406 | try: | |
407 | index = path.index(b'\'') |
|
407 | index = path.index(b'\'') | |
408 | res += b'"' + path[:index] + b'"' |
|
408 | res += b'"' + path[:index] + b'"' | |
409 | except ValueError: |
|
409 | except ValueError: | |
410 | res += c + path |
|
410 | res += c + path | |
411 | index = pathlen - 1 |
|
411 | index = pathlen - 1 | |
412 | elif c == b'%': # variable |
|
412 | elif c == b'%': # variable | |
413 | path = path[index + 1 :] |
|
413 | path = path[index + 1 :] | |
414 | pathlen = len(path) |
|
414 | pathlen = len(path) | |
415 | try: |
|
415 | try: | |
416 | index = path.index(b'%') |
|
416 | index = path.index(b'%') | |
417 | except ValueError: |
|
417 | except ValueError: | |
418 | res += b'%' + path |
|
418 | res += b'%' + path | |
419 | index = pathlen - 1 |
|
419 | index = pathlen - 1 | |
420 | else: |
|
420 | else: | |
421 | var = path[:index] |
|
421 | var = path[:index] | |
422 | res += b'%' + var + b'%' |
|
422 | res += b'%' + var + b'%' | |
423 | elif c == b'$': # variable |
|
423 | elif c == b'$': # variable | |
424 | if path[index + 1 : index + 2] == b'{': |
|
424 | if path[index + 1 : index + 2] == b'{': | |
425 | path = path[index + 2 :] |
|
425 | path = path[index + 2 :] | |
426 | pathlen = len(path) |
|
426 | pathlen = len(path) | |
427 | try: |
|
427 | try: | |
428 | index = path.index(b'}') |
|
428 | index = path.index(b'}') | |
429 | var = path[:index] |
|
429 | var = path[:index] | |
430 |
|
430 | |||
431 | # See below for why empty variables are handled specially |
|
431 | # See below for why empty variables are handled specially | |
432 | if env.get(var, b'') != b'': |
|
432 | if env.get(var, b'') != b'': | |
433 | res += b'%' + var + b'%' |
|
433 | res += b'%' + var + b'%' | |
434 | else: |
|
434 | else: | |
435 | res += b'${' + var + b'}' |
|
435 | res += b'${' + var + b'}' | |
436 | except ValueError: |
|
436 | except ValueError: | |
437 | res += b'${' + path |
|
437 | res += b'${' + path | |
438 | index = pathlen - 1 |
|
438 | index = pathlen - 1 | |
439 | else: |
|
439 | else: | |
440 | var = b'' |
|
440 | var = b'' | |
441 | index += 1 |
|
441 | index += 1 | |
442 | c = path[index : index + 1] |
|
442 | c = path[index : index + 1] | |
443 | while c != b'' and c in varchars: |
|
443 | while c != b'' and c in varchars: | |
444 | var += c |
|
444 | var += c | |
445 | index += 1 |
|
445 | index += 1 | |
446 | c = path[index : index + 1] |
|
446 | c = path[index : index + 1] | |
447 | # Some variables (like HG_OLDNODE) may be defined, but have an |
|
447 | # Some variables (like HG_OLDNODE) may be defined, but have an | |
448 | # empty value. Those need to be skipped because when spawning |
|
448 | # empty value. Those need to be skipped because when spawning | |
449 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty |
|
449 | # cmd.exe to run the hook, it doesn't replace %VAR% for an empty | |
450 | # VAR, and that really confuses things like revset expressions. |
|
450 | # VAR, and that really confuses things like revset expressions. | |
451 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it |
|
451 | # OTOH, if it's left in Unix format and the hook runs sh.exe, it | |
452 | # will substitute to an empty string, and everything is happy. |
|
452 | # will substitute to an empty string, and everything is happy. | |
453 | if env.get(var, b'') != b'': |
|
453 | if env.get(var, b'') != b'': | |
454 | res += b'%' + var + b'%' |
|
454 | res += b'%' + var + b'%' | |
455 | else: |
|
455 | else: | |
456 | res += b'$' + var |
|
456 | res += b'$' + var | |
457 |
|
457 | |||
458 | if c != b'': |
|
458 | if c != b'': | |
459 | index -= 1 |
|
459 | index -= 1 | |
460 | elif ( |
|
460 | elif ( | |
461 | c == b'~' |
|
461 | c == b'~' | |
462 | and index + 1 < pathlen |
|
462 | and index + 1 < pathlen | |
463 | and path[index + 1 : index + 2] in (b'\\', b'/') |
|
463 | and path[index + 1 : index + 2] in (b'\\', b'/') | |
464 | ): |
|
464 | ): | |
465 | res += b"%USERPROFILE%" |
|
465 | res += b"%USERPROFILE%" | |
466 | elif ( |
|
466 | elif ( | |
467 | c == b'\\' |
|
467 | c == b'\\' | |
468 | and index + 1 < pathlen |
|
468 | and index + 1 < pathlen | |
469 | and path[index + 1 : index + 2] in (b'$', b'~') |
|
469 | and path[index + 1 : index + 2] in (b'$', b'~') | |
470 | ): |
|
470 | ): | |
471 | # Skip '\', but only if it is escaping $ or ~ |
|
471 | # Skip '\', but only if it is escaping $ or ~ | |
472 | res += path[index + 1 : index + 2] |
|
472 | res += path[index + 1 : index + 2] | |
473 | index += 1 |
|
473 | index += 1 | |
474 | else: |
|
474 | else: | |
475 | res += c |
|
475 | res += c | |
476 |
|
476 | |||
477 | index += 1 |
|
477 | index += 1 | |
478 | return res |
|
478 | return res | |
479 |
|
479 | |||
480 |
|
480 | |||
481 | # A sequence of backslashes is special iff it precedes a double quote: |
|
481 | # A sequence of backslashes is special iff it precedes a double quote: | |
482 | # - if there's an even number of backslashes, the double quote is not |
|
482 | # - if there's an even number of backslashes, the double quote is not | |
483 | # quoted (i.e. it ends the quoted region) |
|
483 | # quoted (i.e. it ends the quoted region) | |
484 | # - if there's an odd number of backslashes, the double quote is quoted |
|
484 | # - if there's an odd number of backslashes, the double quote is quoted | |
485 | # - in both cases, every pair of backslashes is unquoted into a single |
|
485 | # - in both cases, every pair of backslashes is unquoted into a single | |
486 | # backslash |
|
486 | # backslash | |
487 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) |
|
487 | # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx ) | |
488 | # So, to quote a string, we must surround it in double quotes, double |
|
488 | # So, to quote a string, we must surround it in double quotes, double | |
489 | # the number of backslashes that precede double quotes and add another |
|
489 | # the number of backslashes that precede double quotes and add another | |
490 | # backslash before every double quote (being careful with the double |
|
490 | # backslash before every double quote (being careful with the double | |
491 | # quote we've appended to the end) |
|
491 | # quote we've appended to the end) | |
492 | _quotere: Optional[Pattern[bytes]] = None |
|
492 | _quotere: Optional[Pattern[bytes]] = None | |
493 | _needsshellquote = None |
|
493 | _needsshellquote = None | |
494 |
|
494 | |||
495 |
|
495 | |||
496 | def shellquote(s: bytes) -> bytes: |
|
496 | def shellquote(s: bytes) -> bytes: | |
497 | r""" |
|
497 | r""" | |
498 | >>> shellquote(br'C:\Users\xyz') |
|
498 | >>> shellquote(br'C:\Users\xyz') | |
499 | '"C:\\Users\\xyz"' |
|
499 | '"C:\\Users\\xyz"' | |
500 | >>> shellquote(br'C:\Users\xyz/mixed') |
|
500 | >>> shellquote(br'C:\Users\xyz/mixed') | |
501 | '"C:\\Users\\xyz/mixed"' |
|
501 | '"C:\\Users\\xyz/mixed"' | |
502 | >>> # Would be safe not to quote too, since it is all double backslashes |
|
502 | >>> # Would be safe not to quote too, since it is all double backslashes | |
503 | >>> shellquote(br'C:\\Users\\xyz') |
|
503 | >>> shellquote(br'C:\\Users\\xyz') | |
504 | '"C:\\\\Users\\\\xyz"' |
|
504 | '"C:\\\\Users\\\\xyz"' | |
505 | >>> # But this must be quoted |
|
505 | >>> # But this must be quoted | |
506 | >>> shellquote(br'C:\\Users\\xyz/abc') |
|
506 | >>> shellquote(br'C:\\Users\\xyz/abc') | |
507 | '"C:\\\\Users\\\\xyz/abc"' |
|
507 | '"C:\\\\Users\\\\xyz/abc"' | |
508 | """ |
|
508 | """ | |
509 | global _quotere |
|
509 | global _quotere | |
510 | if _quotere is None: |
|
510 | if _quotere is None: | |
511 | _quotere = re.compile(br'(\\*)("|\\$)') |
|
511 | _quotere = re.compile(br'(\\*)("|\\$)') | |
512 | global _needsshellquote |
|
512 | global _needsshellquote | |
513 | if _needsshellquote is None: |
|
513 | if _needsshellquote is None: | |
514 | # ":" is also treated as "safe character", because it is used as a part |
|
514 | # ":" is also treated as "safe character", because it is used as a part | |
515 | # of a path name on Windows. "\" is also part of a path name, but isn't |
|
515 | # of a path name on Windows. "\" is also part of a path name, but isn't | |
516 | # safe because shlex.split() (kind of) treats it as an escape char and |
|
516 | # safe because shlex.split() (kind of) treats it as an escape char and | |
517 | # drops it. It will leave the next character, even if it is another |
|
517 | # drops it. It will leave the next character, even if it is another | |
518 | # "\". |
|
518 | # "\". | |
519 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search |
|
519 | _needsshellquote = re.compile(br'[^a-zA-Z0-9._:/-]').search | |
520 | if s and not _needsshellquote(s) and not _quotere.search(s): |
|
520 | if s and not _needsshellquote(s) and not _quotere.search(s): | |
521 | # "s" shouldn't have to be quoted |
|
521 | # "s" shouldn't have to be quoted | |
522 | return s |
|
522 | return s | |
523 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) |
|
523 | return b'"%s"' % _quotere.sub(br'\1\1\\\2', s) | |
524 |
|
524 | |||
525 |
|
525 | |||
526 | def _unquote(s: bytes) -> bytes: |
|
526 | def _unquote(s: bytes) -> bytes: | |
527 | if s.startswith(b'"') and s.endswith(b'"'): |
|
527 | if s.startswith(b'"') and s.endswith(b'"'): | |
528 | return s[1:-1] |
|
528 | return s[1:-1] | |
529 | return s |
|
529 | return s | |
530 |
|
530 | |||
531 |
|
531 | |||
532 | def shellsplit(s: bytes) -> List[bytes]: |
|
532 | def shellsplit(s: bytes) -> List[bytes]: | |
533 | """Parse a command string in cmd.exe way (best-effort)""" |
|
533 | """Parse a command string in cmd.exe way (best-effort)""" | |
534 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) |
|
534 | return pycompat.maplist(_unquote, pycompat.shlexsplit(s, posix=False)) | |
535 |
|
535 | |||
536 |
|
536 | |||
537 | # if you change this stub into a real check, please try to implement the |
|
537 | # if you change this stub into a real check, please try to implement the | |
538 | # username and groupname functions above, too. |
|
538 | # username and groupname functions above, too. | |
539 | def isowner(st: os.stat_result) -> bool: |
|
539 | def isowner(st: os.stat_result) -> bool: | |
540 | return True |
|
540 | return True | |
541 |
|
541 | |||
542 |
|
542 | |||
543 | def findexe(command: bytes) -> Optional[bytes]: |
|
543 | def findexe(command: bytes) -> Optional[bytes]: | |
544 | """Find executable for command searching like cmd.exe does. |
|
544 | """Find executable for command searching like cmd.exe does. | |
545 | If command is a basename then PATH is searched for command. |
|
545 | If command is a basename then PATH is searched for command. | |
546 | PATH isn't searched if command is an absolute or relative path. |
|
546 | PATH isn't searched if command is an absolute or relative path. | |
547 | An extension from PATHEXT is found and added if not present. |
|
547 | An extension from PATHEXT is found and added if not present. | |
548 | If command isn't found None is returned.""" |
|
548 | If command isn't found None is returned.""" | |
549 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') |
|
549 | pathext = encoding.environ.get(b'PATHEXT', b'.COM;.EXE;.BAT;.CMD') | |
550 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] |
|
550 | pathexts = [ext for ext in pathext.lower().split(pycompat.ospathsep)] | |
551 | if os.path.splitext(command)[1].lower() in pathexts: |
|
551 | if os.path.splitext(command)[1].lower() in pathexts: | |
552 | pathexts = [b''] |
|
552 | pathexts = [b''] | |
553 |
|
553 | |||
554 | def findexisting(pathcommand: bytes) -> Optional[bytes]: |
|
554 | def findexisting(pathcommand: bytes) -> Optional[bytes]: | |
555 | """Will append extension (if needed) and return existing file""" |
|
555 | """Will append extension (if needed) and return existing file""" | |
556 | for ext in pathexts: |
|
556 | for ext in pathexts: | |
557 | executable = pathcommand + ext |
|
557 | executable = pathcommand + ext | |
558 | if os.path.exists(executable): |
|
558 | if os.path.exists(executable): | |
559 | return executable |
|
559 | return executable | |
560 | return None |
|
560 | return None | |
561 |
|
561 | |||
562 | if pycompat.ossep in command: |
|
562 | if pycompat.ossep in command: | |
563 | return findexisting(command) |
|
563 | return findexisting(command) | |
564 |
|
564 | |||
565 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): |
|
565 | for path in encoding.environ.get(b'PATH', b'').split(pycompat.ospathsep): | |
566 | executable = findexisting(os.path.join(path, command)) |
|
566 | executable = findexisting(os.path.join(path, command)) | |
567 | if executable is not None: |
|
567 | if executable is not None: | |
568 | return executable |
|
568 | return executable | |
569 | return findexisting(os.path.expanduser(os.path.expandvars(command))) |
|
569 | return findexisting(os.path.expanduser(os.path.expandvars(command))) | |
570 |
|
570 | |||
571 |
|
571 | |||
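A hedged sketch of the search behaviour described in the docstring; the command names and the result are illustrative and depend on the local PATH and PATHEXT:

    from mercurial.windows import findexe   # assumes a Windows environment

    findexe(b'python')        # PATH is searched, a PATHEXT extension is added
    # -> e.g. b'C:\\Python311\\python.exe', or None if nothing matches
    findexe(b'.\\build.bat')  # relative path: PATH is not consulted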
572 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} |
|
572 | _wantedkinds = {stat.S_IFREG, stat.S_IFLNK} | |
573 |
|
573 | |||
574 |
|
574 | |||
575 | def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]: |
|
575 | def statfiles(files: Sequence[bytes]) -> Iterator[Optional[os.stat_result]]: | |
576 | """Stat each file in files. Yield each stat, or None if a file |
|
576 | """Stat each file in files. Yield each stat, or None if a file | |
577 | does not exist or has a type we don't care about. |
|
577 | does not exist or has a type we don't care about. | |
578 |
|
578 | |||
579 | Cluster and cache stat per directory to minimize number of OS stat calls.""" |
|
579 | Cluster and cache stat per directory to minimize number of OS stat calls.""" | |
580 | dircache = {} # dirname -> filename -> status | None if file does not exist |
|
580 | dircache = {} # dirname -> filename -> status | None if file does not exist | |
581 | getkind = stat.S_IFMT |
|
581 | getkind = stat.S_IFMT | |
582 | for nf in files: |
|
582 | for nf in files: | |
583 | nf = normcase(nf) |
|
583 | nf = normcase(nf) | |
584 | dir, base = os.path.split(nf) |
|
584 | dir, base = os.path.split(nf) | |
585 | if not dir: |
|
585 | if not dir: | |
586 | dir = b'.' |
|
586 | dir = b'.' | |
587 | cache = dircache.get(dir, None) |
|
587 | cache = dircache.get(dir, None) | |
588 | if cache is None: |
|
588 | if cache is None: | |
589 | try: |
|
589 | try: | |
590 | dmap = { |
|
590 | dmap = { | |
591 | normcase(n): s |
|
591 | normcase(n): s | |
592 | for n, k, s in listdir(dir, True) |
|
592 | for n, k, s in listdir(dir, True) | |
593 | if getkind(s.st_mode) in _wantedkinds |
|
593 | if getkind(s.st_mode) in _wantedkinds | |
594 | } |
|
594 | } | |
595 | except (FileNotFoundError, NotADirectoryError): |
|
595 | except (FileNotFoundError, NotADirectoryError): | |
596 | dmap = {} |
|
596 | dmap = {} | |
597 | cache = dircache.setdefault(dir, dmap) |
|
597 | cache = dircache.setdefault(dir, dmap) | |
598 | yield cache.get(base, None) |
|
598 | yield cache.get(base, None) | |
599 |
|
599 | |||
600 |
|
600 | |||
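For clarity, a short usage sketch of the per-directory clustering (file names invented): both entries under ``foo`` are answered from a single listdir() call, and anything missing or of an unwanted type yields None:

    from mercurial.windows import statfiles   # assumes a Windows environment

    for st in statfiles([b'foo/a.txt', b'foo/b.txt', b'gone/c.txt']):
        if st is None:
            print('missing, or not a regular file / symlink')
        else:
            print(st.st_size, st.st_mtime)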
601 | def username(uid: Optional[int] = None) -> Optional[bytes]: |
|
601 | def username(uid: Optional[int] = None) -> Optional[bytes]: | |
602 | """Return the name of the user with the given uid. |
|
602 | """Return the name of the user with the given uid. | |
603 |
|
603 | |||
604 | If uid is None, return the name of the current user.""" |
|
604 | If uid is None, return the name of the current user.""" | |
605 | if not uid: |
|
605 | if not uid: | |
606 | try: |
|
606 | try: | |
607 | return pycompat.fsencode(getpass.getuser()) |
|
607 | return pycompat.fsencode(getpass.getuser()) | |
608 | except ModuleNotFoundError: |
|
608 | except ModuleNotFoundError: | |
609 | # getpass.getuser() checks for a few environment variables first, |
|
609 | # getpass.getuser() checks for a few environment variables first, | |
610 | # but if those aren't set, imports pwd and calls getpwuid(), none of |
|
610 | # but if those aren't set, imports pwd and calls getpwuid(), none of | |
611 | # which exists on Windows. |
|
611 | # which exists on Windows. | |
612 | pass |
|
612 | pass | |
613 | return None |
|
613 | return None | |
614 |
|
614 | |||
615 |
|
615 | |||
616 | def groupname(gid: Optional[int] = None) -> Optional[bytes]: |
|
616 | def groupname(gid: Optional[int] = None) -> Optional[bytes]: | |
617 | """Return the name of the group with the given gid. |
|
617 | """Return the name of the group with the given gid. | |
618 |
|
618 | |||
619 | If gid is None, return the name of the current group.""" |
|
619 | If gid is None, return the name of the current group.""" | |
620 | return None |
|
620 | return None | |
621 |
|
621 | |||
622 |
|
622 | |||
623 | def readlink(pathname: bytes) -> bytes: |
|
623 | def readlink(pathname: bytes) -> bytes: | |
624 | path = pycompat.fsdecode(pathname) |
|
624 | path = pycompat.fsdecode(pathname) | |
625 | try: |
|
625 | try: | |
626 | link = os.readlink(path) |
|
626 | link = os.readlink(path) | |
627 | except ValueError as e: |
|
627 | except ValueError as e: | |
628 | # On py2, os.readlink() raises an AttributeError since it is |
|
628 | # On py2, os.readlink() raises an AttributeError since it is | |
629 | # unsupported. On py3, reading a non-link raises a ValueError. Simply |
|
629 | # unsupported. On py3, reading a non-link raises a ValueError. Simply | |
630 | # treat this as the error the locking code has been expecting up to now |
|
630 | # treat this as the error the locking code has been expecting up to now | |
631 | # until an effort can be made to enable symlink support on Windows. |
|
631 | # until an effort can be made to enable symlink support on Windows. | |
632 | raise AttributeError(e) |
|
632 | raise AttributeError(e) | |
633 | return pycompat.fsencode(link) |
|
633 | return pycompat.fsencode(link) | |
634 |
|
634 | |||
635 |
|
635 | |||
636 | def removedirs(name: bytes) -> None: |
|
636 | def removedirs(name: bytes) -> None: | |
637 | """special version of os.removedirs that does not remove symlinked |
|
637 | """special version of os.removedirs that does not remove symlinked | |
638 | directories or junction points if they actually contain files""" |
|
638 | directories or junction points if they actually contain files""" | |
639 | if listdir(name): |
|
639 | if listdir(name): | |
640 | return |
|
640 | return | |
641 | os.rmdir(name) |
|
641 | os.rmdir(name) | |
642 | head, tail = os.path.split(name) |
|
642 | head, tail = os.path.split(name) | |
643 | if not tail: |
|
643 | if not tail: | |
644 | head, tail = os.path.split(head) |
|
644 | head, tail = os.path.split(head) | |
645 | while head and tail: |
|
645 | while head and tail: | |
646 | try: |
|
646 | try: | |
647 | if listdir(head): |
|
647 | if listdir(head): | |
648 | return |
|
648 | return | |
649 | os.rmdir(head) |
|
649 | os.rmdir(head) | |
650 | except (ValueError, OSError): |
|
650 | except (ValueError, OSError): | |
651 | break |
|
651 | break | |
652 | head, tail = os.path.split(head) |
|
652 | head, tail = os.path.split(head) | |
653 |
|
653 | |||
654 |
|
654 | |||
655 | def rename(src: bytes, dst: bytes) -> None: |
|
655 | def rename(src: bytes, dst: bytes) -> None: | |
656 | '''atomically rename file src to dst, replacing dst if it exists''' |
|
656 | '''atomically rename file src to dst, replacing dst if it exists''' | |
657 | try: |
|
657 | try: | |
658 | os.rename(src, dst) |
|
658 | os.rename(src, dst) | |
659 | except FileExistsError: |
|
659 | except FileExistsError: | |
660 | unlink(dst) |
|
660 | unlink(dst) | |
661 | os.rename(src, dst) |
|
661 | os.rename(src, dst) | |
662 |
|
662 | |||
663 |
|
663 | |||
664 | def gethgcmd() -> List[bytes]: |
|
664 | def gethgcmd() -> List[bytes]: | |
665 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] |
|
665 | return [encoding.strtolocal(arg) for arg in [sys.executable] + sys.argv[:1]] | |
666 |
|
666 | |||
667 |
|
667 | |||
668 | def groupmembers(name: bytes) -> List[bytes]: |
|
668 | def groupmembers(name: bytes) -> List[bytes]: | |
669 | # Don't support groups on Windows for now |
|
669 | # Don't support groups on Windows for now | |
670 | raise KeyError |
|
670 | raise KeyError | |
671 |
|
671 | |||
672 |
|
672 | |||
673 | def isexec(f: bytes) -> bool: |
|
673 | def isexec(f: bytes) -> bool: | |
674 | return False |
|
674 | return False | |
675 |
|
675 | |||
676 |
|
676 | |||
677 | class cachestat: |
|
677 | class cachestat: | |
|
678 | stat: Optional[os.stat_result] | |||
|
679 | ||||
678 | def __init__(self, path: bytes) -> None: |
|
680 | def __init__(self, path: bytes) -> None: | |
679 | pass |
|
681 | self.stat = None | |
680 |
|
682 | |||
681 | def cacheable(self) -> bool: |
|
683 | def cacheable(self) -> bool: | |
682 | return False |
|
684 | return False | |
683 |
|
685 | |||
684 |
|
686 | |||
685 | def lookupreg( |
|
687 | def lookupreg( | |
686 | key: bytes, |
|
688 | key: bytes, | |
687 | valname: Optional[bytes] = None, |
|
689 | valname: Optional[bytes] = None, | |
688 | scope: Optional[Union[int, Iterable[int]]] = None, |
|
690 | scope: Optional[Union[int, Iterable[int]]] = None, | |
689 | ) -> Optional[bytes]: |
|
691 | ) -> Optional[bytes]: | |
690 | """Look up a key/value name in the Windows registry. |
|
692 | """Look up a key/value name in the Windows registry. | |
691 |
|
693 | |||
692 | valname: value name. If unspecified, the default value for the key |
|
694 | valname: value name. If unspecified, the default value for the key | |
693 | is used. |
|
695 | is used. | |
694 | scope: optionally specify scope for registry lookup, this can be |
|
696 | scope: optionally specify scope for registry lookup, this can be | |
695 | a sequence of scopes to look up in order. Default (CURRENT_USER, |
|
697 | a sequence of scopes to look up in order. Default (CURRENT_USER, | |
696 | LOCAL_MACHINE). |
|
698 | LOCAL_MACHINE). | |
697 | """ |
|
699 | """ | |
698 | if scope is None: |
|
700 | if scope is None: | |
699 | # pytype: disable=module-attr |
|
701 | # pytype: disable=module-attr | |
700 | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) |
|
702 | scope = (winreg.HKEY_CURRENT_USER, winreg.HKEY_LOCAL_MACHINE) | |
701 | # pytype: enable=module-attr |
|
703 | # pytype: enable=module-attr | |
702 | elif not isinstance(scope, (list, tuple)): |
|
704 | elif not isinstance(scope, (list, tuple)): | |
703 | scope = (scope,) |
|
705 | scope = (scope,) | |
704 | for s in scope: |
|
706 | for s in scope: | |
705 | try: |
|
707 | try: | |
706 | # pytype: disable=module-attr |
|
708 | # pytype: disable=module-attr | |
707 | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: |
|
709 | with winreg.OpenKey(s, encoding.strfromlocal(key)) as hkey: | |
708 | # pytype: enable=module-attr |
|
710 | # pytype: enable=module-attr | |
709 | name = None |
|
711 | name = None | |
710 | if valname is not None: |
|
712 | if valname is not None: | |
711 | name = encoding.strfromlocal(valname) |
|
713 | name = encoding.strfromlocal(valname) | |
712 | # pytype: disable=module-attr |
|
714 | # pytype: disable=module-attr | |
713 | val = winreg.QueryValueEx(hkey, name)[0] |
|
715 | val = winreg.QueryValueEx(hkey, name)[0] | |
714 | # pytype: enable=module-attr |
|
716 | # pytype: enable=module-attr | |
715 |
|
717 | |||
716 | # never let a Unicode string escape into the wild |
|
718 | # never let a Unicode string escape into the wild | |
717 | return encoding.unitolocal(val) |
|
719 | return encoding.unitolocal(val) | |
718 | except EnvironmentError: |
|
720 | except EnvironmentError: | |
719 | pass |
|
721 | pass | |
720 |
|
722 | |||
721 |
|
723 | |||
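A hedged example of calling the registry helper; the key below is a standard Windows key, but whether the value exists depends on the machine:

    from mercurial.windows import lookupreg   # assumes a Windows environment

    # default scope: HKEY_CURRENT_USER first, then HKEY_LOCAL_MACHINE
    value = lookupreg(
        b'SOFTWARE\\Microsoft\\Windows\\CurrentVersion',
        b'ProgramFilesDir',
    )
    # -> local-encoded bytes such as b'C:\\Program Files', or None if unset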
722 | expandglobs: bool = True |
|
724 | expandglobs: bool = True | |
723 |
|
725 | |||
724 |
|
726 | |||
725 | def statislink(st: Optional[os.stat_result]) -> bool: |
|
727 | def statislink(st: Optional[os.stat_result]) -> bool: | |
726 | '''check whether a stat result is a symlink''' |
|
728 | '''check whether a stat result is a symlink''' | |
727 | return False |
|
729 | return False | |
728 |
|
730 | |||
729 |
|
731 | |||
730 | def statisexec(st: Optional[os.stat_result]) -> bool: |
|
732 | def statisexec(st: Optional[os.stat_result]) -> bool: | |
731 | '''check whether a stat result is an executable file''' |
|
733 | '''check whether a stat result is an executable file''' | |
732 | return False |
|
734 | return False | |
733 |
|
735 | |||
734 |
|
736 | |||
735 | def poll(fds) -> List: |
|
737 | def poll(fds) -> List: | |
736 | # see posix.py for description |
|
738 | # see posix.py for description | |
737 | raise NotImplementedError() |
|
739 | raise NotImplementedError() | |
738 |
|
740 | |||
739 |
|
741 | |||
740 | def readpipe(pipe) -> bytes: |
|
742 | def readpipe(pipe) -> bytes: | |
741 | """Read all available data from a pipe.""" |
|
743 | """Read all available data from a pipe.""" | |
742 | chunks = [] |
|
744 | chunks = [] | |
743 | while True: |
|
745 | while True: | |
744 | size = win32.peekpipe(pipe) |
|
746 | size = win32.peekpipe(pipe) | |
745 | if not size: |
|
747 | if not size: | |
746 | break |
|
748 | break | |
747 |
|
749 | |||
748 | s = pipe.read(size) |
|
750 | s = pipe.read(size) | |
749 | if not s: |
|
751 | if not s: | |
750 | break |
|
752 | break | |
751 | chunks.append(s) |
|
753 | chunks.append(s) | |
752 |
|
754 | |||
753 | return b''.join(chunks) |
|
755 | return b''.join(chunks) | |
754 |
|
756 | |||
755 |
|
757 | |||
756 | def bindunixsocket(sock, path: bytes) -> NoReturn: |
|
758 | def bindunixsocket(sock, path: bytes) -> NoReturn: | |
757 | raise NotImplementedError('unsupported platform') |
|
759 | raise NotImplementedError('unsupported platform') |