dirstate: add a `set_untracked` method for "hg remove"-like usage...
marmoute
r48399:cce51119 default
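The hunks below update extensions that wrap or subclass the dirstate so that the new `set_untracked` method gets the same handling as the existing `set_tracked` wrappers: path normalization in largefiles and narrow-spec enforcement in narrow are visible here (the sparse.py section is truncated before its change). As a rough, self-contained sketch of the wrapper pattern these hunks follow (toy classes, not Mercurial API):

    # Toy sketch: a subclass intercepts set_untracked, adjusts the
    # argument, then delegates to the base implementation -- the shape
    # of the wrappers added in this changeset.
    class basedirstate(object):
        def set_untracked(self, f):
            print('base stops tracking %s' % f)

    class wrappeddirstate(basedirstate):
        def set_untracked(self, f):
            # e.g. largefiles normalizes the path before delegating
            return super(wrappeddirstate, self).set_untracked(
                f.replace('\\', '/')
            )

    wrappeddirstate().set_untracked('sub\\big.bin')
    # -> base stops tracking sub/big.bin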
hgext/largefiles/lfutil.py
@@ -1,787 +1,790 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fall back to an atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

+    def set_untracked(self, f):
+        return super(largefilesdirstate, self).set_untracked(unixpath(f))
+
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate[f] != b'?'
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows.  Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin.  filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way.  Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat.state, stat.mtime
    else:
        state, mtime = b'?', -1
    if state == b'n':
        if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == b'm':
        lfdirstate.normallookup(lfile)
    elif state == b'r':
        lfdirstate.remove(lfile)
    elif state == b'a':
        lfdirstate.add(lfile)
    elif state == b'?':
        lfdirstate.drop(lfile)


def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies an additional "repo[node]"
    # lookup in copyalltostore(), but it can omit a redundant check for
    # files coming from the 2nd parent, which should already exist in the
    # store after a merge.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files.  It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed.  No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins.  Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != b'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user.  Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove).  In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != b'r':
                if repo.dirstate[f] != b'r':
                    continue
            elif repo.dirstate[f] == b'?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE
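A note on the largefiles hunk above: every largefilesdirstate wrapper, including the new set_untracked, funnels its argument through unixpath() (defined near the end of this file) before delegating, so the lfdirstate only ever sees normalized, slash-separated paths. A standalone approximation of that normalization, using only the stdlib (util.pconvert is Mercurial's own separator-conversion helper; this sketch merely mimics it):

    import os

    def unixpath_demo(path):
        # approximates lfutil.unixpath(): collapse '.' and '..' with
        # normpath, then force forward slashes as the dirstate expects
        return os.path.normpath(path).replace(os.sep, '/')

    assert unixpath_demo('sub/./big.bin') == 'sub/big.bin'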
hgext/narrow/narrowdirstate.py
@@ -1,72 +1,76 @@
# narrowdirstate.py - extensions to mercurial dirstate to support narrow clones
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial import error


def wrapdirstate(repo, dirstate):
    """Add narrow spec dirstate ignore, block changes outside narrow spec."""

    def _editfunc(fn):
        def _wrapper(self, *args, **kwargs):
            narrowmatch = repo.narrowmatch()
            for f in args:
                if f is not None and not narrowmatch(f) and f not in self:
                    raise error.Abort(
                        _(
                            b"cannot track '%s' - it is outside "
                            + b"the narrow clone"
                        )
                        % f
                    )
            return fn(self, *args, **kwargs)

        return _wrapper

    class narrowdirstate(dirstate.__class__):
        # Prevent adding/editing/copying/deleting files that are outside the
        # sparse checkout
        @_editfunc
        def normal(self, *args, **kwargs):
            return super(narrowdirstate, self).normal(*args, **kwargs)

        @_editfunc
        def set_tracked(self, *args):
            return super(narrowdirstate, self).set_tracked(*args)

        @_editfunc
+        def set_untracked(self, *args):
+            return super(narrowdirstate, self).set_untracked(*args)
+
+        @_editfunc
        def add(self, *args):
            return super(narrowdirstate, self).add(*args)

        @_editfunc
        def normallookup(self, *args):
            return super(narrowdirstate, self).normallookup(*args)

        @_editfunc
        def copy(self, *args):
            return super(narrowdirstate, self).copy(*args)

        @_editfunc
        def remove(self, *args):
            return super(narrowdirstate, self).remove(*args)

        @_editfunc
        def merge(self, *args):
            return super(narrowdirstate, self).merge(*args)

        def rebuild(self, parent, allfiles, changedfiles=None):
            if changedfiles is None:
                # Rebuilding entire dirstate, let's filter allfiles to match the
                # narrowspec.
                allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
            super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles)

    dirstate.__class__ = narrowdirstate
    return dirstate
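The narrow hunk gives set_untracked the same _editfunc guard as the other mutating methods: a path that is outside the narrow match and not already known to the dirstate aborts before the base class is reached. A minimal, self-contained sketch of that guard pattern (toy names, not Mercurial API):

    # 'allowed' stands in for repo.narrowmatch(); 'known' for 'f in self'.
    def editfunc_demo(allowed, known):
        def decorator(fn):
            def wrapper(*args, **kwargs):
                for f in args:
                    if f is not None and not allowed(f) and f not in known:
                        raise RuntimeError(
                            "cannot track %r - it is outside the narrow clone"
                            % f
                        )
                return fn(*args, **kwargs)
            return wrapper
        return decorator

    @editfunc_demo(lambda f: f.startswith('narrow/'), known=set())
    def set_untracked(f):
        return 'untracked %s' % f

    set_untracked('narrow/a.txt')   # fine: inside the narrow match
    # set_untracked('outside.txt')  # would raise RuntimeError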
hgext/sparse.py
@@ -1,441 +1,442 @@
1 # sparse.py - allow sparse checkouts of the working directory
1 # sparse.py - allow sparse checkouts of the working directory
2 #
2 #
3 # Copyright 2014 Facebook, Inc.
3 # Copyright 2014 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """allow sparse checkouts of the working directory (EXPERIMENTAL)
8 """allow sparse checkouts of the working directory (EXPERIMENTAL)
9
9
10 (This extension is not yet protected by backwards compatibility
10 (This extension is not yet protected by backwards compatibility
11 guarantees. Any aspect may break in future releases until this
11 guarantees. Any aspect may break in future releases until this
12 notice is removed.)
12 notice is removed.)
13
13
14 This extension allows the working directory to only consist of a
14 This extension allows the working directory to only consist of a
15 subset of files for the revision. This allows specific files or
15 subset of files for the revision. This allows specific files or
16 directories to be explicitly included or excluded. Many repository
16 directories to be explicitly included or excluded. Many repository
17 operations have performance proportional to the number of files in
17 operations have performance proportional to the number of files in
18 the working directory. So only realizing a subset of files in the
18 the working directory. So only realizing a subset of files in the
19 working directory can improve performance.
19 working directory can improve performance.
20
20
21 Sparse Config Files
21 Sparse Config Files
22 -------------------
22 -------------------
23
23
24 The set of files that are part of a sparse checkout are defined by
24 The set of files that are part of a sparse checkout are defined by
25 a sparse config file. The file defines 3 things: includes (files to
25 a sparse config file. The file defines 3 things: includes (files to
26 include in the sparse checkout), excludes (files to exclude from the
26 include in the sparse checkout), excludes (files to exclude from the
27 sparse checkout), and profiles (links to other config files).
27 sparse checkout), and profiles (links to other config files).
28
28
29 The file format is newline delimited. Empty lines and lines beginning
29 The file format is newline delimited. Empty lines and lines beginning
30 with ``#`` are ignored.
30 with ``#`` are ignored.
31
31
32 Lines beginning with ``%include `` denote another sparse config file
32 Lines beginning with ``%include `` denote another sparse config file
33 to include. e.g. ``%include tests.sparse``. The filename is relative
33 to include. e.g. ``%include tests.sparse``. The filename is relative
34 to the repository root.
34 to the repository root.
35
35
36 The special lines ``[include]`` and ``[exclude]`` denote the section
36 The special lines ``[include]`` and ``[exclude]`` denote the section
37 for includes and excludes that follow, respectively. It is illegal to
37 for includes and excludes that follow, respectively. It is illegal to
38 have ``[include]`` after ``[exclude]``.
38 have ``[include]`` after ``[exclude]``.
39
39
40 Non-special lines resemble file patterns to be added to either includes
40 Non-special lines resemble file patterns to be added to either includes
41 or excludes. The syntax of these lines is documented by :hg:`help patterns`.
41 or excludes. The syntax of these lines is documented by :hg:`help patterns`.
42 Patterns are interpreted as ``glob:`` by default and match against the
42 Patterns are interpreted as ``glob:`` by default and match against the
43 root of the repository.
43 root of the repository.
44
44
45 Exclusion patterns take precedence over inclusion patterns. So even
45 Exclusion patterns take precedence over inclusion patterns. So even
46 if a file is explicitly included, an ``[exclude]`` entry can remove it.
46 if a file is explicitly included, an ``[exclude]`` entry can remove it.
47
47
48 For example, say you have a repository with 3 directories, ``frontend/``,
48 For example, say you have a repository with 3 directories, ``frontend/``,
49 ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond
49 ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond
50 to different projects and it is uncommon for someone working on one
50 to different projects and it is uncommon for someone working on one
51 to need the files for the other. But ``tools/`` contains files shared
51 to need the files for the other. But ``tools/`` contains files shared
52 between both projects. Your sparse config files may resemble::
52 between both projects. Your sparse config files may resemble::
53
53
54 # frontend.sparse
54 # frontend.sparse
55 frontend/**
55 frontend/**
56 tools/**
56 tools/**
57
57
58 # backend.sparse
58 # backend.sparse
59 backend/**
59 backend/**
60 tools/**
60 tools/**
61
61
62 Say the backend grows in size. Or there's a directory with thousands
62 Say the backend grows in size. Or there's a directory with thousands
63 of files you wish to exclude. You can modify the profile to exclude
63 of files you wish to exclude. You can modify the profile to exclude
64 certain files::
64 certain files::
65
65
66 [include]
66 [include]
67 backend/**
67 backend/**
68 tools/**
68 tools/**
69
69
70 [exclude]
70 [exclude]
71 tools/tests/**
71 tools/tests/**
72 """
72 """
73
73
74 from __future__ import absolute_import
74 from __future__ import absolute_import
75
75
76 from mercurial.i18n import _
76 from mercurial.i18n import _
77 from mercurial.pycompat import setattr
77 from mercurial.pycompat import setattr
78 from mercurial import (
78 from mercurial import (
79 commands,
79 commands,
80 dirstate,
80 dirstate,
81 error,
81 error,
82 extensions,
82 extensions,
83 logcmdutil,
83 logcmdutil,
84 match as matchmod,
84 match as matchmod,
85 merge as mergemod,
85 merge as mergemod,
86 pycompat,
86 pycompat,
87 registrar,
87 registrar,
88 sparse,
88 sparse,
89 util,
89 util,
90 )
90 )
91
91
92 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
92 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
93 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
93 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
94 # be specifying the version(s) of Mercurial they are tested with, or
94 # be specifying the version(s) of Mercurial they are tested with, or
95 # leave the attribute unspecified.
95 # leave the attribute unspecified.
96 testedwith = b'ships-with-hg-core'
96 testedwith = b'ships-with-hg-core'
97
97
98 cmdtable = {}
98 cmdtable = {}
99 command = registrar.command(cmdtable)
99 command = registrar.command(cmdtable)
100
100
101
101
102 def extsetup(ui):
102 def extsetup(ui):
103 sparse.enabled = True
103 sparse.enabled = True
104
104
105 _setupclone(ui)
105 _setupclone(ui)
106 _setuplog(ui)
106 _setuplog(ui)
107 _setupadd(ui)
107 _setupadd(ui)
108 _setupdirstate(ui)
108 _setupdirstate(ui)
109
109
110
110
111 def replacefilecache(cls, propname, replacement):
111 def replacefilecache(cls, propname, replacement):
112 """Replace a filecache property with a new class. This allows changing the
112 """Replace a filecache property with a new class. This allows changing the
113 cache invalidation condition."""
113 cache invalidation condition."""
114 origcls = cls
114 origcls = cls
115 assert callable(replacement)
115 assert callable(replacement)
116 while cls is not object:
116 while cls is not object:
117 if propname in cls.__dict__:
117 if propname in cls.__dict__:
118 orig = cls.__dict__[propname]
118 orig = cls.__dict__[propname]
119 setattr(cls, propname, replacement(orig))
119 setattr(cls, propname, replacement(orig))
120 break
120 break
121 cls = cls.__bases__[0]
121 cls = cls.__bases__[0]
122
122
123 if cls is object:
123 if cls is object:
124 raise AttributeError(
124 raise AttributeError(
125 _(b"type '%s' has no property '%s'") % (origcls, propname)
125 _(b"type '%s' has no property '%s'") % (origcls, propname)
126 )
126 )
127
127
128
128
129 def _setuplog(ui):
129 def _setuplog(ui):
130 entry = commands.table[b'log|history']
130 entry = commands.table[b'log|history']
131 entry[1].append(
131 entry[1].append(
132 (
132 (
133 b'',
133 b'',
134 b'sparse',
134 b'sparse',
135 None,
135 None,
136 b"limit to changesets affecting the sparse checkout",
136 b"limit to changesets affecting the sparse checkout",
137 )
137 )
138 )
138 )
139
139
140 def _initialrevs(orig, repo, wopts):
140 def _initialrevs(orig, repo, wopts):
141 revs = orig(repo, wopts)
141 revs = orig(repo, wopts)
142 if wopts.opts.get(b'sparse'):
142 if wopts.opts.get(b'sparse'):
143 sparsematch = sparse.matcher(repo)
143 sparsematch = sparse.matcher(repo)
144
144
145 def ctxmatch(rev):
145 def ctxmatch(rev):
146 ctx = repo[rev]
146 ctx = repo[rev]
147 return any(f for f in ctx.files() if sparsematch(f))
147 return any(f for f in ctx.files() if sparsematch(f))
148
148
149 revs = revs.filter(ctxmatch)
149 revs = revs.filter(ctxmatch)
150 return revs
150 return revs
151
151
152 extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs)
152 extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs)
153
153
154
154
155 def _clonesparsecmd(orig, ui, repo, *args, **opts):
155 def _clonesparsecmd(orig, ui, repo, *args, **opts):
156 include_pat = opts.get('include')
156 include_pat = opts.get('include')
157 exclude_pat = opts.get('exclude')
157 exclude_pat = opts.get('exclude')
158 enableprofile_pat = opts.get('enable_profile')
158 enableprofile_pat = opts.get('enable_profile')
159 narrow_pat = opts.get('narrow')
159 narrow_pat = opts.get('narrow')
160 include = exclude = enableprofile = False
160 include = exclude = enableprofile = False
161 if include_pat:
161 if include_pat:
162 pat = include_pat
162 pat = include_pat
163 include = True
163 include = True
164 if exclude_pat:
164 if exclude_pat:
165 pat = exclude_pat
165 pat = exclude_pat
166 exclude = True
166 exclude = True
167 if enableprofile_pat:
167 if enableprofile_pat:
168 pat = enableprofile_pat
168 pat = enableprofile_pat
169 enableprofile = True
169 enableprofile = True
170 if sum([include, exclude, enableprofile]) > 1:
170 if sum([include, exclude, enableprofile]) > 1:
171 raise error.Abort(_(b"too many flags specified"))
171 raise error.Abort(_(b"too many flags specified"))
172 # if --narrow is passed, the patterns are includes and excludes for a
172 # if --narrow is passed, the patterns are includes and excludes for a
173 # narrow clone
173 # narrow clone
174 if not narrow_pat and (include or exclude or enableprofile):
174 if not narrow_pat and (include or exclude or enableprofile):
175
175
176 def clonesparse(orig, ctx, *args, **kwargs):
176 def clonesparse(orig, ctx, *args, **kwargs):
177 sparse.updateconfig(
177 sparse.updateconfig(
178 ctx.repo().unfiltered(),
178 ctx.repo().unfiltered(),
179 pat,
179 pat,
180 {},
180 {},
181 include=include,
181 include=include,
182 exclude=exclude,
182 exclude=exclude,
183 enableprofile=enableprofile,
183 enableprofile=enableprofile,
184 usereporootpaths=True,
184 usereporootpaths=True,
185 )
185 )
186 return orig(ctx, *args, **kwargs)
186 return orig(ctx, *args, **kwargs)
187
187
188 extensions.wrapfunction(mergemod, b'update', clonesparse)
188 extensions.wrapfunction(mergemod, b'update', clonesparse)
189 return orig(ui, repo, *args, **opts)
189 return orig(ui, repo, *args, **opts)
190
190
191
191
192 def _setupclone(ui):
192 def _setupclone(ui):
193 entry = commands.table[b'clone']
193 entry = commands.table[b'clone']
194 entry[1].append((b'', b'enable-profile', [], b'enable a sparse profile'))
194 entry[1].append((b'', b'enable-profile', [], b'enable a sparse profile'))
195 entry[1].append((b'', b'include', [], b'include sparse pattern'))
195 entry[1].append((b'', b'include', [], b'include sparse pattern'))
196 entry[1].append((b'', b'exclude', [], b'exclude sparse pattern'))
196 entry[1].append((b'', b'exclude', [], b'exclude sparse pattern'))
197 extensions.wrapcommand(commands.table, b'clone', _clonesparsecmd)
197 extensions.wrapcommand(commands.table, b'clone', _clonesparsecmd)
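For instance, a user might opt into a profile at clone time with something
like ``hg clone --enable-profile backend.sparse <url> <dest>`` (profile name
illustrative); the wrapper above applies the sparse rules via
``sparse.updateconfig`` before the initial working-directory update runs.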
198
198
199
199
200 def _setupadd(ui):
200 def _setupadd(ui):
201 entry = commands.table[b'add']
201 entry = commands.table[b'add']
202 entry[1].append(
202 entry[1].append(
203 (
203 (
204 b's',
204 b's',
205 b'sparse',
205 b'sparse',
206 None,
206 None,
207 b'also include directories of added files in sparse config',
207 b'also include directories of added files in sparse config',
208 )
208 )
209 )
209 )
210
210
211 def _add(orig, ui, repo, *pats, **opts):
211 def _add(orig, ui, repo, *pats, **opts):
212 if opts.get('sparse'):
212 if opts.get('sparse'):
213 dirs = set()
213 dirs = set()
214 for pat in pats:
214 for pat in pats:
215 dirname, basename = util.split(pat)
215 dirname, basename = util.split(pat)
216 dirs.add(dirname)
216 dirs.add(dirname)
217 sparse.updateconfig(repo, list(dirs), opts, include=True)
217 sparse.updateconfig(repo, list(dirs), opts, include=True)
218 return orig(ui, repo, *pats, **opts)
218 return orig(ui, repo, *pats, **opts)
219
219
220 extensions.wrapcommand(commands.table, b'add', _add)
220 extensions.wrapcommand(commands.table, b'add', _add)
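In other words, an invocation such as ``hg add -s newdir/file.py`` (path
illustrative) first adds ``newdir`` to the include rules through
``sparse.updateconfig`` and only then performs the normal add.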
221
221
222
222
223 def _setupdirstate(ui):
223 def _setupdirstate(ui):
224 """Modify the dirstate to prevent stat'ing excluded files,
224 """Modify the dirstate to prevent stat'ing excluded files,
225 and to prevent modifications to files outside the checkout.
225 and to prevent modifications to files outside the checkout.
226 """
226 """
227
227
228 def walk(orig, self, match, subrepos, unknown, ignored, full=True):
228 def walk(orig, self, match, subrepos, unknown, ignored, full=True):
229 # hack to not exclude explicitly-specified paths so that they can
229 # hack to not exclude explicitly-specified paths so that they can
230 # be warned about later, e.g. in dirstate.add()
230 # be warned about later, e.g. in dirstate.add()
231 em = matchmod.exact(match.files())
231 em = matchmod.exact(match.files())
232 sm = matchmod.unionmatcher([self._sparsematcher, em])
232 sm = matchmod.unionmatcher([self._sparsematcher, em])
233 match = matchmod.intersectmatchers(match, sm)
233 match = matchmod.intersectmatchers(match, sm)
234 return orig(self, match, subrepos, unknown, ignored, full)
234 return orig(self, match, subrepos, unknown, ignored, full)
235
235
236 extensions.wrapfunction(dirstate.dirstate, b'walk', walk)
236 extensions.wrapfunction(dirstate.dirstate, b'walk', walk)
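A minimal sketch of the matcher composition performed by ``walk`` above,
using plain predicates in place of real ``matchmod`` objects (everything
here is illustrative, not Mercurial API)::

    def exact(files):
        fileset = set(files)
        return lambda f: f in fileset

    def union(matchers):
        return lambda f: any(m(f) for m in matchers)

    def intersect(m1, m2):
        return lambda f: m1(f) and m2(f)

    sparsematch = lambda f: f.startswith('backend/')  # stand-in sparse matcher
    requested = exact(['frontend/app.py'])            # explicitly-named path
    walkmatch = intersect(lambda f: True, union([sparsematch, requested]))

    assert walkmatch('backend/main.py')     # inside the sparse checkout
    assert walkmatch('frontend/app.py')     # named explicitly, kept for warnings
    assert not walkmatch('frontend/ui.py')  # filtered out by the sparse rules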
237
237
238 # dirstate.rebuild should not add non-matching files
238 # dirstate.rebuild should not add non-matching files
239 def _rebuild(orig, self, parent, allfiles, changedfiles=None):
239 def _rebuild(orig, self, parent, allfiles, changedfiles=None):
240 matcher = self._sparsematcher
240 matcher = self._sparsematcher
241 if not matcher.always():
241 if not matcher.always():
242 allfiles = [f for f in allfiles if matcher(f)]
242 allfiles = [f for f in allfiles if matcher(f)]
243 if changedfiles:
243 if changedfiles:
244 changedfiles = [f for f in changedfiles if matcher(f)]
244 changedfiles = [f for f in changedfiles if matcher(f)]
245
245
246 if changedfiles is not None:
246 if changedfiles is not None:
247 # In _rebuild, these files will be deleted from the dirstate
247 # In _rebuild, these files will be deleted from the dirstate
248 # when they are not found to be in allfiles
248 # when they are not found to be in allfiles
249 dirstatefilestoremove = {f for f in self if not matcher(f)}
249 dirstatefilestoremove = {f for f in self if not matcher(f)}
250 changedfiles = dirstatefilestoremove.union(changedfiles)
250 changedfiles = dirstatefilestoremove.union(changedfiles)
251
251
252 return orig(self, parent, allfiles, changedfiles)
252 return orig(self, parent, allfiles, changedfiles)
253
253
254 extensions.wrapfunction(dirstate.dirstate, b'rebuild', _rebuild)
254 extensions.wrapfunction(dirstate.dirstate, b'rebuild', _rebuild)
255
255
256 # Prevent adding files that are outside the sparse checkout
256 # Prevent adding files that are outside the sparse checkout
257 editfuncs = [
257 editfuncs = [
258 b'normal',
258 b'normal',
259 b'set_tracked',
259 b'set_tracked',
260 b'set_untracked',
260 b'add',
261 b'add',
261 b'normallookup',
262 b'normallookup',
262 b'copy',
263 b'copy',
263 b'remove',
264 b'remove',
264 b'merge',
265 b'merge',
265 ]
266 ]
266 hint = _(
267 hint = _(
267 b'include the file with `hg debugsparse --include <pattern>` or use '
268 b'include the file with `hg debugsparse --include <pattern>` or use '
268 + b'`hg add -s <file>` to include the directory of the file while adding'
269 + b'`hg add -s <file>` to include the directory of the file while adding'
269 )
270 )
270 for func in editfuncs:
271 for func in editfuncs:
271
272
272 def _wrapper(orig, self, *args, **kwargs):
273 def _wrapper(orig, self, *args, **kwargs):
273 sparsematch = self._sparsematcher
274 sparsematch = self._sparsematcher
274 if not sparsematch.always():
275 if not sparsematch.always():
275 for f in args:
276 for f in args:
276 if f is not None and not sparsematch(f) and f not in self:
277 if f is not None and not sparsematch(f) and f not in self:
277 raise error.Abort(
278 raise error.Abort(
278 _(
279 _(
279 b"cannot add '%s' - it is outside "
280 b"cannot add '%s' - it is outside "
280 b"the sparse checkout"
281 b"the sparse checkout"
281 )
282 )
282 % f,
283 % f,
283 hint=hint,
284 hint=hint,
284 )
285 )
285 return orig(self, *args, **kwargs)
286 return orig(self, *args, **kwargs)
286
287
287 extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
288 extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
288
289
289
290
290 @command(
291 @command(
291 b'debugsparse',
292 b'debugsparse',
292 [
293 [
293 (b'I', b'include', False, _(b'include files in the sparse checkout')),
294 (b'I', b'include', False, _(b'include files in the sparse checkout')),
294 (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')),
295 (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')),
295 (b'd', b'delete', False, _(b'delete an include/exclude rule')),
296 (b'd', b'delete', False, _(b'delete an include/exclude rule')),
296 (
297 (
297 b'f',
298 b'f',
298 b'force',
299 b'force',
299 False,
300 False,
300 _(b'allow changing rules even with pending changes'),
301 _(b'allow changing rules even with pending changes'),
301 ),
302 ),
302 (b'', b'enable-profile', False, _(b'enables the specified profile')),
303 (b'', b'enable-profile', False, _(b'enables the specified profile')),
303 (b'', b'disable-profile', False, _(b'disables the specified profile')),
304 (b'', b'disable-profile', False, _(b'disables the specified profile')),
304 (b'', b'import-rules', False, _(b'imports rules from a file')),
305 (b'', b'import-rules', False, _(b'imports rules from a file')),
305 (b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
306 (b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
306 (
307 (
307 b'',
308 b'',
308 b'refresh',
309 b'refresh',
309 False,
310 False,
310 _(b'updates the working directory after sparseness changes'),
311 _(b'updates the working directory after sparseness changes'),
311 ),
312 ),
312 (b'', b'reset', False, _(b'makes the repo full again')),
313 (b'', b'reset', False, _(b'makes the repo full again')),
313 ]
314 ]
314 + commands.templateopts,
315 + commands.templateopts,
315 _(b'[--OPTION] PATTERN...'),
316 _(b'[--OPTION] PATTERN...'),
316 helpbasic=True,
317 helpbasic=True,
317 )
318 )
318 def debugsparse(ui, repo, *pats, **opts):
319 def debugsparse(ui, repo, *pats, **opts):
319 """make the current checkout sparse, or edit the existing checkout
320 """make the current checkout sparse, or edit the existing checkout
320
321
321 The sparse command is used to make the current checkout sparse.
322 The sparse command is used to make the current checkout sparse.
322 This means files that don't meet the sparse condition will not be
323 This means files that don't meet the sparse condition will not be
323 written to disk, or show up in any working copy operations. It does
324 written to disk, or show up in any working copy operations. It does
324 not affect files in history in any way.
325 not affect files in history in any way.
325
326
326 Passing no arguments prints the currently applied sparse rules.
327 Passing no arguments prints the currently applied sparse rules.
327
328
328 --include and --exclude are used to add and remove files from the sparse
329 --include and --exclude are used to add and remove files from the sparse
329 checkout. The effects of adding an include or exclude rule are applied
330 checkout. The effects of adding an include or exclude rule are applied
330 immediately. If applying the new rule would cause a file with pending
331 immediately. If applying the new rule would cause a file with pending
331 changes to be added or removed, the command will fail. Pass --force to
332 changes to be added or removed, the command will fail. Pass --force to
332 force a rule change even with pending changes (the changes on disk will
333 force a rule change even with pending changes (the changes on disk will
333 be preserved).
334 be preserved).
334
335
335 --delete removes an existing include/exclude rule. The effects are
336 --delete removes an existing include/exclude rule. The effects are
336 immediate.
337 immediate.
337
338
338 --refresh refreshes the files on disk based on the sparse rules. This is
339 --refresh refreshes the files on disk based on the sparse rules. This is
339 only necessary if .hg/sparse was changed by hand.
340 only necessary if .hg/sparse was changed by hand.
340
341
341 --enable-profile and --disable-profile accept a path to a .hgsparse file.
342 --enable-profile and --disable-profile accept a path to a .hgsparse file.
342 This allows defining sparse checkouts and tracking them inside the
343 This allows defining sparse checkouts and tracking them inside the
343 repository. This is useful for defining commonly used sparse checkouts for
344 repository. This is useful for defining commonly used sparse checkouts for
344 many people to use. As the profile definition changes over time, the sparse
345 many people to use. As the profile definition changes over time, the sparse
345 checkout will automatically be updated appropriately, depending on which
346 checkout will automatically be updated appropriately, depending on which
346 changeset is checked out. Changes to .hgsparse are not applied until they
347 changeset is checked out. Changes to .hgsparse are not applied until they
347 have been committed.
348 have been committed.
348
349
349 --import-rules accepts a path to a file containing rules in the .hgsparse
350 --import-rules accepts a path to a file containing rules in the .hgsparse
350 format, allowing you to add --include, --exclude and --enable-profile rules
351 format, allowing you to add --include, --exclude and --enable-profile rules
351 in bulk. Like the --include, --exclude and --enable-profile switches, the
352 in bulk. Like the --include, --exclude and --enable-profile switches, the
352 changes are applied immediately.
353 changes are applied immediately.
353
354
354 --clear-rules removes all local include and exclude rules, while leaving
355 --clear-rules removes all local include and exclude rules, while leaving
355 any enabled profiles in place.
356 any enabled profiles in place.
356
357
357 Returns 0 if editing the sparse checkout succeeds.
358 Returns 0 if editing the sparse checkout succeeds.
358 """
359 """
359 opts = pycompat.byteskwargs(opts)
360 opts = pycompat.byteskwargs(opts)
360 include = opts.get(b'include')
361 include = opts.get(b'include')
361 exclude = opts.get(b'exclude')
362 exclude = opts.get(b'exclude')
362 force = opts.get(b'force')
363 force = opts.get(b'force')
363 enableprofile = opts.get(b'enable_profile')
364 enableprofile = opts.get(b'enable_profile')
364 disableprofile = opts.get(b'disable_profile')
365 disableprofile = opts.get(b'disable_profile')
365 importrules = opts.get(b'import_rules')
366 importrules = opts.get(b'import_rules')
366 clearrules = opts.get(b'clear_rules')
367 clearrules = opts.get(b'clear_rules')
367 delete = opts.get(b'delete')
368 delete = opts.get(b'delete')
368 refresh = opts.get(b'refresh')
369 refresh = opts.get(b'refresh')
369 reset = opts.get(b'reset')
370 reset = opts.get(b'reset')
370 count = sum(
371 count = sum(
371 [
372 [
372 include,
373 include,
373 exclude,
374 exclude,
374 enableprofile,
375 enableprofile,
375 disableprofile,
376 disableprofile,
376 delete,
377 delete,
377 importrules,
378 importrules,
378 refresh,
379 refresh,
379 clearrules,
380 clearrules,
380 reset,
381 reset,
381 ]
382 ]
382 )
383 )
383 if count > 1:
384 if count > 1:
384 raise error.Abort(_(b"too many flags specified"))
385 raise error.Abort(_(b"too many flags specified"))
385
386
386 if count == 0:
387 if count == 0:
387 if repo.vfs.exists(b'sparse'):
388 if repo.vfs.exists(b'sparse'):
388 ui.status(repo.vfs.read(b"sparse") + b"\n")
389 ui.status(repo.vfs.read(b"sparse") + b"\n")
389 temporaryincludes = sparse.readtemporaryincludes(repo)
390 temporaryincludes = sparse.readtemporaryincludes(repo)
390 if temporaryincludes:
391 if temporaryincludes:
391 ui.status(
392 ui.status(
392 _(b"Temporarily Included Files (for merge/rebase):\n")
393 _(b"Temporarily Included Files (for merge/rebase):\n")
393 )
394 )
394 ui.status((b"\n".join(temporaryincludes) + b"\n"))
395 ui.status((b"\n".join(temporaryincludes) + b"\n"))
395 return
396 return
396 else:
397 else:
397 raise error.Abort(
398 raise error.Abort(
398 _(
399 _(
399 b'the debugsparse command is only supported on'
400 b'the debugsparse command is only supported on'
400 b' sparse repositories'
401 b' sparse repositories'
401 )
402 )
402 )
403 )
403
404
404 if include or exclude or delete or reset or enableprofile or disableprofile:
405 if include or exclude or delete or reset or enableprofile or disableprofile:
405 sparse.updateconfig(
406 sparse.updateconfig(
406 repo,
407 repo,
407 pats,
408 pats,
408 opts,
409 opts,
409 include=include,
410 include=include,
410 exclude=exclude,
411 exclude=exclude,
411 reset=reset,
412 reset=reset,
412 delete=delete,
413 delete=delete,
413 enableprofile=enableprofile,
414 enableprofile=enableprofile,
414 disableprofile=disableprofile,
415 disableprofile=disableprofile,
415 force=force,
416 force=force,
416 )
417 )
417
418
418 if importrules:
419 if importrules:
419 sparse.importfromfiles(repo, opts, pats, force=force)
420 sparse.importfromfiles(repo, opts, pats, force=force)
420
421
421 if clearrules:
422 if clearrules:
422 sparse.clearrules(repo, force=force)
423 sparse.clearrules(repo, force=force)
423
424
424 if refresh:
425 if refresh:
425 try:
426 try:
426 wlock = repo.wlock()
427 wlock = repo.wlock()
427 fcounts = list(map(
428 fcounts = list(map(
428 len,
429 len,
429 sparse.refreshwdir(
430 sparse.refreshwdir(
430 repo, repo.status(), sparse.matcher(repo), force=force
431 repo, repo.status(), sparse.matcher(repo), force=force
431 ),
432 ),
432 ))
433 ))
433 sparse.printchanges(
434 sparse.printchanges(
434 ui,
435 ui,
435 opts,
436 opts,
436 added=fcounts[0],
437 added=fcounts[0],
437 dropped=fcounts[1],
438 dropped=fcounts[1],
438 conflicting=fcounts[2],
439 conflicting=fcounts[2],
439 )
440 )
440 finally:
441 finally:
441 wlock.release()
442 wlock.release()
@@ -1,1526 +1,1545
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = parsers.DirstateItem
48 DirstateItem = parsers.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
86 def requires_no_parents_change(func):
87 def wrap(self, *args, **kwargs):
87 def wrap(self, *args, **kwargs):
88 if self.pendingparentchange():
88 if self.pendingparentchange():
89 msg = 'calling `%s` inside of a parentchange context'
89 msg = 'calling `%s` inside of a parentchange context'
90 msg %= func.__name__
90 msg %= func.__name__
91 raise error.ProgrammingError(msg)
91 raise error.ProgrammingError(msg)
92 return func(self, *args, **kwargs)
92 return func(self, *args, **kwargs)
93
93
94 return wrap
94 return wrap
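The two decorators above are the same guard with opposite polarity. A
self-contained sketch of the pattern, with ``RuntimeError`` standing in for
``error.ProgrammingError`` (names below are illustrative)::

    def requires_pending(expected):
        # build a decorator that checks pendingparentchange() before calling
        def decorator(func):
            def wrap(self, *args, **kwargs):
                if self.pendingparentchange() != expected:
                    where = 'outside of' if expected else 'inside of'
                    raise RuntimeError(
                        'calling `%s` %s a parentchange context'
                        % (func.__name__, where)
                    )
                return func(self, *args, **kwargs)
            return wrap
        return decorator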
95
95
96
96
97 @interfaceutil.implementer(intdirstate.idirstate)
97 @interfaceutil.implementer(intdirstate.idirstate)
98 class dirstate(object):
98 class dirstate(object):
99 def __init__(
99 def __init__(
100 self,
100 self,
101 opener,
101 opener,
102 ui,
102 ui,
103 root,
103 root,
104 validate,
104 validate,
105 sparsematchfn,
105 sparsematchfn,
106 nodeconstants,
106 nodeconstants,
107 use_dirstate_v2,
107 use_dirstate_v2,
108 ):
108 ):
109 """Create a new dirstate object.
109 """Create a new dirstate object.
110
110
111 opener is an open()-like callable that can be used to open the
111 opener is an open()-like callable that can be used to open the
112 dirstate file; root is the root of the directory tracked by
112 dirstate file; root is the root of the directory tracked by
113 the dirstate.
113 the dirstate.
114 """
114 """
115 self._use_dirstate_v2 = use_dirstate_v2
115 self._use_dirstate_v2 = use_dirstate_v2
116 self._nodeconstants = nodeconstants
116 self._nodeconstants = nodeconstants
117 self._opener = opener
117 self._opener = opener
118 self._validate = validate
118 self._validate = validate
119 self._root = root
119 self._root = root
120 self._sparsematchfn = sparsematchfn
120 self._sparsematchfn = sparsematchfn
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
121 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
122 # UNC path pointing to root share (issue4557)
122 # UNC path pointing to root share (issue4557)
123 self._rootdir = pathutil.normasprefix(root)
123 self._rootdir = pathutil.normasprefix(root)
124 self._dirty = False
124 self._dirty = False
125 self._lastnormaltime = 0
125 self._lastnormaltime = 0
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._pendingfilename = b'%s.pending' % self._filename
130 self._pendingfilename = b'%s.pending' % self._filename
131 self._plchangecallbacks = {}
131 self._plchangecallbacks = {}
132 self._origpl = None
132 self._origpl = None
133 self._updatedfiles = set()
133 self._updatedfiles = set()
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
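A hedged usage sketch: parent-changing operations such as ``setparents()``
(defined later in this class) are meant to run inside this context manager;
``repo`` and ``new_p1`` below are placeholders::

    with repo.dirstate.parentchange():
        repo.dirstate.setparents(new_p1)  # p2 defaults to nullid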
163
163
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 if self._checklink and self._checkexec:
242 if self._checklink and self._checkexec:
243
243
244 def f(x):
244 def f(x):
245 try:
245 try:
246 st = os.lstat(self._join(x))
246 st = os.lstat(self._join(x))
247 if util.statislink(st):
247 if util.statislink(st):
248 return b'l'
248 return b'l'
249 if util.statisexec(st):
249 if util.statisexec(st):
250 return b'x'
250 return b'x'
251 except OSError:
251 except OSError:
252 pass
252 pass
253 return b''
253 return b''
254
254
255 return f
255 return f
256
256
257 fallback = buildfallback()
257 fallback = buildfallback()
258 if self._checklink:
258 if self._checklink:
259
259
260 def f(x):
260 def f(x):
261 if os.path.islink(self._join(x)):
261 if os.path.islink(self._join(x)):
262 return b'l'
262 return b'l'
263 if b'x' in fallback(x):
263 if b'x' in fallback(x):
264 return b'x'
264 return b'x'
265 return b''
265 return b''
266
266
267 return f
267 return f
268 if self._checkexec:
268 if self._checkexec:
269
269
270 def f(x):
270 def f(x):
271 if b'l' in fallback(x):
271 if b'l' in fallback(x):
272 return b'l'
272 return b'l'
273 if util.isexec(self._join(x)):
273 if util.isexec(self._join(x)):
274 return b'x'
274 return b'x'
275 return b''
275 return b''
276
276
277 return f
277 return f
278 else:
278 else:
279 return fallback
279 return fallback
280
280
281 @propertycache
281 @propertycache
282 def _cwd(self):
282 def _cwd(self):
283 # internal config: ui.forcecwd
283 # internal config: ui.forcecwd
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
284 forcecwd = self._ui.config(b'ui', b'forcecwd')
285 if forcecwd:
285 if forcecwd:
286 return forcecwd
286 return forcecwd
287 return encoding.getcwd()
287 return encoding.getcwd()
288
288
289 def getcwd(self):
289 def getcwd(self):
290 """Return the path from which a canonical path is calculated.
290 """Return the path from which a canonical path is calculated.
291
291
292 This path should be used to resolve file patterns or to convert
292 This path should be used to resolve file patterns or to convert
293 canonical paths back to file paths for display. It shouldn't be
293 canonical paths back to file paths for display. It shouldn't be
294 used to get real file paths. Use vfs functions instead.
294 used to get real file paths. Use vfs functions instead.
295 """
295 """
296 cwd = self._cwd
296 cwd = self._cwd
297 if cwd == self._root:
297 if cwd == self._root:
298 return b''
298 return b''
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
299 # self._root ends with a path separator if self._root is '/' or 'C:\'
300 rootsep = self._root
300 rootsep = self._root
301 if not util.endswithsep(rootsep):
301 if not util.endswithsep(rootsep):
302 rootsep += pycompat.ossep
302 rootsep += pycompat.ossep
303 if cwd.startswith(rootsep):
303 if cwd.startswith(rootsep):
304 return cwd[len(rootsep) :]
304 return cwd[len(rootsep) :]
305 else:
305 else:
306 # we're outside the repo. return an absolute path.
306 # we're outside the repo. return an absolute path.
307 return cwd
307 return cwd
308
308
309 def pathto(self, f, cwd=None):
309 def pathto(self, f, cwd=None):
310 if cwd is None:
310 if cwd is None:
311 cwd = self.getcwd()
311 cwd = self.getcwd()
312 path = util.pathto(self._root, cwd, f)
312 path = util.pathto(self._root, cwd, f)
313 if self._slash:
313 if self._slash:
314 return util.pconvert(path)
314 return util.pconvert(path)
315 return path
315 return path
316
316
317 def __getitem__(self, key):
317 def __getitem__(self, key):
318 """Return the current state of key (a filename) in the dirstate.
318 """Return the current state of key (a filename) in the dirstate.
319
319
320 States are:
320 States are:
321 n normal
321 n normal
322 m needs merging
322 m needs merging
323 r marked for removal
323 r marked for removal
324 a marked for addition
324 a marked for addition
325 ? not tracked
325 ? not tracked
326
326
327 XXX The "state" is a bit obscure to be in the "public" API. we should
327 XXX The "state" is a bit obscure to be in the "public" API. we should
328 consider migrating all user of this to going through the dirstate entry
328 consider migrating all user of this to going through the dirstate entry
329 instead.
329 instead.
330 """
330 """
331 entry = self._map.get(key)
331 entry = self._map.get(key)
332 if entry is not None:
332 if entry is not None:
333 return entry.state
333 return entry.state
334 return b'?'
334 return b'?'
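Illustrative only, with ``ds`` standing in for a dirstate instance; keys are
bytes and the result is one of the one-letter codes listed above::

    state = ds[b'src/main.py']
    if state == b'?':
        print('not tracked')
    elif state == b'r':
        print('marked for removal')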
335
335
336 def __contains__(self, key):
336 def __contains__(self, key):
337 return key in self._map
337 return key in self._map
338
338
339 def __iter__(self):
339 def __iter__(self):
340 return iter(sorted(self._map))
340 return iter(sorted(self._map))
341
341
342 def items(self):
342 def items(self):
343 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
344
344
345 iteritems = items
345 iteritems = items
346
346
347 def directories(self):
347 def directories(self):
348 return self._map.directories()
348 return self._map.directories()
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries are
370 When moving from two parents to one, "merged" entries are
371 adjusted to normal, and previous copy records are discarded and
371 adjusted to normal, and previous copy records are discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 self._map.setparents(p1, p2)
388 self._map.setparents(p1, p2)
389 copies = {}
389 copies = {}
390 if (
390 if (
391 oldp2 != self._nodeconstants.nullid
391 oldp2 != self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
392 and p2 == self._nodeconstants.nullid
393 ):
393 ):
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
394 candidatefiles = self._map.non_normal_or_other_parent_paths()
395
395
396 for f in candidatefiles:
396 for f in candidatefiles:
397 s = self._map.get(f)
397 s = self._map.get(f)
398 if s is None:
398 if s is None:
399 continue
399 continue
400
400
401 # Discard "merged" markers when moving away from a merge state
401 # Discard "merged" markers when moving away from a merge state
402 if s.merged:
402 if s.merged:
403 source = self._map.copymap.get(f)
403 source = self._map.copymap.get(f)
404 if source:
404 if source:
405 copies[f] = source
405 copies[f] = source
406 self.normallookup(f)
406 self.normallookup(f)
407 # Also fix up otherparent markers
407 # Also fix up otherparent markers
408 elif s.from_p2:
408 elif s.from_p2:
409 source = self._map.copymap.get(f)
409 source = self._map.copymap.get(f)
410 if source:
410 if source:
411 copies[f] = source
411 copies[f] = source
412 self._add(f)
412 self._add(f)
413 return copies
413 return copies
414
414
415 def setbranch(self, branch):
415 def setbranch(self, branch):
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
416 self.__class__._branch.set(self, encoding.fromlocal(branch))
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
417 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
418 try:
418 try:
419 f.write(self._branch + b'\n')
419 f.write(self._branch + b'\n')
420 f.close()
420 f.close()
421
421
422 # make sure filecache has the correct stat info for _branch after
422 # make sure filecache has the correct stat info for _branch after
423 # replacing the underlying file
423 # replacing the underlying file
424 ce = self._filecache[b'_branch']
424 ce = self._filecache[b'_branch']
425 if ce:
425 if ce:
426 ce.refresh()
426 ce.refresh()
427 except: # re-raises
427 except: # re-raises
428 f.discard()
428 f.discard()
429 raise
429 raise
430
430
431 def invalidate(self):
431 def invalidate(self):
432 """Causes the next access to reread the dirstate.
432 """Causes the next access to reread the dirstate.
433
433
434 This is different from localrepo.invalidatedirstate() because it always
434 This is different from localrepo.invalidatedirstate() because it always
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
435 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
436 check whether the dirstate has changed before rereading it."""
436 check whether the dirstate has changed before rereading it."""
437
437
438 for a in ("_map", "_branch", "_ignore"):
438 for a in ("_map", "_branch", "_ignore"):
439 if a in self.__dict__:
439 if a in self.__dict__:
440 delattr(self, a)
440 delattr(self, a)
441 self._lastnormaltime = 0
441 self._lastnormaltime = 0
442 self._dirty = False
442 self._dirty = False
443 self._updatedfiles.clear()
443 self._updatedfiles.clear()
444 self._parentwriters = 0
444 self._parentwriters = 0
445 self._origpl = None
445 self._origpl = None
446
446
447 def copy(self, source, dest):
447 def copy(self, source, dest):
448 """Mark dest as a copy of source. Unmark dest if source is None."""
448 """Mark dest as a copy of source. Unmark dest if source is None."""
449 if source == dest:
449 if source == dest:
450 return
450 return
451 self._dirty = True
451 self._dirty = True
452 if source is not None:
452 if source is not None:
453 self._map.copymap[dest] = source
453 self._map.copymap[dest] = source
454 self._updatedfiles.add(source)
454 self._updatedfiles.add(source)
455 self._updatedfiles.add(dest)
455 self._updatedfiles.add(dest)
456 elif self._map.copymap.pop(dest, None):
456 elif self._map.copymap.pop(dest, None):
457 self._updatedfiles.add(dest)
457 self._updatedfiles.add(dest)
458
458
459 def copied(self, file):
459 def copied(self, file):
460 return self._map.copymap.get(file, None)
460 return self._map.copymap.get(file, None)
461
461
462 def copies(self):
462 def copies(self):
463 return self._map.copymap
463 return self._map.copymap
464
464
465 @requires_no_parents_change
465 @requires_no_parents_change
466 def set_tracked(self, filename):
466 def set_tracked(self, filename):
467 """a "public" method for generic code to mark a file as tracked
467 """a "public" method for generic code to mark a file as tracked
468
468
469 This function is to be called outside of the "update/merge" case, for
469 This function is to be called outside of the "update/merge" case, for
470 example by a command like `hg add X`.
470 example by a command like `hg add X`.
471
471
472 return True if the file was previously untracked, False otherwise.
472 return True if the file was previously untracked, False otherwise.
473 """
473 """
474 entry = self._map.get(filename)
474 entry = self._map.get(filename)
475 if entry is None:
475 if entry is None:
476 self._add(filename)
476 self._add(filename)
477 return True
477 return True
478 elif not entry.tracked:
478 elif not entry.tracked:
479 self.normallookup(filename)
479 self.normallookup(filename)
480 return True
480 return True
481 return False
481 return False
482
482
483 @requires_no_parents_change
484 def set_untracked(self, filename):
485 """a "public" method for generic code to mark a file as untracked
486
487 This function is to be called outside of the "update/merge" case, for
488 example by a command like `hg remove X`.
489
490 return True if the file was previously tracked, False otherwise.
491 """
492 entry = self._map.get(filename)
493 if entry is None:
494 return False
495 elif entry.added:
496 self._drop(filename)
497 return True
498 else:
499 self._remove(filename)
500 return True
501
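A toy model of the branches in ``set_untracked`` above; ``entry`` mimics a
dirstate item with an ``added`` attribute, or ``None`` for an unknown file
(names are illustrative)::

    def set_untracked_outcome(entry):
        if entry is None:
            return (False, 'no-op')      # the file was never tracked
        if entry.added:
            return (True, 'dropped')     # a scheduled add is simply forgotten
        return (True, 'marked removed')  # like `hg remove` on a committed file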
483 @requires_parents_change
502 @requires_parents_change
484 def update_file_reference(
503 def update_file_reference(
485 self,
504 self,
486 filename,
505 filename,
487 p1_tracked,
506 p1_tracked,
488 ):
507 ):
489 """Set a file as tracked in the parent (or not)
508 """Set a file as tracked in the parent (or not)
490
509
491 This is to be called when adjusting the dirstate to a new parent after a history
510 This is to be called when adjusting the dirstate to a new parent after a history
492 rewriting operation.
511 rewriting operation.
493
512
494 It should not be called during a merge (p2 != nullid) and only within
513 It should not be called during a merge (p2 != nullid) and only within
495 a `with dirstate.parentchange():` context.
514 a `with dirstate.parentchange():` context.
496 """
515 """
497 if self.in_merge:
516 if self.in_merge:
498 msg = b'update_file_reference should not be called when merging'
517 msg = b'update_file_reference should not be called when merging'
499 raise error.ProgrammingError(msg)
518 raise error.ProgrammingError(msg)
500 entry = self._map.get(filename)
519 entry = self._map.get(filename)
501 if entry is None:
520 if entry is None:
502 wc_tracked = False
521 wc_tracked = False
503 else:
522 else:
504 wc_tracked = entry.tracked
523 wc_tracked = entry.tracked
505 if p1_tracked and wc_tracked:
524 if p1_tracked and wc_tracked:
506 # the underlying reference might have changed, we will have to
525 # the underlying reference might have changed, we will have to
507 # check it.
526 # check it.
508 self.normallookup(filename)
527 self.normallookup(filename)
509 elif not (p1_tracked or wc_tracked):
528 elif not (p1_tracked or wc_tracked):
510 # the file is no longer relevant to anyone
529 # the file is no longer relevant to anyone
511 self._drop(filename)
530 self._drop(filename)
512 elif (not p1_tracked) and wc_tracked:
531 elif (not p1_tracked) and wc_tracked:
513 if not entry.added:
532 if not entry.added:
514 self._add(filename)
533 self._add(filename)
515 elif p1_tracked and not wc_tracked:
534 elif p1_tracked and not wc_tracked:
516 if entry is None or not entry.removed:
535 if entry is None or not entry.removed:
517 self._remove(filename)
536 self._remove(filename)
518 else:
537 else:
519 assert False, 'unreachable'
538 assert False, 'unreachable'
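The four ``(p1_tracked, wc_tracked)`` combinations above reduce to the
following toy dispatch (a sketch, not dirstate API)::

    def reference_action(p1_tracked, wc_tracked, entry_added=False):
        if p1_tracked and wc_tracked:
            return 'normallookup'  # still tracked; revalidate underlying data
        if not p1_tracked and not wc_tracked:
            return 'drop'          # no longer relevant to anyone
        if wc_tracked:             # tracked only in the working copy
            return 'noop' if entry_added else 'add'
        return 'remove'            # tracked only in p1; mark it removed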
520
539
521 def _addpath(
540 def _addpath(
522 self,
541 self,
523 f,
542 f,
524 mode=0,
543 mode=0,
525 size=None,
544 size=None,
526 mtime=None,
545 mtime=None,
527 added=False,
546 added=False,
528 merged=False,
547 merged=False,
529 from_p2=False,
548 from_p2=False,
530 possibly_dirty=False,
549 possibly_dirty=False,
531 ):
550 ):
532 entry = self._map.get(f)
551 entry = self._map.get(f)
533 if added or entry is not None and entry.removed:
552 if added or entry is not None and entry.removed:
534 scmutil.checkfilename(f)
553 scmutil.checkfilename(f)
535 if self._map.hastrackeddir(f):
554 if self._map.hastrackeddir(f):
536 msg = _(b'directory %r already in dirstate')
555 msg = _(b'directory %r already in dirstate')
537 msg %= pycompat.bytestr(f)
556 msg %= pycompat.bytestr(f)
538 raise error.Abort(msg)
557 raise error.Abort(msg)
539 # shadows
558 # shadows
540 for d in pathutil.finddirs(f):
559 for d in pathutil.finddirs(f):
541 if self._map.hastrackeddir(d):
560 if self._map.hastrackeddir(d):
542 break
561 break
543 entry = self._map.get(d)
562 entry = self._map.get(d)
544 if entry is not None and not entry.removed:
563 if entry is not None and not entry.removed:
545 msg = _(b'file %r in dirstate clashes with %r')
564 msg = _(b'file %r in dirstate clashes with %r')
546 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
565 msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
547 raise error.Abort(msg)
566 raise error.Abort(msg)
548 self._dirty = True
567 self._dirty = True
549 self._updatedfiles.add(f)
568 self._updatedfiles.add(f)
550 self._map.addfile(
569 self._map.addfile(
551 f,
570 f,
552 mode=mode,
571 mode=mode,
553 size=size,
572 size=size,
554 mtime=mtime,
573 mtime=mtime,
555 added=added,
574 added=added,
556 merged=merged,
575 merged=merged,
557 from_p2=from_p2,
576 from_p2=from_p2,
558 possibly_dirty=possibly_dirty,
577 possibly_dirty=possibly_dirty,
559 )
578 )
560
579
561 def normal(self, f, parentfiledata=None):
580 def normal(self, f, parentfiledata=None):
562 """Mark a file normal and clean.
581 """Mark a file normal and clean.
563
582
564 parentfiledata: (mode, size, mtime) of the clean file
583 parentfiledata: (mode, size, mtime) of the clean file
565
584
566 parentfiledata should be computed from memory (for mode,
585 parentfiledata should be computed from memory (for mode,
567 size), at or as close as possible to the point where we
586 size), at or as close as possible to the point where we
568 determined the file was clean, to limit the risk of the
587 determined the file was clean, to limit the risk of the
569 file having been changed by an external process between the
588 file having been changed by an external process between the
570 moment where the file was determined to be clean and now."""
589 moment where the file was determined to be clean and now."""
571 if parentfiledata:
590 if parentfiledata:
572 (mode, size, mtime) = parentfiledata
591 (mode, size, mtime) = parentfiledata
573 else:
592 else:
574 s = os.lstat(self._join(f))
593 s = os.lstat(self._join(f))
575 mode = s.st_mode
594 mode = s.st_mode
576 size = s.st_size
595 size = s.st_size
577 mtime = s[stat.ST_MTIME]
596 mtime = s[stat.ST_MTIME]
578 self._addpath(f, mode=mode, size=size, mtime=mtime)
597 self._addpath(f, mode=mode, size=size, mtime=mtime)
579 self._map.copymap.pop(f, None)
598 self._map.copymap.pop(f, None)
580 if f in self._map.nonnormalset:
599 if f in self._map.nonnormalset:
581 self._map.nonnormalset.remove(f)
600 self._map.nonnormalset.remove(f)
582 if mtime > self._lastnormaltime:
601 if mtime > self._lastnormaltime:
583 # Remember the most recent modification timeslot for status(),
602 # Remember the most recent modification timeslot for status(),
584 # to make sure we won't miss future size-preserving file content
603 # to make sure we won't miss future size-preserving file content
585 # modifications that happen within the same timeslot.
604 # modifications that happen within the same timeslot.
586 self._lastnormaltime = mtime
605 self._lastnormaltime = mtime
587
606
588 def normallookup(self, f):
607 def normallookup(self, f):
589 '''Mark a file normal, but possibly dirty.'''
608 '''Mark a file normal, but possibly dirty.'''
590 if self.in_merge:
609 if self.in_merge:
591 # if there is a merge going on and the file was either
610 # if there is a merge going on and the file was either
592 # "merged" or coming from other parent (-2) before
611 # "merged" or coming from other parent (-2) before
593 # being removed, restore that state.
612 # being removed, restore that state.
594 entry = self._map.get(f)
613 entry = self._map.get(f)
595 if entry is not None:
614 if entry is not None:
596 # XXX this should probably be dealt with at a lower level
615 # XXX this should probably be dealt with at a lower level
597 # (see `merged_removed` and `from_p2_removed`)
616 # (see `merged_removed` and `from_p2_removed`)
598 if entry.merged_removed or entry.from_p2_removed:
617 if entry.merged_removed or entry.from_p2_removed:
599 source = self._map.copymap.get(f)
618 source = self._map.copymap.get(f)
600 if entry.merged_removed:
619 if entry.merged_removed:
601 self.merge(f)
620 self.merge(f)
602 elif entry.from_p2_removed:
621 elif entry.from_p2_removed:
603 self.otherparent(f)
622 self.otherparent(f)
604 if source is not None:
623 if source is not None:
605 self.copy(source, f)
624 self.copy(source, f)
606 return
625 return
607 elif entry.merged or entry.from_p2:
626 elif entry.merged or entry.from_p2:
608 return
627 return
609 self._addpath(f, possibly_dirty=True)
628 self._addpath(f, possibly_dirty=True)
610 self._map.copymap.pop(f, None)
629 self._map.copymap.pop(f, None)
611
630
612 def otherparent(self, f):
631 def otherparent(self, f):
613 '''Mark as coming from the other parent, always dirty.'''
632 '''Mark as coming from the other parent, always dirty.'''
614 if not self.in_merge:
633 if not self.in_merge:
615 msg = _(b"setting %r to other parent only allowed in merges") % f
634 msg = _(b"setting %r to other parent only allowed in merges") % f
616 raise error.Abort(msg)
635 raise error.Abort(msg)
617 entry = self._map.get(f)
636 entry = self._map.get(f)
618 if entry is not None and entry.tracked:
637 if entry is not None and entry.tracked:
619 # merge-like
638 # merge-like
620 self._addpath(f, merged=True)
639 self._addpath(f, merged=True)
621 else:
640 else:
622 # add-like
641 # add-like
623 self._addpath(f, from_p2=True)
642 self._addpath(f, from_p2=True)
624 self._map.copymap.pop(f, None)
643 self._map.copymap.pop(f, None)
625
644
626 def add(self, f):
645 def add(self, f):
627 '''Mark a file added.'''
646 '''Mark a file added.'''
628 self._add(f)
647 self._add(f)
629
648
630 def _add(self, filename):
649 def _add(self, filename):
631 """internal function to mark a file as added"""
650 """internal function to mark a file as added"""
632 self._addpath(filename, added=True)
651 self._addpath(filename, added=True)
633 self._map.copymap.pop(filename, None)
652 self._map.copymap.pop(filename, None)
634
653
635 def remove(self, f):
654 def remove(self, f):
636 '''Mark a file removed'''
655 '''Mark a file removed'''
637 self._remove(f)
656 self._remove(f)
638
657
639 def _remove(self, filename):
658 def _remove(self, filename):
640 """internal function to mark a file removed"""
659 """internal function to mark a file removed"""
641 self._dirty = True
660 self._dirty = True
642 self._updatedfiles.add(filename)
661 self._updatedfiles.add(filename)
643 self._map.removefile(filename, in_merge=self.in_merge)
662 self._map.removefile(filename, in_merge=self.in_merge)
644
663
645 def merge(self, f):
664 def merge(self, f):
646 '''Mark a file merged.'''
665 '''Mark a file merged.'''
647 if not self.in_merge:
666 if not self.in_merge:
648 return self.normallookup(f)
667 return self.normallookup(f)
649 return self.otherparent(f)
668 return self.otherparent(f)
650
669
651 def drop(self, f):
670 def drop(self, f):
652 '''Drop a file from the dirstate'''
671 '''Drop a file from the dirstate'''
653 self._drop(f)
672 self._drop(f)
654
673
655 def _drop(self, filename):
674 def _drop(self, filename):
656 """internal function to drop a file from the dirstate"""
675 """internal function to drop a file from the dirstate"""
657 if self._map.dropfile(filename):
676 if self._map.dropfile(filename):
658 self._dirty = True
677 self._dirty = True
659 self._updatedfiles.add(filename)
678 self._updatedfiles.add(filename)
660 self._map.copymap.pop(filename, None)
679 self._map.copymap.pop(filename, None)
661
680
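    # Orientation sketch (an assumption-labelled summary, not from the
    # original file): the public methods above are thin state setters over
    # self._map:
    #
    #     dirstate.add(b'f')     # mark 'f' added (tracked, not in a parent)
    #     dirstate.remove(b'f')  # mark 'f' removed (gone at next commit)
    #     dirstate.merge(b'f')   # mark 'f' merged; outside of a merge this
    #                            # degrades to normallookup()
    #     dirstate.drop(b'f')    # forget 'f' entirely: the entry disappears
    #                            # without touching the working file
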
    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

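    # Hypothetical example (paths assumed): on a case-folding filesystem,
    # normalize() maps user-typed case to the case already recorded in the
    # dirstate or on disk; elsewhere it is the identity:
    #
    #     # dirstate already tracks b'README.txt'
    #     dirstate.normalize(b'readme.TXT')             # -> b'README.txt'
    #     dirstate.normalize(b'new.txt', isknown=True)  # no fs access
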
    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if
            # it's large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True

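    # Worked example for the changedfiles partition above (values assumed):
    # with allfiles = {b'a', b'b'} and changedfiles = [b'a', b'c'],
    # to_lookup == [b'a'] (still present, re-checked via normallookup) and
    # to_drop == [b'c'] (no longer in the target manifest, dropped).
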
    def identity(self):
        """Return identity of dirstate itself to detect changes in storage

        If the identity of the previous dirstate is equal to this one's,
        writing out changes based on the former dirstate can keep
        consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps.
            # delayed writing re-raises the "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

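    # Hypothetical registration sketch; the category string and callback
    # body are assumptions for illustration:
    #
    #     def _on_parents_changed(dirstate, old, new):
    #         print("parents moved from %r to %r" % (old, new))
    #
    #     dirstate.addparentchangecallback(
    #         b'my-extension', _on_parents_changed
    #     )
    #
    # Re-registering with the same category replaces the earlier callback.
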
    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # deferred to avoid a useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False

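    # Worked example for the delaywrite rounding above (numbers assumed):
    # with delaywrite = 2 and clock = 13.7, start = 13 - (13 % 2) = 12 and
    # end = 14, so we sleep 0.3s and treat 14 as 'now' -- i.e. we wake at
    # the next multiple of `delaywrite` seconds rather than sleeping a
    # fixed duration.
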
    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

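    # Hypothetical call sketch (path, file name, and line number are
    # illustrative values only): given a file ignored by a pattern on line 3
    # of .hgignore, the method reports where the decision came from:
    #
    #     dirstate._ignorefileandline(b'build/out.o')
    #     # -> (b'/repo/.hgignore', 3, b'build/')
    #
    # and (None, -1, b"") when no pattern file matches.
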
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case-insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

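    # Hypothetical driver sketch (matcher and flags are assumptions): stat
    # every matched file under the root, tracked or unknown, skipping
    # ignored ones:
    #
    #     m = matchmod.always()
    #     res = dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
    #     for fn, st in pycompat.iteritems(res):
    #         ...  # st is stat-like, or None for dirstate-only entries
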
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)

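    # Hypothetical call sketch mirroring the docstring above (matcher and
    # flags are assumptions):
    #
    #     unsure, st = dirstate.status(
    #         matchmod.always(), [], ignored=False, clean=False, unknown=True
    #     )
    #     # st.modified / st.added / st.removed / st.deleted are lists;
    #     # files in `unsure` need a content read to classify for certain.
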
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just return
            # that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to write changes certainly,
        # because the latter omits writing out if a transaction is running.
        # The output file will be used to create a backup of the dirstate at
        # this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that the pending file written above is unlinked on
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore dirstate by backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)
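
    # Hypothetical rollback sketch using the three methods above (the backup
    # name is an assumption):
    #
    #     dirstate.savebackup(tr, b'dirstate.backup')
    #     try:
    #         ...  # mutate the working copy / dirstate
    #         dirstate.clearbackup(tr, b'dirstate.backup')
    #     except Exception:
    #         dirstate.restorebackup(tr, b'dirstate.backup')
    #         raise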