dirstate: add a `set_tracked` method for "hg add"-like usage...
marmoute
r48393:f927ad5a default
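This changeset teaches the dirstate wrapper classes a `set_tracked` method, the new API for marking a file tracked the way `hg add` does; callers that previously reached for the lower-level `add()` primitive can use it instead. A rough sketch of the calling convention this enables (the caller code below is illustrative only, not part of this change; it assumes `repo` is an open Mercurial repository and `files_to_add` holds repo-relative paths):

    # Hypothetical caller, sketching the "hg add"-like usage named in the
    # commit message; assumes dirstate mutation happens under the write lock.
    with repo.wlock():
        for f in files_to_add:
            repo.dirstate.set_tracked(f)  # start tracking f, like `hg add f`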
@@ -1,784 +1,787 @@
 # Copyright 2009-2010 Gregory P. Ward
 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
 # Copyright 2010-2011 Fog Creek Software
 # Copyright 2010-2011 Unity Technologies
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 '''largefiles utility code: must not import other modules in this package.'''
 from __future__ import absolute_import
 
 import contextlib
 import copy
 import os
 import stat
 
 from mercurial.i18n import _
 from mercurial.node import hex
 from mercurial.pycompat import open
 
 from mercurial import (
     dirstate,
     encoding,
     error,
     httpconnection,
     match as matchmod,
     pycompat,
     requirements,
     scmutil,
     sparse,
     util,
     vfs as vfsmod,
 )
 from mercurial.utils import hashutil
 
 shortname = b'.hglf'
 shortnameslash = shortname + b'/'
 longname = b'largefiles'
 
 # -- Private worker functions ------------------------------------------
 
 
 @contextlib.contextmanager
 def lfstatus(repo, value=True):
     oldvalue = getattr(repo, 'lfstatus', False)
     repo.lfstatus = value
     try:
         yield
     finally:
         repo.lfstatus = oldvalue
 
 
 def getminsize(ui, assumelfiles, opt, default=10):
     lfsize = opt
     if not lfsize and assumelfiles:
         lfsize = ui.config(longname, b'minsize', default=default)
     if lfsize:
         try:
             lfsize = float(lfsize)
         except ValueError:
             raise error.Abort(
                 _(b'largefiles: size must be number (not %s)\n') % lfsize
             )
     if lfsize is None:
         raise error.Abort(_(b'minimum size for largefiles must be specified'))
     return lfsize
 
 
 def link(src, dest):
     """Try to create hardlink - if that fails, efficiently make a copy."""
     util.makedirs(os.path.dirname(dest))
     try:
         util.oslink(src, dest)
     except OSError:
         # if hardlinks fail, fallback on atomic copy
         with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
             for chunk in util.filechunkiter(srcf):
                 dstf.write(chunk)
         os.chmod(dest, os.stat(src).st_mode)
 
 
 def usercachepath(ui, hash):
     """Return the correct location in the "global" largefiles cache for a file
     with the given hash.
     This cache is used for sharing of largefiles across repositories - both
     to preserve download bandwidth and storage space."""
     return os.path.join(_usercachedir(ui), hash)
 
 
 def _usercachedir(ui, name=longname):
     '''Return the location of the "global" largefiles cache.'''
     path = ui.configpath(name, b'usercache')
     if path:
         return path
 
     hint = None
 
     if pycompat.iswindows:
         appdata = encoding.environ.get(
             b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
         )
         if appdata:
             return os.path.join(appdata, name)
 
         hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
             b"LOCALAPPDATA",
             b"APPDATA",
             name,
         )
     elif pycompat.isdarwin:
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'Library', b'Caches', name)
 
         hint = _(b"define %s in the environment, or set %s.usercache") % (
             b"HOME",
             name,
         )
     elif pycompat.isposix:
         path = encoding.environ.get(b'XDG_CACHE_HOME')
         if path:
             return os.path.join(path, name)
         home = encoding.environ.get(b'HOME')
         if home:
             return os.path.join(home, b'.cache', name)
 
         hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
             b"XDG_CACHE_HOME",
             b"HOME",
             name,
         )
     else:
         raise error.Abort(
             _(b'unknown operating system: %s\n') % pycompat.osname
         )
 
     raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)
 
 
 def inusercache(ui, hash):
     path = usercachepath(ui, hash)
     return os.path.exists(path)
 
 
 def findfile(repo, hash):
     """Return store path of the largefile with the specified hash.
     As a side effect, the file might be linked from user cache.
     Return None if the file can't be found locally."""
     path, exists = findstorepath(repo, hash)
     if exists:
         repo.ui.note(_(b'found %s in store\n') % hash)
         return path
     elif inusercache(repo.ui, hash):
         repo.ui.note(_(b'found %s in system cache\n') % hash)
         path = storepath(repo, hash)
         link(usercachepath(repo.ui, hash), path)
         return path
     return None
 
 
 class largefilesdirstate(dirstate.dirstate):
     def __getitem__(self, key):
         return super(largefilesdirstate, self).__getitem__(unixpath(key))
 
+    def set_tracked(self, f):
+        return super(largefilesdirstate, self).set_tracked(unixpath(f))
+
     def normal(self, f):
         return super(largefilesdirstate, self).normal(unixpath(f))
 
     def remove(self, f):
         return super(largefilesdirstate, self).remove(unixpath(f))
 
     def add(self, f):
         return super(largefilesdirstate, self).add(unixpath(f))
 
     def drop(self, f):
         return super(largefilesdirstate, self).drop(unixpath(f))
 
     def forget(self, f):
         return super(largefilesdirstate, self).forget(unixpath(f))
 
     def normallookup(self, f):
         return super(largefilesdirstate, self).normallookup(unixpath(f))
 
     def _ignore(self, f):
         return False
 
     def write(self, tr=False):
         # (1) disable PENDING mode always
         #     (lfdirstate isn't yet managed as a part of the transaction)
         # (2) avoid develwarn 'use dirstate.write with ....'
         super(largefilesdirstate, self).write(None)
 
 
 def openlfdirstate(ui, repo, create=True):
     """
     Return a dirstate object that tracks largefiles: i.e. its root is
     the repo root, but it is saved in .hg/largefiles/dirstate.
     """
     vfs = repo.vfs
     lfstoredir = longname
     opener = vfsmod.vfs(vfs.join(lfstoredir))
     use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
     lfdirstate = largefilesdirstate(
         opener,
         ui,
         repo.root,
         repo.dirstate._validate,
         lambda: sparse.matcher(repo),
         repo.nodeconstants,
         use_dirstate_v2,
     )
 
     # If the largefiles dirstate does not exist, populate and create
     # it. This ensures that we create it on the first meaningful
     # largefiles operation in a new clone.
     if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
         matcher = getstandinmatcher(repo)
         standins = repo.dirstate.walk(
             matcher, subrepos=[], unknown=False, ignored=False
         )
 
         if len(standins) > 0:
             vfs.makedirs(lfstoredir)
 
         for standin in standins:
             lfile = splitstandin(standin)
             lfdirstate.normallookup(lfile)
     return lfdirstate
 
 
 def lfdirstatestatus(lfdirstate, repo):
     pctx = repo[b'.']
     match = matchmod.always()
     unsure, s = lfdirstate.status(
         match, subrepos=[], ignored=False, clean=False, unknown=False
     )
     modified, clean = s.modified, s.clean
     for lfile in unsure:
         try:
             fctx = pctx[standin(lfile)]
         except LookupError:
             fctx = None
         if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
             modified.append(lfile)
         else:
             clean.append(lfile)
             lfdirstate.normal(lfile)
     return s
 
 
 def listlfiles(repo, rev=None, matcher=None):
     """return a list of largefiles in the working copy or the
     specified changeset"""
 
     if matcher is None:
         matcher = getstandinmatcher(repo)
 
     # ignore unknown files in working directory
     return [
         splitstandin(f)
         for f in repo[rev].walk(matcher)
         if rev is not None or repo.dirstate[f] != b'?'
     ]
 
 
 def instore(repo, hash, forcelocal=False):
     '''Return true if a largefile with the given hash exists in the store'''
     return os.path.exists(storepath(repo, hash, forcelocal))
 
 
 def storepath(repo, hash, forcelocal=False):
     """Return the correct location in the repository largefiles store for a
     file with the given hash."""
     if not forcelocal and repo.shared():
         return repo.vfs.reljoin(repo.sharedpath, longname, hash)
     return repo.vfs.join(longname, hash)
 
 
 def findstorepath(repo, hash):
     """Search through the local store path(s) to find the file for the given
     hash. If the file is not found, its path in the primary store is returned.
     The return value is a tuple of (path, exists(path)).
     """
     # For shared repos, the primary store is in the share source. But for
     # backward compatibility, force a lookup in the local store if it wasn't
     # found in the share source.
     path = storepath(repo, hash, False)
 
     if instore(repo, hash):
         return (path, True)
     elif repo.shared() and instore(repo, hash, True):
         return storepath(repo, hash, True), True
 
     return (path, False)
 
 
 def copyfromcache(repo, hash, filename):
     """Copy the specified largefile from the repo or system cache to
     filename in the repository. Return true on success or false if the
     file was not found in either cache (which should not happened:
     this is meant to be called only after ensuring that the needed
     largefile exists in the cache)."""
     wvfs = repo.wvfs
     path = findfile(repo, hash)
     if path is None:
         return False
     wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
     # The write may fail before the file is fully written, but we
     # don't use atomic writes in the working copy.
     with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
         gothash = copyandhash(util.filechunkiter(srcfd), destfd)
     if gothash != hash:
         repo.ui.warn(
             _(b'%s: data corruption in %s with hash %s\n')
             % (filename, path, gothash)
         )
         wvfs.unlink(filename)
         return False
     return True
 
 
 def copytostore(repo, ctx, file, fstandin):
     wvfs = repo.wvfs
     hash = readasstandin(ctx[fstandin])
     if instore(repo, hash):
         return
     if wvfs.exists(file):
         copytostoreabsolute(repo, wvfs.join(file), hash)
     else:
         repo.ui.warn(
             _(b"%s: largefile %s not available from local store\n")
             % (file, hash)
         )
 
 
 def copyalltostore(repo, node):
     '''Copy all largefiles in a given revision to the store'''
 
     ctx = repo[node]
     for filename in ctx.files():
         realfile = splitstandin(filename)
         if realfile is not None and filename in ctx.manifest():
             copytostore(repo, ctx, realfile, filename)
 
 
 def copytostoreabsolute(repo, file, hash):
     if inusercache(repo.ui, hash):
         link(usercachepath(repo.ui, hash), storepath(repo, hash))
     else:
         util.makedirs(os.path.dirname(storepath(repo, hash)))
         with open(file, b'rb') as srcf:
             with util.atomictempfile(
                 storepath(repo, hash), createmode=repo.store.createmode
             ) as dstf:
                 for chunk in util.filechunkiter(srcf):
                     dstf.write(chunk)
         linktousercache(repo, hash)
 
 
 def linktousercache(repo, hash):
     """Link / copy the largefile with the specified hash from the store
     to the cache."""
     path = usercachepath(repo.ui, hash)
     link(storepath(repo, hash), path)
 
 
 def getstandinmatcher(repo, rmatcher=None):
     '''Return a match object that applies rmatcher to the standin directory'''
     wvfs = repo.wvfs
     standindir = shortname
 
     # no warnings about missing files or directories
     badfn = lambda f, msg: None
 
     if rmatcher and not rmatcher.always():
         pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
         if not pats:
             pats = [wvfs.join(standindir)]
         match = scmutil.match(repo[None], pats, badfn=badfn)
     else:
         # no patterns: relative to repo root
         match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
     return match
 
 
 def composestandinmatcher(repo, rmatcher):
     """Return a matcher that accepts standins corresponding to the
     files accepted by rmatcher. Pass the list of files in the matcher
     as the paths specified by the user."""
     smatcher = getstandinmatcher(repo, rmatcher)
     isstandin = smatcher.matchfn
 
     def composedmatchfn(f):
         return isstandin(f) and rmatcher.matchfn(splitstandin(f))
 
     smatcher.matchfn = composedmatchfn
 
     return smatcher
 
 
 def standin(filename):
     """Return the repo-relative path to the standin for the specified big
     file."""
     # Notes:
     # 1) Some callers want an absolute path, but for instance addlargefiles
     #    needs it repo-relative so it can be passed to repo[None].add(). So
     #    leave it up to the caller to use repo.wjoin() to get an absolute path.
     # 2) Join with '/' because that's what dirstate always uses, even on
     #    Windows. Change existing separator to '/' first in case we are
     #    passed filenames from an external source (like the command line).
     return shortnameslash + util.pconvert(filename)
 
 
 def isstandin(filename):
     """Return true if filename is a big file standin. filename must be
     in Mercurial's internal form (slash-separated)."""
     return filename.startswith(shortnameslash)
 
 
 def splitstandin(filename):
     # Split on / because that's what dirstate always uses, even on Windows.
     # Change local separator to / first just in case we are passed filenames
     # from an external source (like the command line).
     bits = util.pconvert(filename).split(b'/', 1)
     if len(bits) == 2 and bits[0] == shortname:
         return bits[1]
     else:
         return None
 
 
 def updatestandin(repo, lfile, standin):
     """Re-calculate hash value of lfile and write it into standin
 
     This assumes that "lfutil.standin(lfile) == standin", for efficiency.
     """
     file = repo.wjoin(lfile)
     if repo.wvfs.exists(lfile):
         hash = hashfile(file)
         executable = getexecutable(file)
         writestandin(repo, standin, hash, executable)
     else:
         raise error.Abort(_(b'%s: file not found!') % lfile)
 
 
 def readasstandin(fctx):
     """read hex hash from given filectx of standin file
 
     This encapsulates how "standin" data is stored into storage layer."""
     return fctx.data().strip()
 
 
 def writestandin(repo, standin, hash, executable):
     '''write hash to <repo.root>/<standin>'''
     repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')
 
 
 def copyandhash(instream, outfile):
     """Read bytes from instream (iterable) and write them to outfile,
     computing the SHA-1 hash of the data along the way. Return the hash."""
     hasher = hashutil.sha1(b'')
     for data in instream:
         hasher.update(data)
         outfile.write(data)
     return hex(hasher.digest())
 
 
 def hashfile(file):
     if not os.path.exists(file):
         return b''
     with open(file, b'rb') as fd:
         return hexsha1(fd)
 
 
 def getexecutable(filename):
     mode = os.stat(filename).st_mode
     return (
         (mode & stat.S_IXUSR)
         and (mode & stat.S_IXGRP)
         and (mode & stat.S_IXOTH)
     )
 
 
 def urljoin(first, second, *arg):
     def join(left, right):
         if not left.endswith(b'/'):
             left += b'/'
         if right.startswith(b'/'):
             right = right[1:]
         return left + right
 
     url = join(first, second)
     for a in arg:
         url = join(url, a)
     return url
 
 
 def hexsha1(fileobj):
     """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
     object data"""
     h = hashutil.sha1()
     for chunk in util.filechunkiter(fileobj):
         h.update(chunk)
     return hex(h.digest())
 
 
 def httpsendfile(ui, filename):
     return httpconnection.httpsendfile(ui, filename, b'rb')
 
 
 def unixpath(path):
     '''Return a version of path normalized for use with the lfdirstate.'''
     return util.pconvert(os.path.normpath(path))
 
 
 def islfilesrepo(repo):
     '''Return true if the repo is a largefile repo.'''
     if b'largefiles' in repo.requirements and any(
         shortnameslash in f[1] for f in repo.store.datafiles()
     ):
         return True
 
     return any(openlfdirstate(repo.ui, repo, False))
 
 
 class storeprotonotcapable(Exception):
     def __init__(self, storetypes):
         self.storetypes = storetypes
 
 
 def getstandinsstate(repo):
     standins = []
     matcher = getstandinmatcher(repo)
     wctx = repo[None]
     for standin in repo.dirstate.walk(
         matcher, subrepos=[], unknown=False, ignored=False
     ):
         lfile = splitstandin(standin)
         try:
             hash = readasstandin(wctx[standin])
         except IOError:
             hash = None
         standins.append((lfile, hash))
     return standins
 
 
 def synclfdirstate(repo, lfdirstate, lfile, normallookup):
     lfstandin = standin(lfile)
     if lfstandin in repo.dirstate:
         stat = repo.dirstate._map[lfstandin]
         state, mtime = stat.state, stat.mtime
     else:
         state, mtime = b'?', -1
     if state == b'n':
         if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
             # state 'n' doesn't ensure 'clean' in this case
             lfdirstate.normallookup(lfile)
         else:
             lfdirstate.normal(lfile)
     elif state == b'm':
         lfdirstate.normallookup(lfile)
     elif state == b'r':
         lfdirstate.remove(lfile)
     elif state == b'a':
         lfdirstate.add(lfile)
     elif state == b'?':
         lfdirstate.drop(lfile)
 
 
 def markcommitted(orig, ctx, node):
     repo = ctx.repo()
 
     orig(node)
 
     # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
     # because files coming from the 2nd parent are omitted in the latter.
     #
     # The former should be used to get targets of "synclfdirstate",
     # because such files:
     # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
     # - have to be marked as "n" after commit, but
     # - aren't listed in "repo[node].files()"
 
     lfdirstate = openlfdirstate(repo.ui, repo)
     for f in ctx.files():
         lfile = splitstandin(f)
         if lfile is not None:
             synclfdirstate(repo, lfdirstate, lfile, False)
     lfdirstate.write()
 
     # As part of committing, copy all of the largefiles into the cache.
     #
     # Using "node" instead of "ctx" implies additional "repo[node]"
     # lookup while copyalltostore(), but can omit redundant check for
     # files comming from the 2nd parent, which should exist in store
     # at merging.
     copyalltostore(repo, node)
 
 
 def getlfilestoupdate(oldstandins, newstandins):
     changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
     filelist = []
     for f in changedstandins:
         if f[0] not in filelist:
             filelist.append(f[0])
     return filelist
 
 
 def getlfilestoupload(repo, missing, addfunc):
     makeprogress = repo.ui.makeprogress
     with makeprogress(
         _(b'finding outgoing largefiles'),
         unit=_(b'revisions'),
         total=len(missing),
     ) as progress:
         for i, n in enumerate(missing):
             progress.update(i)
             parents = [p for p in repo[n].parents() if p != repo.nullid]
 
             with lfstatus(repo, value=False):
                 ctx = repo[n]
 
             files = set(ctx.files())
             if len(parents) == 2:
                 mc = ctx.manifest()
                 mp1 = ctx.p1().manifest()
                 mp2 = ctx.p2().manifest()
                 for f in mp1:
                     if f not in mc:
                         files.add(f)
                 for f in mp2:
                     if f not in mc:
                         files.add(f)
                 for f in mc:
                     if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                         files.add(f)
             for fn in files:
                 if isstandin(fn) and fn in ctx:
                     addfunc(fn, readasstandin(ctx[fn]))
 
 
 def updatestandinsbymatch(repo, match):
     """Update standins in the working directory according to specified match
 
     This returns (possibly modified) ``match`` object to be used for
     subsequent commit process.
     """
 
     ui = repo.ui
 
     # Case 1: user calls commit with no specific files or
     # include/exclude patterns: refresh and commit all files that
     # are "dirty".
     if match is None or match.always():
         # Spend a bit of time here to get a list of files we know
         # are modified so we can compare only against those.
         # It can cost a lot of time (several seconds)
         # otherwise to update all standins if the largefiles are
         # large.
         lfdirstate = openlfdirstate(ui, repo)
         dirtymatch = matchmod.always()
         unsure, s = lfdirstate.status(
             dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
         )
         modifiedfiles = unsure + s.modified + s.added + s.removed
         lfiles = listlfiles(repo)
         # this only loops through largefiles that exist (not
         # removed/renamed)
         for lfile in lfiles:
             if lfile in modifiedfiles:
                 fstandin = standin(lfile)
                 if repo.wvfs.exists(fstandin):
                     # this handles the case where a rebase is being
                     # performed and the working copy is not updated
                     # yet.
                     if repo.wvfs.exists(lfile):
                         updatestandin(repo, lfile, fstandin)
 
         return match
 
     lfiles = listlfiles(repo)
     match._files = repo._subdirlfs(match.files(), lfiles)
 
     # Case 2: user calls commit with specified patterns: refresh
     # any matching big files.
     smatcher = composestandinmatcher(repo, match)
     standins = repo.dirstate.walk(
         smatcher, subrepos=[], unknown=False, ignored=False
     )
 
     # No matching big files: get out of the way and pass control to
     # the usual commit() method.
     if not standins:
         return match
 
     # Refresh all matching big files. It's possible that the
     # commit will end up failing, in which case the big files will
     # stay refreshed. No harm done: the user modified them and
     # asked to commit them, so sooner or later we're going to
     # refresh the standins. Might as well leave them refreshed.
     lfdirstate = openlfdirstate(ui, repo)
     for fstandin in standins:
         lfile = splitstandin(fstandin)
         if lfdirstate[lfile] != b'r':
             updatestandin(repo, lfile, fstandin)
 
     # Cook up a new matcher that only matches regular files or
     # standins corresponding to the big files requested by the
     # user. Have to modify _files to prevent commit() from
     # complaining "not tracked" for big files.
     match = copy.copy(match)
     origmatchfn = match.matchfn
 
     # Check both the list of largefiles and the list of
     # standins because if a largefile was removed, it
     # won't be in the list of largefiles at this point
     match._files += sorted(standins)
 
     actualfiles = []
     for f in match._files:
         fstandin = standin(f)
 
         # For largefiles, only one of the normal and standin should be
         # committed (except if one of them is a remove). In the case of a
         # standin removal, drop the normal file if it is unknown to dirstate.
         # Thus, skip plain largefile names but keep the standin.
         if f in lfiles or fstandin in standins:
             if repo.dirstate[fstandin] != b'r':
                 if repo.dirstate[f] != b'r':
                     continue
             elif repo.dirstate[f] == b'?':
                 continue
 
         actualfiles.append(f)
     match._files = actualfiles
 
     def matchfn(f):
         if origmatchfn(f):
             return f not in lfiles
         else:
             return f in standins
 
     match.matchfn = matchfn
 
     return match
 
 
 class automatedcommithook(object):
     """Stateful hook to update standins at the 1st commit of resuming
 
     For efficiency, updating standins in the working directory should
     be avoided while automated committing (like rebase, transplant and
     so on), because they should be updated before committing.
 
     But the 1st commit of resuming automated committing (e.g. ``rebase
     --continue``) should update them, because largefiles may be
     modified manually.
     """
 
     def __init__(self, resuming):
         self.resuming = resuming
 
     def __call__(self, repo, match):
         if self.resuming:
             self.resuming = False  # avoids updating at subsequent commits
             return updatestandinsbymatch(repo, match)
         else:
             return match
 
 
 def getstatuswriter(ui, repo, forcibly=None):
     """Return the function to write largefiles specific status out
 
     If ``forcibly`` is ``None``, this returns the last element of
     ``repo._lfstatuswriters`` as "default" writer function.
 
     Otherwise, this returns the function to always write out (or
     ignore if ``not forcibly``) status.
     """
     if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
         return repo._lfstatuswriters[-1]
     else:
         if forcibly:
             return ui.status  # forcibly WRITE OUT
         else:
             return lambda *msg, **opts: None  # forcibly IGNORE
@@ -1,68 +1,72 @@
 # narrowdirstate.py - extensions to mercurial dirstate to support narrow clones
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 from mercurial.i18n import _
 from mercurial import error
 
 
 def wrapdirstate(repo, dirstate):
     """Add narrow spec dirstate ignore, block changes outside narrow spec."""
 
     def _editfunc(fn):
         def _wrapper(self, *args, **kwargs):
             narrowmatch = repo.narrowmatch()
             for f in args:
                 if f is not None and not narrowmatch(f) and f not in self:
                     raise error.Abort(
                         _(
                             b"cannot track '%s' - it is outside "
                             + b"the narrow clone"
                         )
                         % f
                     )
             return fn(self, *args, **kwargs)
 
         return _wrapper
 
     class narrowdirstate(dirstate.__class__):
         # Prevent adding/editing/copying/deleting files that are outside the
         # sparse checkout
         @_editfunc
         def normal(self, *args, **kwargs):
             return super(narrowdirstate, self).normal(*args, **kwargs)
 
         @_editfunc
+        def set_tracked(self, *args):
+            return super(narrowdirstate, self).set_tracked(*args)
+
+        @_editfunc
         def add(self, *args):
             return super(narrowdirstate, self).add(*args)
 
         @_editfunc
         def normallookup(self, *args):
             return super(narrowdirstate, self).normallookup(*args)
 
         @_editfunc
         def copy(self, *args):
             return super(narrowdirstate, self).copy(*args)
 
         @_editfunc
         def remove(self, *args):
             return super(narrowdirstate, self).remove(*args)
 
         @_editfunc
         def merge(self, *args):
             return super(narrowdirstate, self).merge(*args)
 
         def rebuild(self, parent, allfiles, changedfiles=None):
             if changedfiles is None:
                 # Rebuilding entire dirstate, let's filter allfiles to match the
                 # narrowspec.
                 allfiles = [f for f in allfiles if repo.narrowmatch()(f)]
             super(narrowdirstate, self).rebuild(parent, allfiles, changedfiles)
 
     dirstate.__class__ = narrowdirstate
     return dirstate
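In the narrow extension above, `set_tracked` receives the same `_editfunc` guard as the other mutating methods, so an attempt to track a file outside the narrow spec aborts before the base dirstate is touched. A minimal standalone sketch of that guard pattern (the names below are illustrative only, not Mercurial APIs):

    def guarded(fn):
        # Check every path argument against an allow-predicate before
        # delegating to the wrapped dirstate-mutating method.
        def wrapper(self, *args, **kwargs):
            for f in args:
                if f is not None and not self.allowed(f):
                    raise ValueError("cannot track %r - outside the narrow clone" % f)
            return fn(self, *args, **kwargs)
        return wrapper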
@@ -1,440 +1,441 b''
1 # sparse.py - allow sparse checkouts of the working directory
1 # sparse.py - allow sparse checkouts of the working directory
2 #
2 #
3 # Copyright 2014 Facebook, Inc.
3 # Copyright 2014 Facebook, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """allow sparse checkouts of the working directory (EXPERIMENTAL)
8 """allow sparse checkouts of the working directory (EXPERIMENTAL)
9
9
10 (This extension is not yet protected by backwards compatibility
(This extension is not yet protected by backwards compatibility
guarantees. Any aspect may break in future releases until this
notice is removed.)

This extension allows the working directory to only consist of a
subset of files for the revision. This allows specific files or
directories to be explicitly included or excluded. Many repository
operations have performance proportional to the number of files in
the working directory. So only realizing a subset of files in the
working directory can improve performance.

Sparse Config Files
-------------------

The set of files that are part of a sparse checkout are defined by
a sparse config file. The file defines 3 things: includes (files to
include in the sparse checkout), excludes (files to exclude from the
sparse checkout), and profiles (links to other config files).

The file format is newline delimited. Empty lines and lines beginning
with ``#`` are ignored.

Lines beginning with ``%include `` denote another sparse config file
to include. e.g. ``%include tests.sparse``. The filename is relative
to the repository root.

The special lines ``[include]`` and ``[exclude]`` denote the section
for includes and excludes that follow, respectively. It is illegal to
have ``[include]`` after ``[exclude]``.

Non-special lines resemble file patterns to be added to either includes
or excludes. The syntax of these lines is documented by :hg:`help patterns`.
Patterns are interpreted as ``glob:`` by default and match against the
root of the repository.

Exclusion patterns take precedence over inclusion patterns. So even
if a file is explicitly included, an ``[exclude]`` entry can remove it.

For example, say you have a repository with 3 directories, ``frontend/``,
``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond
to different projects and it is uncommon for someone working on one
to need the files for the other. But ``tools/`` contains files shared
between both projects. Your sparse config files may resemble::

  # frontend.sparse
  frontend/**
  tools/**

  # backend.sparse
  backend/**
  tools/**

Say the backend grows in size. Or there's a directory with thousands
of files you wish to exclude. You can modify the profile to exclude
certain files::

  [include]
  backend/**
  tools/**

  [exclude]
  tools/tests/**
"""

from __future__ import absolute_import

from mercurial.i18n import _
from mercurial.pycompat import setattr
from mercurial import (
    commands,
    dirstate,
    error,
    extensions,
    logcmdutil,
    match as matchmod,
    merge as mergemod,
    pycompat,
    registrar,
    sparse,
    util,
)

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

cmdtable = {}
command = registrar.command(cmdtable)


def extsetup(ui):
    sparse.enabled = True

    _setupclone(ui)
    _setuplog(ui)
    _setupadd(ui)
    _setupdirstate(ui)


def replacefilecache(cls, propname, replacement):
    """Replace a filecache property with a new class. This allows changing the
    cache invalidation condition."""
    origcls = cls
    assert callable(replacement)
    while cls is not object:
        if propname in cls.__dict__:
            orig = cls.__dict__[propname]
            setattr(cls, propname, replacement(orig))
            break
        cls = cls.__bases__[0]

    if cls is object:
        raise AttributeError(
            _(b"type '%s' has no property '%s'") % (origcls, propname)
        )


def _setuplog(ui):
    entry = commands.table[b'log|history']
    entry[1].append(
        (
            b'',
            b'sparse',
            None,
            b"limit to changesets affecting the sparse checkout",
        )
    )

    def _initialrevs(orig, repo, wopts):
        revs = orig(repo, wopts)
        if wopts.opts.get(b'sparse'):
            sparsematch = sparse.matcher(repo)

            def ctxmatch(rev):
                ctx = repo[rev]
                return any(f for f in ctx.files() if sparsematch(f))

            revs = revs.filter(ctxmatch)
        return revs

    extensions.wrapfunction(logcmdutil, b'_initialrevs', _initialrevs)


def _clonesparsecmd(orig, ui, repo, *args, **opts):
    include_pat = opts.get('include')
    exclude_pat = opts.get('exclude')
    enableprofile_pat = opts.get('enable_profile')
    narrow_pat = opts.get('narrow')
    include = exclude = enableprofile = False
    if include_pat:
        pat = include_pat
        include = True
    if exclude_pat:
        pat = exclude_pat
        exclude = True
    if enableprofile_pat:
        pat = enableprofile_pat
        enableprofile = True
    if sum([include, exclude, enableprofile]) > 1:
        raise error.Abort(_(b"too many flags specified."))
    # if --narrow is passed, it means they are includes and excludes for narrow
    # clone
    if not narrow_pat and (include or exclude or enableprofile):

        def clonesparse(orig, ctx, *args, **kwargs):
            sparse.updateconfig(
                ctx.repo().unfiltered(),
                pat,
                {},
                include=include,
                exclude=exclude,
                enableprofile=enableprofile,
                usereporootpaths=True,
            )
            return orig(ctx, *args, **kwargs)

        extensions.wrapfunction(mergemod, b'update', clonesparse)
    return orig(ui, repo, *args, **opts)


def _setupclone(ui):
    entry = commands.table[b'clone']
    entry[1].append((b'', b'enable-profile', [], b'enable a sparse profile'))
    entry[1].append((b'', b'include', [], b'include sparse pattern'))
    entry[1].append((b'', b'exclude', [], b'exclude sparse pattern'))
    extensions.wrapcommand(commands.table, b'clone', _clonesparsecmd)


def _setupadd(ui):
    entry = commands.table[b'add']
    entry[1].append(
        (
            b's',
            b'sparse',
            None,
            b'also include directories of added files in sparse config',
        )
    )

    def _add(orig, ui, repo, *pats, **opts):
        if opts.get('sparse'):
            dirs = set()
            for pat in pats:
                dirname, basename = util.split(pat)
                dirs.add(dirname)
            sparse.updateconfig(repo, list(dirs), opts, include=True)
        return orig(ui, repo, *pats, **opts)

    extensions.wrapcommand(commands.table, b'add', _add)
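
# Editor's note: an illustrative sketch, not part of the original file.
# With the wrapper above, `hg add --sparse` first widens the sparse config
# by the directory of each added file, then performs the normal add, so the
# dirstate guard installed by _setupdirstate() below does not abort. Roughly:
#
#   $ hg add -s src/new/module.py
#   # is approximately equivalent to:
#   $ hg debugsparse --include src/new
#   $ hg add src/new/module.py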


def _setupdirstate(ui):
    """Modify the dirstate to prevent stat'ing excluded files,
    and to prevent modifications to files outside the checkout.
    """

    def walk(orig, self, match, subrepos, unknown, ignored, full=True):
        # hack to not exclude explicitly-specified paths so that they can
        # be warned later on e.g. dirstate.add()
        em = matchmod.exact(match.files())
        sm = matchmod.unionmatcher([self._sparsematcher, em])
        match = matchmod.intersectmatchers(match, sm)
        return orig(self, match, subrepos, unknown, ignored, full)

    extensions.wrapfunction(dirstate.dirstate, b'walk', walk)

    # dirstate.rebuild should not add non-matching files
    def _rebuild(orig, self, parent, allfiles, changedfiles=None):
        matcher = self._sparsematcher
        if not matcher.always():
            allfiles = [f for f in allfiles if matcher(f)]
            if changedfiles:
                changedfiles = [f for f in changedfiles if matcher(f)]

            if changedfiles is not None:
                # In _rebuild, these files will be deleted from the dirstate
                # when they are not found to be in allfiles
                dirstatefilestoremove = {f for f in self if not matcher(f)}
                changedfiles = dirstatefilestoremove.union(changedfiles)

        return orig(self, parent, allfiles, changedfiles)

    extensions.wrapfunction(dirstate.dirstate, b'rebuild', _rebuild)

    # Prevent adding files that are outside the sparse checkout
    editfuncs = [
        b'normal',
        b'set_tracked',
        b'add',
        b'normallookup',
        b'copy',
        b'remove',
        b'merge',
    ]
    hint = _(
        b'include file with `hg debugsparse --include <pattern>` or use '
        + b'`hg add -s <file>` to include file directory while adding'
    )
    for func in editfuncs:

        def _wrapper(orig, self, *args, **kwargs):
            sparsematch = self._sparsematcher
            if not sparsematch.always():
                for f in args:
                    if f is not None and not sparsematch(f) and f not in self:
                        raise error.Abort(
                            _(
                                b"cannot add '%s' - it is outside "
                                b"the sparse checkout"
                            )
                            % f,
                            hint=hint,
                        )
            return orig(self, *args, **kwargs)

        extensions.wrapfunction(dirstate.dirstate, func, _wrapper)


@command(
    b'debugsparse',
    [
        (b'I', b'include', False, _(b'include files in the sparse checkout')),
        (b'X', b'exclude', False, _(b'exclude files in the sparse checkout')),
        (b'd', b'delete', False, _(b'delete an include/exclude rule')),
        (
            b'f',
            b'force',
            False,
            _(b'allow changing rules even with pending changes'),
        ),
        (b'', b'enable-profile', False, _(b'enables the specified profile')),
        (b'', b'disable-profile', False, _(b'disables the specified profile')),
        (b'', b'import-rules', False, _(b'imports rules from a file')),
        (b'', b'clear-rules', False, _(b'clears local include/exclude rules')),
        (
            b'',
            b'refresh',
            False,
            _(b'updates the working directory after sparseness changes'),
        ),
        (b'', b'reset', False, _(b'makes the repo full again')),
    ]
    + commands.templateopts,
    _(b'[--OPTION] PATTERN...'),
    helpbasic=True,
)
def debugsparse(ui, repo, *pats, **opts):
    """make the current checkout sparse, or edit the existing checkout

    The sparse command is used to make the current checkout sparse.
    This means files that don't meet the sparse condition will not be
    written to disk, or show up in any working copy operations. It does
    not affect files in history in any way.

    Passing no arguments prints the currently applied sparse rules.

    --include and --exclude are used to add and remove files from the sparse
    checkout. The effects of adding an include or exclude rule are applied
    immediately. If applying the new rule would cause a file with pending
    changes to be added or removed, the command will fail. Pass --force to
    force a rule change even with pending changes (the changes on disk will
    be preserved).

    --delete removes an existing include/exclude rule. The effects are
    immediate.

    --refresh refreshes the files on disk based on the sparse rules. This is
    only necessary if .hg/sparse was changed by hand.

    --enable-profile and --disable-profile accept a path to a .hgsparse file.
    This allows defining sparse checkouts and tracking them inside the
    repository. This is useful for defining commonly used sparse checkouts for
    many people to use. As the profile definition changes over time, the sparse
    checkout will automatically be updated appropriately, depending on which
    changeset is checked out. Changes to .hgsparse are not applied until they
    have been committed.

    --import-rules accepts a path to a file containing rules in the .hgsparse
    format, allowing you to add --include, --exclude and --enable-profile rules
    in bulk. Like the --include, --exclude and --enable-profile switches, the
    changes are applied immediately.

    --clear-rules removes all local include and exclude rules, while leaving
    any enabled profiles in place.

    Returns 0 if editing the sparse checkout succeeds.
    """
    opts = pycompat.byteskwargs(opts)
    include = opts.get(b'include')
    exclude = opts.get(b'exclude')
    force = opts.get(b'force')
    enableprofile = opts.get(b'enable_profile')
    disableprofile = opts.get(b'disable_profile')
    importrules = opts.get(b'import_rules')
    clearrules = opts.get(b'clear_rules')
    delete = opts.get(b'delete')
    refresh = opts.get(b'refresh')
    reset = opts.get(b'reset')
    count = sum(
        [
            include,
            exclude,
            enableprofile,
            disableprofile,
            delete,
            importrules,
            refresh,
            clearrules,
            reset,
        ]
    )
    if count > 1:
        raise error.Abort(_(b"too many flags specified"))

    if count == 0:
        if repo.vfs.exists(b'sparse'):
            ui.status(repo.vfs.read(b"sparse") + b"\n")
            temporaryincludes = sparse.readtemporaryincludes(repo)
            if temporaryincludes:
                ui.status(
                    _(b"Temporarily Included Files (for merge/rebase):\n")
                )
                ui.status((b"\n".join(temporaryincludes) + b"\n"))
            return
        else:
            raise error.Abort(
                _(
                    b'the debugsparse command is only supported on'
                    b' sparse repositories'
                )
            )

    if include or exclude or delete or reset or enableprofile or disableprofile:
        sparse.updateconfig(
            repo,
            pats,
            opts,
            include=include,
            exclude=exclude,
            reset=reset,
            delete=delete,
            enableprofile=enableprofile,
            disableprofile=disableprofile,
            force=force,
        )

    if importrules:
        sparse.importfromfiles(repo, opts, pats, force=force)

    if clearrules:
        sparse.clearrules(repo, force=force)

    if refresh:
        try:
            wlock = repo.wlock()
            # materialize the counts with list() so they can be indexed on
            # Python 3, where map() returns an iterator
            fcounts = list(
                map(
                    len,
                    sparse.refreshwdir(
                        repo, repo.status(), sparse.matcher(repo), force=force
                    ),
                )
            )
            sparse.printchanges(
                ui,
                opts,
                added=fcounts[0],
                dropped=fcounts[1],
                conflicting=fcounts[2],
            )
        finally:
            wlock.release()
@@ -1,1497 +1,1526 b''
# dirstate.py - working directory tracking for mercurial
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import contextlib
import errno
import os
import stat

from .i18n import _
from .pycompat import delattr

from hgdemandimport import tracing

from . import (
    dirstatemap,
    encoding,
    error,
    match as matchmod,
    pathutil,
    policy,
    pycompat,
    scmutil,
    sparse,
    util,
)

from .interfaces import (
    dirstate as intdirstate,
    util as interfaceutil,
)

parsers = policy.importmod('parsers')
rustmod = policy.importrust('dirstate')

SUPPORTS_DIRSTATE_V2 = rustmod is not None

propertycache = util.propertycache
filecache = scmutil.filecache
_rangemask = dirstatemap.rangemask

DirstateItem = parsers.DirstateItem


class repocache(filecache):
    """filecache for files in .hg/"""

    def join(self, obj, fname):
        return obj._opener.join(fname)


class rootcache(filecache):
    """filecache for files in the repository root"""

    def join(self, obj, fname):
        return obj._join(fname)


def _getfsnow(vfs):
    '''Get "now" timestamp on filesystem'''
    tmpfd, tmpname = vfs.mkstemp()
    try:
        return os.fstat(tmpfd)[stat.ST_MTIME]
    finally:
        os.close(tmpfd)
        vfs.unlink(tmpname)


def requires_parents_change(func):
    def wrap(self, *args, **kwargs):
        if not self.pendingparentchange():
            msg = 'calling `%s` outside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


def requires_no_parents_change(func):
    def wrap(self, *args, **kwargs):
        # raise when we *are* inside a parentchange context; the original
        # condition was inverted (`if not ...`), contradicting both the
        # decorator's name and its error message
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._updatedfiles = set()
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0
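
    # Editor's note: an illustrative sketch, not part of the original file.
    # parentchange() and pendingparentchange() above are what the
    # requires_parents_change / requires_no_parents_change decorators test:
    #
    #   ds = repo.dirstate
    #   with ds.parentchange():
    #       ds.update_file_reference(f, p1_tracked=True)  # allowed here
    #       # ds.set_tracked(f) here would raise error.ProgrammingError
    #   ds.set_tracked(f)  # allowed outside the context manager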

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. We should
        consider migrating all users of this to go through the dirstate entry
        instead.
319 """
330 """
320 entry = self._map.get(key)
331 entry = self._map.get(key)
321 if entry is not None:
332 if entry is not None:
322 return entry.state
333 return entry.state
323 return b'?'
334 return b'?'
324
335
325 def __contains__(self, key):
336 def __contains__(self, key):
326 return key in self._map
337 return key in self._map
327
338
328 def __iter__(self):
339 def __iter__(self):
329 return iter(sorted(self._map))
340 return iter(sorted(self._map))
330
341
331 def items(self):
342 def items(self):
332 return pycompat.iteritems(self._map)
343 return pycompat.iteritems(self._map)
333
344
334 iteritems = items
345 iteritems = items
335
346
336 def directories(self):
347 def directories(self):
337 return self._map.directories()
348 return self._map.directories()
338
349
339 def parents(self):
350 def parents(self):
340 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
341
352
342 def p1(self):
353 def p1(self):
343 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
344
355
345 def p2(self):
356 def p2(self):
346 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
347
358
348 @property
359 @property
349 def in_merge(self):
360 def in_merge(self):
350 """True if a merge is in progress"""
361 """True if a merge is in progress"""
351 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
352
363
353 def branch(self):
364 def branch(self):
354 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
355
366
356 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
357 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
358
369
        When moving from two parents to one, "merged" entries are
        adjusted to normal and previous copy records discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(p1, p2)
        copies = {}
        if (
            oldp2 != self._nodeconstants.nullid
            and p2 == self._nodeconstants.nullid
        ):
            candidatefiles = self._map.non_normal_or_other_parent_paths()

            for f in candidatefiles:
                s = self._map.get(f)
                if s is None:
                    continue

                # Discard "merged" markers when moving away from a merge state
                if s.merged:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self.normallookup(f)
                # Also fix up otherparent markers
                elif s.from_p2:
                    source = self._map.copymap.get(f)
                    if source:
                        copies[f] = source
                    self._add(f)
        return copies

    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._updatedfiles.clear()
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
            self._updatedfiles.add(source)
            self._updatedfiles.add(dest)
        elif self._map.copymap.pop(dest, None):
            self._updatedfiles.add(dest)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        entry = self._map.get(filename)
        if entry is None:
            self._add(filename)
            return True
        elif not entry.tracked:
            self.normallookup(filename)
            return True
        return False

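    # Editor's note: an illustrative sketch, not part of the original file.
    # A hypothetical "hg add"-like caller using the public method above:
    #
    #   def add_files(repo, files):
    #       """Start tracking files; report those already tracked."""
    #       with repo.wlock():
    #           for f in files:
    #               if not repo.dirstate.set_tracked(f):
    #                   repo.ui.status(b'%s already tracked!\n' % f)
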
    @requires_parents_change
    def update_file_reference(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history rewriting operation.

        It should not be called during a merge (p2 != nullid), and should
        only be called within a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if p1_tracked and wc_tracked:
            # the underlying reference might have changed, we will have to
            # check it.
            self.normallookup(filename)
        elif not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            self._drop(filename)
        elif (not p1_tracked) and wc_tracked:
            if not entry.added:
                self._add(filename)
        elif p1_tracked and not wc_tracked:
            if entry is None or not entry.removed:
                self._remove(filename)
        else:
            assert False, 'unreachable'

    def _addpath(
        self,
        f,
        mode=0,
        size=None,
        mtime=None,
        added=False,
        merged=False,
        from_p2=False,
        possibly_dirty=False,
    ):
        entry = self._map.get(f)
        if added or entry is not None and entry.removed:
            scmutil.checkfilename(f)
            if self._map.hastrackeddir(f):
                msg = _(b'directory %r already in dirstate')
                msg %= pycompat.bytestr(f)
                raise error.Abort(msg)
            # shadows
            for d in pathutil.finddirs(f):
                if self._map.hastrackeddir(d):
                    break
                entry = self._map.get(d)
                if entry is not None and not entry.removed:
                    msg = _(b'file %r in dirstate clashes with %r')
                    msg %= (pycompat.bytestr(d), pycompat.bytestr(f))
                    raise error.Abort(msg)
        self._dirty = True
        self._updatedfiles.add(f)
        self._map.addfile(
            f,
            mode=mode,
            size=size,
            mtime=mtime,
            added=added,
            merged=merged,
            from_p2=from_p2,
            possibly_dirty=possibly_dirty,
        )

    def normal(self, f, parentfiledata=None):
        """Mark a file normal and clean.

        parentfiledata: (mode, size, mtime) of the clean file

        parentfiledata should be computed from memory (for mode and
        size), at or as close as possible to the point where we
        determined the file was clean, to limit the risk of the
        file having been changed by an external process between the
        moment where the file was determined to be clean and now."""
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            s = os.lstat(self._join(f))
            mode = s.st_mode
            size = s.st_size
            mtime = s[stat.ST_MTIME]
        self._addpath(f, mode=mode, size=size, mtime=mtime)
        self._map.copymap.pop(f, None)
        if f in self._map.nonnormalset:
            self._map.nonnormalset.remove(f)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

    def normallookup(self, f):
        '''Mark a file normal, but possibly dirty.'''
        if self.in_merge:
            # if there is a merge going on and the file was either
            # "merged" or coming from other parent (-2) before
            # being removed, restore that state.
            entry = self._map.get(f)
            if entry is not None:
                # XXX this should probably be dealt with at a lower level
                # (see `merged_removed` and `from_p2_removed`)
569 if entry.merged_removed or entry.from_p2_removed:
598 if entry.merged_removed or entry.from_p2_removed:
570 source = self._map.copymap.get(f)
599 source = self._map.copymap.get(f)
571 if entry.merged_removed:
600 if entry.merged_removed:
572 self.merge(f)
601 self.merge(f)
573 elif entry.from_p2_removed:
602 elif entry.from_p2_removed:
574 self.otherparent(f)
603 self.otherparent(f)
575 if source is not None:
604 if source is not None:
576 self.copy(source, f)
605 self.copy(source, f)
577 return
606 return
578 elif entry.merged or entry.from_p2:
607 elif entry.merged or entry.from_p2:
579 return
608 return
580 self._addpath(f, possibly_dirty=True)
609 self._addpath(f, possibly_dirty=True)
581 self._map.copymap.pop(f, None)
610 self._map.copymap.pop(f, None)
582
611
583 def otherparent(self, f):
612 def otherparent(self, f):
584 '''Mark as coming from the other parent, always dirty.'''
613 '''Mark as coming from the other parent, always dirty.'''
585 if not self.in_merge:
614 if not self.in_merge:
586 msg = _(b"setting %r to other parent only allowed in merges") % f
615 msg = _(b"setting %r to other parent only allowed in merges") % f
587 raise error.Abort(msg)
616 raise error.Abort(msg)
588 entry = self._map.get(f)
617 entry = self._map.get(f)
589 if entry is not None and entry.tracked:
618 if entry is not None and entry.tracked:
590 # merge-like
619 # merge-like
591 self._addpath(f, merged=True)
620 self._addpath(f, merged=True)
592 else:
621 else:
593 # add-like
622 # add-like
594 self._addpath(f, from_p2=True)
623 self._addpath(f, from_p2=True)
595 self._map.copymap.pop(f, None)
624 self._map.copymap.pop(f, None)
596
625
597 def add(self, f):
626 def add(self, f):
598 '''Mark a file added.'''
627 '''Mark a file added.'''
599 self._add(f)
628 self._add(f)
600
629
601 def _add(self, filename):
630 def _add(self, filename):
602 """internal function to mark a file as added"""
631 """internal function to mark a file as added"""
603 self._addpath(filename, added=True)
632 self._addpath(filename, added=True)
604 self._map.copymap.pop(filename, None)
633 self._map.copymap.pop(filename, None)
605
634
606 def remove(self, f):
635 def remove(self, f):
607 '''Mark a file removed'''
636 '''Mark a file removed'''
608 self._remove(f)
637 self._remove(f)
609
638
610 def _remove(self, filename):
639 def _remove(self, filename):
611 """internal function to mark a file removed"""
640 """internal function to mark a file removed"""
612 self._dirty = True
641 self._dirty = True
613 self._updatedfiles.add(filename)
642 self._updatedfiles.add(filename)
614 self._map.removefile(filename, in_merge=self.in_merge)
643 self._map.removefile(filename, in_merge=self.in_merge)
615
644
616 def merge(self, f):
645 def merge(self, f):
617 '''Mark a file merged.'''
646 '''Mark a file merged.'''
618 if not self.in_merge:
647 if not self.in_merge:
619 return self.normallookup(f)
648 return self.normallookup(f)
620 return self.otherparent(f)
649 return self.otherparent(f)
621
650
622 def drop(self, f):
651 def drop(self, f):
623 '''Drop a file from the dirstate'''
652 '''Drop a file from the dirstate'''
624 self._drop(f)
653 self._drop(f)
625
654
626 def _drop(self, filename):
655 def _drop(self, filename):
627 """internal function to drop a file from the dirstate"""
656 """internal function to drop a file from the dirstate"""
628 if self._map.dropfile(filename):
657 if self._map.dropfile(filename):
629 self._dirty = True
658 self._dirty = True
630 self._updatedfiles.add(filename)
659 self._updatedfiles.add(filename)
631 self._map.copymap.pop(filename, None)
660 self._map.copymap.pop(filename, None)
632
661
633 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
662 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
634 if exists is None:
663 if exists is None:
635 exists = os.path.lexists(os.path.join(self._root, path))
664 exists = os.path.lexists(os.path.join(self._root, path))
636 if not exists:
665 if not exists:
637 # Maybe a path component exists
666 # Maybe a path component exists
638 if not ignoremissing and b'/' in path:
667 if not ignoremissing and b'/' in path:
639 d, f = path.rsplit(b'/', 1)
668 d, f = path.rsplit(b'/', 1)
640 d = self._normalize(d, False, ignoremissing, None)
669 d = self._normalize(d, False, ignoremissing, None)
641 folded = d + b"/" + f
670 folded = d + b"/" + f
642 else:
671 else:
643 # No path components, preserve original case
672 # No path components, preserve original case
644 folded = path
673 folded = path
645 else:
674 else:
646 # recursively normalize leading directory components
675 # recursively normalize leading directory components
647 # against dirstate
676 # against dirstate
648 if b'/' in normed:
677 if b'/' in normed:
649 d, f = normed.rsplit(b'/', 1)
678 d, f = normed.rsplit(b'/', 1)
650 d = self._normalize(d, False, ignoremissing, True)
679 d = self._normalize(d, False, ignoremissing, True)
651 r = self._root + b"/" + d
680 r = self._root + b"/" + d
652 folded = d + b"/" + util.fspath(f, r)
681 folded = d + b"/" + util.fspath(f, r)
653 else:
682 else:
654 folded = util.fspath(normed, self._root)
683 folded = util.fspath(normed, self._root)
655 storemap[normed] = folded
684 storemap[normed] = folded
656
685
657 return folded
686 return folded
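        # Hedged sketch of the intended behaviour (illustrative values, not
        # a doctest): on a case-insensitive filesystem where "Foo/Bar.txt"
        # exists on disk, a call such as
        #
        #     self._discoverpath(b'foo/bar.TXT', b'foo/bar.txt', False,
        #                        None, self._map.filefoldmap)
        #
        # is expected to return b'Foo/Bar.txt' and cache that folding in
        # the given storemap, so later lookups skip the filesystem probe.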

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path
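        # Usage sketch (assumed example, `ds` is a hypothetical dirstate):
        #
        #     ds.normalize(b'SRC/Main.C')
        #
        # on a case-folding filesystem prefers the spelling already in the
        # dirstate, then the on-disk spelling, and only then the argument
        # as given; on case-sensitive filesystems the path comes back
        # unchanged.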

    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            self.normallookup(f)
        for f in to_drop:
            self._drop(f)

        self._dirty = True
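        # Hedged example of the three branches above: changedfiles=None
        # resets every entry from allfiles; a short list such as
        # [b'a', b'b'] is scanned against allfiles directly; anything
        # with 10+ entries goes through the set intersection instead.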

    def identity(self):
        """Return the identity of the dirstate itself to detect changes in storage

        If the identity of the previous dirstate is equal to this one's,
        writing changes out based on the former dirstate can keep
        consistency.
        """
        return self._map.identity
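        # Sketch of the intended use (assumed caller, not upstream code):
        #
        #     before = ds.identity()
        #     # ... compute changes based on the current dirstate ...
        #     if ds.identity() != before:
        #         pass  # .hg/dirstate was rewritten underneath us; retry
        #
        # `ds` is a hypothetical dirstate instance.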

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps.
            # Delayed writing re-raises the "ambiguous timestamp issue".
            # See also the wiki page below for details:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback
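        # Hedged example of a conforming callback (hypothetical names;
        # `ds`, `ui` and `hex` are assumed to be in scope):
        #
        #     def onparentchange(dirstate, old, new):
        #         oldp1, oldp2 = old
        #         newp1, newp2 = new
        #         ui.debug(b'p1 changed: %s -> %s\n'
        #                  % (hex(oldp1), hex(newp1)))
        #
        #     ds.addparentchangecallback(b'myext', onparentchange)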

    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break
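                    # Worked example of the arithmetic above (commentary
                    # only): with delaywrite=2 and clock=12.3, start is
                    # int(12.3) - (12 % 2) = 12, end is 14, and we sleep
                    # for 1.7s so the write lands on a 2-second boundary.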

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False

    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was
                # stat'ed and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results
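        # Usage sketch (assumed example, `ds` is a hypothetical dirstate):
        #
        #     m = matchmod.always()
        #     for fn, st in ds.walk(m, [], unknown=True,
        #                           ignored=False).items():
        #         ...  # st is None for dirstate-only (missing) entries
        #
        # Whether .items() or pycompat.iteritems() is appropriate depends
        # on the Python version in use.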

    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._rustmap,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            self._lastnormaltime,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        if use_rust:
            try:
                return self._rust_status(
                    match, listclean, listignored, listunknown
                )
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but
            # not written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing
            # the tuple members one by one.
            t = dget(fn)
            mode = t.mode
            size = t.size
            time = t.mtime

            if not st and t.tracked:
                dadd(fn)
            elif t.merged:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or t.from_p2
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: Size returned may be longer due to
                        # encryption on EXT-4 fscrypt, undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status)
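        # Hedged usage sketch (`ds` and `m` are a hypothetical dirstate
        # and matcher): callers split the result into the certain buckets
        # and the "unsure" list, which still needs a content read:
        #
        #     unsure, st = ds.status(m, [], ignored=False, clean=False,
        #                            unknown=True)
        #     for f in unsure:
        #         ...  # compare contents against p1 to classify f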

    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._rustmap

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # actually written out, because the latter skips writing while a
        # transaction is running. The output file will be used to create a
        # backup of the dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )
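        # Sketch of the expected pairing (assumed caller): work that may
        # need to be rolled back typically brackets the mutation with
        #
        #     ds.savebackup(tr, b'dirstate.backup')
        #     try:
        #         ...  # mutate the dirstate
        #     except Exception:
        #         ds.restorebackup(tr, b'dirstate.backup')
        #         raise
        #     else:
        #         ds.clearbackup(tr, b'dirstate.backup')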

    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)