dirstate: align the dirstate's API to the lower level ones...
marmoute
r48956:6f54afb0 default
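This changeset touches two files. The only functional hunk is in synclfdirstate() below: the largefiles extension stops decoding the old one-letter dirstate states (b'n', b'm', b'r', b'a') by hand and instead feeds the attributes of the DirstateItem returned by get_entry() directly into update_file(), the lower-level API named in the commit title. A minimal sketch of the new call shape, using the same names as the hunk (the wrapper function name here is hypothetical):

    def sync_standin_entry(repo, lfdirstate, lfstandin, lfile):
        # one call replaces the old four-way state dispatch
        entry = repo.dirstate.get_entry(lfstandin)
        lfdirstate.update_file(
            lfile,
            wc_tracked=entry.tracked,     # covers the old b'n'/b'm'/b'a' cases
            p1_tracked=entry.p1_tracked,  # False for the old b'a' (added) case
            p2_info=entry.p2_info,        # carries what merged=True used to say
            possibly_dirty=True,          # let the next status() settle cleanliness
        )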
hgext/largefiles/lfutil.py
@@ -1,798 +1,788 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import contextlib
import copy
import os
import stat

from mercurial.i18n import _
from mercurial.node import hex
from mercurial.pycompat import open

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    pycompat,
    requirements,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)
from mercurial.utils import hashutil

shortname = b'.hglf'
shortnameslash = shortname + b'/'
longname = b'largefiles'

# -- Private worker functions ------------------------------------------


@contextlib.contextmanager
def lfstatus(repo, value=True):
    oldvalue = getattr(repo, 'lfstatus', False)
    repo.lfstatus = value
    try:
        yield
    finally:
        repo.lfstatus = oldvalue


def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, b'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(
                _(b'largefiles: size must be number (not %s)\n') % lfsize
            )
    if lfsize is None:
        raise error.Abort(_(b'minimum size for largefiles must be specified'))
    return lfsize


def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, b'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)


def usercachepath(ui, hash):
    """Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space."""
    return os.path.join(_usercachedir(ui), hash)


def _usercachedir(ui, name=longname):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(name, b'usercache')
    if path:
        return path

    hint = None

    if pycompat.iswindows:
        appdata = encoding.environ.get(
            b'LOCALAPPDATA', encoding.environ.get(b'APPDATA')
        )
        if appdata:
            return os.path.join(appdata, name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"LOCALAPPDATA",
            b"APPDATA",
            name,
        )
    elif pycompat.isdarwin:
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'Library', b'Caches', name)

        hint = _(b"define %s in the environment, or set %s.usercache") % (
            b"HOME",
            name,
        )
    elif pycompat.isposix:
        path = encoding.environ.get(b'XDG_CACHE_HOME')
        if path:
            return os.path.join(path, name)
        home = encoding.environ.get(b'HOME')
        if home:
            return os.path.join(home, b'.cache', name)

        hint = _(b"define %s or %s in the environment, or set %s.usercache") % (
            b"XDG_CACHE_HOME",
            b"HOME",
            name,
        )
    else:
        raise error.Abort(
            _(b'unknown operating system: %s\n') % pycompat.osname
        )

    raise error.Abort(_(b'unknown %s usercache location') % name, hint=hint)


def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)


def findfile(repo, hash):
    """Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally."""
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_(b'found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_(b'found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None


class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))

    def set_tracked(self, f):
        return super(largefilesdirstate, self).set_tracked(unixpath(f))

    def set_untracked(self, f):
        return super(largefilesdirstate, self).set_untracked(unixpath(f))

    def normal(self, f, parentfiledata=None):
        # not sure if we should pass the `parentfiledata` down or throw it
        # away. So throwing it away to stay on the safe side.
        return super(largefilesdirstate, self).normal(unixpath(f))

    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))

    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))

    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))

    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))

    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))

    def _ignore(self, f):
        return False

    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)


def openlfdirstate(ui, repo, create=True):
    """
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    """
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    use_dirstate_v2 = requirements.DIRSTATE_V2_REQUIREMENT in repo.requirements
    lfdirstate = largefilesdirstate(
        opener,
        ui,
        repo.root,
        repo.dirstate._validate,
        lambda: sparse.matcher(repo),
        repo.nodeconstants,
        use_dirstate_v2,
    )

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, b'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(
            matcher, subrepos=[], unknown=False, ignored=False
        )

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        with lfdirstate.parentchange():
            for standin in standins:
                lfile = splitstandin(standin)
                lfdirstate.update_file(
                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
    return lfdirstate


def lfdirstatestatus(lfdirstate, repo):
    pctx = repo[b'.']
    match = matchmod.always()
    unsure, s = lfdirstate.status(
        match, subrepos=[], ignored=False, clean=False, unknown=False
    )
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.set_clean(lfile)
    return s


def listlfiles(repo, rev=None, matcher=None):
    """return a list of largefiles in the working copy or the
    specified changeset"""

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [
        splitstandin(f)
        for f in repo[rev].walk(matcher)
        if rev is not None or repo.dirstate.get_entry(f).any_tracked
    ]


def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))


def storepath(repo, hash, forcelocal=False):
    """Return the correct location in the repository largefiles store for a
    file with the given hash."""
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)


def findstorepath(repo, hash):
    """Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    """
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)


def copyfromcache(repo, hash, filename):
    """Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happened:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache)."""
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, b'rb') as srcfd, wvfs(filename, b'wb') as destfd:
        gothash = copyandhash(util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(
            _(b'%s: data corruption in %s with hash %s\n')
            % (filename, path, gothash)
        )
        wvfs.unlink(filename)
        return False
    return True


def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(
            _(b"%s: largefile %s not available from local store\n")
            % (file, hash)
        )


def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)


def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, b'rb') as srcf:
            with util.atomictempfile(
                storepath(repo, hash), createmode=repo.store.createmode
            ) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)


def linktousercache(repo, hash):
    """Link / copy the largefile with the specified hash from the store
    to the cache."""
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)


def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match


def composestandinmatcher(repo, rmatcher):
    """Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user."""
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn

    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))

    smatcher.matchfn = composedmatchfn

    return smatcher


def standin(filename):
    """Return the repo-relative path to the standin for the specified big
    file."""
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)


def isstandin(filename):
    """Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated)."""
    return filename.startswith(shortnameslash)


def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split(b'/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None


def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_(b'%s: file not found!') % lfile)


def readasstandin(fctx):
    """read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer."""
    return fctx.data().strip()


def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + b'\n', executable and b'x' or b'')


def copyandhash(instream, outfile):
    """Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash."""
    hasher = hashutil.sha1(b'')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hex(hasher.digest())


def hashfile(file):
    if not os.path.exists(file):
        return b''
    with open(file, b'rb') as fd:
        return hexsha1(fd)


def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return (
        (mode & stat.S_IXUSR)
        and (mode & stat.S_IXGRP)
        and (mode & stat.S_IXOTH)
    )


def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith(b'/'):
            left += b'/'
        if right.startswith(b'/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url


def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashutil.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return hex(h.digest())


def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, b'rb')


def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))


def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if b'largefiles' in repo.requirements and any(
        shortnameslash in f[1] for f in repo.store.datafiles()
    ):
        return True

    return any(openlfdirstate(repo.ui, repo, False))


class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes


def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(
        matcher, subrepos=[], unknown=False, ignored=False
    ):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins


def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin not in repo.dirstate:
        lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=False)
    else:
-        stat = repo.dirstate.get_entry(lfstandin)
-        state, mtime = stat.state, stat.mtime
-        if state == b'n':
-            if normallookup or mtime < 0 or not repo.wvfs.exists(lfile):
-                # state 'n' doesn't ensure 'clean' in this case
-                lfdirstate.update_file(
-                    lfile, p1_tracked=True, wc_tracked=True, possibly_dirty=True
-                )
-            else:
-                lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=True)
-        elif state == b'm':
-            lfdirstate.update_file(
-                lfile, p1_tracked=True, wc_tracked=True, merged=True
-            )
-        elif state == b'r':
-            lfdirstate.update_file(lfile, p1_tracked=True, wc_tracked=False)
-        elif state == b'a':
-            lfdirstate.update_file(lfile, p1_tracked=False, wc_tracked=True)
+        entry = repo.dirstate.get_entry(lfstandin)
+        lfdirstate.update_file(
+            lfile,
+            wc_tracked=entry.tracked,
+            p1_tracked=entry.p1_tracked,
+            p2_info=entry.p2_info,
+            possibly_dirty=True,
+        )

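For comparison, here is a hypothetical helper (not part of the change) spelling out the keyword arguments each branch of the deleted code passed to update_file(); the DirstateItem attributes used by the new code carry the same information, so the branching collapses into a single call:

    def _legacy_update_flags(state):
        # state is the old one-letter code of the standin's dirstate entry
        return {
            b'n': dict(p1_tracked=True, wc_tracked=True),
            b'm': dict(p1_tracked=True, wc_tracked=True, merged=True),
            b'r': dict(p1_tracked=True, wc_tracked=False),
            b'a': dict(p1_tracked=False, wc_tracked=True),
        }[state]

One deliberate simplification: the old code passed possibly_dirty=True only for b'n' entries whose mtime was unusable, while the new code passes it unconditionally and lets a later status run decide whether the largefile is actually clean.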
def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    lfdirstate = openlfdirstate(repo.ui, repo)
    with lfdirstate.parentchange():
        orig(node)

        # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
        # because files coming from the 2nd parent are omitted in the latter.
        #
        # The former should be used to get targets of "synclfdirstate",
        # because such files:
        # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
        # - have to be marked as "n" after commit, but
        # - aren't listed in "repo[node].files()"

        for f in ctx.files():
            lfile = splitstandin(f)
            if lfile is not None:
                synclfdirstate(repo, lfdirstate, lfile, False)
        lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)


def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist


def getlfilestoupload(repo, missing, addfunc):
    makeprogress = repo.ui.makeprogress
    with makeprogress(
        _(b'finding outgoing largefiles'),
        unit=_(b'revisions'),
        total=len(missing),
    ) as progress:
        for i, n in enumerate(missing):
            progress.update(i)
            parents = [p for p in repo[n].parents() if p != repo.nullid]

            with lfstatus(repo, value=False):
                ctx = repo[n]

            files = set(ctx.files())
            if len(parents) == 2:
                mc = ctx.manifest()
                mp1 = ctx.p1().manifest()
                mp2 = ctx.p2().manifest()
                for f in mp1:
                    if f not in mc:
                        files.add(f)
                for f in mp2:
                    if f not in mc:
                        files.add(f)
                for f in mc:
                    if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                        files.add(f)
            for fn in files:
                if isstandin(fn) and fn in ctx:
                    addfunc(fn, readasstandin(ctx[fn]))


def updatestandinsbymatch(repo, match):
    """Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    """

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always()
        unsure, s = lfdirstate.status(
            dirtymatch, subrepos=[], ignored=False, clean=False, unknown=False
        )
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(
        smatcher, subrepos=[], unknown=False, ignored=False
    )

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate.get_entry(lfile).tracked:
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if not repo.dirstate.get_entry(fstandin).removed:
                if not repo.dirstate.get_entry(f).removed:
                    continue
            elif not repo.dirstate.get_entry(f).any_tracked:
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match


class automatedcommithook(object):
    """Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    """

    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False  # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match


def getstatuswriter(ui, repo, forcibly=None):
    """Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    """
    if forcibly is None and util.safehasattr(repo, b'_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status  # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None  # forcibly IGNORE
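The second file is mercurial/dirstate.py, home of the lower-level API that lfutil.py now mirrors; only the top of the file is shown here. The requires_parents_change and requires_no_parents_change decorators below enforce that parent-changing mutators run inside a parentchange() block, which is why openlfdirstate() and markcommitted() above wrap their update_file() calls that way. A minimal sketch of the expected calling pattern (assumed caller code, not part of this diff):

    with repo.dirstate.parentchange():
        repo.dirstate.update_file(
            b'foo', p1_tracked=True, wc_tracked=True, possibly_dirty=True
        )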
mercurial/dirstate.py
@@ -1,1534 +1,1526 @@
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .pycompat import delattr
17 from .pycompat import delattr
18
18
19 from hgdemandimport import tracing
19 from hgdemandimport import tracing
20
20
21 from . import (
21 from . import (
22 dirstatemap,
22 dirstatemap,
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 sparse,
30 sparse,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
42 SUPPORTS_DIRSTATE_V2 = rustmod is not None
43
43
44 propertycache = util.propertycache
44 propertycache = util.propertycache
45 filecache = scmutil.filecache
45 filecache = scmutil.filecache
46 _rangemask = dirstatemap.rangemask
46 _rangemask = dirstatemap.rangemask
47
47
48 DirstateItem = dirstatemap.DirstateItem
48 DirstateItem = dirstatemap.DirstateItem
49
49
50
50
51 class repocache(filecache):
51 class repocache(filecache):
52 """filecache for files in .hg/"""
52 """filecache for files in .hg/"""
53
53
54 def join(self, obj, fname):
54 def join(self, obj, fname):
55 return obj._opener.join(fname)
55 return obj._opener.join(fname)
56
56
57
57
58 class rootcache(filecache):
58 class rootcache(filecache):
59 """filecache for files in the repository root"""
59 """filecache for files in the repository root"""
60
60
61 def join(self, obj, fname):
61 def join(self, obj, fname):
62 return obj._join(fname)
62 return obj._join(fname)
63
63
64
64
65 def _getfsnow(vfs):
65 def _getfsnow(vfs):
66 '''Get "now" timestamp on filesystem'''
66 '''Get "now" timestamp on filesystem'''
67 tmpfd, tmpname = vfs.mkstemp()
67 tmpfd, tmpname = vfs.mkstemp()
68 try:
68 try:
69 return os.fstat(tmpfd)[stat.ST_MTIME]
69 return os.fstat(tmpfd)[stat.ST_MTIME]
70 finally:
70 finally:
71 os.close(tmpfd)
71 os.close(tmpfd)
72 vfs.unlink(tmpname)
72 vfs.unlink(tmpname)
73
73
74
74
75 def requires_parents_change(func):
75 def requires_parents_change(func):
76 def wrap(self, *args, **kwargs):
76 def wrap(self, *args, **kwargs):
77 if not self.pendingparentchange():
77 if not self.pendingparentchange():
78 msg = 'calling `%s` outside of a parentchange context'
78 msg = 'calling `%s` outside of a parentchange context'
79 msg %= func.__name__
79 msg %= func.__name__
80 raise error.ProgrammingError(msg)
80 raise error.ProgrammingError(msg)
81 return func(self, *args, **kwargs)
81 return func(self, *args, **kwargs)
82
82
83 return wrap
83 return wrap
84
84
85
85
86 def requires_no_parents_change(func):
def requires_no_parents_change(func):
    def wrap(self, *args, **kwargs):
        if self.pendingparentchange():
            msg = 'calling `%s` inside of a parentchange context'
            msg %= func.__name__
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrap


@interfaceutil.implementer(intdirstate.idirstate)
class dirstate(object):
    def __init__(
        self,
        opener,
        ui,
        root,
        validate,
        sparsematchfn,
        nodeconstants,
        use_dirstate_v2,
    ):
        """Create a new dirstate object.

        opener is an open()-like callable that can be used to open the
        dirstate file; root is the root of the directory tracked by
        the dirstate.
        """
        self._use_dirstate_v2 = use_dirstate_v2
        self._nodeconstants = nodeconstants
        self._opener = opener
        self._validate = validate
        self._root = root
        self._sparsematchfn = sparsematchfn
        # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
        # UNC path pointing to root share (issue4557)
        self._rootdir = pathutil.normasprefix(root)
        self._dirty = False
        self._lastnormaltime = 0
        self._ui = ui
        self._filecache = {}
        self._parentwriters = 0
        self._filename = b'dirstate'
        self._pendingfilename = b'%s.pending' % self._filename
        self._plchangecallbacks = {}
        self._origpl = None
        self._mapcls = dirstatemap.dirstatemap
        # Access and cache cwd early, so we don't access it for the first time
        # after a working-copy update caused it to not exist (accessing it then
        # raises an exception).
        self._cwd

    def prefetch_parents(self):
        """make sure the parents are loaded

        Used to avoid a race condition.
        """
        self._pl

    @contextlib.contextmanager
    def parentchange(self):
        """Context manager for handling dirstate parents.

        If an exception occurs in the scope of the context manager,
        the incoherent dirstate won't be written when wlock is
        released.
        """
        self._parentwriters += 1
        yield
        # Typically we want the "undo" step of a context manager in a
        # finally block so it happens even when an exception
        # occurs. In this case, however, we only want to decrement
        # parentwriters if the code in the with statement exits
        # normally, so we don't have a try/finally here on purpose.
        self._parentwriters -= 1

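    # Illustrative sketch (not part of the original file): callers pair this
    # context manager with setparents() and the update_file* methods; `repo`
    # and `new_node` are assumed to come from surrounding code that already
    # holds the wlock.
    #
    #     with repo.dirstate.parentchange():
    #         repo.dirstate.setparents(new_node)
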
    def pendingparentchange(self):
        """Returns true if the dirstate is in the middle of a set of changes
        that modify the dirstate parent.
        """
        return self._parentwriters > 0

    @propertycache
    def _map(self):
        """Return the dirstate contents (see documentation for dirstatemap)."""
        self._map = self._mapcls(
            self._ui,
            self._opener,
            self._root,
            self._nodeconstants,
            self._use_dirstate_v2,
        )
        return self._map

    @property
    def _sparsematcher(self):
        """The matcher for the sparse checkout.

        The working directory may not include every file from a manifest. The
        matcher obtained by this property will match a path if it is to be
        included in the working directory.
        """
        # TODO there is potential to cache this property. For now, the matcher
        # is resolved on every access. (But the called function does use a
        # cache to keep the lookup fast.)
        return self._sparsematchfn()

    @repocache(b'branch')
    def _branch(self):
        try:
            return self._opener.read(b"branch").strip() or b"default"
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            return b"default"

    @property
    def _pl(self):
        return self._map.parents()

    def hasdir(self, d):
        return self._map.hastrackeddir(d)

    @rootcache(b'.hgignore')
    def _ignore(self):
        files = self._ignorefiles()
        if not files:
            return matchmod.never()

        pats = [b'include:%s' % f for f in files]
        return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)

    @propertycache
    def _slash(self):
        return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'

    @propertycache
    def _checklink(self):
        return util.checklink(self._root)

    @propertycache
    def _checkexec(self):
        return bool(util.checkexec(self._root))

    @propertycache
    def _checkcase(self):
        return not util.fscasesensitive(self._join(b'.hg'))

    def _join(self, f):
        # much faster than os.path.join()
        # it's safe because f is always a relative path
        return self._rootdir + f

    def flagfunc(self, buildfallback):
        if self._checklink and self._checkexec:

            def f(x):
                try:
                    st = os.lstat(self._join(x))
                    if util.statislink(st):
                        return b'l'
                    if util.statisexec(st):
                        return b'x'
                except OSError:
                    pass
                return b''

            return f

        fallback = buildfallback()
        if self._checklink:

            def f(x):
                if os.path.islink(self._join(x)):
                    return b'l'
                if b'x' in fallback(x):
                    return b'x'
                return b''

            return f
        if self._checkexec:

            def f(x):
                if b'l' in fallback(x):
                    return b'l'
                if util.isexec(self._join(x)):
                    return b'x'
                return b''

            return f
        else:
            return fallback

    @propertycache
    def _cwd(self):
        # internal config: ui.forcecwd
        forcecwd = self._ui.config(b'ui', b'forcecwd')
        if forcecwd:
            return forcecwd
        return encoding.getcwd()

    def getcwd(self):
        """Return the path from which a canonical path is calculated.

        This path should be used to resolve file patterns or to convert
        canonical paths back to file paths for display. It shouldn't be
        used to get real file paths. Use vfs functions instead.
        """
        cwd = self._cwd
        if cwd == self._root:
            return b''
        # self._root ends with a path separator if self._root is '/' or 'C:\'
        rootsep = self._root
        if not util.endswithsep(rootsep):
            rootsep += pycompat.ossep
        if cwd.startswith(rootsep):
            return cwd[len(rootsep) :]
        else:
            # we're outside the repo. return an absolute path.
            return cwd

    def pathto(self, f, cwd=None):
        if cwd is None:
            cwd = self.getcwd()
        path = util.pathto(self._root, cwd, f)
        if self._slash:
            return util.pconvert(path)
        return path

    def __getitem__(self, key):
        """Return the current state of key (a filename) in the dirstate.

        States are:
          n  normal
          m  needs merging
          r  marked for removal
          a  marked for addition
          ?  not tracked

        XXX The "state" is a bit obscure to be in the "public" API. We should
        consider migrating all users of this to go through the dirstate entry
        instead.
        """
        msg = b"don't use dirstate[file], use dirstate.get_entry(file)"
        util.nouideprecwarn(msg, b'6.1', stacklevel=2)
        entry = self._map.get(key)
        if entry is not None:
            return entry.state
        return b'?'

    def get_entry(self, path):
        """return a DirstateItem for the associated path"""
        entry = self._map.get(path)
        if entry is None:
            return DirstateItem()
        return entry

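    # Illustrative sketch (not part of the original file): new code should
    # prefer the entry object over the deprecated `dirstate[f]` item access;
    # `ds` is an assumed dirstate instance.
    #
    #     entry = ds.get_entry(b'some/file')
    #     if entry.tracked:
    #         pass  # act on the tracked file
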
    def __contains__(self, key):
        return key in self._map

    def __iter__(self):
        return iter(sorted(self._map))

    def items(self):
        return pycompat.iteritems(self._map)

    iteritems = items

    def parents(self):
        return [self._validate(p) for p in self._pl]

    def p1(self):
        return self._validate(self._pl[0])

    def p2(self):
        return self._validate(self._pl[1])

    @property
    def in_merge(self):
        """True if a merge is in progress"""
        return self._pl[1] != self._nodeconstants.nullid

    def branch(self):
        return encoding.tolocal(self._branch)

    def setparents(self, p1, p2=None):
        """Set dirstate parents to p1 and p2.

        When moving from two parents to one, "merged" entries are
        adjusted to normal, and previous copy records are discarded and
        returned by the call.

        See localrepo.setparents()
        """
        if p2 is None:
            p2 = self._nodeconstants.nullid
        if self._parentwriters == 0:
            raise ValueError(
                b"cannot set dirstate parent outside of "
                b"dirstate.parentchange context manager"
            )

        self._dirty = True
        oldp2 = self._pl[1]
        if self._origpl is None:
            self._origpl = self._pl
        nullid = self._nodeconstants.nullid
        # True if we need to fold p2 related state back to a linear case
        fold_p2 = oldp2 != nullid and p2 == nullid
        return self._map.setparents(p1, p2, fold_p2=fold_p2)

    def setbranch(self, branch):
        self.__class__._branch.set(self, encoding.fromlocal(branch))
        f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
        try:
            f.write(self._branch + b'\n')
            f.close()

            # make sure filecache has the correct stat info for _branch after
            # replacing the underlying file
            ce = self._filecache[b'_branch']
            if ce:
                ce.refresh()
        except:  # re-raises
            f.discard()
            raise

    def invalidate(self):
        """Causes the next access to reread the dirstate.

        This is different from localrepo.invalidatedirstate() because it always
        rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
        check whether the dirstate has changed before rereading it."""

        for a in ("_map", "_branch", "_ignore"):
            if a in self.__dict__:
                delattr(self, a)
        self._lastnormaltime = 0
        self._dirty = False
        self._parentwriters = 0
        self._origpl = None

    def copy(self, source, dest):
        """Mark dest as a copy of source. Unmark dest if source is None."""
        if source == dest:
            return
        self._dirty = True
        if source is not None:
            self._map.copymap[dest] = source
        else:
            self._map.copymap.pop(dest, None)

    def copied(self, file):
        return self._map.copymap.get(file, None)

    def copies(self):
        return self._map.copymap

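    # Illustrative sketch (not part of the original file): a rename is
    # recorded roughly as follows, mirroring `hg copy`/`hg rename`
    # (`ds` is an assumed dirstate instance):
    #
    #     ds.set_tracked(b'new-name')
    #     ds.copy(b'old-name', b'new-name')
    #     assert ds.copied(b'new-name') == b'old-name'
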
    @requires_no_parents_change
    def set_tracked(self, filename):
        """a "public" method for generic code to mark a file as tracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg add X`.

        return True if the file was previously untracked, False otherwise.
        """
        self._dirty = True
        entry = self._map.get(filename)
        if entry is None or not entry.tracked:
            self._check_new_tracked_filename(filename)
        return self._map.set_tracked(filename)

    @requires_no_parents_change
    def set_untracked(self, filename):
        """a "public" method for generic code to mark a file as untracked

        This function is to be called outside of "update/merge" case. For
        example by a command like `hg remove X`.

        return True if the file was previously tracked, False otherwise.
        """
        ret = self._map.set_untracked(filename)
        if ret:
            self._dirty = True
        return ret

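    # Illustrative sketch (not part of the original file): the add/remove
    # commands reduce to these calls (`ds` is an assumed dirstate instance):
    #
    #     ds.set_tracked(b'file.txt')    # `hg add file.txt`
    #     ds.set_untracked(b'file.txt')  # `hg remove file.txt`
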
    @requires_no_parents_change
    def set_clean(self, filename, parentfiledata=None):
        """record that the current state of the file on disk is known to be clean"""
        self._dirty = True
        if parentfiledata:
            (mode, size, mtime) = parentfiledata
        else:
            (mode, size, mtime) = self._get_filedata(filename)
        if not self._map[filename].tracked:
            self._check_new_tracked_filename(filename)
        self._map.set_clean(filename, mode, size, mtime)
        if mtime > self._lastnormaltime:
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = mtime

    @requires_no_parents_change
    def set_possibly_dirty(self, filename):
        """record that the current state of the file on disk is unknown"""
        self._dirty = True
        self._map.set_possibly_dirty(filename)

    @requires_parents_change
    def update_file_p1(
        self,
        filename,
        p1_tracked,
    ):
        """Set a file as tracked in the parent (or not)

        This is to be called when adjusting the dirstate to a new parent
        after a history-rewriting operation.

        It should not be called during a merge (p2 != nullid) and only within
        a `with dirstate.parentchange():` context.
        """
        if self.in_merge:
            msg = b'update_file_reference should not be called when merging'
            raise error.ProgrammingError(msg)
        entry = self._map.get(filename)
        if entry is None:
            wc_tracked = False
        else:
            wc_tracked = entry.tracked
        if not (p1_tracked or wc_tracked):
            # the file is no longer relevant to anyone
            if self._map.get(filename) is not None:
                self._map.reset_state(filename)
                self._dirty = True
        elif (not p1_tracked) and wc_tracked:
            if entry is not None and entry.added:
                return  # avoid dropping copy information (maybe?)

        parentfiledata = None
        if wc_tracked and p1_tracked:
            parentfiledata = self._get_filedata(filename)

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            # the underlying reference might have changed, we will have to
            # check it.
            has_meaningful_mtime=False,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]

    @requires_parents_change
    def update_file(
        self,
        filename,
        wc_tracked,
        p1_tracked,
        p2_info=False,
        possibly_dirty=False,
        parentfiledata=None,
    ):
        """update the information about a file in the dirstate

        This is to be called when the dirstate's parents change, to keep
        track of the file's situation with regard to the working copy and
        its parent.

        This function must be called within a `dirstate.parentchange` context.

        note: the API is at an early stage and we might need to adjust it
        depending on what information ends up being relevant and useful to
        other processing.
        """

        # note: I do not think we need to double check name clash here since we
        # are in a update/merge case that should already have taken care of
        # this. The test agrees

        self._dirty = True

        need_parent_file_data = (
            not possibly_dirty and not p2_info and wc_tracked and p1_tracked
        )

        # this means we are doing a call for a file whose data we do not
        # really care about (e.g. added or removed); however this should be a
        # minor overhead compared to the overall update process calling this.
        if need_parent_file_data:
            if parentfiledata is None:
                parentfiledata = self._get_filedata(filename)
            mtime = parentfiledata[2]

            if mtime > self._lastnormaltime:
                # Remember the most recent modification timeslot for
                # status(), to make sure we won't miss future
                # size-preserving file content modifications that happen
                # within the same timeslot.
                self._lastnormaltime = mtime

        self._map.reset_state(
            filename,
            wc_tracked,
            p1_tracked,
            p2_info=p2_info,
            has_meaningful_mtime=not possibly_dirty,
            parentfiledata=parentfiledata,
        )
        if (
            parentfiledata is not None
            and parentfiledata[2] > self._lastnormaltime
        ):
            # Remember the most recent modification timeslot for status(),
            # to make sure we won't miss future size-preserving file content
            # modifications that happen within the same timeslot.
            self._lastnormaltime = parentfiledata[2]

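    # Illustrative sketch (not part of the original file): after a history
    # rewrite, a caller records the new situation of each touched file;
    # `ds` and `new_node` are assumed.
    #
    #     with ds.parentchange():
    #         ds.setparents(new_node)
    #         ds.update_file(b'f', wc_tracked=True, p1_tracked=True)
    #         ds.update_file(b'g', wc_tracked=True, p1_tracked=False)  # added
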
    def _check_new_tracked_filename(self, filename):
        scmutil.checkfilename(filename)
        if self._map.hastrackeddir(filename):
            msg = _(b'directory %r already in dirstate')
            msg %= pycompat.bytestr(filename)
            raise error.Abort(msg)
        # shadows
        for d in pathutil.finddirs(filename):
            if self._map.hastrackeddir(d):
                break
            entry = self._map.get(d)
            if entry is not None and not entry.removed:
                msg = _(b'file %r in dirstate clashes with %r')
                msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
                raise error.Abort(msg)

    def _get_filedata(self, filename):
        """return a (mode, size, mtime) tuple for the given file"""
        s = os.lstat(self._join(filename))
        mode = s.st_mode
        size = s.st_size
        mtime = s[stat.ST_MTIME]
        return (mode, size, mtime)

    def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
        if exists is None:
            exists = os.path.lexists(os.path.join(self._root, path))
        if not exists:
            # Maybe a path component exists
            if not ignoremissing and b'/' in path:
                d, f = path.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, None)
                folded = d + b"/" + f
            else:
                # No path components, preserve original case
                folded = path
        else:
            # recursively normalize leading directory components
            # against dirstate
            if b'/' in normed:
                d, f = normed.rsplit(b'/', 1)
                d = self._normalize(d, False, ignoremissing, True)
                r = self._root + b"/" + d
                folded = d + b"/" + util.fspath(f, r)
            else:
                folded = util.fspath(normed, self._root)
            storemap[normed] = folded

        return folded

    def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.filefoldmap
                )
        return folded

    def _normalize(self, path, isknown, ignoremissing=False, exists=None):
        normed = util.normcase(path)
        folded = self._map.filefoldmap.get(normed, None)
        if folded is None:
            folded = self._map.dirfoldmap.get(normed, None)
        if folded is None:
            if isknown:
                folded = path
            else:
                # store discovered result in dirfoldmap so that future
                # normalizefile calls don't start matching directories
                folded = self._discoverpath(
                    path, normed, ignoremissing, exists, self._map.dirfoldmap
                )
        return folded

    def normalize(self, path, isknown=False, ignoremissing=False):
        """
        normalize the case of a pathname when on a casefolding filesystem

        isknown specifies whether the filename came from walking the
        disk, to avoid extra filesystem access.

        If ignoremissing is True, missing paths are returned
        unchanged. Otherwise, we try harder to normalize possibly
        existing path components.

        The normalized case is determined based on the following precedence:

        - version of name already stored in the dirstate
        - version of name stored on disk
        - version provided via command arguments
        """

        if self._checkcase:
            return self._normalize(path, isknown, ignoremissing)
        return path

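    # Illustrative sketch (not part of the original file): on a case-folding
    # filesystem, a user-supplied spelling is mapped back to the tracked or
    # on-disk one; `ds` is an assumed dirstate instance.
    #
    #     ds.normalize(b'README.TXT')  # -> b'readme.txt' if that is tracked
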
    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            to_lookup = allfiles
            to_drop = []
            lastnormaltime = self._lastnormaltime
            self.clear()
            self._lastnormaltime = lastnormaltime
        elif len(changedfiles) < 10:
            # Avoid turning allfiles into a set, which can be expensive if it's
            # large.
            to_lookup = []
            to_drop = []
            for f in changedfiles:
                if f in allfiles:
                    to_lookup.append(f)
                else:
                    to_drop.append(f)
        else:
            changedfilesset = set(changedfiles)
            to_lookup = changedfilesset & set(allfiles)
            to_drop = changedfilesset - to_lookup

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, self._nodeconstants.nullid)

        for f in to_lookup:
            if self.in_merge:
                self.set_tracked(f)
            else:
                self._map.reset_state(
                    f,
                    wc_tracked=True,
                    p1_tracked=True,
                )
        for f in to_drop:
            self._map.reset_state(f)

        self._dirty = True

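    # Illustrative sketch (not part of the original file): a full rebuild
    # against a changectx, roughly what `hg debugrebuilddirstate` does;
    # `repo` and `ctx` are assumed and the wlock must already be held.
    #
    #     repo.dirstate.rebuild(ctx.node(), ctx.manifest())
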
    def identity(self):
        """Return identity of dirstate itself to detect changing in storage

        If identity of previous dirstate is equal to this, writing
        changes based on the former dirstate out can keep consistency.
        """
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps;
            # delayed writing re-raises the "ambiguous timestamp issue".
            # See also the wiki page below for detail:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # record when mtime starts to be ambiguous
            now = _getfsnow(self._opener)

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                lambda f: self._writedirstate(tr, f, now=now),
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(tr, st)

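    # Illustrative sketch (not part of the original file): inside a
    # transaction the write is queued via a file generator; outside one,
    # passing None flushes immediately (`ds` and `tr` are assumed).
    #
    #     ds.write(tr)    # delayed until the transaction is finalized
    #     ds.write(None)  # written to the dirstate file right away
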
    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the wd parents are changed

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

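    # Illustrative sketch (not part of the original file): an extension could
    # observe working-directory parent moves like this; names are assumed.
    #
    #     def onparents(dirstate, oldparents, newparents):
    #         pass  # react to the parent change
    #
    #     ds.addparentchangecallback(b'my-extension', onparents)
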
    def _writedirstate(self, tr, st, now=None):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None

        if now is None:
            # use the modification time of the newly created temporary file
            # as the filesystem's notion of 'now'
            now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e.need_delay(now):
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(tr, st, now)
        self._lastnormaltime = 0
        self._dirty = False

    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):
            if self._ignore(p):
                return True
        return False

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

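    # Illustrative sketch (not part of the original file): this helper reports
    # which ignore file and pattern matched a path, as used by debug tooling;
    # `ds` is an assumed dirstate instance.
    #
    #     ignorefile, lineno, line = ds._ignorefileandline(b'build/out.o')
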
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).

        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None
1155
1147
1156 for nd, d in work:
1148 for nd, d in work:
1157 # alreadynormed means that processwork doesn't have to do any
1149 # alreadynormed means that processwork doesn't have to do any
1158 # expensive directory normalization
1150 # expensive directory normalization
1159 alreadynormed = not normalize or nd == d
1151 alreadynormed = not normalize or nd == d
1160 traverse([d], alreadynormed)
1152 traverse([d], alreadynormed)
1161
1153
1162 for s in subrepos:
1154 for s in subrepos:
1163 del results[s]
1155 del results[s]
1164 del results[b'.hg']
1156 del results[b'.hg']
1165
1157
1166 # step 3: visit remaining files from dmap
1158 # step 3: visit remaining files from dmap
1167 if not skipstep3 and not exact:
1159 if not skipstep3 and not exact:
1168 # If a dmap file is not in results yet, it was either
1160 # If a dmap file is not in results yet, it was either
1169 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1161 # a) not matching matchfn, b) ignored, c) missing, or d) under a
1170 # symlink directory.
1162 # symlink directory.
1171 if not results and matchalways:
1163 if not results and matchalways:
1172 visit = [f for f in dmap]
1164 visit = [f for f in dmap]
1173 else:
1165 else:
1174 visit = [f for f in dmap if f not in results and matchfn(f)]
1166 visit = [f for f in dmap if f not in results and matchfn(f)]
1175 visit.sort()
1167 visit.sort()
1176
1168
1177 if unknown:
1169 if unknown:
1178 # unknown == True means we walked all dirs under the roots
1170 # unknown == True means we walked all dirs under the roots
1179 # that weren't ignored, and everything that matched was stat'ed
1171 # that weren't ignored, and everything that matched was stat'ed
1180 # and is already in results.
1172 # and is already in results.
1181 # The rest must thus be ignored or under a symlink.
1173 # The rest must thus be ignored or under a symlink.
1182 audit_path = pathutil.pathauditor(self._root, cached=True)
1174 audit_path = pathutil.pathauditor(self._root, cached=True)
1183
1175
1184 for nf in iter(visit):
1176 for nf in iter(visit):
1185 # If a stat for the same file was already added with a
1177 # If a stat for the same file was already added with a
1186 # different case, don't add one for this, since that would
1178 # different case, don't add one for this, since that would
1187 # make it appear as if the file exists under both names
1179 # make it appear as if the file exists under both names
1188 # on disk.
1180 # on disk.
1189 if (
1181 if (
1190 normalizefile
1182 normalizefile
1191 and normalizefile(nf, True, True) in results
1183 and normalizefile(nf, True, True) in results
1192 ):
1184 ):
1193 results[nf] = None
1185 results[nf] = None
1194 # Report ignored items in the dmap as long as they are not
1186 # Report ignored items in the dmap as long as they are not
1195 # under a symlink directory.
1187 # under a symlink directory.
1196 elif audit_path.check(nf):
1188 elif audit_path.check(nf):
1197 try:
1189 try:
1198 results[nf] = lstat(join(nf))
1190 results[nf] = lstat(join(nf))
1199 # file was just ignored, no links, and exists
1191 # file was just ignored, no links, and exists
1200 except OSError:
1192 except OSError:
1201 # file doesn't exist
1193 # file doesn't exist
1202 results[nf] = None
1194 results[nf] = None
1203 else:
1195 else:
1204 # It's either missing or under a symlink directory
1196 # It's either missing or under a symlink directory
1205 # which we in this case report as missing
1197 # which we in this case report as missing
1206 results[nf] = None
1198 results[nf] = None
1207 else:
1199 else:
1208 # We may not have walked the full directory tree above,
1200 # We may not have walked the full directory tree above,
1209 # so stat and check everything we missed.
1201 # so stat and check everything we missed.
1210 iv = iter(visit)
1202 iv = iter(visit)
1211 for st in util.statfiles([join(i) for i in visit]):
1203 for st in util.statfiles([join(i) for i in visit]):
1212 results[next(iv)] = st
1204 results[next(iv)] = st
1213 return results
1205 return results
1214
1206
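# Illustrative sketch (not Mercurial's API; names are hypothetical): the walk
# above is, at its core, an iterative directory traversal driven by an
# ignore predicate, using a work stack instead of recursion like step 2's
# traverse() does.
import os
import stat

def toy_walk(root, ignore):
    results = {}
    work = ['']
    while work:
        nd = work.pop()
        for f in os.listdir(os.path.join(root, nd)):
            nf = nd + '/' + f if nd else f
            st = os.lstat(os.path.join(root, nf))
            if stat.S_ISDIR(st.st_mode):
                if not ignore(nf):
                    work.append(nf)  # descend later, like wadd(nf)
            else:
                results[nf] = st
    return results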
1215 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1207 def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
1216 # Force Rayon (Rust parallelism library) to respect the number of
1208 # Force Rayon (Rust parallelism library) to respect the number of
1217 # workers. This is a temporary workaround until Rust code knows
1209 # workers. This is a temporary workaround until Rust code knows
1218 # how to read the config file.
1210 # how to read the config file.
1219 numcpus = self._ui.configint(b"worker", b"numcpus")
1211 numcpus = self._ui.configint(b"worker", b"numcpus")
1220 if numcpus is not None:
1212 if numcpus is not None:
1221 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1213 encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)
1222
1214
1223 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1215 workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
1224 if not workers_enabled:
1216 if not workers_enabled:
1225 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1217 encoding.environ[b"RAYON_NUM_THREADS"] = b"1"
1226
1218
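# (note) setdefault is used above so that an explicit RAYON_NUM_THREADS set
# by the user still takes precedence; Rayon reads the variable once, when
# its global thread pool is first created, hence setting it here before the
# first call into the Rust status code.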
1227 (
1219 (
1228 lookup,
1220 lookup,
1229 modified,
1221 modified,
1230 added,
1222 added,
1231 removed,
1223 removed,
1232 deleted,
1224 deleted,
1233 clean,
1225 clean,
1234 ignored,
1226 ignored,
1235 unknown,
1227 unknown,
1236 warnings,
1228 warnings,
1237 bad,
1229 bad,
1238 traversed,
1230 traversed,
1239 dirty,
1231 dirty,
1240 ) = rustmod.status(
1232 ) = rustmod.status(
1241 self._map._map,
1233 self._map._map,
1242 matcher,
1234 matcher,
1243 self._rootdir,
1235 self._rootdir,
1244 self._ignorefiles(),
1236 self._ignorefiles(),
1245 self._checkexec,
1237 self._checkexec,
1246 self._lastnormaltime,
1238 self._lastnormaltime,
1247 bool(list_clean),
1239 bool(list_clean),
1248 bool(list_ignored),
1240 bool(list_ignored),
1249 bool(list_unknown),
1241 bool(list_unknown),
1250 bool(matcher.traversedir),
1242 bool(matcher.traversedir),
1251 )
1243 )
1252
1244
1253 self._dirty |= dirty
1245 self._dirty |= dirty
1254
1246
1255 if matcher.traversedir:
1247 if matcher.traversedir:
1256 for dir in traversed:
1248 for dir in traversed:
1257 matcher.traversedir(dir)
1249 matcher.traversedir(dir)
1258
1250
1259 if self._ui.warn:
1251 if self._ui.warn:
1260 for item in warnings:
1252 for item in warnings:
1261 if isinstance(item, tuple):
1253 if isinstance(item, tuple):
1262 file_path, syntax = item
1254 file_path, syntax = item
1263 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1255 msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
1264 file_path,
1256 file_path,
1265 syntax,
1257 syntax,
1266 )
1258 )
1267 self._ui.warn(msg)
1259 self._ui.warn(msg)
1268 else:
1260 else:
1269 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1261 msg = _(b"skipping unreadable pattern file '%s': %s\n")
1270 self._ui.warn(
1262 self._ui.warn(
1271 msg
1263 msg
1272 % (
1264 % (
1273 pathutil.canonpath(
1265 pathutil.canonpath(
1274 self._rootdir, self._rootdir, item
1266 self._rootdir, self._rootdir, item
1275 ),
1267 ),
1276 b"No such file or directory",
1268 b"No such file or directory",
1277 )
1269 )
1278 )
1270 )
1279
1271
1280 for (fn, message) in bad:
1272 for (fn, message) in bad:
1281 matcher.bad(fn, encoding.strtolocal(message))
1273 matcher.bad(fn, encoding.strtolocal(message))
1282
1274
1283 status = scmutil.status(
1275 status = scmutil.status(
1284 modified=modified,
1276 modified=modified,
1285 added=added,
1277 added=added,
1286 removed=removed,
1278 removed=removed,
1287 deleted=deleted,
1279 deleted=deleted,
1288 unknown=unknown,
1280 unknown=unknown,
1289 ignored=ignored,
1281 ignored=ignored,
1290 clean=clean,
1282 clean=clean,
1291 )
1283 )
1292 return (lookup, status)
1284 return (lookup, status)
1293
1285
1294 def status(self, match, subrepos, ignored, clean, unknown):
1286 def status(self, match, subrepos, ignored, clean, unknown):
1295 """Determine the status of the working copy relative to the
1287 """Determine the status of the working copy relative to the
1296 dirstate and return a pair of (unsure, status), where status is of type
1288 dirstate and return a pair of (unsure, status), where status is of type
1297 scmutil.status and:
1289 scmutil.status and:
1298
1290
1299 unsure:
1291 unsure:
1300 files that might have been modified since the dirstate was
1292 files that might have been modified since the dirstate was
1301 written, but need to be read to be sure (size is the same
1293 written, but need to be read to be sure (size is the same
1302 but mtime differs)
1294 but mtime differs)
1303 status.modified:
1295 status.modified:
1304 files that have definitely been modified since the dirstate
1296 files that have definitely been modified since the dirstate
1305 was written (different size or mode)
1297 was written (different size or mode)
1306 status.clean:
1298 status.clean:
1307 files that have definitely not been modified since the
1299 files that have definitely not been modified since the
1308 dirstate was written
1300 dirstate was written
1309 """
1301 """
1310 listignored, listclean, listunknown = ignored, clean, unknown
1302 listignored, listclean, listunknown = ignored, clean, unknown
1311 lookup, modified, added, unknown, ignored = [], [], [], [], []
1303 lookup, modified, added, unknown, ignored = [], [], [], [], []
1312 removed, deleted, clean = [], [], []
1304 removed, deleted, clean = [], [], []
1313
1305
1314 dmap = self._map
1306 dmap = self._map
1315 dmap.preload()
1307 dmap.preload()
1316
1308
1317 use_rust = True
1309 use_rust = True
1318
1310
1319 allowed_matchers = (
1311 allowed_matchers = (
1320 matchmod.alwaysmatcher,
1312 matchmod.alwaysmatcher,
1321 matchmod.exactmatcher,
1313 matchmod.exactmatcher,
1322 matchmod.includematcher,
1314 matchmod.includematcher,
1323 )
1315 )
1324
1316
1325 if rustmod is None:
1317 if rustmod is None:
1326 use_rust = False
1318 use_rust = False
1327 elif self._checkcase:
1319 elif self._checkcase:
1328 # Case-insensitive filesystems are not handled yet
1320 # Case-insensitive filesystems are not handled yet
1329 use_rust = False
1321 use_rust = False
1330 elif subrepos:
1322 elif subrepos:
1331 use_rust = False
1323 use_rust = False
1332 elif sparse.enabled:
1324 elif sparse.enabled:
1333 use_rust = False
1325 use_rust = False
1334 elif not isinstance(match, allowed_matchers):
1326 elif not isinstance(match, allowed_matchers):
1335 # Some matchers have yet to be implemented
1327 # Some matchers have yet to be implemented
1336 use_rust = False
1328 use_rust = False
1337
1329
1338 if use_rust:
1330 if use_rust:
1339 try:
1331 try:
1340 return self._rust_status(
1332 return self._rust_status(
1341 match, listclean, listignored, listunknown
1333 match, listclean, listignored, listunknown
1342 )
1334 )
1343 except rustmod.FallbackError:
1335 except rustmod.FallbackError:
1344 pass
1336 pass
1345
1337
1346 def noop(f):
1338 def noop(f):
1347 pass
1339 pass
1348
1340
1349 dcontains = dmap.__contains__
1341 dcontains = dmap.__contains__
1350 dget = dmap.__getitem__
1342 dget = dmap.__getitem__
1351 ladd = lookup.append # aka "unsure"
1343 ladd = lookup.append # aka "unsure"
1352 madd = modified.append
1344 madd = modified.append
1353 aadd = added.append
1345 aadd = added.append
1354 uadd = unknown.append if listunknown else noop
1346 uadd = unknown.append if listunknown else noop
1355 iadd = ignored.append if listignored else noop
1347 iadd = ignored.append if listignored else noop
1356 radd = removed.append
1348 radd = removed.append
1357 dadd = deleted.append
1349 dadd = deleted.append
1358 cadd = clean.append if listclean else noop
1350 cadd = clean.append if listclean else noop
1359 mexact = match.exact
1351 mexact = match.exact
1360 dirignore = self._dirignore
1352 dirignore = self._dirignore
1361 checkexec = self._checkexec
1353 checkexec = self._checkexec
1362 copymap = self._map.copymap
1354 copymap = self._map.copymap
1363 lastnormaltime = self._lastnormaltime
1355 lastnormaltime = self._lastnormaltime
1364
1356
1365 # We need to do full walks when either
1357 # We need to do full walks when either
1366 # - we're listing all clean files, or
1358 # - we're listing all clean files, or
1367 # - match.traversedir does something, because match.traversedir should
1359 # - match.traversedir does something, because match.traversedir should
1368 # be called for every dir in the working dir
1360 # be called for every dir in the working dir
1369 full = listclean or match.traversedir is not None
1361 full = listclean or match.traversedir is not None
1370 for fn, st in pycompat.iteritems(
1362 for fn, st in pycompat.iteritems(
1371 self.walk(match, subrepos, listunknown, listignored, full=full)
1363 self.walk(match, subrepos, listunknown, listignored, full=full)
1372 ):
1364 ):
1373 if not dcontains(fn):
1365 if not dcontains(fn):
1374 if (listignored or mexact(fn)) and dirignore(fn):
1366 if (listignored or mexact(fn)) and dirignore(fn):
1375 if listignored:
1367 if listignored:
1376 iadd(fn)
1368 iadd(fn)
1377 else:
1369 else:
1378 uadd(fn)
1370 uadd(fn)
1379 continue
1371 continue
1380
1372
1381 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1373 # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
1382 # written like that for performance reasons. dmap[fn] is not a
1374 # written like that for performance reasons. dmap[fn] is not a
1383 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1375 # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
1384 # opcode has fast paths when the value to be unpacked is a tuple or
1376 # opcode has fast paths when the value to be unpacked is a tuple or
1385 # a list, but falls back to creating a full-fledged iterator in
1377 # a list, but falls back to creating a full-fledged iterator in
1386 # general. That is much slower than simply accessing and storing the
1378 # general. That is much slower than simply accessing and storing the
1387 # tuple members one by one.
1379 # tuple members one by one.
1388 t = dget(fn)
1380 t = dget(fn)
1389 mode = t.mode
1381 mode = t.mode
1390 size = t.size
1382 size = t.size
1391 time = t.mtime
1383 time = t.mtime
1392
1384
1393 if not st and t.tracked:
1385 if not st and t.tracked:
1394 dadd(fn)
1386 dadd(fn)
1395 elif t.merged:
1387 elif t.merged:
1396 madd(fn)
1388 madd(fn)
1397 elif t.added:
1389 elif t.added:
1398 aadd(fn)
1390 aadd(fn)
1399 elif t.removed:
1391 elif t.removed:
1400 radd(fn)
1392 radd(fn)
1401 elif t.tracked:
1393 elif t.tracked:
1402 if (
1394 if (
1403 size >= 0
1395 size >= 0
1404 and (
1396 and (
1405 (size != st.st_size and size != st.st_size & _rangemask)
1397 (size != st.st_size and size != st.st_size & _rangemask)
1406 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1398 or ((mode ^ st.st_mode) & 0o100 and checkexec)
1407 )
1399 )
1408 or t.from_p2
1400 or t.from_p2
1409 or fn in copymap
1401 or fn in copymap
1410 ):
1402 ):
1411 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1403 if stat.S_ISLNK(st.st_mode) and size != st.st_size:
1412 # issue6456: Size returned may be longer due to
1404 # issue6456: Size returned may be longer due to
1413 # encryption on EXT-4 fscrypt, undecided.
1405 # encryption on EXT-4 fscrypt, undecided.
1414 ladd(fn)
1406 ladd(fn)
1415 else:
1407 else:
1416 madd(fn)
1408 madd(fn)
1417 elif (
1409 elif (
1418 time != st[stat.ST_MTIME]
1410 time != st[stat.ST_MTIME]
1419 and time != st[stat.ST_MTIME] & _rangemask
1411 and time != st[stat.ST_MTIME] & _rangemask
1420 ):
1412 ):
1421 ladd(fn)
1413 ladd(fn)
1422 elif st[stat.ST_MTIME] == lastnormaltime:
1414 elif st[stat.ST_MTIME] == lastnormaltime:
1423 # fn may have just been marked as normal and it may have
1415 # fn may have just been marked as normal and it may have
1424 # changed in the same second without changing its size.
1416 # changed in the same second without changing its size.
1425 # This can happen if we quickly do multiple commits.
1417 # This can happen if we quickly do multiple commits.
1426 # Force lookup, so we don't miss such a racy file change.
1418 # Force lookup, so we don't miss such a racy file change.
1427 ladd(fn)
1419 ladd(fn)
1428 elif listclean:
1420 elif listclean:
1429 cadd(fn)
1421 cadd(fn)
1430 status = scmutil.status(
1422 status = scmutil.status(
1431 modified, added, removed, deleted, unknown, ignored, clean
1423 modified, added, removed, deleted, unknown, ignored, clean
1432 )
1424 )
1433 return (lookup, status)
1425 return (lookup, status)
1434
1426
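# Minimal sketch of the clean/modified/lookup split applied above, under
# simplifying assumptions (no exec-bit, copy, or from_p2 handling and no
# _rangemask truncation). `entry` is a hypothetical (mode, size, mtime)
# triple and `st` an os.stat_result for the same file.
import stat

def classify(entry, st, lastnormaltime):
    mode, size, mtime = entry
    if size != st.st_size:
        return 'modified'  # size differs: definitely changed
    if mtime != st[stat.ST_MTIME]:
        return 'lookup'    # same size, different mtime: content must be read
    if st[stat.ST_MTIME] == lastnormaltime:
        return 'lookup'    # recorded in the same second: racy, re-check
    return 'clean'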
1435 def matches(self, match):
1427 def matches(self, match):
1436 """
1428 """
1437 return files in the dirstate (in whatever state) filtered by match
1429 return files in the dirstate (in whatever state) filtered by match
1438 """
1430 """
1439 dmap = self._map
1431 dmap = self._map
1440 if rustmod is not None:
1432 if rustmod is not None:
1441 dmap = self._map._map
1433 dmap = self._map._map
1442
1434
1443 if match.always():
1435 if match.always():
1444 return dmap.keys()
1436 return dmap.keys()
1445 files = match.files()
1437 files = match.files()
1446 if match.isexact():
1438 if match.isexact():
1447 # fast path -- filter the other way around, since typically files is
1439 # fast path -- filter the other way around, since typically files is
1448 # much smaller than dmap
1440 # much smaller than dmap
1449 return [f for f in files if f in dmap]
1441 return [f for f in files if f in dmap]
1450 if match.prefix() and all(fn in dmap for fn in files):
1442 if match.prefix() and all(fn in dmap for fn in files):
1451 # fast path -- all the values are known to be files, so just return
1443 # fast path -- all the values are known to be files, so just return
1452 # that
1444 # that
1453 return list(files)
1445 return list(files)
1454 return [f for f in dmap if match(f)]
1446 return [f for f in dmap if match(f)]
1455
1447
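# The fast paths above pick which side to probe based on expected sizes: for
# an exact matcher it is cheaper to test the (small) pattern list against
# the (large) dmap than the reverse. Toy illustration with made-up names:
dmap_toy = {'a': None, 'b': None, 'c': None}  # stand-in for the dirstate map
files_toy = ['b', 'z']                        # explicit files from the matcher
assert [f for f in files_toy if f in dmap_toy] == ['b']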
1456 def _actualfilename(self, tr):
1448 def _actualfilename(self, tr):
1457 if tr:
1449 if tr:
1458 return self._pendingfilename
1450 return self._pendingfilename
1459 else:
1451 else:
1460 return self._filename
1452 return self._filename
1461
1453
1462 def savebackup(self, tr, backupname):
1454 def savebackup(self, tr, backupname):
1463 '''Save current dirstate into backup file'''
1455 '''Save current dirstate into backup file'''
1464 filename = self._actualfilename(tr)
1456 filename = self._actualfilename(tr)
1465 assert backupname != filename
1457 assert backupname != filename
1466
1458
1467 # use '_writedirstate' instead of 'write' to make sure changes are written
1459 # use '_writedirstate' instead of 'write' to make sure changes are written
1468 # out, because the latter skips writing while a transaction is running.
1460 # out, because the latter skips writing while a transaction is running.
1469 # The output file will be used to create a backup of the dirstate at this point.
1461 # The output file will be used to create a backup of the dirstate at this point.
1470 if self._dirty or not self._opener.exists(filename):
1462 if self._dirty or not self._opener.exists(filename):
1471 self._writedirstate(
1463 self._writedirstate(
1472 tr,
1464 tr,
1473 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1465 self._opener(filename, b"w", atomictemp=True, checkambig=True),
1474 )
1466 )
1475
1467
1476 if tr:
1468 if tr:
1477 # ensure that subsequent tr.writepending returns True for
1469 # ensure that subsequent tr.writepending returns True for
1478 # changes written out above, even if dirstate is never
1470 # changes written out above, even if dirstate is never
1479 # changed after this
1471 # changed after this
1480 tr.addfilegenerator(
1472 tr.addfilegenerator(
1481 b'dirstate',
1473 b'dirstate',
1482 (self._filename,),
1474 (self._filename,),
1483 lambda f: self._writedirstate(tr, f),
1475 lambda f: self._writedirstate(tr, f),
1484 location=b'plain',
1476 location=b'plain',
1485 )
1477 )
1486
1478
1487 # ensure that pending file written above is unlinked at
1479 # ensure that pending file written above is unlinked at
1488 # failure, even if tr.writepending isn't invoked until the
1480 # failure, even if tr.writepending isn't invoked until the
1489 # end of this transaction
1481 # end of this transaction
1490 tr.registertmp(filename, location=b'plain')
1482 tr.registertmp(filename, location=b'plain')
1491
1483
1492 self._opener.tryunlink(backupname)
1484 self._opener.tryunlink(backupname)
1493 # hardlink backup is okay because _writedirstate is always called
1485 # hardlink backup is okay because _writedirstate is always called
1494 # with an "atomictemp=True" file.
1486 # with an "atomictemp=True" file.
1495 util.copyfile(
1487 util.copyfile(
1496 self._opener.join(filename),
1488 self._opener.join(filename),
1497 self._opener.join(backupname),
1489 self._opener.join(backupname),
1498 hardlink=True,
1490 hardlink=True,
1499 )
1491 )
1500
1492
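# Sketch of the hardlink-backup idea used above: because the dirstate file
# is always replaced atomically (temp file + rename), a hardlinked backup
# can never observe a half-written file. A plain-Python approximation, not
# util.copyfile itself:
import os
import shutil

def backup(src, dst):
    try:
        os.link(src, dst)       # cheap: shares the inode with src
    except OSError:
        shutil.copy2(src, dst)  # cross-device, or no hardlink support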
1501 def restorebackup(self, tr, backupname):
1493 def restorebackup(self, tr, backupname):
1502 '''Restore dirstate by backup file'''
1494 '''Restore dirstate by backup file'''
1503 # this "invalidate()" prevents "wlock.release()" from writing
1495 # this "invalidate()" prevents "wlock.release()" from writing
1504 # changes of dirstate out after restoring from backup file
1496 # changes of dirstate out after restoring from backup file
1505 self.invalidate()
1497 self.invalidate()
1506 filename = self._actualfilename(tr)
1498 filename = self._actualfilename(tr)
1507 o = self._opener
1499 o = self._opener
1508 if util.samefile(o.join(backupname), o.join(filename)):
1500 if util.samefile(o.join(backupname), o.join(filename)):
1509 o.unlink(backupname)
1501 o.unlink(backupname)
1510 else:
1502 else:
1511 o.rename(backupname, filename, checkambig=True)
1503 o.rename(backupname, filename, checkambig=True)
1512
1504
1513 def clearbackup(self, tr, backupname):
1505 def clearbackup(self, tr, backupname):
1514 '''Clear backup file'''
1506 '''Clear backup file'''
1515 self._opener.unlink(backupname)
1507 self._opener.unlink(backupname)
1516
1508
1517 def verify(self, m1, m2):
1509 def verify(self, m1, m2):
1518 """check the dirstate content again the parent manifest and yield errors"""
1510 """check the dirstate content again the parent manifest and yield errors"""
1519 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1511 missing_from_p1 = b"%s in state %s, but not in manifest1\n"
1520 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1512 unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
1521 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1513 missing_from_ps = b"%s in state %s, but not in either manifest\n"
1522 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1514 missing_from_ds = b"%s in manifest1, but listed as state %s\n"
1523 for f, entry in self.items():
1515 for f, entry in self.items():
1524 state = entry.state
1516 state = entry.state
1525 if state in b"nr" and f not in m1:
1517 if state in b"nr" and f not in m1:
1526 yield (missing_from_p1, f, state)
1518 yield (missing_from_p1, f, state)
1527 if state in b"a" and f in m1:
1519 if state in b"a" and f in m1:
1528 yield (unexpected_in_p1, f, state)
1520 yield (unexpected_in_p1, f, state)
1529 if state in b"m" and f not in m1 and f not in m2:
1521 if state in b"m" and f not in m1 and f not in m2:
1530 yield (missing_from_ps, f, state)
1522 yield (missing_from_ps, f, state)
1531 for f in m1:
1523 for f in m1:
1532 state = self.get_entry(f).state
1524 state = self.get_entry(f).state
1533 if state not in b"nrm":
1525 if state not in b"nrm":
1534 yield (missing_from_ds, f, state)
1526 yield (missing_from_ds, f, state)
@@ -1,876 +1,871 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import collections
3 import collections
4 import errno
4 import errno
5 import shutil
5 import shutil
6 import struct
6 import struct
7
7
8 from .i18n import _
8 from .i18n import _
9 from .node import (
9 from .node import (
10 bin,
10 bin,
11 hex,
11 hex,
12 nullrev,
12 nullrev,
13 )
13 )
14 from . import (
14 from . import (
15 error,
15 error,
16 filemerge,
16 filemerge,
17 pycompat,
17 pycompat,
18 util,
18 util,
19 )
19 )
20 from .utils import hashutil
20 from .utils import hashutil
21
21
22 _pack = struct.pack
22 _pack = struct.pack
23 _unpack = struct.unpack
23 _unpack = struct.unpack
24
24
25
25
26 def _droponode(data):
26 def _droponode(data):
27 # used for compatibility for v1
27 # used for compatibility for v1
28 bits = data.split(b'\0')
28 bits = data.split(b'\0')
29 bits = bits[:-2] + bits[-1:]
29 bits = bits[:-2] + bits[-1:]
30 return b'\0'.join(bits)
30 return b'\0'.join(bits)
31
31
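# _droponode demo with made-up field values: the second-to-last NUL-separated
# field (the "other" filenode, which the v1 format cannot store) is dropped.
assert _droponode(b'a\x00b\x00c\x00d') == b'a\x00b\x00d'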
32
32
33 def _filectxorabsent(hexnode, ctx, f):
33 def _filectxorabsent(hexnode, ctx, f):
34 if hexnode == ctx.repo().nodeconstants.nullhex:
34 if hexnode == ctx.repo().nodeconstants.nullhex:
35 return filemerge.absentfilectx(ctx, f)
35 return filemerge.absentfilectx(ctx, f)
36 else:
36 else:
37 return ctx[f]
37 return ctx[f]
38
38
39
39
40 # Merge state record types. See ``mergestate`` docs for more.
40 # Merge state record types. See ``mergestate`` docs for more.
41
41
42 ####
42 ####
43 # merge records which records metadata about a current merge
43 # merge records which records metadata about a current merge
44 # exists only once in a mergestate
44 # exists only once in a mergestate
45 #####
45 #####
46 RECORD_LOCAL = b'L'
46 RECORD_LOCAL = b'L'
47 RECORD_OTHER = b'O'
47 RECORD_OTHER = b'O'
48 # record merge labels
48 # record merge labels
49 RECORD_LABELS = b'l'
49 RECORD_LABELS = b'l'
50
50
51 #####
51 #####
52 # record extra information about files, with one entry containing info about one
52 # record extra information about files, with one entry containing info about one
53 # file. Hence, multiple of them can exist
53 # file. Hence, multiple of them can exist
54 #####
54 #####
55 RECORD_FILE_VALUES = b'f'
55 RECORD_FILE_VALUES = b'f'
56
56
57 #####
57 #####
58 # merge records which represent the state of individual merges of files/folders
58 # merge records which represent the state of individual merges of files/folders
59 # These are top level records for each entry containing merge related info.
59 # These are top level records for each entry containing merge related info.
60 # Each record of these has info about one file. Hence multiple of them can
60 # Each record of these has info about one file. Hence multiple of them can
61 # exist
61 # exist
62 #####
62 #####
63 RECORD_MERGED = b'F'
63 RECORD_MERGED = b'F'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
64 RECORD_CHANGEDELETE_CONFLICT = b'C'
65 # the path was dir on one side of merge and file on another
65 # the path was dir on one side of merge and file on another
66 RECORD_PATH_CONFLICT = b'P'
66 RECORD_PATH_CONFLICT = b'P'
67
67
68 #####
68 #####
69 # possible state which a merge entry can have. These are stored inside top-level
69 # possible state which a merge entry can have. These are stored inside top-level
70 # merge records mentioned just above.
70 # merge records mentioned just above.
71 #####
71 #####
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
76 # represents that the file was automatically merged in favor
76 # represents that the file was automatically merged in favor
77 # of other version. This info is used on commit.
77 # of other version. This info is used on commit.
78 # This is now deprecated and commit related information is now
78 # This is now deprecated and commit related information is now
79 # stored in RECORD_FILE_VALUES
79 # stored in RECORD_FILE_VALUES
80 MERGE_RECORD_MERGED_OTHER = b'o'
80 MERGE_RECORD_MERGED_OTHER = b'o'
81
81
82 #####
82 #####
83 # top level record which stores other unknown records. Multiple of these can
83 # top level record which stores other unknown records. Multiple of these can
84 # exist
84 # exist
85 #####
85 #####
86 RECORD_OVERRIDE = b't'
86 RECORD_OVERRIDE = b't'
87
87
88 #####
88 #####
89 # legacy records which are no longer used but kept to prevent breaking BC
89 # legacy records which are no longer used but kept to prevent breaking BC
90 #####
90 #####
91 # This record was released in 5.4 and usage was removed in 5.5
91 # This record was released in 5.4 and usage was removed in 5.5
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
92 LEGACY_RECORD_RESOLVED_OTHER = b'R'
93 # This record was released in 3.7 and usage was removed in 5.6
93 # This record was released in 3.7 and usage was removed in 5.6
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
94 LEGACY_RECORD_DRIVER_RESOLVED = b'd'
95 # This record was released in 3.7 and usage was removed in 5.6
95 # This record was released in 3.7 and usage was removed in 5.6
96 LEGACY_MERGE_DRIVER_STATE = b'm'
96 LEGACY_MERGE_DRIVER_STATE = b'm'
97 # This record was released in 3.7 and usage was removed in 5.6
97 # This record was released in 3.7 and usage was removed in 5.6
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
98 LEGACY_MERGE_DRIVER_MERGE = b'D'
99
99
100
100
101 ACTION_FORGET = b'f'
101 ACTION_FORGET = b'f'
102 ACTION_REMOVE = b'r'
102 ACTION_REMOVE = b'r'
103 ACTION_ADD = b'a'
103 ACTION_ADD = b'a'
104 ACTION_GET = b'g'
104 ACTION_GET = b'g'
105 ACTION_PATH_CONFLICT = b'p'
105 ACTION_PATH_CONFLICT = b'p'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
106 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
107 ACTION_ADD_MODIFIED = b'am'
107 ACTION_ADD_MODIFIED = b'am'
108 ACTION_CREATED = b'c'
108 ACTION_CREATED = b'c'
109 ACTION_DELETED_CHANGED = b'dc'
109 ACTION_DELETED_CHANGED = b'dc'
110 ACTION_CHANGED_DELETED = b'cd'
110 ACTION_CHANGED_DELETED = b'cd'
111 ACTION_MERGE = b'm'
111 ACTION_MERGE = b'm'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
112 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
113 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
114 ACTION_KEEP = b'k'
114 ACTION_KEEP = b'k'
115 # the file was absent on the local side before the merge and we should
115 # the file was absent on the local side before the merge and we should
116 # keep it absent (absent means the file is not present; it can be the
116 # keep it absent (absent means the file is not present; it can be the
117 # result of a file deletion, rename, etc.)
117 # result of a file deletion, rename, etc.)
118 ACTION_KEEP_ABSENT = b'ka'
118 ACTION_KEEP_ABSENT = b'ka'
119 # the file is absent on the ancestor and remote side of the merge
119 # the file is absent on the ancestor and remote side of the merge
120 # hence this file is new and we should keep it
120 # hence this file is new and we should keep it
121 ACTION_KEEP_NEW = b'kn'
121 ACTION_KEEP_NEW = b'kn'
122 ACTION_EXEC = b'e'
122 ACTION_EXEC = b'e'
123 ACTION_CREATED_MERGE = b'cm'
123 ACTION_CREATED_MERGE = b'cm'
124
124
125 # actions which are no op
125 # actions which are no op
126 NO_OP_ACTIONS = (
126 NO_OP_ACTIONS = (
127 ACTION_KEEP,
127 ACTION_KEEP,
128 ACTION_KEEP_ABSENT,
128 ACTION_KEEP_ABSENT,
129 ACTION_KEEP_NEW,
129 ACTION_KEEP_NEW,
130 )
130 )
131
131
132
132
133 class _mergestate_base(object):
133 class _mergestate_base(object):
134 """track 3-way merge state of individual files
134 """track 3-way merge state of individual files
135
135
136 The merge state is stored on disk when needed. Two files are used: one with
136 The merge state is stored on disk when needed. Two files are used: one with
137 an old format (version 1), and one with a new format (version 2). Version 2
137 an old format (version 1), and one with a new format (version 2). Version 2
138 stores a superset of the data in version 1, including new kinds of records
138 stores a superset of the data in version 1, including new kinds of records
139 in the future. For more about the new format, see the documentation for
139 in the future. For more about the new format, see the documentation for
140 `_readrecordsv2`.
140 `_readrecordsv2`.
141
141
142 Each record can contain arbitrary content, and has an associated type. This
142 Each record can contain arbitrary content, and has an associated type. This
143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
143 `type` should be a letter. If `type` is uppercase, the record is mandatory:
144 versions of Mercurial that don't support it should abort. If `type` is
144 versions of Mercurial that don't support it should abort. If `type` is
145 lowercase, the record can be safely ignored.
145 lowercase, the record can be safely ignored.
146
146
147 Currently known records:
147 Currently known records:
148
148
149 L: the node of the "local" part of the merge (hexified version)
149 L: the node of the "local" part of the merge (hexified version)
150 O: the node of the "other" part of the merge (hexified version)
150 O: the node of the "other" part of the merge (hexified version)
151 F: an entry for a file to be merged
151 F: an entry for a file to be merged
152 C: a change/delete or delete/change conflict
152 C: a change/delete or delete/change conflict
153 P: a path conflict (file vs directory)
153 P: a path conflict (file vs directory)
154 f: a (filename, dictionary) tuple of optional values for a given file
154 f: a (filename, dictionary) tuple of optional values for a given file
155 l: the labels for the parts of the merge.
155 l: the labels for the parts of the merge.
156
156
157 Merge record states (stored in self._state, indexed by filename):
157 Merge record states (stored in self._state, indexed by filename):
158 u: unresolved conflict
158 u: unresolved conflict
159 r: resolved conflict
159 r: resolved conflict
160 pu: unresolved path conflict (file conflicts with directory)
160 pu: unresolved path conflict (file conflicts with directory)
161 pr: resolved path conflict
161 pr: resolved path conflict
162 o: file was merged in favor of other parent of merge (DEPRECATED)
162 o: file was merged in favor of other parent of merge (DEPRECATED)
163
163
164 The resolve command transitions between 'u' and 'r' for conflicts and
164 The resolve command transitions between 'u' and 'r' for conflicts and
165 'pu' and 'pr' for path conflicts.
165 'pu' and 'pr' for path conflicts.
166 """
166 """
167
167
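# Sketch of the case convention described above: uppercase record types are
# mandatory (unknown ones must abort), lowercase ones can be skipped. The
# `known` default is hypothetical; the real reader handles each known type
# explicitly in _read(). Roughly:
#   def check_records(records, known=b'LOFCPfl'):
#       unsupported = {rtype for rtype, _data in records
#                      if rtype not in known and not rtype.islower()}
#       if unsupported:
#           raise ValueError(b', '.join(sorted(unsupported)))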
168 def __init__(self, repo):
168 def __init__(self, repo):
169 """Initialize the merge state.
169 """Initialize the merge state.
170
170
171 Do not use this directly! Instead call read() or clean()."""
171 Do not use this directly! Instead call read() or clean()."""
172 self._repo = repo
172 self._repo = repo
173 self._state = {}
173 self._state = {}
174 self._stateextras = collections.defaultdict(dict)
174 self._stateextras = collections.defaultdict(dict)
175 self._local = None
175 self._local = None
176 self._other = None
176 self._other = None
177 self._labels = None
177 self._labels = None
178 # contains a mapping of form:
178 # contains a mapping of form:
179 # {filename : (merge_return_value, action_to_be_performed)}
179 # {filename : (merge_return_value, action_to_be_performed)}
180 # these are results of re-running merge process
180 # these are results of re-running merge process
181 # this dict is used to perform actions on dirstate caused by re-running
181 # this dict is used to perform actions on dirstate caused by re-running
182 # the merge
182 # the merge
183 self._results = {}
183 self._results = {}
184 self._dirty = False
184 self._dirty = False
185
185
186 def reset(self):
186 def reset(self):
187 pass
187 pass
188
188
189 def start(self, node, other, labels=None):
189 def start(self, node, other, labels=None):
190 self._local = node
190 self._local = node
191 self._other = other
191 self._other = other
192 self._labels = labels
192 self._labels = labels
193
193
194 @util.propertycache
194 @util.propertycache
195 def local(self):
195 def local(self):
196 if self._local is None:
196 if self._local is None:
197 msg = b"local accessed but self._local isn't set"
197 msg = b"local accessed but self._local isn't set"
198 raise error.ProgrammingError(msg)
198 raise error.ProgrammingError(msg)
199 return self._local
199 return self._local
200
200
201 @util.propertycache
201 @util.propertycache
202 def localctx(self):
202 def localctx(self):
203 return self._repo[self.local]
203 return self._repo[self.local]
204
204
205 @util.propertycache
205 @util.propertycache
206 def other(self):
206 def other(self):
207 if self._other is None:
207 if self._other is None:
208 msg = b"other accessed but self._other isn't set"
208 msg = b"other accessed but self._other isn't set"
209 raise error.ProgrammingError(msg)
209 raise error.ProgrammingError(msg)
210 return self._other
210 return self._other
211
211
212 @util.propertycache
212 @util.propertycache
213 def otherctx(self):
213 def otherctx(self):
214 return self._repo[self.other]
214 return self._repo[self.other]
215
215
216 def active(self):
216 def active(self):
217 """Whether mergestate is active.
217 """Whether mergestate is active.
218
218
219 Returns True if there appears to be mergestate. This is a rough proxy
219 Returns True if there appears to be mergestate. This is a rough proxy
220 for "is a merge in progress."
220 for "is a merge in progress."
221 """
221 """
222 return bool(self._local) or bool(self._state)
222 return bool(self._local) or bool(self._state)
223
223
224 def commit(self):
224 def commit(self):
225 """Write current state on disk (if necessary)"""
225 """Write current state on disk (if necessary)"""
226
226
227 @staticmethod
227 @staticmethod
228 def getlocalkey(path):
228 def getlocalkey(path):
229 """hash the path of a local file context for storage in the .hg/merge
229 """hash the path of a local file context for storage in the .hg/merge
230 directory."""
230 directory."""
231
231
232 return hex(hashutil.sha1(path).digest())
232 return hex(hashutil.sha1(path).digest())
233
233
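# Usage sketch: the local key is just the SHA-1 hex digest of the path, so
# backups of distinct conflicting files cannot collide. hashutil.sha1 is
# assumed to behave like hashlib.sha1 here, i.e. roughly:
#   import hashlib
#   key = hashlib.sha1(b'dir/file.txt').hexdigest()  # -> .hg/merge/<key>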
234 def _make_backup(self, fctx, localkey):
234 def _make_backup(self, fctx, localkey):
235 raise NotImplementedError()
235 raise NotImplementedError()
236
236
237 def _restore_backup(self, fctx, localkey, flags):
237 def _restore_backup(self, fctx, localkey, flags):
238 raise NotImplementedError()
238 raise NotImplementedError()
239
239
240 def add(self, fcl, fco, fca, fd):
240 def add(self, fcl, fco, fca, fd):
241 """add a new (potentially?) conflicting file the merge state
241 """add a new (potentially?) conflicting file the merge state
242 fcl: file context for local,
242 fcl: file context for local,
243 fco: file context for remote,
243 fco: file context for remote,
244 fca: file context for ancestors,
244 fca: file context for ancestors,
245 fd: file path of the resulting merge.
245 fd: file path of the resulting merge.
246
246
247 note: also write the local version to the `.hg/merge` directory.
247 note: also write the local version to the `.hg/merge` directory.
248 """
248 """
249 if fcl.isabsent():
249 if fcl.isabsent():
250 localkey = self._repo.nodeconstants.nullhex
250 localkey = self._repo.nodeconstants.nullhex
251 else:
251 else:
252 localkey = mergestate.getlocalkey(fcl.path())
252 localkey = mergestate.getlocalkey(fcl.path())
253 self._make_backup(fcl, localkey)
253 self._make_backup(fcl, localkey)
254 self._state[fd] = [
254 self._state[fd] = [
255 MERGE_RECORD_UNRESOLVED,
255 MERGE_RECORD_UNRESOLVED,
256 localkey,
256 localkey,
257 fcl.path(),
257 fcl.path(),
258 fca.path(),
258 fca.path(),
259 hex(fca.filenode()),
259 hex(fca.filenode()),
260 fco.path(),
260 fco.path(),
261 hex(fco.filenode()),
261 hex(fco.filenode()),
262 fcl.flags(),
262 fcl.flags(),
263 ]
263 ]
264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
264 self._stateextras[fd][b'ancestorlinknode'] = hex(fca.node())
265 self._dirty = True
265 self._dirty = True
266
266
267 def addpathconflict(self, path, frename, forigin):
267 def addpathconflict(self, path, frename, forigin):
268 """add a new conflicting path to the merge state
268 """add a new conflicting path to the merge state
269 path: the path that conflicts
269 path: the path that conflicts
270 frename: the filename the conflicting file was renamed to
270 frename: the filename the conflicting file was renamed to
271 forigin: origin of the file ('l' or 'r' for local/remote)
271 forigin: origin of the file ('l' or 'r' for local/remote)
272 """
272 """
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
273 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
274 self._dirty = True
274 self._dirty = True
275
275
276 def addcommitinfo(self, path, data):
276 def addcommitinfo(self, path, data):
277 """stores information which is required at commit
277 """stores information which is required at commit
278 into _stateextras"""
278 into _stateextras"""
279 self._stateextras[path].update(data)
279 self._stateextras[path].update(data)
280 self._dirty = True
280 self._dirty = True
281
281
282 def __contains__(self, dfile):
282 def __contains__(self, dfile):
283 return dfile in self._state
283 return dfile in self._state
284
284
285 def __getitem__(self, dfile):
285 def __getitem__(self, dfile):
286 return self._state[dfile][0]
286 return self._state[dfile][0]
287
287
288 def __iter__(self):
288 def __iter__(self):
289 return iter(sorted(self._state))
289 return iter(sorted(self._state))
290
290
291 def files(self):
291 def files(self):
292 return self._state.keys()
292 return self._state.keys()
293
293
294 def mark(self, dfile, state):
294 def mark(self, dfile, state):
295 self._state[dfile][0] = state
295 self._state[dfile][0] = state
296 self._dirty = True
296 self._dirty = True
297
297
298 def unresolved(self):
298 def unresolved(self):
299 """Obtain the paths of unresolved files."""
299 """Obtain the paths of unresolved files."""
300
300
301 for f, entry in pycompat.iteritems(self._state):
301 for f, entry in pycompat.iteritems(self._state):
302 if entry[0] in (
302 if entry[0] in (
303 MERGE_RECORD_UNRESOLVED,
303 MERGE_RECORD_UNRESOLVED,
304 MERGE_RECORD_UNRESOLVED_PATH,
304 MERGE_RECORD_UNRESOLVED_PATH,
305 ):
305 ):
306 yield f
306 yield f
307
307
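# Usage sketch (`ms` is a hypothetical mergestate instance): unresolved() is
# a generator, so materialize or any() it as needed, e.g.:
#   if any(ms.unresolved()):
#       raise error.Abort(b'unresolved merge conflicts')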
308 def allextras(self):
308 def allextras(self):
309 """return all extras information stored with the mergestate"""
309 """return all extras information stored with the mergestate"""
310 return self._stateextras
310 return self._stateextras
311
311
312 def extras(self, filename):
312 def extras(self, filename):
313 """return extras stored with the mergestate for the given filename"""
313 """return extras stored with the mergestate for the given filename"""
314 return self._stateextras[filename]
314 return self._stateextras[filename]
315
315
316 def _resolve(self, preresolve, dfile, wctx):
316 def _resolve(self, preresolve, dfile, wctx):
317 """rerun merge process for file path `dfile`.
317 """rerun merge process for file path `dfile`.
318 Returns whether the merge was completed and the return value of merge
318 Returns whether the merge was completed and the return value of merge
319 obtained from filemerge._filemerge().
319 obtained from filemerge._filemerge().
320 """
320 """
321 if self[dfile] in (
321 if self[dfile] in (
322 MERGE_RECORD_RESOLVED,
322 MERGE_RECORD_RESOLVED,
323 LEGACY_RECORD_DRIVER_RESOLVED,
323 LEGACY_RECORD_DRIVER_RESOLVED,
324 ):
324 ):
325 return True, 0
325 return True, 0
326 stateentry = self._state[dfile]
326 stateentry = self._state[dfile]
327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
327 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
328 octx = self._repo[self._other]
328 octx = self._repo[self._other]
329 extras = self.extras(dfile)
329 extras = self.extras(dfile)
330 anccommitnode = extras.get(b'ancestorlinknode')
330 anccommitnode = extras.get(b'ancestorlinknode')
331 if anccommitnode:
331 if anccommitnode:
332 actx = self._repo[anccommitnode]
332 actx = self._repo[anccommitnode]
333 else:
333 else:
334 actx = None
334 actx = None
335 fcd = _filectxorabsent(localkey, wctx, dfile)
335 fcd = _filectxorabsent(localkey, wctx, dfile)
336 fco = _filectxorabsent(onode, octx, ofile)
336 fco = _filectxorabsent(onode, octx, ofile)
337 # TODO: move this to filectxorabsent
337 # TODO: move this to filectxorabsent
338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
338 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
339 # "premerge" x flags
339 # "premerge" x flags
340 flo = fco.flags()
340 flo = fco.flags()
341 fla = fca.flags()
341 fla = fca.flags()
342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
342 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
343 if fca.rev() == nullrev and flags != flo:
343 if fca.rev() == nullrev and flags != flo:
344 if preresolve:
344 if preresolve:
345 self._repo.ui.warn(
345 self._repo.ui.warn(
346 _(
346 _(
347 b'warning: cannot merge flags for %s '
347 b'warning: cannot merge flags for %s '
348 b'without common ancestor - keeping local flags\n'
348 b'without common ancestor - keeping local flags\n'
349 )
349 )
350 % afile
350 % afile
351 )
351 )
352 elif flags == fla:
352 elif flags == fla:
353 flags = flo
353 flags = flo
354 if preresolve:
354 if preresolve:
355 # restore local
355 # restore local
356 if localkey != self._repo.nodeconstants.nullhex:
356 if localkey != self._repo.nodeconstants.nullhex:
357 self._restore_backup(wctx[dfile], localkey, flags)
357 self._restore_backup(wctx[dfile], localkey, flags)
358 else:
358 else:
359 wctx[dfile].remove(ignoremissing=True)
359 wctx[dfile].remove(ignoremissing=True)
360 complete, merge_ret, deleted = filemerge.premerge(
360 complete, merge_ret, deleted = filemerge.premerge(
361 self._repo,
361 self._repo,
362 wctx,
362 wctx,
363 self._local,
363 self._local,
364 lfile,
364 lfile,
365 fcd,
365 fcd,
366 fco,
366 fco,
367 fca,
367 fca,
368 labels=self._labels,
368 labels=self._labels,
369 )
369 )
370 else:
370 else:
371 complete, merge_ret, deleted = filemerge.filemerge(
371 complete, merge_ret, deleted = filemerge.filemerge(
372 self._repo,
372 self._repo,
373 wctx,
373 wctx,
374 self._local,
374 self._local,
375 lfile,
375 lfile,
376 fcd,
376 fcd,
377 fco,
377 fco,
378 fca,
378 fca,
379 labels=self._labels,
379 labels=self._labels,
380 )
380 )
381 if merge_ret is None:
381 if merge_ret is None:
382 # If the return value of the merge is None, then there is no real conflict
382 # If the return value of the merge is None, then there is no real conflict
383 del self._state[dfile]
383 del self._state[dfile]
384 self._dirty = True
384 self._dirty = True
385 elif not merge_ret:
385 elif not merge_ret:
386 self.mark(dfile, MERGE_RECORD_RESOLVED)
386 self.mark(dfile, MERGE_RECORD_RESOLVED)
387
387
388 if complete:
388 if complete:
389 action = None
389 action = None
390 if deleted:
390 if deleted:
391 if fcd.isabsent():
391 if fcd.isabsent():
392 # dc: local picked. Need to drop if present, which may
392 # dc: local picked. Need to drop if present, which may
393 # happen on re-resolves.
393 # happen on re-resolves.
394 action = ACTION_FORGET
394 action = ACTION_FORGET
395 else:
395 else:
396 # cd: remote picked (or otherwise deleted)
396 # cd: remote picked (or otherwise deleted)
397 action = ACTION_REMOVE
397 action = ACTION_REMOVE
398 else:
398 else:
399 if fcd.isabsent(): # dc: remote picked
399 if fcd.isabsent(): # dc: remote picked
400 action = ACTION_GET
400 action = ACTION_GET
401 elif fco.isabsent(): # cd: local picked
401 elif fco.isabsent(): # cd: local picked
402 if dfile in self.localctx:
402 if dfile in self.localctx:
403 action = ACTION_ADD_MODIFIED
403 action = ACTION_ADD_MODIFIED
404 else:
404 else:
405 action = ACTION_ADD
405 action = ACTION_ADD
406 # else: regular merges (no action necessary)
406 # else: regular merges (no action necessary)
407 self._results[dfile] = merge_ret, action
407 self._results[dfile] = merge_ret, action
408
408
409 return complete, merge_ret
409 return complete, merge_ret
410
410
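# Sketch of the 3-way flag-merge rule applied in _resolve above (exec bit in
# play, no symlink): keep whichever side changed relative to the ancestor.
def merge_flags(local, other, ancestor):
    if local == other:
        return local
    if local == ancestor:
        return other   # only the other side changed its flags
    return local       # local changed (or both did): keep local flags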
411 def preresolve(self, dfile, wctx):
411 def preresolve(self, dfile, wctx):
412 """run premerge process for dfile
412 """run premerge process for dfile
413
413
414 Returns whether the merge is complete, and the exit code."""
414 Returns whether the merge is complete, and the exit code."""
415 return self._resolve(True, dfile, wctx)
415 return self._resolve(True, dfile, wctx)
416
416
417 def resolve(self, dfile, wctx):
417 def resolve(self, dfile, wctx):
418 """run merge process (assuming premerge was run) for dfile
418 """run merge process (assuming premerge was run) for dfile
419
419
420 Returns the exit code of the merge."""
420 Returns the exit code of the merge."""
421 return self._resolve(False, dfile, wctx)[1]
421 return self._resolve(False, dfile, wctx)[1]
422
422
423 def counts(self):
423 def counts(self):
424 """return counts for updated, merged and removed files in this
424 """return counts for updated, merged and removed files in this
425 session"""
425 session"""
426 updated, merged, removed = 0, 0, 0
426 updated, merged, removed = 0, 0, 0
427 for r, action in pycompat.itervalues(self._results):
427 for r, action in pycompat.itervalues(self._results):
428 if r is None:
428 if r is None:
429 updated += 1
429 updated += 1
430 elif r == 0:
430 elif r == 0:
431 if action == ACTION_REMOVE:
431 if action == ACTION_REMOVE:
432 removed += 1
432 removed += 1
433 else:
433 else:
434 merged += 1
434 merged += 1
435 return updated, merged, removed
435 return updated, merged, removed
436
436
437 def unresolvedcount(self):
437 def unresolvedcount(self):
438 """get unresolved count for this merge (persistent)"""
438 """get unresolved count for this merge (persistent)"""
439 return len(list(self.unresolved()))
439 return len(list(self.unresolved()))
440
440
441 def actions(self):
441 def actions(self):
442 """return lists of actions to perform on the dirstate"""
442 """return lists of actions to perform on the dirstate"""
443 actions = {
443 actions = {
444 ACTION_REMOVE: [],
444 ACTION_REMOVE: [],
445 ACTION_FORGET: [],
445 ACTION_FORGET: [],
446 ACTION_ADD: [],
446 ACTION_ADD: [],
447 ACTION_ADD_MODIFIED: [],
447 ACTION_ADD_MODIFIED: [],
448 ACTION_GET: [],
448 ACTION_GET: [],
449 }
449 }
450 for f, (r, action) in pycompat.iteritems(self._results):
450 for f, (r, action) in pycompat.iteritems(self._results):
451 if action is not None:
451 if action is not None:
452 actions[action].append((f, None, b"merge result"))
452 actions[action].append((f, None, b"merge result"))
453 return actions
453 return actions
454
454
455
455
456 class mergestate(_mergestate_base):
456 class mergestate(_mergestate_base):
457
457
458 statepathv1 = b'merge/state'
458 statepathv1 = b'merge/state'
459 statepathv2 = b'merge/state2'
459 statepathv2 = b'merge/state2'
460
460
461 @staticmethod
461 @staticmethod
462 def clean(repo):
462 def clean(repo):
463 """Initialize a brand new merge state, removing any existing state on
463 """Initialize a brand new merge state, removing any existing state on
464 disk."""
464 disk."""
465 ms = mergestate(repo)
465 ms = mergestate(repo)
466 ms.reset()
466 ms.reset()
467 return ms
467 return ms
468
468
469 @staticmethod
469 @staticmethod
470 def read(repo):
470 def read(repo):
471 """Initialize the merge state, reading it from disk."""
471 """Initialize the merge state, reading it from disk."""
472 ms = mergestate(repo)
472 ms = mergestate(repo)
473 ms._read()
473 ms._read()
474 return ms
474 return ms
475
475
476 def _read(self):
476 def _read(self):
477 """Analyse each record content to restore a serialized state from disk
477 """Analyse each record content to restore a serialized state from disk
478
478
479 This function processes "record" entries produced by the de-serialization
479 This function processes "record" entries produced by the de-serialization
480 of the on-disk file.
480 of the on-disk file.
481 """
481 """
482 unsupported = set()
482 unsupported = set()
483 records = self._readrecords()
483 records = self._readrecords()
484 for rtype, record in records:
484 for rtype, record in records:
485 if rtype == RECORD_LOCAL:
485 if rtype == RECORD_LOCAL:
486 self._local = bin(record)
486 self._local = bin(record)
487 elif rtype == RECORD_OTHER:
487 elif rtype == RECORD_OTHER:
488 self._other = bin(record)
488 self._other = bin(record)
489 elif rtype == LEGACY_MERGE_DRIVER_STATE:
489 elif rtype == LEGACY_MERGE_DRIVER_STATE:
490 pass
490 pass
491 elif rtype in (
491 elif rtype in (
492 RECORD_MERGED,
492 RECORD_MERGED,
493 RECORD_CHANGEDELETE_CONFLICT,
493 RECORD_CHANGEDELETE_CONFLICT,
494 RECORD_PATH_CONFLICT,
494 RECORD_PATH_CONFLICT,
495 LEGACY_MERGE_DRIVER_MERGE,
495 LEGACY_MERGE_DRIVER_MERGE,
496 LEGACY_RECORD_RESOLVED_OTHER,
496 LEGACY_RECORD_RESOLVED_OTHER,
497 ):
497 ):
498 bits = record.split(b'\0')
498 bits = record.split(b'\0')
499 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
499 # merge entry type MERGE_RECORD_MERGED_OTHER is deprecated
500 # and we now store related information in _stateextras, so
500 # and we now store related information in _stateextras, so
501 # let's write to _stateextras directly
501 # let's write to _stateextras directly
502 if bits[1] == MERGE_RECORD_MERGED_OTHER:
502 if bits[1] == MERGE_RECORD_MERGED_OTHER:
503 self._stateextras[bits[0]][b'filenode-source'] = b'other'
503 self._stateextras[bits[0]][b'filenode-source'] = b'other'
504 else:
504 else:
505 self._state[bits[0]] = bits[1:]
505 self._state[bits[0]] = bits[1:]
506 elif rtype == RECORD_FILE_VALUES:
506 elif rtype == RECORD_FILE_VALUES:
507 filename, rawextras = record.split(b'\0', 1)
507 filename, rawextras = record.split(b'\0', 1)
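                # rawextras is a flat b'\0'-separated key/value sequence; for
                # instance b'filenode-source\x00other' (value borrowed from the
                # MERGE_RECORD_MERGED_OTHER handling above) decodes to
                # {b'filenode-source': b'other'}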
                extraparts = rawextras.split(b'\0')
                extras = {}
                i = 0
                while i < len(extraparts):
                    extras[extraparts[i]] = extraparts[i + 1]
                    i += 2

                self._stateextras[filename] = extras
            elif rtype == RECORD_LABELS:
                labels = record.split(b'\0', 2)
                self._labels = [l for l in labels if len(l) > 0]
            elif not rtype.islower():
                unsupported.add(rtype)

        if unsupported:
            raise error.UnsupportedMergeRecords(unsupported)

    def _readrecords(self):
        """Read merge state from disk and return a list of records (TYPE, data)

        We read data from both v1 and v2 files and decide which one to use.

        V1 was used by versions prior to 2.9.1 and contains less data than
        v2. We read both versions and check that no data in v2 contradicts
        v1. If there is no contradiction we can safely assume that both v1
        and v2 were written at the same time and use the extra data in v2.
        If there is a contradiction we ignore the v2 content, as we assume
        an old version of Mercurial has overwritten the mergestate file and
        left an old v2 file around.

        returns list of records [(TYPE, data), ...]"""
        v1records = self._readrecordsv1()
        v2records = self._readrecordsv2()
        if self._v1v2match(v1records, v2records):
            return v2records
        else:
            # v1 file is newer than v2 file, use it
            # we have to infer the "other" changeset of the merge
            # we cannot do better than that with v1 of the format
            mctx = self._repo[None].parents()[-1]
            v1records.append((RECORD_OTHER, mctx.hex()))
            # add placeholder "other" file node information
            # nobody is using it yet so we do not need to fetch the data
            # if mctx was wrong `mctx[bits[-2]]` may fail.
            for idx, r in enumerate(v1records):
                if r[0] == RECORD_MERGED:
                    bits = r[1].split(b'\0')
                    bits.insert(-2, b'')
                    v1records[idx] = (r[0], b'\0'.join(bits))
            return v1records

    def _v1v2match(self, v1records, v2records):
        oldv2 = set()  # old format version of v2 record
        for rec in v2records:
            if rec[0] == RECORD_LOCAL:
                oldv2.add(rec)
            elif rec[0] == RECORD_MERGED:
                # drop the onode data (not contained in v1)
                oldv2.add((RECORD_MERGED, _droponode(rec[1])))
        for rec in v1records:
            if rec not in oldv2:
                return False
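        # for/else: the else branch below runs only when the loop completes
        # without returning, i.e. when every v1 record also appears in v2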
        else:
            return True

    def _readrecordsv1(self):
        """read on-disk merge state for version 1 file

        returns list of records [(TYPE, data), ...]

        Note: the "F" records in this file are one entry short
        (no "other file node" entry)
        """
        records = []
        try:
            f = self._repo.vfs(self.statepathv1)
            for i, l in enumerate(f):
                if i == 0:
                    records.append((RECORD_LOCAL, l[:-1]))
                else:
                    records.append((RECORD_MERGED, l[:-1]))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

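    # v1 layout recap: line 0 holds the hex of the "local" node; every later
    # line is one RECORD_MERGED entry with NUL-separated fields, minus the
    # "other node" field (see _writerecordsv1 below)
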
    def _readrecordsv2(self):
        """read on-disk merge state for version 2 file

        This format is a list of arbitrary records of the form:

          [type][length][content]

        `type` is a single character, `length` is a 4 byte integer, and
        `content` is an arbitrary byte sequence of length `length`.

        Mercurial versions prior to 3.7 have a bug where if there are
        unsupported mandatory merge records, attempting to clear out the merge
        state with hg update --clean or similar aborts. The 't' record type
        works around that by writing out what those versions treat as an
        advisory record, but later versions interpret as special: the first
        character is the 'real' record type and everything onwards is the data.

        Returns list of records [(TYPE, data), ...]."""
        records = []
        try:
            f = self._repo.vfs(self.statepathv2)
            data = f.read()
            off = 0
            end = len(data)
            while off < end:
                rtype = data[off : off + 1]
                off += 1
                length = _unpack(b'>I', data[off : (off + 4)])[0]
                off += 4
                record = data[off : (off + length)]
                off += length
                if rtype == RECORD_OVERRIDE:
                    rtype, record = record[0:1], record[1:]
                records.append((rtype, record))
            f.close()
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise
        return records

    def commit(self):
        if self._dirty:
            records = self._makerecords()
            self._writerecords(records)
            self._dirty = False

    def _makerecords(self):
        records = []
        records.append((RECORD_LOCAL, hex(self._local)))
        records.append((RECORD_OTHER, hex(self._other)))
        # Write out state items. In all cases, the value of the state map entry
        # is written as the contents of the record. The record type depends on
        # the type of state that is stored, and capital-letter records are used
        # to prevent older versions of Mercurial that do not support the feature
        # from loading them.
        for filename, v in pycompat.iteritems(self._state):
            if v[0] in (
                MERGE_RECORD_UNRESOLVED_PATH,
                MERGE_RECORD_RESOLVED_PATH,
            ):
                # Path conflicts. These are stored in 'P' records. The current
                # resolution state ('pu' or 'pr') is stored within the record.
                records.append(
                    (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
                )
            elif (
                v[1] == self._repo.nodeconstants.nullhex
                or v[6] == self._repo.nodeconstants.nullhex
            ):
                # Change/Delete or Delete/Change conflicts. These are stored in
                # 'C' records. v[1] is the local file, and is nullhex when the
                # file is deleted locally ('dc'). v[6] is the remote file, and
                # is nullhex when the file is deleted remotely ('cd').
                records.append(
                    (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
                )
            else:
                # Normal files. These are stored in 'F' records.
                records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
        for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
            rawextras = b'\0'.join(
                b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
            )
            records.append(
                (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
            )
        if self._labels is not None:
            labels = b'\0'.join(self._labels)
            records.append((RECORD_LABELS, labels))
        return records

    def _writerecords(self, records):
        """Write current state on disk (both v1 and v2)"""
        self._writerecordsv1(records)
        self._writerecordsv2(records)

    def _writerecordsv1(self, records):
        """Write current state on disk in a version 1 file"""
        f = self._repo.vfs(self.statepathv1, b'wb')
        irecords = iter(records)
        lrecords = next(irecords)
        assert lrecords[0] == RECORD_LOCAL
        f.write(hex(self._local) + b'\n')
        for rtype, data in irecords:
            if rtype == RECORD_MERGED:
                f.write(b'%s\n' % _droponode(data))
        f.close()

    def _writerecordsv2(self, records):
        """Write current state on disk in a version 2 file

        See the docstring for _readrecordsv2 for why we use 't'."""
        # these are the records that all version 2 clients can read
        allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
        f = self._repo.vfs(self.statepathv2, b'wb')
        for key, data in records:
            assert len(key) == 1
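            # records outside the allowlist (for instance the advisory
            # RECORD_LABELS one) are wrapped as RECORD_OVERRIDE ('t'): the
            # first payload byte is the real record type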
            if key not in allowlist:
                key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
            format = b'>sI%is' % len(data)
            f.write(_pack(format, key, len(data), data))
        f.close()

    def _make_backup(self, fctx, localkey):
        self._repo.vfs.write(b'merge/' + localkey, fctx.data())

    def _restore_backup(self, fctx, localkey, flags):
        with self._repo.vfs(b'merge/' + localkey) as f:
            fctx.write(f.read(), flags)

    def reset(self):
        shutil.rmtree(self._repo.vfs.join(b'merge'), True)


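# For illustration only: a minimal, self-contained sketch of the
# [type][length][content] framing that _readrecordsv2/_writerecordsv2
# implement above. The two helpers below are local to this sketch and are
# not part of the mergestate API.
def _sketch_pack_record(rtype, data):
    # one framed record: a single type byte, a big-endian 4-byte length,
    # then the payload itself
    assert len(rtype) == 1
    return _pack(b'>sI%is' % len(data), rtype, len(data), data)


def _sketch_unpack_records(blob):
    # inverse of _sketch_pack_record over a concatenation of records,
    # mirroring the while loop in _readrecordsv2 (minus the RECORD_OVERRIDE
    # unwrapping)
    records = []
    off = 0
    while off < len(blob):
        rtype = blob[off : off + 1]
        (length,) = _unpack(b'>I', blob[off + 1 : off + 5])
        records.append((rtype, blob[off + 5 : off + 5 + length]))
        off += 5 + length
    return records


# e.g. _sketch_unpack_records(_sketch_pack_record(b'L', b'abc'))
# yields [(b'L', b'abc')]

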
class memmergestate(_mergestate_base):
    def __init__(self, repo):
        super(memmergestate, self).__init__(repo)
        self._backups = {}

    def _make_backup(self, fctx, localkey):
        self._backups[localkey] = fctx.data()

    def _restore_backup(self, fctx, localkey, flags):
        fctx.write(self._backups[localkey], flags)


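# The `actions` mapping consumed below associates ACTION_* constants with
# lists of (filename, args, message) tuples, such as the b"merge result"
# entries built near the top of this file; `args` is action-specific
# (e.g. (f0, origf0) for ACTION_PATH_CONFLICT_RESOLVE, None for merge
# results).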
def recordupdates(repo, actions, branchmerge, getfiledata):
    """record merge actions to the dirstate"""
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=False)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0, origf0) = args
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
        else:
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.update_file(
                f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
            )
        else:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.update_file(
            f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
        )

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # keep deleted
    for f, args, msg in actions.get(ACTION_KEEP_ABSENT, []):
        pass

    # keep new
    for f, args, msg in actions.get(ACTION_KEEP_NEW, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            # p1_tracked may also be True, but update_file should not care
            old_entry = repo.dirstate.get_entry(f)
            p1_tracked = old_entry.any_tracked and not old_entry.added
            repo.dirstate.update_file(
                f,
                p1_tracked=p1_tracked,
-               p2_tracked=True,
                wc_tracked=True,
-               clean_p2=not p1_tracked,
-               merged=p1_tracked,
+               p2_info=True,
            )
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.update_file(
                f,
                p1_tracked=True,
                wc_tracked=True,
                parentfiledata=parentfiledata,
            )

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            p1_tracked = f1 == f
-           p2_tracked = f2 == f
            repo.dirstate.update_file(
                f,
                p1_tracked=p1_tracked,
-               p2_tracked=p2_tracked,
                wc_tracked=True,
-               merged=p1_tracked,
-               clean_p2=not p1_tracked,
+               p2_info=True,
            )
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.update_file(
                        f1, p1_tracked=True, wc_tracked=False
                    )
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.update_file(
                    f, p1_tracked=True, wc_tracked=True, possibly_dirty=True
                )
            if move:
                repo.dirstate.update_file(
                    f1, p1_tracked=False, wc_tracked=False
                )

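    # Both update_file hunks above are the heart of this change: the old
    # p2_tracked=True / merged=p1_tracked / clean_p2=not p1_tracked triple
    # collapses into a single p2_info=True, with p1_tracked alone left to
    # distinguish a merged entry from one taken cleanly from p2.
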
    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=True, wc_tracked=False)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)
            repo.dirstate.update_file(f0, p1_tracked=False, wc_tracked=False)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.update_file(f, p1_tracked=False, wc_tracked=True)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.update_file(f, p1_tracked=True, wc_tracked=True)