dirstate: use keyword arguments to clarify status()'s callers...
Martin von Zweigbergk
r34345:ac0cd81e default
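For context, the changeset replaces positional arguments to `lfdirstate.status()` with keyword arguments at both largefiles call sites. A minimal before/after sketch (the keyword names are taken from the new call sites in this diff; the surrounding setup is elided):

# Before: the reader must recall dirstate.status()'s parameter order
# to decode the trailing booleans.
unsure, s = lfdirstate.status(match, [], False, False, False)

# After: each argument is self-describing at the call site.
unsure, s = lfdirstate.status(match, subrepos=[], ignored=False,
                              clean=False, unknown=False)

Both forms are equivalent; only the readability of the call sites changes.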
--- a/hgext/largefiles/lfutil.py
+++ b/hgext/largefiles/lfutil.py
@@ -1,673 +1,674 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''largefiles utility code: must not import other modules in this package.'''
from __future__ import absolute_import

import copy
import hashlib
import os
import platform
import stat

from mercurial.i18n import _

from mercurial import (
    dirstate,
    encoding,
    error,
    httpconnection,
    match as matchmod,
    node,
    pycompat,
    scmutil,
    sparse,
    util,
    vfs as vfsmod,
)

shortname = '.hglf'
shortnameslash = shortname + '/'
longname = 'largefiles'

# -- Private worker functions ------------------------------------------

def getminsize(ui, assumelfiles, opt, default=10):
    lfsize = opt
    if not lfsize and assumelfiles:
        lfsize = ui.config(longname, 'minsize', default=default)
    if lfsize:
        try:
            lfsize = float(lfsize)
        except ValueError:
            raise error.Abort(_('largefiles: size must be number (not %s)\n')
                              % lfsize)
    if lfsize is None:
        raise error.Abort(_('minimum size for largefiles must be specified'))
    return lfsize

def link(src, dest):
    """Try to create hardlink - if that fails, efficiently make a copy."""
    util.makedirs(os.path.dirname(dest))
    try:
        util.oslink(src, dest)
    except OSError:
        # if hardlinks fail, fallback on atomic copy
        with open(src, 'rb') as srcf, util.atomictempfile(dest) as dstf:
            for chunk in util.filechunkiter(srcf):
                dstf.write(chunk)
        os.chmod(dest, os.stat(src).st_mode)

def usercachepath(ui, hash):
    '''Return the correct location in the "global" largefiles cache for a file
    with the given hash.
    This cache is used for sharing of largefiles across repositories - both
    to preserve download bandwidth and storage space.'''
    return os.path.join(_usercachedir(ui), hash)

def _usercachedir(ui):
    '''Return the location of the "global" largefiles cache.'''
    path = ui.configpath(longname, 'usercache', None)
    if path:
        return path
    if pycompat.osname == 'nt':
        appdata = encoding.environ.get('LOCALAPPDATA',
                                       encoding.environ.get('APPDATA'))
        if appdata:
            return os.path.join(appdata, longname)
    elif platform.system() == 'Darwin':
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, 'Library', 'Caches', longname)
    elif pycompat.osname == 'posix':
        path = encoding.environ.get('XDG_CACHE_HOME')
        if path:
            return os.path.join(path, longname)
        home = encoding.environ.get('HOME')
        if home:
            return os.path.join(home, '.cache', longname)
    else:
        raise error.Abort(_('unknown operating system: %s\n')
                          % pycompat.osname)
    raise error.Abort(_('unknown %s usercache location') % longname)

def inusercache(ui, hash):
    path = usercachepath(ui, hash)
    return os.path.exists(path)

def findfile(repo, hash):
    '''Return store path of the largefile with the specified hash.
    As a side effect, the file might be linked from user cache.
    Return None if the file can't be found locally.'''
    path, exists = findstorepath(repo, hash)
    if exists:
        repo.ui.note(_('found %s in store\n') % hash)
        return path
    elif inusercache(repo.ui, hash):
        repo.ui.note(_('found %s in system cache\n') % hash)
        path = storepath(repo, hash)
        link(usercachepath(repo.ui, hash), path)
        return path
    return None

class largefilesdirstate(dirstate.dirstate):
    def __getitem__(self, key):
        return super(largefilesdirstate, self).__getitem__(unixpath(key))
    def normal(self, f):
        return super(largefilesdirstate, self).normal(unixpath(f))
    def remove(self, f):
        return super(largefilesdirstate, self).remove(unixpath(f))
    def add(self, f):
        return super(largefilesdirstate, self).add(unixpath(f))
    def drop(self, f):
        return super(largefilesdirstate, self).drop(unixpath(f))
    def forget(self, f):
        return super(largefilesdirstate, self).forget(unixpath(f))
    def normallookup(self, f):
        return super(largefilesdirstate, self).normallookup(unixpath(f))
    def _ignore(self, f):
        return False
    def write(self, tr=False):
        # (1) disable PENDING mode always
        #     (lfdirstate isn't yet managed as a part of the transaction)
        # (2) avoid develwarn 'use dirstate.write with ....'
        super(largefilesdirstate, self).write(None)

def openlfdirstate(ui, repo, create=True):
    '''
    Return a dirstate object that tracks largefiles: i.e. its root is
    the repo root, but it is saved in .hg/largefiles/dirstate.
    '''
    vfs = repo.vfs
    lfstoredir = longname
    opener = vfsmod.vfs(vfs.join(lfstoredir))
    lfdirstate = largefilesdirstate(opener, ui, repo.root,
                                    repo.dirstate._validate,
                                    lambda: sparse.matcher(repo))

    # If the largefiles dirstate does not exist, populate and create
    # it. This ensures that we create it on the first meaningful
    # largefiles operation in a new clone.
    if create and not vfs.exists(vfs.join(lfstoredir, 'dirstate')):
        matcher = getstandinmatcher(repo)
        standins = repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                      ignored=False)

        if len(standins) > 0:
            vfs.makedirs(lfstoredir)

        for standin in standins:
            lfile = splitstandin(standin)
            lfdirstate.normallookup(lfile)
    return lfdirstate

def lfdirstatestatus(lfdirstate, repo):
    pctx = repo['.']
    match = matchmod.always(repo.root, repo.getcwd())
-    unsure, s = lfdirstate.status(match, [], False, False, False)
+    unsure, s = lfdirstate.status(match, subrepos=[], ignored=False,
+                                  clean=False, unknown=False)
    modified, clean = s.modified, s.clean
    for lfile in unsure:
        try:
            fctx = pctx[standin(lfile)]
        except LookupError:
            fctx = None
        if not fctx or readasstandin(fctx) != hashfile(repo.wjoin(lfile)):
            modified.append(lfile)
        else:
            clean.append(lfile)
            lfdirstate.normal(lfile)
    return s

def listlfiles(repo, rev=None, matcher=None):
    '''return a list of largefiles in the working copy or the
    specified changeset'''

    if matcher is None:
        matcher = getstandinmatcher(repo)

    # ignore unknown files in working directory
    return [splitstandin(f)
            for f in repo[rev].walk(matcher)
            if rev is not None or repo.dirstate[f] != '?']

def instore(repo, hash, forcelocal=False):
    '''Return true if a largefile with the given hash exists in the store'''
    return os.path.exists(storepath(repo, hash, forcelocal))

def storepath(repo, hash, forcelocal=False):
    '''Return the correct location in the repository largefiles store for a
    file with the given hash.'''
    if not forcelocal and repo.shared():
        return repo.vfs.reljoin(repo.sharedpath, longname, hash)
    return repo.vfs.join(longname, hash)

def findstorepath(repo, hash):
    '''Search through the local store path(s) to find the file for the given
    hash.  If the file is not found, its path in the primary store is returned.
    The return value is a tuple of (path, exists(path)).
    '''
    # For shared repos, the primary store is in the share source.  But for
    # backward compatibility, force a lookup in the local store if it wasn't
    # found in the share source.
    path = storepath(repo, hash, False)

    if instore(repo, hash):
        return (path, True)
    elif repo.shared() and instore(repo, hash, True):
        return storepath(repo, hash, True), True

    return (path, False)

def copyfromcache(repo, hash, filename):
    '''Copy the specified largefile from the repo or system cache to
    filename in the repository. Return true on success or false if the
    file was not found in either cache (which should not happen:
    this is meant to be called only after ensuring that the needed
    largefile exists in the cache).'''
    wvfs = repo.wvfs
    path = findfile(repo, hash)
    if path is None:
        return False
    wvfs.makedirs(wvfs.dirname(wvfs.join(filename)))
    # The write may fail before the file is fully written, but we
    # don't use atomic writes in the working copy.
    with open(path, 'rb') as srcfd, wvfs(filename, 'wb') as destfd:
        gothash = copyandhash(
            util.filechunkiter(srcfd), destfd)
    if gothash != hash:
        repo.ui.warn(_('%s: data corruption in %s with hash %s\n')
                     % (filename, path, gothash))
        wvfs.unlink(filename)
        return False
    return True

def copytostore(repo, ctx, file, fstandin):
    wvfs = repo.wvfs
    hash = readasstandin(ctx[fstandin])
    if instore(repo, hash):
        return
    if wvfs.exists(file):
        copytostoreabsolute(repo, wvfs.join(file), hash)
    else:
        repo.ui.warn(_("%s: largefile %s not available from local store\n") %
                     (file, hash))

def copyalltostore(repo, node):
    '''Copy all largefiles in a given revision to the store'''

    ctx = repo[node]
    for filename in ctx.files():
        realfile = splitstandin(filename)
        if realfile is not None and filename in ctx.manifest():
            copytostore(repo, ctx, realfile, filename)

def copytostoreabsolute(repo, file, hash):
    if inusercache(repo.ui, hash):
        link(usercachepath(repo.ui, hash), storepath(repo, hash))
    else:
        util.makedirs(os.path.dirname(storepath(repo, hash)))
        with open(file, 'rb') as srcf:
            with util.atomictempfile(storepath(repo, hash),
                                     createmode=repo.store.createmode) as dstf:
                for chunk in util.filechunkiter(srcf):
                    dstf.write(chunk)
        linktousercache(repo, hash)

def linktousercache(repo, hash):
    '''Link / copy the largefile with the specified hash from the store
    to the cache.'''
    path = usercachepath(repo.ui, hash)
    link(storepath(repo, hash), path)

def getstandinmatcher(repo, rmatcher=None):
    '''Return a match object that applies rmatcher to the standin directory'''
    wvfs = repo.wvfs
    standindir = shortname

    # no warnings about missing files or directories
    badfn = lambda f, msg: None

    if rmatcher and not rmatcher.always():
        pats = [wvfs.join(standindir, pat) for pat in rmatcher.files()]
        if not pats:
            pats = [wvfs.join(standindir)]
        match = scmutil.match(repo[None], pats, badfn=badfn)
    else:
        # no patterns: relative to repo root
        match = scmutil.match(repo[None], [wvfs.join(standindir)], badfn=badfn)
    return match

def composestandinmatcher(repo, rmatcher):
    '''Return a matcher that accepts standins corresponding to the
    files accepted by rmatcher. Pass the list of files in the matcher
    as the paths specified by the user.'''
    smatcher = getstandinmatcher(repo, rmatcher)
    isstandin = smatcher.matchfn
    def composedmatchfn(f):
        return isstandin(f) and rmatcher.matchfn(splitstandin(f))
    smatcher.matchfn = composedmatchfn

    return smatcher

def standin(filename):
    '''Return the repo-relative path to the standin for the specified big
    file.'''
    # Notes:
    # 1) Some callers want an absolute path, but for instance addlargefiles
    #    needs it repo-relative so it can be passed to repo[None].add().  So
    #    leave it up to the caller to use repo.wjoin() to get an absolute path.
    # 2) Join with '/' because that's what dirstate always uses, even on
    #    Windows. Change existing separator to '/' first in case we are
    #    passed filenames from an external source (like the command line).
    return shortnameslash + util.pconvert(filename)

def isstandin(filename):
    '''Return true if filename is a big file standin. filename must be
    in Mercurial's internal form (slash-separated).'''
    return filename.startswith(shortnameslash)

def splitstandin(filename):
    # Split on / because that's what dirstate always uses, even on Windows.
    # Change local separator to / first just in case we are passed filenames
    # from an external source (like the command line).
    bits = util.pconvert(filename).split('/', 1)
    if len(bits) == 2 and bits[0] == shortname:
        return bits[1]
    else:
        return None

def updatestandin(repo, lfile, standin):
    """Re-calculate hash value of lfile and write it into standin

    This assumes that "lfutil.standin(lfile) == standin", for efficiency.
    """
    file = repo.wjoin(lfile)
    if repo.wvfs.exists(lfile):
        hash = hashfile(file)
        executable = getexecutable(file)
        writestandin(repo, standin, hash, executable)
    else:
        raise error.Abort(_('%s: file not found!') % lfile)

def readasstandin(fctx):
    '''read hex hash from given filectx of standin file

    This encapsulates how "standin" data is stored into storage layer.'''
    return fctx.data().strip()

def writestandin(repo, standin, hash, executable):
    '''write hash to <repo.root>/<standin>'''
    repo.wwrite(standin, hash + '\n', executable and 'x' or '')

def copyandhash(instream, outfile):
    '''Read bytes from instream (iterable) and write them to outfile,
    computing the SHA-1 hash of the data along the way. Return the hash.'''
    hasher = hashlib.sha1('')
    for data in instream:
        hasher.update(data)
        outfile.write(data)
    return hasher.hexdigest()

def hashfile(file):
    if not os.path.exists(file):
        return ''
    with open(file, 'rb') as fd:
        return hexsha1(fd)

def getexecutable(filename):
    mode = os.stat(filename).st_mode
    return ((mode & stat.S_IXUSR) and
            (mode & stat.S_IXGRP) and
            (mode & stat.S_IXOTH))

def urljoin(first, second, *arg):
    def join(left, right):
        if not left.endswith('/'):
            left += '/'
        if right.startswith('/'):
            right = right[1:]
        return left + right

    url = join(first, second)
    for a in arg:
        url = join(url, a)
    return url

def hexsha1(fileobj):
    """hexsha1 returns the hex-encoded sha1 sum of the data in the file-like
    object data"""
    h = hashlib.sha1()
    for chunk in util.filechunkiter(fileobj):
        h.update(chunk)
    return h.hexdigest()

def httpsendfile(ui, filename):
    return httpconnection.httpsendfile(ui, filename, 'rb')

def unixpath(path):
    '''Return a version of path normalized for use with the lfdirstate.'''
    return util.pconvert(os.path.normpath(path))

def islfilesrepo(repo):
    '''Return true if the repo is a largefile repo.'''
    if ('largefiles' in repo.requirements and
            any(shortnameslash in f[0] for f in repo.store.datafiles())):
        return True

    return any(openlfdirstate(repo.ui, repo, False))

class storeprotonotcapable(Exception):
    def __init__(self, storetypes):
        self.storetypes = storetypes

def getstandinsstate(repo):
    standins = []
    matcher = getstandinmatcher(repo)
    wctx = repo[None]
    for standin in repo.dirstate.walk(matcher, subrepos=[], unknown=False,
                                      ignored=False):
        lfile = splitstandin(standin)
        try:
            hash = readasstandin(wctx[standin])
        except IOError:
            hash = None
        standins.append((lfile, hash))
    return standins

def synclfdirstate(repo, lfdirstate, lfile, normallookup):
    lfstandin = standin(lfile)
    if lfstandin in repo.dirstate:
        stat = repo.dirstate._map[lfstandin]
        state, mtime = stat[0], stat[3]
    else:
        state, mtime = '?', -1
    if state == 'n':
        if (normallookup or mtime < 0 or
                not repo.wvfs.exists(lfile)):
            # state 'n' doesn't ensure 'clean' in this case
            lfdirstate.normallookup(lfile)
        else:
            lfdirstate.normal(lfile)
    elif state == 'm':
        lfdirstate.normallookup(lfile)
    elif state == 'r':
        lfdirstate.remove(lfile)
    elif state == 'a':
        lfdirstate.add(lfile)
    elif state == '?':
        lfdirstate.drop(lfile)

def markcommitted(orig, ctx, node):
    repo = ctx.repo()

    orig(node)

    # ATTENTION: "ctx.files()" may differ from "repo[node].files()"
    # because files coming from the 2nd parent are omitted in the latter.
    #
    # The former should be used to get targets of "synclfdirstate",
    # because such files:
    # - are marked as "a" by "patch.patch()" (e.g. via transplant), and
    # - have to be marked as "n" after commit, but
    # - aren't listed in "repo[node].files()"

    lfdirstate = openlfdirstate(repo.ui, repo)
    for f in ctx.files():
        lfile = splitstandin(f)
        if lfile is not None:
            synclfdirstate(repo, lfdirstate, lfile, False)
    lfdirstate.write()

    # As part of committing, copy all of the largefiles into the cache.
    #
    # Using "node" instead of "ctx" implies additional "repo[node]"
    # lookup while copyalltostore(), but can omit redundant check for
    # files coming from the 2nd parent, which should exist in store
    # at merging.
    copyalltostore(repo, node)

def getlfilestoupdate(oldstandins, newstandins):
    changedstandins = set(oldstandins).symmetric_difference(set(newstandins))
    filelist = []
    for f in changedstandins:
        if f[0] not in filelist:
            filelist.append(f[0])
    return filelist

def getlfilestoupload(repo, missing, addfunc):
    for i, n in enumerate(missing):
        repo.ui.progress(_('finding outgoing largefiles'), i,
                         unit=_('revisions'), total=len(missing))
        parents = [p for p in repo[n].parents() if p != node.nullid]

        oldlfstatus = repo.lfstatus
        repo.lfstatus = False
        try:
            ctx = repo[n]
        finally:
            repo.lfstatus = oldlfstatus

        files = set(ctx.files())
        if len(parents) == 2:
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        for fn in files:
            if isstandin(fn) and fn in ctx:
                addfunc(fn, readasstandin(ctx[fn]))
    repo.ui.progress(_('finding outgoing largefiles'), None)

def updatestandinsbymatch(repo, match):
    '''Update standins in the working directory according to specified match

    This returns (possibly modified) ``match`` object to be used for
    subsequent commit process.
    '''

    ui = repo.ui

    # Case 1: user calls commit with no specific files or
    # include/exclude patterns: refresh and commit all files that
    # are "dirty".
    if match is None or match.always():
        # Spend a bit of time here to get a list of files we know
        # are modified so we can compare only against those.
        # It can cost a lot of time (several seconds)
        # otherwise to update all standins if the largefiles are
        # large.
        lfdirstate = openlfdirstate(ui, repo)
        dirtymatch = matchmod.always(repo.root, repo.getcwd())
-        unsure, s = lfdirstate.status(dirtymatch, [], False, False,
-                                      False)
+        unsure, s = lfdirstate.status(dirtymatch, subrepos=[], ignored=False,
+                                      clean=False, unknown=False)
        modifiedfiles = unsure + s.modified + s.added + s.removed
        lfiles = listlfiles(repo)
        # this only loops through largefiles that exist (not
        # removed/renamed)
        for lfile in lfiles:
            if lfile in modifiedfiles:
                fstandin = standin(lfile)
                if repo.wvfs.exists(fstandin):
                    # this handles the case where a rebase is being
                    # performed and the working copy is not updated
                    # yet.
                    if repo.wvfs.exists(lfile):
                        updatestandin(repo, lfile, fstandin)

        return match

    lfiles = listlfiles(repo)
    match._files = repo._subdirlfs(match.files(), lfiles)

    # Case 2: user calls commit with specified patterns: refresh
    # any matching big files.
    smatcher = composestandinmatcher(repo, match)
    standins = repo.dirstate.walk(smatcher, subrepos=[], unknown=False,
                                  ignored=False)

    # No matching big files: get out of the way and pass control to
    # the usual commit() method.
    if not standins:
        return match

    # Refresh all matching big files. It's possible that the
    # commit will end up failing, in which case the big files will
    # stay refreshed. No harm done: the user modified them and
    # asked to commit them, so sooner or later we're going to
    # refresh the standins. Might as well leave them refreshed.
    lfdirstate = openlfdirstate(ui, repo)
    for fstandin in standins:
        lfile = splitstandin(fstandin)
        if lfdirstate[lfile] != 'r':
            updatestandin(repo, lfile, fstandin)

    # Cook up a new matcher that only matches regular files or
    # standins corresponding to the big files requested by the
    # user. Have to modify _files to prevent commit() from
    # complaining "not tracked" for big files.
    match = copy.copy(match)
    origmatchfn = match.matchfn

    # Check both the list of largefiles and the list of
    # standins because if a largefile was removed, it
    # won't be in the list of largefiles at this point
    match._files += sorted(standins)

    actualfiles = []
    for f in match._files:
        fstandin = standin(f)

        # For largefiles, only one of the normal and standin should be
        # committed (except if one of them is a remove). In the case of a
        # standin removal, drop the normal file if it is unknown to dirstate.
        # Thus, skip plain largefile names but keep the standin.
        if f in lfiles or fstandin in standins:
            if repo.dirstate[fstandin] != 'r':
                if repo.dirstate[f] != 'r':
                    continue
            elif repo.dirstate[f] == '?':
                continue

        actualfiles.append(f)
    match._files = actualfiles

    def matchfn(f):
        if origmatchfn(f):
            return f not in lfiles
        else:
            return f in standins

    match.matchfn = matchfn

    return match

class automatedcommithook(object):
    '''Stateful hook to update standins at the 1st commit of resuming

    For efficiency, updating standins in the working directory should
    be avoided while automated committing (like rebase, transplant and
    so on), because they should be updated before committing.

    But the 1st commit of resuming automated committing (e.g. ``rebase
    --continue``) should update them, because largefiles may be
    modified manually.
    '''
    def __init__(self, resuming):
        self.resuming = resuming

    def __call__(self, repo, match):
        if self.resuming:
            self.resuming = False # avoids updating at subsequent commits
            return updatestandinsbymatch(repo, match)
        else:
            return match

def getstatuswriter(ui, repo, forcibly=None):
    '''Return the function to write largefiles specific status out

    If ``forcibly`` is ``None``, this returns the last element of
    ``repo._lfstatuswriters`` as "default" writer function.

    Otherwise, this returns the function to always write out (or
    ignore if ``not forcibly``) status.
    '''
    if forcibly is None and util.safehasattr(repo, '_largefilesenabled'):
        return repo._lfstatuswriters[-1]
    else:
        if forcibly:
            return ui.status # forcibly WRITE OUT
        else:
            return lambda *msg, **opts: None # forcibly IGNORE
--- a/hgext/largefiles/overrides.py
+++ b/hgext/largefiles/overrides.py
@@ -1,1470 +1,1472 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''Overridden Mercurial commands and functions for the largefiles extension'''
from __future__ import absolute_import

import copy
import os

from mercurial.i18n import _

from mercurial import (
    archival,
    cmdutil,
    error,
    hg,
    match as matchmod,
    pathutil,
    registrar,
    scmutil,
    smartset,
    util,
)

from . import (
    lfcommands,
    lfutil,
    storefactory,
)

# -- Utility functions: commonly/repeatedly needed functionality ---------------

def composelargefilematcher(match, manifest):
    '''create a matcher that matches only the largefiles in the original
    matcher'''
    m = copy.copy(match)
    lfile = lambda f: lfutil.standin(f) in manifest
    m._files = filter(lfile, m._files)
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: lfile(f) and origmatchfn(f)
    return m

def composenormalfilematcher(match, manifest, exclude=None):
    excluded = set()
    if exclude is not None:
        excluded.update(exclude)

    m = copy.copy(match)
    notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                              manifest or f in excluded)
    m._files = filter(notlfile, m._files)
    m._fileset = set(m._files)
    m.always = lambda: False
    origmatchfn = m.matchfn
    m.matchfn = lambda f: notlfile(f) and origmatchfn(f)
    return m

def installnormalfilesmatchfn(manifest):
    '''installmatchfn with a matchfn that ignores all largefiles'''
    def overridematch(ctx, pats=(), opts=None, globbed=False,
                      default='relpath', badfn=None):
        if opts is None:
            opts = {}
        match = oldmatch(ctx, pats, opts, globbed, default, badfn=badfn)
        return composenormalfilematcher(match, manifest)
    oldmatch = installmatchfn(overridematch)

def installmatchfn(f):
    '''monkey patch the scmutil module with a custom match function.
    Warning: it is monkey patching the _module_ at runtime! Not thread safe!'''
78 oldmatch = scmutil.match
78 oldmatch = scmutil.match
79 setattr(f, 'oldmatch', oldmatch)
79 setattr(f, 'oldmatch', oldmatch)
80 scmutil.match = f
80 scmutil.match = f
81 return oldmatch
81 return oldmatch
82
82
83 def restorematchfn():
83 def restorematchfn():
84 '''restores scmutil.match to what it was before installmatchfn
84 '''restores scmutil.match to what it was before installmatchfn
85 was called. no-op if scmutil.match is its original function.
85 was called. no-op if scmutil.match is its original function.
86
86
87 Note that n calls to installmatchfn will require n calls to
87 Note that n calls to installmatchfn will require n calls to
88 restore the original matchfn.'''
88 restore the original matchfn.'''
89 scmutil.match = getattr(scmutil.match, 'oldmatch')
89 scmutil.match = getattr(scmutil.match, 'oldmatch')
90
90
91 def installmatchandpatsfn(f):
91 def installmatchandpatsfn(f):
92 oldmatchandpats = scmutil.matchandpats
92 oldmatchandpats = scmutil.matchandpats
93 setattr(f, 'oldmatchandpats', oldmatchandpats)
93 setattr(f, 'oldmatchandpats', oldmatchandpats)
94 scmutil.matchandpats = f
94 scmutil.matchandpats = f
95 return oldmatchandpats
95 return oldmatchandpats
96
96
97 def restorematchandpatsfn():
97 def restorematchandpatsfn():
98 '''restores scmutil.matchandpats to what it was before
98 '''restores scmutil.matchandpats to what it was before
99 installmatchandpatsfn was called. No-op if scmutil.matchandpats
99 installmatchandpatsfn was called. No-op if scmutil.matchandpats
100 is its original function.
100 is its original function.
101
101
102 Note that n calls to installmatchandpatsfn will require n calls
102 Note that n calls to installmatchandpatsfn will require n calls
103 to restore the original matchfn.'''
103 to restore the original matchfn.'''
104 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
104 scmutil.matchandpats = getattr(scmutil.matchandpats, 'oldmatchandpats',
105 scmutil.matchandpats)
105 scmutil.matchandpats)
106
106
107 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
107 def addlargefiles(ui, repo, isaddremove, matcher, **opts):
108 large = opts.get(r'large')
108 large = opts.get(r'large')
109 lfsize = lfutil.getminsize(
109 lfsize = lfutil.getminsize(
110 ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize'))
110 ui, lfutil.islfilesrepo(repo), opts.get(r'lfsize'))
111
111
112 lfmatcher = None
112 lfmatcher = None
113 if lfutil.islfilesrepo(repo):
113 if lfutil.islfilesrepo(repo):
114 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
114 lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
115 if lfpats:
115 if lfpats:
116 lfmatcher = matchmod.match(repo.root, '', list(lfpats))
116 lfmatcher = matchmod.match(repo.root, '', list(lfpats))
117
117
118 lfnames = []
118 lfnames = []
119 m = matcher
119 m = matcher
120
120
121 wctx = repo[None]
121 wctx = repo[None]
122 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
122 for f in wctx.walk(matchmod.badmatch(m, lambda x, y: None)):
123 exact = m.exact(f)
123 exact = m.exact(f)
124 lfile = lfutil.standin(f) in wctx
124 lfile = lfutil.standin(f) in wctx
125 nfile = f in wctx
125 nfile = f in wctx
126 exists = lfile or nfile
126 exists = lfile or nfile
127
127
128 # addremove in core gets fancy with the name, add doesn't
128 # addremove in core gets fancy with the name, add doesn't
129 if isaddremove:
129 if isaddremove:
130 name = m.uipath(f)
130 name = m.uipath(f)
131 else:
131 else:
132 name = m.rel(f)
132 name = m.rel(f)
133
133
134 # Don't warn the user when they attempt to add a normal tracked file.
134 # Don't warn the user when they attempt to add a normal tracked file.
135 # The normal add code will do that for us.
135 # The normal add code will do that for us.
136 if exact and exists:
136 if exact and exists:
137 if lfile:
137 if lfile:
138 ui.warn(_('%s already a largefile\n') % name)
138 ui.warn(_('%s already a largefile\n') % name)
139 continue
139 continue
140
140
141 if (exact or not exists) and not lfutil.isstandin(f):
141 if (exact or not exists) and not lfutil.isstandin(f):
142 # In case the file was removed previously, but not committed
142 # In case the file was removed previously, but not committed
143 # (issue3507)
143 # (issue3507)
144 if not repo.wvfs.exists(f):
144 if not repo.wvfs.exists(f):
145 continue
145 continue
146
146
147 abovemin = (lfsize and
147 abovemin = (lfsize and
148 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
148 repo.wvfs.lstat(f).st_size >= lfsize * 1024 * 1024)
149 if large or abovemin or (lfmatcher and lfmatcher(f)):
149 if large or abovemin or (lfmatcher and lfmatcher(f)):
150 lfnames.append(f)
150 lfnames.append(f)
151 if ui.verbose or not exact:
151 if ui.verbose or not exact:
152 ui.status(_('adding %s as a largefile\n') % name)
152 ui.status(_('adding %s as a largefile\n') % name)
153
153
154 bad = []
154 bad = []
155
155
156 # Need to lock, otherwise there could be a race condition between
156 # Need to lock, otherwise there could be a race condition between
157 # when standins are created and added to the repo.
157 # when standins are created and added to the repo.
158 with repo.wlock():
158 with repo.wlock():
159 if not opts.get('dry_run'):
159 if not opts.get('dry_run'):
            standins = []
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]

    added = [f for f in lfnames if f not in bad]
    return added, bad

def removelargefiles(ui, repo, isaddremove, matcher, **opts):
    after = opts.get('after')
    m = composelargefilematcher(matcher, repo[None].manifest())
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=not isaddremove)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
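    # Restrict the status lists to files that are tracked as largefiles,
    # i.e. whose standin appears in the working copy manifest.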
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in (s.modified, s.added,
                                                    s.deleted, s.clean)]

    def warn(files, msg):
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove = deleted
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove = deleted + clean
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in sorted(remove):
            if ui.verbose or not m.exact(f):
                # addremove in core gets fancy with the name, remove doesn't
                if isaddremove:
                    name = m.uipath(f)
                else:
                    name = m.rel(f)
                ui.status(_('removing %s\n') % name)

            if not opts.get('dry_run'):
                if not after:
                    repo.wvfs.unlinkpath(f, ignoremissing=True)

        if opts.get('dry_run'):
            return result

        remove = [lfutil.standin(f) for f in remove]
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not isaddremove:
            for f in remove:
                repo.wvfs.unlinkpath(f, ignoremissing=True)
            repo[None].forget(remove)

        for f in remove:
            lfutil.synclfdirstate(repo, lfdirstate, lfutil.splitstandin(f),
                                  False)

        lfdirstate.write()

    return result

# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    return lfutil.splitstandin(path) or path

# -- Wrappers: modify existing commands --------------------------------

def overrideadd(orig, ui, repo, *pats, **opts):
    if opts.get('normal') and opts.get('large'):
        raise error.Abort(_('--normal cannot be used with --large'))
    return orig(ui, repo, *pats, **opts)

def cmdutiladd(orig, ui, repo, matcher, prefix, explicitonly, **opts):
    # The --normal flag short circuits this override
    if opts.get(r'normal'):
        return orig(ui, repo, matcher, prefix, explicitonly, **opts)

    ladded, lbad = addlargefiles(ui, repo, False, matcher, **opts)
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest(),
                                             ladded)
    bad = orig(ui, repo, normalmatcher, prefix, explicitonly, **opts)

    bad.extend(lbad)
    return bad

def cmdutilremove(orig, ui, repo, matcher, prefix, after, force, subrepos):
    normalmatcher = composenormalfilematcher(matcher, repo[None].manifest())
    result = orig(ui, repo, normalmatcher, prefix, after, force, subrepos)
    return removelargefiles(ui, repo, False, matcher, after=after,
                            force=force) or result

def overridestatusfn(orig, repo, rev2, **opts):
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False

def overridestatus(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

def overridedirty(orig, repo, ignoreupdate=False, missing=False):
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate=ignoreupdate, missing=missing)
    finally:
        repo._repo.lfstatus = False

def overridelog(orig, ui, repo, *pats, **opts):
    def overridematchandpats(ctx, pats=(), opts=None, globbed=False,
                             default='relpath', badfn=None):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        if opts is None:
            opts = {}
        matchandpats = oldmatchandpats(ctx, pats, opts, globbed, default,
                                       badfn=badfn)
        m, p = copy.copy(matchandpats)

        if m.always():
            # We want to match everything anyway, so there's no benefit trying
            # to add standins.
            return matchandpats

        pats = set(p)

        def fixpats(pat, tostandin=lfutil.standin):
            if pat.startswith('set:'):
                return pat

            kindpat = matchmod._patsplit(pat, None)

            if kindpat[0] is not None:
                return kindpat[0] + ':' + tostandin(kindpat[1])
            return tostandin(kindpat[1])

        if m._cwd:
            hglf = lfutil.shortname
            back = util.pconvert(m.rel(hglf)[:-len(hglf)])

            def tostandin(f):
                # The file may already be a standin, so truncate the back
                # prefix and test before mangling it. This avoids turning
                # 'glob:../.hglf/foo*' into 'glob:../.hglf/../.hglf/foo*'.
                if f.startswith(back) and lfutil.splitstandin(f[len(back):]):
                    return f

                # An absolute path is from outside the repo, so truncate the
                # path to the root before building the standin. Otherwise cwd
                # is somewhere in the repo, relative to root, and needs to be
                # prepended before building the standin.
                if os.path.isabs(m._cwd):
                    f = f[len(back):]
                else:
                    f = m._cwd + '/' + f
                return back + lfutil.standin(f)
        else:
            def tostandin(f):
                if lfutil.isstandin(f):
                    return f
                return lfutil.standin(f)
        pats.update(fixpats(f, tostandin) for f in p)

        for i in range(len(m._files)):
            # Don't add '.hglf' to m.files, since that is already covered by '.'
            if m._files[i] == '.':
                continue
            standin = lfutil.standin(m._files[i])
            # If the "standin" is a directory, append instead of replace to
            # support naming a directory on the command line with only
            # largefiles. The original directory is kept to support normal
            # files.
            if standin in ctx:
                m._files[i] = standin
            elif m._files[i] not in ctx and repo.wvfs.isdir(standin):
                m._files.append(standin)

        m._fileset = set(m._files)
        m.always = lambda: False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            return origmatchfn(f)
        m.matchfn = lfmatchfn

        ui.debug('updated patterns: %s\n' % ', '.join(sorted(pats)))
        return m, pats

    # For hg log --patch, the match object is used in two different senses:
    # (1) to determine what revisions should be printed out, and
    # (2) to determine what files to print out diffs for.
    # The magic matchandpats override should be used for case (1) but not for
    # case (2).
    def overridemakelogfilematcher(repo, pats, opts, badfn=None):
        wctx = repo[None]
        match, pats = oldmatchandpats(wctx, pats, opts, badfn=badfn)
        return lambda rev: match

    oldmatchandpats = installmatchandpatsfn(overridematchandpats)
    oldmakelogfilematcher = cmdutil._makenofollowlogfilematcher
    setattr(cmdutil, '_makenofollowlogfilematcher', overridemakelogfilematcher)

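    # Both overrides are installed only for the duration of the wrapped log
    # call and restored in the 'finally' clause below.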
    try:
        return orig(ui, repo, *pats, **opts)
    finally:
        restorematchandpatsfn()
        setattr(cmdutil, '_makenofollowlogfilematcher', oldmakelogfilematcher)

def overrideverify(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    all = opts.pop('lfa', False)
    contents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if large or all or contents:
        result = result or lfcommands.verifylfiles(ui, repo, all, contents)
    return result

def overridedebugstate(orig, ui, repo, *pats, **opts):
    large = opts.pop('large', False)
    if large:
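        # Present the largefiles dirstate as if it were the repository's own
        # dirstate, so the original debugstate prints the largefile entries.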
        class fakerepo(object):
            dirstate = lfutil.openlfdirstate(ui, repo)
        orig(ui, fakerepo, *pats, **opts)
    else:
        orig(ui, repo, *pats, **opts)

# Before starting the manifest merge, merge.updates will call
# _checkunknownfile to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden calculateupdates function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f, f2=None):
    if lfutil.standin(repo.dirstate.normalize(f)) in wctx:
        return False
    return origfn(repo, wctx, mctx, f, f2)

# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original calculateupdates and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
#    detected via its standin file, which will enter the working copy
#    with a "get" action. It is not "merge" since the standin is all
#    Mercurial is concerned with at this level -- the link to the
#    existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
#    since the largefile will be present in the working copy and
#    different from the normal file in p2. Mercurial therefore
#    triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
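#
# For example (an illustrative scenario, not from the original comments):
# if p1 tracks 'foo' as a largefile (standin '.hglf/foo') and p2 turned it
# into a normal file, the original calculateupdates emits actions for 'foo'
# and/or '.hglf/foo'; the loop below then prompts and rewrites the actions
# for both paths so that only one representation survives.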
def overridecalculateupdates(origfn, repo, p1, p2, pas, branchmerge, force,
                             acceptremote, *args, **kwargs):
    overwrite = force and not branchmerge
    actions, diverge, renamedelete = origfn(
        repo, p1, p2, pas, branchmerge, force, acceptremote, *args, **kwargs)

    if overwrite:
        return actions, diverge, renamedelete

    # Collect the set of largefile names involved: files whose standin is in
    # p1, or that are themselves standins for a file in p1.
    lfiles = set()
    for f in actions:
        splitstandin = lfutil.splitstandin(f)
        if splitstandin in p1:
            lfiles.add(splitstandin)
        elif lfutil.standin(f) in p1:
            lfiles.add(f)

    for lfile in sorted(lfiles):
        standin = lfutil.standin(lfile)
        (lm, largs, lmsg) = actions.get(lfile, (None, None, None))
        (sm, sargs, smsg) = actions.get(standin, (None, None, None))
        if sm in ('g', 'dc') and lm != 'r':
            if sm == 'dc':
                f1, f2, fa, move, anc = sargs
                sargs = (p2[f2].flags(), False)
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            usermsg = _('remote turned local normal file %s into a largefile\n'
                        'use (l)argefile or keep (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # pick remote largefile
                actions[lfile] = ('r', None, 'replaced by standin')
                actions[standin] = ('g', sargs, 'replaces standin')
            else: # keep local normal file
                actions[lfile] = ('k', None, 'replaces standin')
                if branchmerge:
                    actions[standin] = ('k', None, 'replaced by non-standin')
                else:
                    actions[standin] = ('r', None, 'replaced by non-standin')
        elif lm in ('g', 'dc') and sm != 'r':
            if lm == 'dc':
                f1, f2, fa, move, anc = largs
                largs = (p2[f2].flags(), False)
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            usermsg = _('remote turned local largefile %s into a normal file\n'
                        'keep (l)argefile or use (n)ormal file?'
                        '$$ &Largefile $$ &Normal file') % lfile
            if repo.ui.promptchoice(usermsg, 0) == 0: # keep local largefile
                if branchmerge:
                    # largefile can be restored from standin safely
                    actions[lfile] = ('k', None, 'replaced by standin')
                    actions[standin] = ('k', None, 'replaces standin')
                else:
                    # "lfile" should be marked as "removed" without
                    # removal of itself
                    actions[lfile] = ('lfmr', None,
                                      'forget non-standin largefile')

                    # linear-merge should treat this largefile as 're-added'
                    actions[standin] = ('a', None, 'keep standin')
            else: # pick remote normal file
                actions[lfile] = ('g', largs, 'replaces standin')
                actions[standin] = ('r', None, 'replaced by non-standin')

    return actions, diverge, renamedelete

def mergerecordupdates(orig, repo, actions, branchmerge):
    if 'lfmr' in actions:
        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        for lfile, args, msg in actions['lfmr']:
            # this should be executed before 'orig', to execute 'remove'
            # before all other actions
            repo.dirstate.remove(lfile)
            # make sure lfile doesn't get synclfdirstate'd as normal
            lfdirstate.add(lfile)
        lfdirstate.write()

    return orig(repo, actions, branchmerge)

# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits without prompting the user.
def overridefilemerge(origfn, premerge, repo, wctx, mynode, orig, fcd, fco, fca,
                      labels=None):
    if not lfutil.isstandin(orig) or fcd.isabsent() or fco.isabsent():
        return origfn(premerge, repo, wctx, mynode, orig, fcd, fco, fca,
                      labels=labels)

    ahash = lfutil.readasstandin(fca).lower()
    dhash = lfutil.readasstandin(fcd).lower()
    ohash = lfutil.readasstandin(fco).lower()
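    # Take the other side's largefile automatically when it is the only side
    # that changed relative to the ancestor; otherwise ask the user which
    # version to keep.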
    if (ohash != ahash and
        ohash != dhash and
        (dhash == ahash or
         repo.ui.promptchoice(
             _('largefile %s has a merge conflict\nancestor was %s\n'
               'keep (l)ocal %s or\ntake (o)ther %s?'
               '$$ &Local $$ &Other') %
             (lfutil.splitstandin(orig), ahash, dhash, ohash),
             0) == 1)):
        repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return True, 0, False

def copiespathcopies(orig, ctx1, ctx2, match=None):
    copies = orig(ctx1, ctx2, match=match)
    updated = {}

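    # Map standin paths back to their largefile names on both sides of each
    # copy record.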
    for k, v in copies.iteritems():
        updated[lfutil.splitstandin(k) or k] = lfutil.splitstandin(v) or v

    return updated

# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile so that the override checks
# whether the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
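# For instance (an illustrative scenario): 'hg copy big1 big2' is first run
# against the standin matcher, copying '.hglf/big1' to '.hglf/big2'; the
# recorded pair is then replayed below on the largefiles themselves and on
# the largefiles dirstate.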
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    installnormalfilesmatchfn(repo[None].manifest())
    try:
        result = orig(ui, repo, pats, opts, rename)
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nonormalfiles = True
            result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    def makestandin(relpath):
        path = pathutil.canonpath(repo.root, repo.getcwd(), relpath)
        return repo.wvfs.join(lfutil.standin(path))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))

    try:
        # When we call orig below it creates the standins but we don't add
        # them to the dir state until later so lock during that time.
        wlock = repo.wlock()

        manifest = repo[None].manifest()
        def overridematch(ctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            newpats = []
            # The patterns were previously mangled to add the standin
            # directory; we need to remove that now
            for pat in pats:
                if matchmod.patkind(pat) is None and lfutil.shortname in pat:
                    newpats.append(pat.replace(lfutil.shortname, ''))
                else:
                    newpats.append(pat)
            match = oldmatch(ctx, newpats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)
            lfile = lambda f: lfutil.standin(f) in manifest
            m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
            m._fileset = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                return (lfile is not None and
                        (f in manifest) and
                        origmatchfn(lfile) or
                        None)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        listpats = []
        for pat in pats:
            if matchmod.patkind(pat) is not None:
                listpats.append(pat)
            else:
                listpats.append(makestandin(pat))

        try:
            origcopyfile = util.copyfile
            copiedfiles = []
            def overridecopyfile(src, dest):
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    destlfile = dest.replace(lfutil.shortname, '')
                    if not opts['force'] and os.path.exists(destlfile):
                        raise IOError('',
                            _('destination largefile already exists'))
                copiedfiles.append((src, dest))
                origcopyfile(src, dest)

            util.copyfile = overridecopyfile
            result += orig(ui, repo, listpats, opts, rename)
        finally:
            util.copyfile = origcopyfile

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for (src, dest) in copiedfiles:
            if (lfutil.shortname in src and
                dest.startswith(repo.wjoin(lfutil.shortname))):
                srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                destlfiledir = repo.wvfs.dirname(repo.wjoin(destlfile)) or '.'
                if not os.path.isdir(destlfiledir):
                    os.makedirs(destlfiledir)
                if rename:
                    os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))

                    # The file is gone, but this deletes any empty parent
                    # directories as a side-effect.
                    repo.wvfs.unlinkpath(srclfile, ignoremissing=True)
                    lfdirstate.remove(srclfile)
                else:
                    util.copyfile(repo.wjoin(srclfile),
                                  repo.wjoin(destlfile))

                lfdirstate.add(destlfile)
        lfdirstate.write()
    except error.Abort as e:
        if str(e) != _('no files to copy'):
            raise e
        else:
            nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    if nolfiles and nonormalfiles:
        raise error.Abort(_('no files to copy'))

    return result

# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles.
def overriderevert(orig, ui, repo, ctx, parents, *pats, **opts):
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        s = lfutil.lfdirstatestatus(lfdirstate, repo)
        lfdirstate.write()
        for lfile in s.modified:
            lfutil.updatestandin(repo, lfile, lfutil.standin(lfile))
        for lfile in s.deleted:
            fstandin = lfutil.standin(lfile)
            if repo.wvfs.exists(fstandin):
                repo.wvfs.unlink(fstandin)

        oldstandins = lfutil.getstandinsstate(repo)

        def overridematch(mctx, pats=(), opts=None, globbed=False,
                          default='relpath', badfn=None):
            if opts is None:
                opts = {}
            match = oldmatch(mctx, pats, opts, globbed, default, badfn=badfn)
            m = copy.copy(match)

            # revert supports recursing into subrepos, and though largefiles
            # currently doesn't work correctly in that case, this match is
            # called, so the lfdirstate above may not be the correct one for
            # this invocation of match.
            lfdirstate = lfutil.openlfdirstate(mctx.repo().ui, mctx.repo(),
                                               False)

            wctx = repo[None]
            matchfiles = []
            for f in m._files:
                standin = lfutil.standin(f)
                if standin in ctx or standin in mctx:
                    matchfiles.append(standin)
                elif standin in wctx or lfdirstate[f] == 'r':
                    continue
                else:
                    matchfiles.append(f)
            m._files = matchfiles
            m._fileset = set(m._files)
            origmatchfn = m.matchfn
            def matchfn(f):
                lfile = lfutil.splitstandin(f)
                if lfile is not None:
                    return (origmatchfn(lfile) and
                            (f in ctx or f in mctx))
                return origmatchfn(f)
            m.matchfn = matchfn
            return m
        oldmatch = installmatchfn(overridematch)
        try:
            orig(ui, repo, ctx, parents, *pats, **opts)
        finally:
            restorematchfn()

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)
        # lfdirstate should be 'normallookup'-ed for updated files,
        # because reverting doesn't touch dirstate for 'normal' files
        # when target revision is explicitly specified: in such case,
        # 'n' and valid timestamp in dirstate doesn't ensure 'clean'
        # of target (standin) file.
        lfcommands.updatelfiles(ui, repo, filelist, printmessage=False,
                                normallookup=True)

# after pulling changesets, we need to take some extra care to get
# largefiles updated remotely
def overridepull(orig, ui, repo, source=None, **opts):
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result

def overridepush(orig, ui, repo, *args, **kwargs):
    """Override push command and store --lfrev parameters in opargs"""
    lfrevs = kwargs.pop('lfrev', None)
    if lfrevs:
        opargs = kwargs.setdefault('opargs', {})
        opargs['lfrevs'] = scmutil.revrange(repo, lfrevs)
    return orig(ui, repo, *args, **kwargs)

def exchangepushoperation(orig, *args, **kwargs):
    """Override pushoperation constructor and store lfrevs parameter"""
    lfrevs = kwargs.pop('lfrevs', None)
    pushop = orig(*args, **kwargs)
    pushop.lfrevs = lfrevs
    return pushop

revsetpredicate = registrar.revsetpredicate()

@revsetpredicate('pulled()')
def pulledrevsetsymbol(repo, subset, x):
840 """Changesets that just has been pulled.
840 """Changesets that just has been pulled.
841
841
842 Only available with largefiles from pull --lfrev expressions.
842 Only available with largefiles from pull --lfrev expressions.
843
843
844 .. container:: verbose
844 .. container:: verbose
845
845
846 Some examples:
846 Some examples:
847
847
848 - pull largefiles for all new changesets::
848 - pull largefiles for all new changesets::
849
849
850 hg pull -lfrev "pulled()"
850 hg pull -lfrev "pulled()"
851
851
852 - pull largefiles for all new branch heads::
852 - pull largefiles for all new branch heads::
853
853
854 hg pull -lfrev "head(pulled()) and not closed()"
854 hg pull -lfrev "head(pulled()) and not closed()"
855
855
856 """
856 """

    try:
        firstpulled = repo.firstpulled
    except AttributeError:
        raise error.Abort(_("pulled() only available in --lfrev"))
    return smartset.baseset([r for r in subset if r >= firstpulled])

def overrideclone(orig, ui, source, dest=None, **opts):
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        raise error.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s') %
            d)

    return orig(ui, source, dest, **opts)

def hgclone(orig, ui, opts, *args, **kwargs):
    result = orig(ui, opts, *args, **kwargs)

    if result is not None:
        sourcerepo, destrepo = result
        repo = destrepo.local()

        # When cloning to a remote repo (like through SSH), no repo is available
        # from the peer. Therefore the largefiles can't be downloaded and the
        # hgrc can't be updated.
        if not repo:
            return result

        # If largefiles is required for this repo, permanently enable it locally
        if 'largefiles' in repo.requirements:
            with repo.vfs('hgrc', 'a', text=True) as fp:
                fp.write('\n[extensions]\nlargefiles=\n')

        # Caching is implicitly limited to 'rev' option, since the dest repo
        # was truncated at that point. The user may expect a download count
        # with this option, so attempt it whether or not this is a largefile
        # repo.
        if opts.get('all_largefiles'):
            success, missing = lfcommands.downloadlfiles(ui, repo, None)

            if missing != 0:
                return None

    return result

def hgpostshare(orig, sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    orig(sourcerepo, destrepo, bookmarks, defaultpath)

    # If largefiles is required for this repo, permanently enable it locally
    if 'largefiles' in destrepo.requirements:
        with destrepo.vfs('hgrc', 'a+', text=True) as fp:
            fp.write('\n[extensions]\nlargefiles=\n')

def overriderebase(orig, ui, repo, **opts):
    if not util.safehasattr(repo, '_largefilesenabled'):
        return orig(ui, repo, **opts)

    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()

def overridearchivecmd(orig, ui, repo, dest, **opts):
    repo.unfiltered().lfstatus = True

    try:
        return orig(ui, repo.unfiltered(), dest, **opts)
    finally:
        repo.unfiltered().lfstatus = False

def hgwebarchive(orig, web, req, tmpl):
    web.repo.lfstatus = True

    try:
        return orig(web, req, tmpl)
    finally:
        web.repo.lfstatus = False

def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
                    prefix='', mtime=None, subrepos=None):
    # For some reason setting repo.lfstatus in hgwebarchive only changes the
    # unfiltered repo's attr, so check that as well.
    if not repo.lfstatus and not repo.unfiltered().lfstatus:
        return orig(repo, dest, node, kind, decode, matchfn, prefix, mtime,
                    subrepos)

    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    if node is not None:
        lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise error.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise error.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta"):
        write('.hg_archival.txt', 0o644, False,
              lambda: archival.buildmetadata(ctx))

980 for f in ctx:
980 for f in ctx:
981 ff = ctx.flags(f)
981 ff = ctx.flags(f)
982 getdata = ctx[f].data
982 getdata = ctx[f].data
983 lfile = lfutil.splitstandin(f)
983 lfile = lfutil.splitstandin(f)
984 if lfile is not None:
984 if lfile is not None:
985 if node is not None:
985 if node is not None:
986 path = lfutil.findfile(repo, getdata().strip())
986 path = lfutil.findfile(repo, getdata().strip())
987
987
988 if path is None:
988 if path is None:
989 raise error.Abort(
989 raise error.Abort(
990 _('largefile %s not found in repo store or system cache')
990 _('largefile %s not found in repo store or system cache')
991 % lfile)
991 % lfile)
992 else:
992 else:
993 path = lfile
993 path = lfile
994
994
995 f = lfile
995 f = lfile
996
996
997 getdata = lambda: util.readfile(path)
997 getdata = lambda: util.readfile(path)
998 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
998 write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)
999
999
1000 if subrepos:
1000 if subrepos:
1001 for subpath in sorted(ctx.substate):
1001 for subpath in sorted(ctx.substate):
1002 sub = ctx.workingsub(subpath)
1002 sub = ctx.workingsub(subpath)
1003 submatch = matchmod.subdirmatcher(subpath, matchfn)
1003 submatch = matchmod.subdirmatcher(subpath, matchfn)
1004 sub._repo.lfstatus = True
1004 sub._repo.lfstatus = True
1005 sub.archive(archiver, prefix, submatch)
1005 sub.archive(archiver, prefix, submatch)
1006
1006
1007 archiver.done()
1007 archiver.done()
1008
1008
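# Like overridearchive, but for the hg subrepo archive path: every archive
# member is written under the subrepo's path prefix.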
def hgsubrepoarchive(orig, repo, archiver, prefix, match=None, decode=True):
    lfenabled = util.safehasattr(repo._repo, '_largefilesenabled')
    if not lfenabled or not repo._repo.lfstatus:
        return orig(repo, archiver, prefix, match, decode)

    repo._get(repo._state + ('hg',))
    rev = repo._state[1]
    ctx = repo._repo[rev]

    if ctx.node() is not None:
        lfcommands.cachelfiles(repo.ui, repo._repo, ctx.node())

    def write(name, mode, islink, getdata):
        # At this point, the standin has been replaced with the largefile
        # name, so the normal matcher works here without the lfutil variants.
        if match and not match(name):
            return
        data = getdata()
        if decode:
            data = repo._repo.wwritedata(name, data)

        archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        lfile = lfutil.splitstandin(f)
        if lfile is not None:
            if ctx.node() is not None:
                path = lfutil.findfile(repo._repo, getdata().strip())

                if path is None:
                    raise error.Abort(
                        _('largefile %s not found in repo store or system cache')
                        % lfile)
            else:
                path = lfile

            f = lfile

            getdata = lambda: util.readfile(os.path.join(prefix, path))

        write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, getdata)

    for subpath in sorted(ctx.substate):
        sub = ctx.workingsub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        sub._repo.lfstatus = True
        sub.archive(archiver, prefix + repo._path + '/', submatch, decode)

# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect, backout and fetch.
def overridebailifchanged(orig, repo, *args, **kwargs):
    orig(repo, *args, **kwargs)
    repo.lfstatus = True
    s = repo.status()
    repo.lfstatus = False
    if s.modified or s.added or s.removed or s.deleted:
        raise error.Abort(_('uncommitted changes'))

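# Keep largefile status enabled while the post-commit status is computed,
# so modified largefiles are reported like any other change.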
def postcommitstatus(orig, repo, *args, **kwargs):
    repo.lfstatus = True
    try:
        return orig(repo, *args, **kwargs)
    finally:
        repo.lfstatus = False

def cmdutilforget(orig, ui, repo, match, prefix, explicitonly):
    normalmatcher = composenormalfilematcher(match, repo[None].manifest())
    bad, forgot = orig(ui, repo, normalmatcher, prefix, explicitonly)
    m = composelargefilematcher(match, repo[None].manifest())

    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    forget = [f for f in forget if lfutil.standin(f) in manifest]

    for f in forget:
        fstandin = lfutil.standin(f)
        if fstandin not in repo.dirstate and not repo.wvfs.isdir(fstandin):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            bad.append(f)

    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    with repo.wlock():
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            repo.wvfs.unlinkpath(f, ignoremissing=True)
        rejected = repo[None].forget(standins)

    bad.extend(f for f in rejected if f in m.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot

def _getoutgoings(repo, other, missing, addfunc):
    """get pairs of filename and largefile hash in outgoing revisions
    in 'missing'.

    largefiles already existing on the 'other' repository are ignored.

    'addfunc' is invoked with each unique pair of filename and
    largefile hash value.
    """
    knowns = set()
    lfhashes = set()
    def dedup(fn, lfhash):
        k = (fn, lfhash)
        if k not in knowns:
            knowns.add(k)
            lfhashes.add(lfhash)
    lfutil.getlfilestoupload(repo, missing, dedup)
    if lfhashes:
        lfexists = storefactory.openstore(repo, other).exists(lfhashes)
        for fn, lfhash in knowns:
            if not lfexists[lfhash]: # lfhash doesn't exist on "other"
                addfunc(fn, lfhash)

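# Hook for 'hg outgoing --large': list the largefiles that would be uploaded
# to the remote, printing the per-file hashes when --debug is active.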
def outgoinghook(ui, repo, other, opts, missing):
    if opts.pop('large', None):
        lfhashes = set()
        if ui.debugflag:
            toupload = {}
            def addfunc(fn, lfhash):
                if fn not in toupload:
                    toupload[fn] = []
                toupload[fn].append(lfhash)
                lfhashes.add(lfhash)
            def showhashes(fn):
                for lfhash in sorted(toupload[fn]):
                    ui.debug(' %s\n' % (lfhash))
        else:
            toupload = set()
            def addfunc(fn, lfhash):
                toupload.add(fn)
                lfhashes.add(lfhash)
            def showhashes(fn):
                pass
        _getoutgoings(repo, other, missing, addfunc)

        if not toupload:
            ui.status(_('largefiles: no files to upload\n'))
        else:
            ui.status(_('largefiles to upload (%d entities):\n')
                      % (len(lfhashes)))
            for file in sorted(toupload):
                ui.status(lfutil.splitstandin(file) + '\n')
                showhashes(file)
            ui.status('\n')

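# Hook for 'hg summary --large': when invoked early (changes is None) it
# only requests the outgoing check; otherwise it reports how many largefile
# entities need uploading.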
def summaryremotehook(ui, repo, opts, changes):
    largeopt = opts.get('large', False)
    if changes is None:
        if largeopt:
            return (False, True) # only outgoing check is needed
        else:
            return (False, False)
    elif largeopt:
        url, branch, peer, outgoing = changes[1]
        if peer is None:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no remote repo)\n'))
            return

        toupload = set()
        lfhashes = set()
        def addfunc(fn, lfhash):
            toupload.add(fn)
            lfhashes.add(lfhash)
        _getoutgoings(repo, peer, outgoing.missing, addfunc)

        if not toupload:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: (no files to upload)\n'))
        else:
            # i18n: column positioning for "hg summary"
            ui.status(_('largefiles: %d entities for %d files to upload\n')
                      % (len(lfhashes), len(toupload)))

def overridesummary(orig, ui, repo, *pats, **opts):
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False

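# Note that the lfdirstate.status() calls below spell out their flags as
# keyword arguments (subrepos=, ignored=, clean=, unknown=) rather than a
# run of positional booleans, so each flag is legible at the call site. The
# call returns a pair: the files whose standins must be rehashed ('unsure')
# and a status tuple.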
def scmutiladdremove(orig, repo, matcher, prefix, opts=None, dry_run=None,
                     similarity=None):
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, matcher, prefix, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    unsure, s = lfdirstate.status(matchmod.always(repo.root, repo.getcwd()),
                                  subrepos=[], ignored=False, clean=False,
                                  unknown=False)

    # Call into the normal remove code, but leave the removal of the standin
    # to the original addremove. Monkey patching here makes sure the
    # largefiles code doesn't remove the standin, preventing a very confused
    # state later.
    if s.deleted:
        m = copy.copy(matcher)

        # The m._files and m._map attributes are not changed to the deleted
        # list because that affects the m.exact() test, which in turn governs
        # whether or not the file name is printed, and how. Simply limit the
        # original matches to those in the deleted status list.
        matchfn = m.matchfn
        m.matchfn = lambda f: f in s.deleted and matchfn(f)

        removelargefiles(repo.ui, repo, True, m, **opts)
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    added, bad = addlargefiles(repo.ui, repo, True, matcher, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by passing a matcher that will ignore them.
    matcher = composenormalfilematcher(matcher, repo[None].manifest(), added)
    return orig(repo, matcher, prefix, opts, dry_run, similarity)

# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    # XXX Monkey patching a repoview will not work. The assigned attribute
    # will be set on the unfiltered repo, but we will only look up attributes
    # in the unfiltered repo if the lookup in the repoview object itself
    # fails. As the monkey patched method exists on the repoview class the
    # lookup will not fail. As a result, the original version will shadow the
    # monkey patched one, defeating the monkey patch.
    #
    # As a workaround we use an unfiltered repo here. We should do something
    # cleaner instead.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        unknown = [f for f in r.unknown if lfdirstate[f] == '?']
        ignored = [f for f in r.ignored if lfdirstate[f] == '?']
        return scmutil.status(r.modified, r.added, r.removed, r.deleted,
                              unknown, ignored, r.clean)
    repo.status = overridestatus
    orig(ui, repo, *dirs, **opts)
    repo.status = oldstatus

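# After a rollback, the standins in the working directory may no longer
# match the dirstate; rewrite them from the new parent and resynchronize
# lfdirstate.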
def overriderollback(orig, ui, repo, **opts):
    with repo.wlock():
        before = repo.dirstate.parents()
        orphans = set(f for f in repo.dirstate
                      if lfutil.isstandin(f) and repo.dirstate[f] != 'r')
        result = orig(ui, repo, **opts)
        after = repo.dirstate.parents()
        if before == after:
            return result # no need to restore standins

        pctx = repo['.']
        for f in repo.dirstate:
            if lfutil.isstandin(f):
                orphans.discard(f)
                if repo.dirstate[f] == 'r':
                    repo.wvfs.unlinkpath(f, ignoremissing=True)
                elif f in pctx:
                    fctx = pctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                else:
                    # content of standin is not so important in 'a',
                    # 'm' or 'n' (coming from the 2nd parent) cases
                    lfutil.writestandin(repo, f, '', False)
        for standin in orphans:
            repo.wvfs.unlinkpath(standin, ignoremissing=True)

        lfdirstate = lfutil.openlfdirstate(ui, repo)
        orphans = set(lfdirstate)
        lfiles = lfutil.listlfiles(repo)
        for file in lfiles:
            lfutil.synclfdirstate(repo, lfdirstate, file, True)
            orphans.discard(file)
        for lfile in orphans:
            lfdirstate.drop(lfile)
        lfdirstate.write()
    return result

def overridetransplant(orig, ui, repo, *revs, **opts):
    resuming = opts.get('continue')
    repo._lfcommithooks.append(lfutil.automatedcommithook(resuming))
    repo._lfstatuswriters.append(lambda *msg, **opts: None)
    try:
        result = orig(ui, repo, *revs, **opts)
    finally:
        repo._lfstatuswriters.pop()
        repo._lfcommithooks.pop()
    return result

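# 'hg cat' wrapper: when the requested file is a largefile, stream the real
# contents from the store or the user cache instead of printing the standin.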
def overridecat(orig, ui, repo, file1, *pats, **opts):
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        if origmatchfn(f):
            return True
        lf = lfutil.splitstandin(f)
        if lf is None:
            return False
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        if f not in notbad:
            origbadfn(f, msg)
    m.bad = lfbadfn

    origvisitdirfn = m.visitdir
    def lfvisitdirfn(dir):
        if dir == lfutil.shortname:
            return True
        ret = origvisitdirfn(dir)
        if ret:
            return ret
        lf = lfutil.splitstandin(dir)
        if lf is None:
            return False
        return origvisitdirfn(lf)
    m.visitdir = lfvisitdirfn

    for f in ctx.walk(m):
        with cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f) as fp:
            lf = lfutil.splitstandin(f)
            if lf is None or origmatchfn(f):
                # duplicating unreachable code from commands.cat
                data = ctx[f].data()
                if opts.get('decode'):
                    data = repo.wwritedata(f, data)
                fp.write(data)
            else:
                hash = lfutil.readasstandin(ctx[f])
                if not lfutil.inusercache(repo.ui, hash):
                    store = storefactory.openstore(repo)
                    success, missing = store.get([(lf, hash)])
                    if len(success) != 1:
                        raise error.Abort(
                            _('largefile %s is not in cache and could not be '
                              'downloaded') % lf)
                path = lfutil.usercachepath(repo.ui, hash)
                with open(path, "rb") as fpin:
                    for chunk in util.filechunkiter(fpin):
                        fp.write(chunk)
        err = 0
    return err

def mergeupdate(orig, repo, node, branchmerge, force,
                *args, **kwargs):
    matcher = kwargs.get('matcher', None)
    # note if this is a partial update
    partial = matcher and not matcher.always()
    with repo.wlock():
        # branch |       |         |
        #  merge | force | partial | action
        # -------+-------+---------+--------------
        #    x   |   x   |    x    | linear-merge
        #    o   |   x   |    x    | branch-merge
        #    x   |   o   |    x    | overwrite (as clean update)
        #    o   |   o   |    x    | force-branch-merge (*1)
        #    x   |   x   |    o    |   (*)
        #    o   |   x   |    o    |   (*)
        #    x   |   o   |    o    | overwrite (as revert)
        #    o   |   o   |    o    |   (*)
        #
        # (*) don't care
        # (*1) deprecated, but used internally (e.g: "rebase --collapse")

        lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
        unsure, s = lfdirstate.status(matchmod.always(repo.root,
                                                      repo.getcwd()),
                                      subrepos=[], ignored=False,
                                      clean=True, unknown=False)
        oldclean = set(s.clean)
        pctx = repo['.']
        dctx = repo[node]
        for lfile in unsure + s.modified:
            lfileabs = repo.wvfs.join(lfile)
            if not repo.wvfs.exists(lfileabs):
                continue
            lfhash = lfutil.hashfile(lfileabs)
            standin = lfutil.standin(lfile)
            lfutil.writestandin(repo, standin, lfhash,
                                lfutil.getexecutable(lfileabs))
            if (standin in pctx and
                lfhash == lfutil.readasstandin(pctx[standin])):
                oldclean.add(lfile)
        for lfile in s.added:
            fstandin = lfutil.standin(lfile)
            if fstandin not in dctx:
                # in this case, content of standin file is meaningless
                # (in dctx, lfile is unknown, or normal file)
                continue
            lfutil.updatestandin(repo, lfile, fstandin)
        # mark all clean largefiles as dirty, just in case the update gets
        # interrupted before largefiles and lfdirstate are synchronized
        for lfile in oldclean:
            lfdirstate.normallookup(lfile)
        lfdirstate.write()

        oldstandins = lfutil.getstandinsstate(repo)
        # Make sure the merge runs on disk, not in-memory. largefiles is not a
        # good candidate for in-memory merge (large files, custom dirstate,
        # matcher usage).
        kwargs['wc'] = repo[None]
        result = orig(repo, node, branchmerge, force, *args, **kwargs)

        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

        # to avoid leaving all largefiles as dirty and thus rehash them, mark
        # all the ones that didn't change as clean
        for lfile in oldclean.difference(filelist):
            lfdirstate.normal(lfile)
        lfdirstate.write()

        if branchmerge or force or partial:
            filelist.extend(s.deleted + s.removed)

        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                normallookup=partial)

        return result

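# scmutil.marktouched() is called, e.g., after a patch has been applied; make
# sure the touched largefiles are synced into the working copy as well.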
def scmutilmarktouched(orig, repo, files, *args, **kwargs):
    result = orig(repo, files, *args, **kwargs)

    filelist = []
    for f in files:
        lf = lfutil.splitstandin(f)
        if lf is not None:
            filelist.append(lf)
    if filelist:
        lfcommands.updatelfiles(repo.ui, repo, filelist=filelist,
                                printmessage=False, normallookup=True)

    return result
@@ -1,391 +1,393 @@
# Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''setup for largefiles repositories: reposetup'''
from __future__ import absolute_import

import copy

from mercurial.i18n import _

from mercurial import (
    error,
    localrepo,
    match as matchmod,
    scmutil,
)

from . import (
    lfcommands,
    lfutil,
)

def reposetup(ui, repo):
    # wire repositories should be given new wireproto functions
    # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
    if not repo.local():
        return

    class lfilesrepo(repo.__class__):
        # the mark to examine whether "repo" object enables largefiles or not
        _largefilesenabled = True

        lfstatus = False
        def status_nolfiles(self, *args, **kwargs):
            return super(lfilesrepo, self).status(*args, **kwargs)

        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            ctx = super(lfilesrepo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfilesctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfilesctx, self).files()
                        return [lfutil.splitstandin(f) or f
                                for f in filenames]
                    def manifest(self):
                        man1 = super(lfilesctx, self).manifest()
                        class lfilesmanifest(man1.__class__):
                            def __contains__(self, filename):
                                orig = super(lfilesmanifest,
                                             self).__contains__
                                return (orig(filename) or
                                        orig(lfutil.standin(filename)))
                        man1.__class__ = lfilesmanifest
                        return man1
                    def filectx(self, path, fileid=None, filelog=None):
                        orig = super(lfilesctx, self).filectx
                        try:
                            if filelog is not None:
                                result = orig(path, fileid, filelog)
                            else:
                                result = orig(path, fileid)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            if filelog is not None:
                                result = orig(lfutil.standin(path), fileid,
                                              filelog)
                            else:
                                result = orig(lfutil.standin(path), fileid)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfilesctx
            return ctx

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        # XXX large file status is buggy when used on repo proxy.
        # XXX this needs to be investigated.
        @localrepo.unfilteredmethod
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            orig = super(lfilesrepo, self).status
            if not self.lfstatus:
                return orig(node1, node2, match, listignored, listclean,
                            listunknown, listsubrepos)

            # some calls in this function rely on the old version of status
            self.lfstatus = False
            ctx1 = self[node1]
            ctx2 = self[node2]
            working = ctx2.rev() is None
            parentworking = working and ctx1 == self['.']

            if match is None:
                match = matchmod.always(self.root, self.getcwd())

            wlock = None
            try:
                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                except error.LockError:
                    pass

                # First check if paths or patterns were specified on the
                # command line. If there were, and they don't match any
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if not match.always():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return orig(node1, node2, match, listignored,
                                    listclean, listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = self.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif sf in dirstate.dirs():
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                m = copy.copy(match)
                m._files = tostandins(m._files)

                result = orig(node1, node2, m, ignored, clean, unknown,
                              listsubrepos)
                if working:

                    def sfindirstate(f):
                        sf = lfutil.standin(f)
                        dirstate = self.dirstate
                        return sf in dirstate or sf in dirstate.dirs()

                    match._files = [f for f in match._files
                                    if sfindirstate(f)]
                    # Don't waste time getting the ignored and unknown
                    # files from lfdirstate
                    unsure, s = lfdirstate.status(match, subrepos=[],
                                                  ignored=False,
                                                  clean=listclean,
                                                  unknown=False)
                    (modified, added, removed, deleted, clean) = (
                        s.modified, s.added, s.removed, s.deleted, s.clean)
                    if parentworking:
                        for lfile in unsure:
                            standin = lfutil.standin(lfile)
                            if standin not in ctx1:
                                # from second parent
                                modified.append(lfile)
                            elif lfutil.readasstandin(ctx1[standin]) \
                                    != lfutil.hashfile(self.wjoin(lfile)):
                                modified.append(lfile)
                            else:
                                if listclean:
                                    clean.append(lfile)
                                lfdirstate.normal(lfile)
                    else:
                        tocheck = unsure + modified + added + clean
                        modified, added, clean = [], [], []
                        checkexec = self.dirstate._checkexec

                        for lfile in tocheck:
                            standin = lfutil.standin(lfile)
                            if standin in ctx1:
                                abslfile = self.wjoin(lfile)
                                if ((lfutil.readasstandin(ctx1[standin]) !=
                                     lfutil.hashfile(abslfile)) or
                                    (checkexec and
                                     ('x' in ctx1.flags(standin)) !=
                                     bool(lfutil.getexecutable(abslfile)))):
                                    modified.append(lfile)
                                elif listclean:
                                    clean.append(lfile)
                            else:
                                added.append(lfile)

                        # at this point, 'removed' contains largefiles
                        # marked as 'R' in the working context.
                        # then, largefiles not managed also in the target
                        # context should be excluded from 'removed'.
                        removed = [lfile for lfile in removed
                                   if lfutil.standin(lfile) in ctx1]

                    # Standins no longer found in lfdirstate have been deleted
                    for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            deleted.append(lfile)
                            # Sync "largefile has been removed" back to the
                            # standin. Removing a file as a side effect of
                            # running status is gross, but the alternatives
                            # (if any) are worse.
                            self.wvfs.unlinkpath(standin, ignoremissing=True)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed
                               if f not in self.dirstate]
                    result[2] = [f for f in result[2]
                                 if f not in lfdirstate]

                    lfiles = set(lfdirstate._map)
                    # Unknown files
                    result[4] = set(result[4]).difference(lfiles)
                    # Ignored files
                    result[5] = set(result[5]).difference(lfiles)
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    lfstatus = (modified, added, removed, deleted, [], [],
                                clean)
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfstatus)]
                else: # not against working directory
                    result = [[lfutil.splitstandin(f) or f for f in items]
                              for items in result]

                if wlock:
                    lfdirstate.write()

            finally:
                if wlock:
                    wlock.release()

            self.lfstatus = True
            return scmutil.status(*result)

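        # Wrap markcommitted() on the committed context so that largefile
        # bookkeeping is synchronized when the commit node is recorded;
        # lfutil.markcommitted() does the actual work.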
        def commitctx(self, ctx, *args, **kwargs):
            node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)
            class lfilesctx(ctx.__class__):
                def markcommitted(self, node):
                    orig = super(lfilesctx, self).markcommitted
                    return lfutil.markcommitted(orig, self, node)
            ctx.__class__ = lfilesctx
            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra=None):
            if extra is None:
                extra = {}
            orig = super(lfilesrepo, self).commit

            with self.wlock():
                lfcommithook = self._lfcommithooks[-1]
                match = lfcommithook(self, match)
                result = orig(text=text, user=user, date=date, match=match,
                              force=force, editor=editor, extra=extra)
            return result

        def push(self, remote, force=False, revs=None, newbranch=False):
            if remote.local():
                missing = set(self.requirements) - remote.local().supported
                if missing:
                    msg = _("required features are not"
                            " supported in the destination:"
                            " %s") % (', '.join(sorted(missing)))
                    raise error.Abort(msg)
            return super(lfilesrepo, self).push(remote, force=force,
                                                revs=revs,
                                                newbranch=newbranch)

298 # TODO: _subdirlfs should be moved into "lfutil.py", because
300 # TODO: _subdirlfs should be moved into "lfutil.py", because
299 # it is referred only from "lfutil.updatestandinsbymatch"
301 # it is referred only from "lfutil.updatestandinsbymatch"
300 def _subdirlfs(self, files, lfiles):
302 def _subdirlfs(self, files, lfiles):
301 '''
303 '''
302 Adjust matched file list
304 Adjust matched file list
303 If we pass a directory to commit whose only committable files
305 If we pass a directory to commit whose only committable files
304 are largefiles, the core commit code aborts before finding
306 are largefiles, the core commit code aborts before finding
305 the largefiles.
307 the largefiles.
306 So we do the following:
308 So we do the following:
307 For directories that only have largefiles as matches,
309 For directories that only have largefiles as matches,
308 we explicitly add the largefiles to the match list and remove
310 we explicitly add the largefiles to the match list and remove
309 the directory.
311 the directory.
310 In other cases, we leave the match list unmodified.
312 In other cases, we leave the match list unmodified.
311 '''
313 '''
312 actualfiles = []
314 actualfiles = []
313 dirs = []
315 dirs = []
314 regulars = []
316 regulars = []
315
317
316 for f in files:
318 for f in files:
317 if lfutil.isstandin(f + '/'):
319 if lfutil.isstandin(f + '/'):
318 raise error.Abort(
320 raise error.Abort(
319 _('file "%s" is a largefile standin') % f,
321 _('file "%s" is a largefile standin') % f,
320 hint=('commit the largefile itself instead'))
322 hint=('commit the largefile itself instead'))
321 # Scan directories
323 # Scan directories
322 if self.wvfs.isdir(f):
324 if self.wvfs.isdir(f):
323 dirs.append(f)
325 dirs.append(f)
324 else:
326 else:
325 regulars.append(f)
327 regulars.append(f)
326
328
327 for f in dirs:
329 for f in dirs:
328 matcheddir = False
330 matcheddir = False
329 d = self.dirstate.normalize(f) + '/'
331 d = self.dirstate.normalize(f) + '/'
330 # Check for matched normal files
332 # Check for matched normal files
331 for mf in regulars:
333 for mf in regulars:
332 if self.dirstate.normalize(mf).startswith(d):
334 if self.dirstate.normalize(mf).startswith(d):
333 actualfiles.append(f)
335 actualfiles.append(f)
334 matcheddir = True
336 matcheddir = True
335 break
337 break
336 if not matcheddir:
338 if not matcheddir:
337 # If no normal match, manually append
339 # If no normal match, manually append
338 # any matching largefiles
340 # any matching largefiles
339 for lf in lfiles:
341 for lf in lfiles:
340 if self.dirstate.normalize(lf).startswith(d):
342 if self.dirstate.normalize(lf).startswith(d):
341 actualfiles.append(lf)
343 actualfiles.append(lf)
342 if not matcheddir:
344 if not matcheddir:
343 # There may still be normal files in the dir, so
345 # There may still be normal files in the dir, so
344 # add a directory to the list, which
346 # add a directory to the list, which
345 # forces status/dirstate to walk all files and
347 # forces status/dirstate to walk all files and
346 # call the match function on the matcher, even
348 # call the match function on the matcher, even
347 # on case sensitive filesystems.
349 # on case sensitive filesystems.
348 actualfiles.append('.')
350 actualfiles.append('.')
349 matcheddir = True
351 matcheddir = True
350 # Nothing in dir, so readd it
352 # Nothing in dir, so readd it
351 # and let commit reject it
353 # and let commit reject it
352 if not matcheddir:
354 if not matcheddir:
353 actualfiles.append(f)
355 actualfiles.append(f)
354
356
355 # Always add normal files
357 # Always add normal files
356 actualfiles += regulars
358 actualfiles += regulars
357 return actualfiles
359 return actualfiles
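The adjustment rule above is easier to see in isolation. Here is a minimal, self-contained sketch of the same idea over plain lists, with dirstate normalization omitted; the function name and sample paths are invented for illustration and are not part of the extension:

def adjust_matches(dirs, regulars, lfiles):
    """Replace directories whose only matches are largefiles
    with those largefiles themselves (illustrative sketch)."""
    actual = []
    for d in (d.rstrip('/') + '/' for d in dirs):
        if any(mf.startswith(d) for mf in regulars):
            actual.append(d.rstrip('/'))   # normal files present: keep the dir
            continue
        matched = [lf for lf in lfiles if lf.startswith(d)]
        if matched:
            actual.extend(matched)         # commit the largefiles directly
        else:
            actual.append(d.rstrip('/'))   # nothing matched: let commit complain
    return actual + regulars

# Example: only a largefile lives under "assets/"
print(adjust_matches(['assets'], [], ['assets/big.bin']))
# -> ['assets/big.bin']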

    repo.__class__ = lfilesrepo

    # Stack of hooks executed before committing.
    # Only the last element ("_lfcommithooks[-1]") is used for each commit.
    repo._lfcommithooks = [lfutil.updatestandinsbymatch]

    # Stack of status writer functions taking "*msg, **opts" arguments
    # like "ui.status()". Only the last element ("_lfstatuswriters[-1]")
    # is used to write status out.
    repo._lfstatuswriters = [ui.status]
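Both stacks follow the same push/try/finally/pop override pattern: a caller that wants to silence or redirect output for one operation pushes its own writer and restores the previous one afterwards. A hedged sketch of that pattern, with a made-up quiet_writer, might look like:

def quiet_writer(*msg, **opts):
    pass  # swallow status messages entirely

def with_quiet_status(repo, operation):
    # Temporarily override the active status writer; code that reports
    # status always consults only the top of the stack,
    # repo._lfstatuswriters[-1], so popping restores the old behavior.
    repo._lfstatuswriters.append(quiet_writer)
    try:
        return operation()
    finally:
        repo._lfstatuswriters.pop()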

    def prepushoutgoinghook(pushop):
        """Push largefiles for pushop before pushing revisions."""
        lfrevs = pushop.lfrevs
        if lfrevs is None:
            lfrevs = pushop.outgoing.missing
        if lfrevs:
            toupload = set()
            addfunc = lambda fn, lfhash: toupload.add(lfhash)
            lfutil.getlfilestoupload(pushop.repo, lfrevs,
                                     addfunc)
            lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload)
    repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)

    def checkrequireslfiles(ui, repo, **kwargs):
        if 'largefiles' not in repo.requirements and any(
                lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
                 'largefiles')
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
@@ -1,2607 +1,2606 @@
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os
import re
import stat

from .i18n import _
from .node import (
    addednodeid,
    bin,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirnodes,
    wdirrev,
)
from . import (
    encoding,
    error,
    fileset,
    match as matchmod,
    mdiff,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    revlog,
    scmutil,
    sparse,
    subrepo,
    util,
)

propertycache = util.propertycache

nonascii = re.compile(r'[^\x21-\x7f]').search

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
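The loop above is a case analysis on the manifest diff: each changed path carries its old and new (node, flag) pairs, and None on either side means the file is absent there. A toy sketch of the same bucketing over plain dicts (paths and node values invented) shows the shape of the logic:

def classify(mf1, mf2):
    # mf1/mf2 map path -> node; a missing key plays the role of None
    # in the manifest diff, mirroring how files are bucketed above.
    added, removed, modified, clean = [], [], [], []
    for fn in sorted(set(mf1) | set(mf2)):
        n1, n2 = mf1.get(fn), mf2.get(fn)
        if n1 is None:
            added.append(fn)
        elif n2 is None:
            removed.append(fn)
        elif n1 != n2:
            modified.append(fn)
        else:
            clean.append(fn)
    return added, removed, modified, clean

print(classify({'a': 1, 'b': 2}, {'b': 3, 'c': 4}))
# -> (['c'], ['a'], ['b'], [])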

    @propertycache
    def substate(self):
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
211 """True if the changeset is not obsolete but it's ancestor are"""
211 """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
221 """True if the changeset try to be a successor of a public changeset
221 """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
234 """Is a successors of a changeset with multiple possible successors set
234 """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def troubles(self):
251 """Keep the old version around in order to avoid breaking extensions
251 """Keep the old version around in order to avoid breaking extensions
252 about different return values.
252 about different return values.
253 """
253 """
254 msg = ("'context.troubles' is deprecated, "
254 msg = ("'context.troubles' is deprecated, "
255 "use 'context.instabilities'")
255 "use 'context.instabilities'")
256 self._repo.ui.deprecwarn(msg, '4.4')
256 self._repo.ui.deprecwarn(msg, '4.4')
257
257
258 troubles = []
258 troubles = []
259 if self.orphan():
259 if self.orphan():
260 troubles.append('orphan')
260 troubles.append('orphan')
261 if self.phasedivergent():
261 if self.phasedivergent():
262 troubles.append('bumped')
262 troubles.append('bumped')
263 if self.contentdivergent():
263 if self.contentdivergent():
264 troubles.append('divergent')
264 troubles.append('divergent')
265 return troubles
265 return troubles
266
266
267 def instabilities(self):
267 def instabilities(self):
268 """return the list of instabilities affecting this changeset.
268 """return the list of instabilities affecting this changeset.
269
269
        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
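For callers, the practical upshot is that both call directions work and agree up to the documented reversal. A short usage sketch, assuming a local repository at a hypothetical path:

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '/path/to/repo')  # hypothetical path
wctx = repo[None]   # working directory context
pctx = wctx.p1()    # its first parent

# Compare the parent changeset with the working directory.
st = pctx.status(other=None, listclean=True)
print(st.modified, st.added, st.removed, st.clean)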

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):
        msg = _("hidden revision '%s'") % changeid
        hint = _('use --hidden to access hidden revisions')
        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if changeid == '.' or changeid == repo.dirstate.p1():
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if changeid in repo.unfiltered().dirstate.parents():
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [changectx(repo, p1)]
        return [changectx(repo, p1), changectx(repo, p2)]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        return self._changeset.extra
    def tags(self):
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def children(self):
        """return contexts for each child changeset"""
        c = self._repo.changelog.children(self._node)
        return [changectx(self._repo, x) for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield changectx(self._repo, a)

    def descendants(self):
        for d in self._repo.changelog.descendants([self._rev]):
            yield changectx(self._repo, d)

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
                try:
                    ctx = changectx(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
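The candidate selection in the multiple-ancestor branch is a plain first-match scan over a user-supplied preference list, with a fallback when nothing matches (the for/else above). A standalone sketch of that pattern, with invented names and values:

def pick_ancestor(preferred, candidates, fallback):
    # First preference that is an actual candidate wins; otherwise fall
    # back -- the same for/else shape as the loop above.
    for p in preferred:
        if p in candidates:
            return p
    return fallback

print(pick_ancestor(['beta', 'alpha'], {'alpha', 'gamma'}, 'gamma'))
# -> 'alpha'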

    def descendant(self, other):
679 """True if other is descendant of this changeset"""
679 """True if other is descendant of this changeset"""
        return self._repo.changelog.descendant(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return util.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True
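The guard before the content comparison is a size-based fast path: if neither the raw sizes nor the metadata-adjusted sizes can be equal, the contents must differ and no file data needs to be read. A toy illustration of the 4-byte metadata offset (all values invented):

def may_be_equal(stored_size, workfile_size, has_filters):
    # Filelog data that begins with '\1\n' carries an empty metadata
    # block, so the stored size runs 4 bytes over the file on disk.
    if has_filters or stored_size - 4 == workfile_size:
        return True   # cannot rule equality out: fall through to a real compare
    return stored_size == workfile_size

print(may_be_equal(104, 100, False))  # True: sizes match once metadata is removed
print(may_be_equal(104, 90, False))   # False: contents cannot be identical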

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
904 # But if manifest uses a buggy file revision (not children of the
904 # But if manifest uses a buggy file revision (not children of the
905 # one it replaces) we could. Such a buggy situation will likely
905 # one it replaces) we could. Such a buggy situation will likely
906 # result is crash somewhere else at to some point.
906 # result is crash somewhere else at to some point.
907 return lkr
907 return lkr
908
908
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not ('_changeid' in attrs or '_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

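    # Illustrative sketch (not from the original file): how linkrev() and
    # introrev() can disagree under linkrev-shadowing. The revision numbers
    # and file name below are assumptions made up for the example.
    #
    #     >>> fctx = repo[2][b'f']     # hypothetical filectx seen from rev 2
    #     >>> fctx.linkrev()           # filelog points at a later changeset
    #     5                            # that reused the same file revision
    #     >>> fctx.introrev()          # ancestor-adjusted introduction rev
    #     1
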
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the list
            # comprehension above, inserting at 0 always replaces the first
            # nullid parent with the rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

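    # Illustrative sketch (not from the original file): for a file created by
    # 'hg rename', both filelog parents are nullid, so pl starts empty and
    # the rename source becomes the sole parent. File names are assumptions.
    #
    #     >>> fctx = repo[b'tip'][b'renamed.txt']
    #     >>> [p.path() for p in fctx.parents()]
    #     ['original.txt']
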
    def annotate(self, follow=False, linenumber=False, skiprevs=None,
                 diffopts=None):
        '''returns a list of tuples of ((ctx, number), line) for each line
        in the file, where ctx is the filectx of the node where that line
        was last changed; if the linenumber parameter is true, number is the
        line number at its first appearance in the managed file; otherwise,
        number has a fixed value of False.
        '''

        def lines(text):
            if text.endswith("\n"):
                return text.count("\n")
            return text.count("\n") + int(bool(text))

        if linenumber:
            def decorate(text, rev):
                return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
        else:
            def decorate(text, rev):
                return ([(rev, False)] * lines(text), text)

        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk the
            # changelog from the topmost introrev (= srcrev) down to
            # p.linkrev() if it isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if '_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self
        introrev = self.introrev()
        if self.rev() != introrev:
            base = self.filectx(self.filenode(), changeid=introrev)
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if introrev is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([introrev], inclusive=True)
            base._ancestrycontext = ac

        # This algorithm would prefer to be recursive, but Python is a
        # bit recursion-hostile. Instead we do an iterative
        # depth-first search.

        # 1st DFS pre-calculates pcache and needed
        visit = [base]
        pcache = {}
        needed = {base: 1}
        while visit:
            f = visit.pop()
            if f in pcache:
                continue
            pl = parents(f)
            pcache[f] = pl
            for p in pl:
                needed[p] = needed.get(p, 0) + 1
                if p not in pcache:
                    visit.append(p)

        # 2nd DFS does the actual annotate
        visit[:] = [base]
        hist = {}
        while visit:
            f = visit[-1]
            if f in hist:
                visit.pop()
                continue

            ready = True
            pl = pcache[f]
            for p in pl:
                if p not in hist:
                    ready = False
                    visit.append(p)
            if ready:
                visit.pop()
                curr = decorate(f.data(), f)
                skipchild = False
                if skiprevs is not None:
                    skipchild = f._changeid in skiprevs
                curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
                                     diffopts)
                for p in pl:
                    if needed[p] == 1:
                        del hist[p]
                        del needed[p]
                    else:
                        needed[p] -= 1

                hist[f] = curr
                del pcache[f]

        return zip(hist[base][0], hist[base][1].splitlines(True))

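    # Illustrative sketch (not from the original file): a typical annotate
    # call, printing each line with the revision that last changed it. The
    # repo and file name are assumptions.
    #
    #     >>> for (fctx, lineno), line in repo[b'tip'][b'f'].annotate(
    #     ...         linenumber=True):
    #     ...     print(fctx.rev(), lineno, line.rstrip())
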
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

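    # Illustrative sketch (not from the original file): ancestors() yields
    # file revisions highest linkrev first, because max(visit) picks the
    # (linkrev, filenode) key with the largest linkrev. Output is made up.
    #
    #     >>> [a.linkrev() for a in repo[b'tip'][b'f'].ancestors()]
    #     [7, 4, 2, 0]
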
    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well, such that the child is never blamed for any lines.

    >>> oldfctx = b'old'
    >>> p1fctx, p2fctx, childfctx = b'p1', b'p2', b'c'
    >>> olddata = b'a\nb\n'
    >>> p1data = b'a\nb\nc\n'
    >>> p2data = b'a\nc\nd\n'
    >>> childdata = b'a\nb2\nc\nc2\nd\n'
    >>> diffopts = mdiff.diffopts()

    >>> def decorate(text, rev):
    ...     return ([(rev, i) for i in xrange(1, text.count(b'\n') + 1)], text)

    Basic usage:

    >>> oldann = decorate(olddata, oldfctx)
    >>> p1ann = decorate(p1data, p1fctx)
    >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
    >>> p1ann[0]
    [('old', 1), ('old', 2), ('p1', 3)]
    >>> p2ann = decorate(p2data, p2fctx)
    >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
    >>> p2ann[0]
    [('old', 1), ('p2', 2), ('p2', 3)]

    Test with multiple parents (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]

    Test with skipchild (note the difference caused by ordering):

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]

    >>> childann = decorate(childdata, childfctx)
    >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
    ...                          diffopts)
    >>> childann[0]
    [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
    '''
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try to match up anything that couldn't be matched.
        # Reversing pblocks maintains the bias towards p2, matching the
        # behavior above.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    for bk in xrange(b1, b2):
                        if child[0][bk][0] == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = parent[0][ak]
                else:
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk][0] == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = parent[0][ak]
    return child

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However,
            # the behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question, or if both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

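# Illustrative sketch (not from the original file): the usual way to obtain a
# filectx is by indexing a changectx rather than calling the constructor; the
# file name below is an assumption.
#
#     >>> fctx = repo[b'tip'][b'setup.py']
#     >>> fctx.data() == fctx.decodeddata()   # True when no decode filters
#     True
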
class committablectx(basectx):
    """A committablectx object provides common functionality for a context
    that wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

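    # Illustrative sketch (not from the original file): the merge branch of
    # _buildflagfunc applies a three-way rule, preferring whichever side
    # changed the flag relative to the ancestor:
    #
    #     fla=''   fl1='x'  fl2=''   ->  'x'  (only p1 changed it)
    #     fla=''   fl1=''   fl2='l'  ->  'l'  (only p2 changed it)
    #     fla=''   fl1='x'  fl2='l'  ->  ''   (both changed it: punt)
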
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates the backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

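    # Illustrative sketch (not from the original file): repo[None] resolves
    # to a workingctx, so a quick "anything uncommitted?" check looks like:
    #
    #     >>> repo[None].dirty(missing=True)
    #     False
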
    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def flushall(self):
        pass # For overlayworkingfilectx compatibility.

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # Did the file become inaccessible in between? Mark it as
                # deleted, matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

1717 def _poststatusfixup(self, status, fixup):
1717 def _poststatusfixup(self, status, fixup):
1718 """update dirstate for files that are actually clean"""
1718 """update dirstate for files that are actually clean"""
1719 poststatus = self._repo.postdsstatus()
1719 poststatus = self._repo.postdsstatus()
1720 if fixup or poststatus:
1720 if fixup or poststatus:
1721 try:
1721 try:
1722 oldid = self._repo.dirstate.identity()
1722 oldid = self._repo.dirstate.identity()
1723
1723
1724 # updating the dirstate is optional
1724 # updating the dirstate is optional
1725 # so we don't wait on the lock
1725 # so we don't wait on the lock
1726 # wlock can invalidate the dirstate, so cache normal _after_
1726 # wlock can invalidate the dirstate, so cache normal _after_
1727 # taking the lock
1727 # taking the lock
1728 with self._repo.wlock(False):
1728 with self._repo.wlock(False):
1729 if self._repo.dirstate.identity() == oldid:
1729 if self._repo.dirstate.identity() == oldid:
1730 if fixup:
1730 if fixup:
1731 normal = self._repo.dirstate.normal
1731 normal = self._repo.dirstate.normal
1732 for f in fixup:
1732 for f in fixup:
1733 normal(f)
1733 normal(f)
1734 # write changes out explicitly, because nesting
1734 # write changes out explicitly, because nesting
1735 # wlock at runtime may prevent 'wlock.release()'
1735 # wlock at runtime may prevent 'wlock.release()'
1736 # after this block from doing so for subsequent
1736 # after this block from doing so for subsequent
1737 # changing files
1737 # changing files
1738 tr = self._repo.currenttransaction()
1738 tr = self._repo.currenttransaction()
1739 self._repo.dirstate.write(tr)
1739 self._repo.dirstate.write(tr)
1740
1740
1741 if poststatus:
1741 if poststatus:
1742 for ps in poststatus:
1742 for ps in poststatus:
1743 ps(self, status)
1743 ps(self, status)
1744 else:
1744 else:
1745 # in this case, writing changes out breaks
1745 # in this case, writing changes out breaks
1746 # consistency, because .hg/dirstate was
1746 # consistency, because .hg/dirstate was
1747 # already changed simultaneously after last
1747 # already changed simultaneously after last
1748 # caching (see also issue5584 for detail)
1748 # caching (see also issue5584 for detail)
1749 self._repo.ui.debug('skip updating dirstate: '
1749 self._repo.ui.debug('skip updating dirstate: '
1750 'identity mismatch\n')
1750 'identity mismatch\n')
1751 except error.LockError:
1751 except error.LockError:
1752 pass
1752 pass
1753 finally:
1753 finally:
1754 # Even if the wlock couldn't be grabbed, clear out the list.
1754 # Even if the wlock couldn't be grabbed, clear out the list.
1755 self._repo.clearpostdsstatus()
1755 self._repo.clearpostdsstatus()
1756
1756
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

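# Illustrative sketch (not part of the original module): with the keyword
# arguments used above, a caller of dirstate.status() no longer has to
# remember the positional order of the three boolean flags. A hypothetical
# caller that only wants unknown files might look like this (matchmod is
# imported at the top of this module):
def _listunknownfiles(repo):
    match = matchmod.always(repo.root, repo.getcwd())
    cmp, s = repo.dirstate.status(match, [], ignored=False,
                                  clean=False, unknown=True)
    return s.unknown
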
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeid from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

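# Illustrative sketch (not part of the original module): the loop above
# stamps sentinel nodeids (addednodeid/modifiednodeid) into the manifest so
# that later comparisons see these files as changed without hashing their
# content. A hypothetical check built on that convention:
def _ispendingchange(man, f):
    # True if f was marked added or modified by _buildstatusmanifest()
    return man.get(f) in (addednodeid, modifiednodeid)
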
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

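# Illustrative sketch (not part of the original module): _matchstatus()
# replaces the matcher's 'bad' callback in place. Any matcher can be
# customized the same way; here is a minimal standalone variant that
# collects the complaints instead of printing them (purely hypothetical):
def _collectbadfiles(match):
    badfiles = []
    def bad(f, msg):
        badfiles.append((f, msg))
    match.bad = bad
    return badfiles
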
    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        if wvfs.isdir(self._path) and not wvfs.islink(self._path):
            wvfs.removedirs(self._path)

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(workingctx):
    """Wraps another mutable context with a write-back cache that can be
    flushed at a later time.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
    }
    If ``exists`` is True, ``flags`` and ``date`` must be non-None. If it is
    False, the file was deleted.
    """

    def __init__(self, repo, wrappedctx):
        super(overlayworkingctx, self).__init__(repo)
        self._repo = repo
        self._wrappedctx = wrappedctx
        self._clean()

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def write(self, path, data, flags=''):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._markdirty(path, exists=True, data=data, date=util.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=util.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']
        return self._wrappedctx[path].exists()

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']
        return self._wrappedctx[path].lexists()

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def flushall(self):
        for path in self._writeorder:
            entry = self._cache[path]
            if entry['exists']:
                self._wrappedctx[path].clearunknown()
                if entry['data'] is not None:
                    if entry['flags'] is None:
                        raise error.ProgrammingError('data set but not flags')
                    self._wrappedctx[path].write(
                        entry['data'],
                        entry['flags'])
                else:
                    self._wrappedctx[path].setflags(
                        'l' in entry['flags'],
                        'x' in entry['flags'])
            else:
                self._wrappedctx[path].remove(path)
        self._clean()

    def isdirty(self, path):
        return path in self._cache

    def _clean(self):
        self._cache = {}
        self._writeorder = []

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        if path not in self._cache:
            self._writeorder.append(path)

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

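# Illustrative sketch (not part of the original module): typical lifecycle
# of the write-back cache above. Nothing touches the wrapped context until
# flushall(); until then, reads of dirty paths are served from self._cache.
# 'repo' and a mutable 'wctx' (e.g. repo[None]) are assumed to be available,
# and the file names are made up for the example.
def _overlayexample(repo, wctx):
    octx = overlayworkingctx(repo, wctx)
    octx.write('a.txt', 'new content\n')  # buffered in octx._cache
    octx.remove('b.txt')                  # buffered deletion
    assert octx.isdirty('a.txt')
    data = octx.data('a.txt')             # read back from the cache
    octx.flushall()                       # now written through to wctx
    return data
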
class overlayworkingfilectx(workingfilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def ctx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

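# Illustrative sketch (not part of the original module): wrapping a slow
# filectxfn with the per-path cache above. '_slowfilectxfn' is a
# hypothetical callback used only for demonstration.
def _slowfilectxfn(repo, memctx, path):
    # pretend this recomputes the memfilectx at great cost
    return memctx.filectx(path)

_cachedfilectxfn = makecachingfilectxfn(_slowfilectxfn)
# repeated lookups of the same path now return the cached object, e.g.:
#   _cachedfilectxfn(repo, mctx, 'a.txt') is _cachedfilectxfn(repo, mctx, 'a.txt')
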
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, path, data, islink=islink,
                          isexec=isexec, copied=copied,
                          memctx=memctx)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while related
    file data is made available through a callback mechanism. 'repo' is the
    current localrepo, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message and 'files' lists names of files touched by the revision
    (normalized and relative to repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents()  # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

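# Illustrative sketch (not part of the original module): creating a one-file
# in-memory commit with memctx/memfilectx, following the docstring above.
# 'repo' is assumed to be an open localrepo; the file name and message are
# made up for the example.
def _commitinmemory(repo):
    def filectxfn(repo, mctx, path):
        return memfilectx(repo, path, 'hello\n', memctx=mctx)
    mctx = memctx(repo, parents=(repo['.'].node(), None),
                  text='example in-memory commit',
                  files=['hello.txt'], filectxfn=filectxfn,
                  user='example <example@example.com>')
    return mctx.commit()  # returns the new changeset's node
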
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (e.g. the flag processor is expensive) and raw data, flags, and filenode
    could be reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as originalfctx,
        # it is still considered reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr in attrs:
                if util.safehasattr(originalfctx, attr):
                    setattr(self, attr, getattr(originalfctx, attr))

    def data(self):
        return self._datafunc()

class metadataonlyctx(committablectx):
    """Like memctx but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    util.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """
    def __new__(cls, repo, originalctx, *args, **kwargs):
        return super(metadataonlyctx, cls).__new__(cls, repo)

    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

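# Illustrative sketch (not part of the original module): because the
# original manifest is reused wholesale, metadataonlyctx makes rewrites
# that only touch commit metadata cheap. A hypothetical rewrite of the
# current commit's user, keeping everything else:
def _rewriteuser(repo, newuser):
    old = repo['.']
    new = metadataonlyctx(repo, old,
                          parents=[p.node() for p in old.parents()],
                          text=old.description(), user=newuser,
                          date=old.date(), extra=old.extra())
    return new.commit()
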
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path):
        self._path = path

    def cmp(self, otherfilectx):
        return self.data() != otherfilectx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)