##// END OF EJS Templates
context: extract partial nodeid lookup method to scmutil...
Martin von Zweigbergk -
r37522:901e749c default
parent child Browse files
Show More
@@ -1,2570 +1,2570 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirfilenodeids,
25 wdirfilenodeids,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from . import (
29 from . import (
30 dagop,
30 dagop,
31 encoding,
31 encoding,
32 error,
32 error,
33 fileset,
33 fileset,
34 match as matchmod,
34 match as matchmod,
35 obsolete as obsmod,
35 obsolete as obsmod,
36 patch,
36 patch,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 pycompat,
39 pycompat,
40 repoview,
40 repoview,
41 revlog,
41 revlog,
42 scmutil,
42 scmutil,
43 sparse,
43 sparse,
44 subrepo,
44 subrepo,
45 subrepoutil,
45 subrepoutil,
46 util,
46 util,
47 )
47 )
48 from .utils import (
48 from .utils import (
49 dateutil,
49 dateutil,
50 stringutil,
50 stringutil,
51 )
51 )
52
52
53 propertycache = util.propertycache
53 propertycache = util.propertycache
54
54
55 nonascii = re.compile(br'[^\x21-\x7f]').search
55 nonascii = re.compile(br'[^\x21-\x7f]').search
56
56
class basectx(object):
    """Common behavior shared by every context flavor.

    Subclasses:
      changectx -- read-only context for a changeset already in the repo
      workingctx -- context for the working directory; can be committed
      memctx -- context for in-memory changes; can also be committed
    """

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # when they denote the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Return a manifest incorporating the given status results when this
        is a working-copy context; for committed contexts this is simply the
        stored manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """Hook letting subclasses substitute the matcher used by status().
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load the earliest manifest first for caching reasons: with
        # revisions 1000 and 1001, 1001 is typically stored as a delta
        # against 1000, so reading 1000 first lets the cache serve 1001 via
        # one full reconstruction plus a single delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing two committed revisions we skip comparing
                # file contents once the nodeids differ, to save time. This
                # means a reverted change is (incorrectly) reported as a
                # modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # filter out files already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # deleted files must not also be reported as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def getfileset(self, expr):
        return fileset.getfileset(self, expr)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete but it's ancestor are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)

    def _fileinfo(self, path):
        # Prefer whichever manifest data is already cached on this instance;
        # fall back to a direct manifestlog lookup.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        swapped = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            swapped = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if swapped:
            # Swap added and removed back. Clear deleted, unknown and
            # ignored, as these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
379
379
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, changeid='.'):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)

        try:
            # Resolution strategies are attempted from cheapest/most common
            # to most expensive: integer rev, symbolic names, binary node,
            # rev number string, hex node, namespaces, then partial hex.
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            if len(changeid) == 20:
                # possibly a binary node id
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass

            try:
                # a string of a revision number, possibly negative
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            if len(changeid) == 40:
                # possibly a full hex node id
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass

            self._node = scmutil.resolvepartialhexnodeid(repo, changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
479
479
# changectx: hashing, truthiness and lazily-computed changelog-backed
# attributes (these defs are methods of the changectx class).
def __hash__(self):
    # Hash on the revision number when resolved; fall back to identity for
    # instances whose lookup never completed.
    try:
        return hash(self._rev)
    except AttributeError:
        return id(self)

def __nonzero__(self):
    # Only the null revision is falsy.
    return self._rev != nullrev

__bool__ = __nonzero__

@propertycache
def _changeset(self):
    # Parsed changelog entry for this revision.
    return self._repo.changelog.changelogrevision(self.rev())

@propertycache
def _manifest(self):
    return self._manifestctx.read()

@property
def _manifestctx(self):
    return self._repo.manifestlog[self._changeset.manifest]

@propertycache
def _manifestdelta(self):
    return self._manifestctx.readdelta()

@propertycache
def _parents(self):
    # A single-element list when the second parent is null.
    repo = self._repo
    p1, p2 = repo.changelog.parentrevs(self._rev)
    if p2 == nullrev:
        return [changectx(repo, p1)]
    return [changectx(repo, p1), changectx(repo, p2)]
514
514
515 def changeset(self):
515 def changeset(self):
516 c = self._changeset
516 c = self._changeset
517 return (
517 return (
518 c.manifest,
518 c.manifest,
519 c.user,
519 c.user,
520 c.date,
520 c.date,
521 c.files,
521 c.files,
522 c.description,
522 c.description,
523 c.extra,
523 c.extra,
524 )
524 )
525 def manifestnode(self):
525 def manifestnode(self):
526 return self._changeset.manifest
526 return self._changeset.manifest
527
527
528 def user(self):
528 def user(self):
529 return self._changeset.user
529 return self._changeset.user
530 def date(self):
530 def date(self):
531 return self._changeset.date
531 return self._changeset.date
532 def files(self):
532 def files(self):
533 return self._changeset.files
533 return self._changeset.files
534 def description(self):
534 def description(self):
535 return self._changeset.description
535 return self._changeset.description
536 def branch(self):
536 def branch(self):
537 return encoding.tolocal(self._changeset.extra.get("branch"))
537 return encoding.tolocal(self._changeset.extra.get("branch"))
538 def closesbranch(self):
538 def closesbranch(self):
539 return 'close' in self._changeset.extra
539 return 'close' in self._changeset.extra
540 def extra(self):
540 def extra(self):
541 """Return a dict of extra information."""
541 """Return a dict of extra information."""
542 return self._changeset.extra
542 return self._changeset.extra
543 def tags(self):
543 def tags(self):
544 """Return a list of byte tag names"""
544 """Return a list of byte tag names"""
545 return self._repo.nodetags(self._node)
545 return self._repo.nodetags(self._node)
546 def bookmarks(self):
546 def bookmarks(self):
547 """Return a list of byte bookmark names."""
547 """Return a list of byte bookmark names."""
548 return self._repo.nodebookmarks(self._node)
548 return self._repo.nodebookmarks(self._node)
549 def phase(self):
549 def phase(self):
550 return self._repo._phasecache.phase(self._repo, self._rev)
550 return self._repo._phasecache.phase(self._repo, self._rev)
551 def hidden(self):
551 def hidden(self):
552 return self._rev in repoview.filterrevs(self._repo, 'visible')
552 return self._rev in repoview.filterrevs(self._repo, 'visible')
553
553
    def isinmemory(self):
        """Whether this context lives only in memory; always False here,
        since this context is backed by the repository store."""
        return False
556
556
557 def children(self):
557 def children(self):
558 """return list of changectx contexts for each child changeset.
558 """return list of changectx contexts for each child changeset.
559
559
560 This returns only the immediate child changesets. Use descendants() to
560 This returns only the immediate child changesets. Use descendants() to
561 recursively walk children.
561 recursively walk children.
562 """
562 """
563 c = self._repo.changelog.children(self._node)
563 c = self._repo.changelog.children(self._node)
564 return [changectx(self._repo, x) for x in c]
564 return [changectx(self._repo, x) for x in c]
565
565
566 def ancestors(self):
566 def ancestors(self):
567 for a in self._repo.changelog.ancestors([self._rev]):
567 for a in self._repo.changelog.ancestors([self._rev]):
568 yield changectx(self._repo, a)
568 yield changectx(self._repo, a)
569
569
570 def descendants(self):
570 def descendants(self):
571 """Recursively yield all children of the changeset.
571 """Recursively yield all children of the changeset.
572
572
573 For just the immediate children, use children()
573 For just the immediate children, use children()
574 """
574 """
575 for d in self._repo.changelog.descendants([self._rev]):
575 for d in self._repo.changelog.descendants([self._rev]):
576 yield changectx(self._repo, d)
576 yield changectx(self._repo, d)
577
577
578 def filectx(self, path, fileid=None, filelog=None):
578 def filectx(self, path, fileid=None, filelog=None):
579 """get a file context from this changeset"""
579 """get a file context from this changeset"""
580 if fileid is None:
580 if fileid is None:
581 fileid = self.filenode(path)
581 fileid = self.filenode(path)
582 return filectx(self._repo, path, fileid=fileid,
582 return filectx(self._repo, path, fileid=fileid,
583 changectx=self, filelog=filelog)
583 changectx=self, filelog=filelog)
584
584
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # c2 has no node of its own: use its first parent's node
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all
            anc = nullid
        elif len(cahs) == 1:
            # unambiguous: a single common-ancestor head
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    # unknown symbol in the config: try the next preference
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched any candidate; fall back
                # to the revlog's own ancestor computation
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return changectx(self._repo, anc)
620
620
621 def descendant(self, other):
621 def descendant(self, other):
622 """True if other is descendant of this changeset"""
622 """True if other is descendant of this changeset"""
623 return self._repo.changelog.descendant(self._rev, other._rev)
623 return self._repo.changelog.descendant(self._rev, other._rev)
624
624
625 def walk(self, match):
625 def walk(self, match):
626 '''Generates matching file names.'''
626 '''Generates matching file names.'''
627
627
628 # Wrap match.bad method to have message with nodeid
628 # Wrap match.bad method to have message with nodeid
629 def bad(fn, msg):
629 def bad(fn, msg):
630 # The manifest doesn't know about subrepos, so don't complain about
630 # The manifest doesn't know about subrepos, so don't complain about
631 # paths into valid subrepos.
631 # paths into valid subrepos.
632 if any(fn == s or fn.startswith(s + '/')
632 if any(fn == s or fn.startswith(s + '/')
633 for s in self.substate):
633 for s in self.substate):
634 return
634 return
635 match.bad(fn, _('no such file in rev %s') % self)
635 match.bad(fn, _('no such file in rev %s') % self)
636
636
637 m = matchmod.badmatch(match, bad)
637 m = matchmod.badmatch(match, bad)
638 return self._manifest.walk(m)
638 return self._manifest.walk(m)
639
639
640 def matches(self, match):
640 def matches(self, match):
641 return self.walk(match)
641 return self.walk(match)
642
642
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    overlayfilectx: duplicate another filecontext with some fields overridden.
    """
    @propertycache
    def _filelog(self):
        # filelog for this file's path, opened lazily from the repo
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        # changeset revision this filectx is bound to, derived from whichever
        # of _changeid/_changectx/_descendantrev the constructor provided
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # file revision node, from the explicit fileid or the changeset
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog revision number of this file revision
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        # truthiness == "the file revision exists"
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # file revision could not be resolved; show an unknown changeset
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash on (path, filenode) to stay consistent with __eq__ below
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    # simple accessors, mostly delegating to the filelog or changectx
    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        # _copied is set elsewhere (by subclasses/constructors) — presumably
        # rename source info or None; TODO confirm against the constructors
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses set this to True to take over comparison (see cmp below)
    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result is crash somewhere else at to some point.
        return lkr

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            # no attached changeset, or linkrev already matches: trust it
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        """Return parent filectxs, substituting rename sources as needed."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # synthesize a null second parent (fileid=-1)
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

    def ancestors(self, followfirst=False):
        """Yield ancestor filectxs, newest (highest linkrev) first."""
        visit = {}
        c = self
        if followfirst:
            # only walk first parents
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            # pop the pending ancestor with the highest (linkrev, filenode)
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())
993
993
994 class filectx(basefilectx):
994 class filectx(basefilectx):
995 """A filecontext object makes access to data related to a particular
995 """A filecontext object makes access to data related to a particular
996 filerevision convenient."""
996 filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the file revision must be given
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only the attributes actually supplied are pre-seeded; the rest are
        # derived lazily by the propertycaches (e.g. _changectx, _filenode)
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
1019
1019
    @propertycache
    def _changectx(self):
        """Changectx for self._changeid, falling back to the unfiltered
        repo if the revision is filtered out (see comments below)."""
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)
1042
1042
1043 def filectx(self, fileid, changeid=None):
1043 def filectx(self, fileid, changeid=None):
1044 '''opens an arbitrary revision of the file without
1044 '''opens an arbitrary revision of the file without
1045 opening a new filelog'''
1045 opening a new filelog'''
1046 return filectx(self._repo, self._path, fileid=fileid,
1046 return filectx(self._repo, self._path, fileid=fileid,
1047 filelog=self._filelog, changeid=changeid)
1047 filelog=self._filelog, changeid=changeid)
1048
1048
1049 def rawdata(self):
1049 def rawdata(self):
1050 return self._filelog.revision(self._filenode, raw=True)
1050 return self._filelog.revision(self._filenode, raw=True)
1051
1051
1052 def rawflags(self):
1052 def rawflags(self):
1053 """low-level revlog flags"""
1053 """low-level revlog flags"""
1054 return self._filelog.flags(self._filerev)
1054 return self._filelog.flags(self._filerev)
1055
1055
    def data(self):
        """Return the contents of this file revision.

        Honors the censor policy: when ``censor.policy`` is ``ignore`` a
        censored node yields empty content instead of aborting.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))
1064
1064
1065 def size(self):
1065 def size(self):
1066 return self._filelog.size(self._filerev)
1066 return self._filelog.size(self._filerev)
1067
1067
    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            # this file revision was introduced by this changeset, so the
            # recorded rename belongs to it
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # a parent already carries this exact file revision, so
                    # this changeset did not perform the rename itself
                    return None
            except error.LookupError:
                # parent doesn't have the file at all; keep checking
                pass
        return renamed
1093
1093
1094 def children(self):
1094 def children(self):
1095 # hard for renames
1095 # hard for renames
1096 c = self._filelog.children(self._filenode)
1096 c = self._filelog.children(self._filenode)
1097 return [filectx(self._repo, self._path, fileid=x,
1097 return [filectx(self._repo, self._path, fileid=x,
1098 filelog=self._filelog) for x in c]
1098 filelog=self._filelog) for x in c]
1099
1099
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            # pre-seed the _status propertycache with the supplied status
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # e.g. "<p1>+": the first parent plus pending changes
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        # dirstate.flagfunc consults the filesystem first and falls back to
        # the manifest-based function built above
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # may have been pre-seeded by __init__ via the 'changes' argument
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date allows tests to pin the commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # an uncommitted context has no recorded subrepo revision yet
        return None

    def manifestnode(self):
        # no manifest has been written for an uncommitted context
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Bookmarks inherited from all parents (concatenated)."""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        """Return the highest (most private) phase among the parents."""
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags ('l', 'x' or '') for path.

        Prefers a cached manifest when one has been materialized; otherwise
        uses the dirstate-backed flag function.
        """
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """Yield the parents, then all their changelog ancestors."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1300
1300
1301 class workingctx(committablectx):
1301 class workingctx(committablectx):
1302 """A workingctx object makes access to data related to
1302 """A workingctx object makes access to data related to
1303 the current working directory convenient.
1303 the current working directory convenient.
1304 date - any valid date string or (unixtime, offset), or None.
1304 date - any valid date string or (unixtime, offset), or None.
1305 user - username string, or None.
1305 user - username string, or None.
1306 extra - a dictionary of extra values, or None.
1306 extra - a dictionary of extra values, or None.
1307 changes - a list of file lists as returned by localrepo.status()
1307 changes - a list of file lists as returned by localrepo.status()
1308 or None to use the repository status.
1308 or None to use the repository status.
1309 """
1309 """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        # all commit-related state handling lives in committablectx;
        # workingctx only specializes behavior on top of it
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1313
1313
1314 def __iter__(self):
1314 def __iter__(self):
1315 d = self._repo.dirstate
1315 d = self._repo.dirstate
1316 for f in d:
1316 for f in d:
1317 if d[f] != 'r':
1317 if d[f] != 'r':
1318 yield f
1318 yield f
1319
1319
1320 def __contains__(self, key):
1320 def __contains__(self, key):
1321 return self._repo.dirstate[key] not in "?r"
1321 return self._repo.dirstate[key] not in "?r"
1322
1322
    def hex(self):
        # the working directory is represented by the magic 'wdirid' node
        return hex(wdirid)
1325
1325
    @propertycache
    def _parents(self):
        """Parent changectxs of the working directory (one or two entries)."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # no second parent: drop the null entry
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]
1332
1332
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1337
1337
1338 def filectx(self, path, filelog=None):
1338 def filectx(self, path, filelog=None):
1339 """get a file context from the working directory"""
1339 """get a file context from the working directory"""
1340 return workingfilectx(self._repo, path, workingctx=self,
1340 return workingfilectx(self._repo, path, workingctx=self,
1341 filelog=filelog)
1341 filelog=filelog)
1342
1342
    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir; short-circuits from cheap checks
        # (second parent, branch change) to the status-based ones
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))
1354
1354
    def add(self, list, prefix=""):
        """Schedule the files in *list* for addition; return the rejected ones.

        Files that do not exist or are neither regular files nor symlinks
        are warned about and rejected.  Very large files and already-tracked
        files only produce a warning.
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    # warn, but still add: large files are expensive to diff
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    # already added, merged or normal: nothing to do
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    # previously removed: resurrect instead of re-adding
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1389
1389
    def forget(self, files, prefix=""):
        """Stop tracking *files* without deleting them; return rejected ones.

        Untracked files are warned about and rejected; files in the 'added'
        state are simply dropped, others are marked removed.
        """
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected
1404
1404
    def undelete(self, list):
        """Restore files marked removed from their parent revision contents."""
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    # take the file from whichever parent has it
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)
1417
1417
    def copy(self, source, dest):
        """Record that *dest* was copied from *source* in the dirstate.

        Warns (and does nothing) when *dest* is missing or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)
1438
1438
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Build a matcher for the working directory's files."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
1450
1450
    def _filtersuspectsymlink(self, files):
        """Drop symlink entries whose on-disk content cannot be a symlink."""
        if not files or self._repo.dirstate._checklink:
            # filesystem supports symlinks natively: nothing to filter
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane
1470
1470
    def _checklookup(self, files):
        """Recheck files the dirstate could not classify by stat alone.

        Returns a ``(modified, deleted, fixup)`` triple: files whose content
        or flags really differ from the first parent, files that vanished in
        the meantime, and files that turned out to be clean (candidates for
        a dirstate fixup).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1500
1500
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                # best-effort: the fixup is an optimization, not required
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1540
1540
1541 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1541 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1542 '''Gets the status from the dirstate -- internal use only.'''
1542 '''Gets the status from the dirstate -- internal use only.'''
1543 subrepos = []
1543 subrepos = []
1544 if '.hgsub' in self:
1544 if '.hgsub' in self:
1545 subrepos = sorted(self.substate)
1545 subrepos = sorted(self.substate)
1546 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1546 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1547 clean=clean, unknown=unknown)
1547 clean=clean, unknown=unknown)
1548
1548
1549 # check for any possibly clean files
1549 # check for any possibly clean files
1550 fixup = []
1550 fixup = []
1551 if cmp:
1551 if cmp:
1552 modified2, deleted2, fixup = self._checklookup(cmp)
1552 modified2, deleted2, fixup = self._checklookup(cmp)
1553 s.modified.extend(modified2)
1553 s.modified.extend(modified2)
1554 s.deleted.extend(deleted2)
1554 s.deleted.extend(deleted2)
1555
1555
1556 if fixup and clean:
1556 if fixup and clean:
1557 s.clean.extend(fixup)
1557 s.clean.extend(fixup)
1558
1558
1559 self._poststatusfixup(s, fixup)
1559 self._poststatusfixup(s, fixup)
1560
1560
1561 if match.always():
1561 if match.always():
1562 # cache for performance
1562 # cache for performance
1563 if s.unknown or s.ignored or s.clean:
1563 if s.unknown or s.ignored or s.clean:
1564 # "_status" is cached with list*=False in the normal route
1564 # "_status" is cached with list*=False in the normal route
1565 self._status = scmutil.status(s.modified, s.added, s.removed,
1565 self._status = scmutil.status(s.modified, s.added, s.removed,
1566 s.deleted, [], [], [])
1566 s.deleted, [], [], [])
1567 else:
1567 else:
1568 self._status = s
1568 self._status = s
1569
1569
1570 return s
1570 return s
1571
1571
1572 @propertycache
1572 @propertycache
1573 def _manifest(self):
1573 def _manifest(self):
1574 """generate a manifest corresponding to the values in self._status
1574 """generate a manifest corresponding to the values in self._status
1575
1575
1576 This reuse the file nodeid from parent, but we use special node
1576 This reuse the file nodeid from parent, but we use special node
1577 identifiers for added and modified files. This is used by manifests
1577 identifiers for added and modified files. This is used by manifests
1578 merge to see that files are different and by update logic to avoid
1578 merge to see that files are different and by update logic to avoid
1579 deleting newly added files.
1579 deleting newly added files.
1580 """
1580 """
1581 return self._buildstatusmanifest(self._status)
1581 return self._buildstatusmanifest(self._status)
1582
1582
1583 def _buildstatusmanifest(self, status):
1583 def _buildstatusmanifest(self, status):
1584 """Builds a manifest that includes the given status results."""
1584 """Builds a manifest that includes the given status results."""
1585 parents = self.parents()
1585 parents = self.parents()
1586
1586
1587 man = parents[0].manifest().copy()
1587 man = parents[0].manifest().copy()
1588
1588
1589 ff = self._flagfunc
1589 ff = self._flagfunc
1590 for i, l in ((addednodeid, status.added),
1590 for i, l in ((addednodeid, status.added),
1591 (modifiednodeid, status.modified)):
1591 (modifiednodeid, status.modified)):
1592 for f in l:
1592 for f in l:
1593 man[f] = i
1593 man[f] = i
1594 try:
1594 try:
1595 man.setflag(f, ff(f))
1595 man.setflag(f, ff(f))
1596 except OSError:
1596 except OSError:
1597 pass
1597 pass
1598
1598
1599 for f in status.deleted + status.removed:
1599 for f in status.deleted + status.removed:
1600 if f in man:
1600 if f in man:
1601 del man[f]
1601 del man[f]
1602
1602
1603 return man
1603 return man
1604
1604
1605 def _buildstatus(self, other, s, match, listignored, listclean,
1605 def _buildstatus(self, other, s, match, listignored, listclean,
1606 listunknown):
1606 listunknown):
1607 """build a status with respect to another context
1607 """build a status with respect to another context
1608
1608
1609 This includes logic for maintaining the fast path of status when
1609 This includes logic for maintaining the fast path of status when
1610 comparing the working directory against its parent, which is to skip
1610 comparing the working directory against its parent, which is to skip
1611 building a new manifest if self (working directory) is not comparing
1611 building a new manifest if self (working directory) is not comparing
1612 against its parent (repo['.']).
1612 against its parent (repo['.']).
1613 """
1613 """
1614 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1614 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1615 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1615 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1616 # might have accidentally ended up with the entire contents of the file
1616 # might have accidentally ended up with the entire contents of the file
1617 # they are supposed to be linking to.
1617 # they are supposed to be linking to.
1618 s.modified[:] = self._filtersuspectsymlink(s.modified)
1618 s.modified[:] = self._filtersuspectsymlink(s.modified)
1619 if other != self._repo['.']:
1619 if other != self._repo['.']:
1620 s = super(workingctx, self)._buildstatus(other, s, match,
1620 s = super(workingctx, self)._buildstatus(other, s, match,
1621 listignored, listclean,
1621 listignored, listclean,
1622 listunknown)
1622 listunknown)
1623 return s
1623 return s
1624
1624
1625 def _matchstatus(self, other, match):
1625 def _matchstatus(self, other, match):
1626 """override the match method with a filter for directory patterns
1626 """override the match method with a filter for directory patterns
1627
1627
1628 We use inheritance to customize the match.bad method only in cases of
1628 We use inheritance to customize the match.bad method only in cases of
1629 workingctx since it belongs only to the working directory when
1629 workingctx since it belongs only to the working directory when
1630 comparing against the parent changeset.
1630 comparing against the parent changeset.
1631
1631
1632 If we aren't comparing against the working directory's parent, then we
1632 If we aren't comparing against the working directory's parent, then we
1633 just use the default match object sent to us.
1633 just use the default match object sent to us.
1634 """
1634 """
1635 if other != self._repo['.']:
1635 if other != self._repo['.']:
1636 def bad(f, msg):
1636 def bad(f, msg):
1637 # 'f' may be a directory pattern from 'match.files()',
1637 # 'f' may be a directory pattern from 'match.files()',
1638 # so 'f not in ctx1' is not enough
1638 # so 'f not in ctx1' is not enough
1639 if f not in other and not other.hasdir(f):
1639 if f not in other and not other.hasdir(f):
1640 self._repo.ui.warn('%s: %s\n' %
1640 self._repo.ui.warn('%s: %s\n' %
1641 (self._repo.dirstate.pathto(f), msg))
1641 (self._repo.dirstate.pathto(f), msg))
1642 match.bad = bad
1642 match.bad = bad
1643 return match
1643 return match
1644
1644
1645 def markcommitted(self, node):
1645 def markcommitted(self, node):
1646 super(workingctx, self).markcommitted(node)
1646 super(workingctx, self).markcommitted(node)
1647
1647
1648 sparse.aftercommit(self._repo, node)
1648 sparse.aftercommit(self._repo, node)
1649
1649
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def nodefor(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        # a rename supplies (source path, source nodeid); pad with None so
        # every entry is a (path, node, filelog) triple
        if renamed:
            candidates = [renamed + (None,)]
        else:
            candidates = [(path, nodefor(parentctxs[0], path), filelog)]
        candidates.extend((path, nodefor(pctx, path), filelog)
                          for pctx in parentctxs[1:])

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in candidates if n != nullid]

    def children(self):
        return []
1696
1696
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)

    def renamed(self):
        source = self._repo.dirstate.copied(self._path)
        if not source:
            return None
        p1manifest = self._changectx._parents[0]._manifest
        return source, p1manifest.get(source, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as inst:
            if inst.errno != errno.ENOENT:
                raise
            # file vanished: fall back to the changectx timestamp
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            # a directory squats on our path; remove it wholesale
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            # also clear any ancestor path that is a file/symlink
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)
1776
1776
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
      'exists': bool?
      'date': date?
      'data': str?
      'flags': str?
      'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache
        self.clean()
1795
1795
1796 def setbase(self, wrappedctx):
1796 def setbase(self, wrappedctx):
1797 self._wrappedctx = wrappedctx
1797 self._wrappedctx = wrappedctx
1798 self._parents = [wrappedctx]
1798 self._parents = [wrappedctx]
1799 # Drop old manifest cache as it is now out of date.
1799 # Drop old manifest cache as it is now out of date.
1800 # This is necessary when, e.g., rebasing several nodes with one
1800 # This is necessary when, e.g., rebasing several nodes with one
1801 # ``overlayworkingctx`` (e.g. with --collapse).
1801 # ``overlayworkingctx`` (e.g. with --collapse).
1802 util.clearcachedproperty(self, '_manifest')
1802 util.clearcachedproperty(self, '_manifest')
1803
1803
1804 def data(self, path):
1804 def data(self, path):
1805 if self.isdirty(path):
1805 if self.isdirty(path):
1806 if self._cache[path]['exists']:
1806 if self._cache[path]['exists']:
1807 if self._cache[path]['data']:
1807 if self._cache[path]['data']:
1808 return self._cache[path]['data']
1808 return self._cache[path]['data']
1809 else:
1809 else:
1810 # Must fallback here, too, because we only set flags.
1810 # Must fallback here, too, because we only set flags.
1811 return self._wrappedctx[path].data()
1811 return self._wrappedctx[path].data()
1812 else:
1812 else:
1813 raise error.ProgrammingError("No such file or directory: %s" %
1813 raise error.ProgrammingError("No such file or directory: %s" %
1814 path)
1814 path)
1815 else:
1815 else:
1816 return self._wrappedctx[path].data()
1816 return self._wrappedctx[path].data()
1817
1817
1818 @propertycache
1818 @propertycache
1819 def _manifest(self):
1819 def _manifest(self):
1820 parents = self.parents()
1820 parents = self.parents()
1821 man = parents[0].manifest().copy()
1821 man = parents[0].manifest().copy()
1822
1822
1823 flag = self._flagfunc
1823 flag = self._flagfunc
1824 for path in self.added():
1824 for path in self.added():
1825 man[path] = addednodeid
1825 man[path] = addednodeid
1826 man.setflag(path, flag(path))
1826 man.setflag(path, flag(path))
1827 for path in self.modified():
1827 for path in self.modified():
1828 man[path] = modifiednodeid
1828 man[path] = modifiednodeid
1829 man.setflag(path, flag(path))
1829 man.setflag(path, flag(path))
1830 for path in self.removed():
1830 for path in self.removed():
1831 del man[path]
1831 del man[path]
1832 return man
1832 return man
1833
1833
1834 @propertycache
1834 @propertycache
1835 def _flagfunc(self):
1835 def _flagfunc(self):
1836 def f(path):
1836 def f(path):
1837 return self._cache[path]['flags']
1837 return self._cache[path]['flags']
1838 return f
1838 return f
1839
1839
1840 def files(self):
1840 def files(self):
1841 return sorted(self.added() + self.modified() + self.removed())
1841 return sorted(self.added() + self.modified() + self.removed())
1842
1842
1843 def modified(self):
1843 def modified(self):
1844 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1844 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1845 self._existsinparent(f)]
1845 self._existsinparent(f)]
1846
1846
1847 def added(self):
1847 def added(self):
1848 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1848 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1849 not self._existsinparent(f)]
1849 not self._existsinparent(f)]
1850
1850
1851 def removed(self):
1851 def removed(self):
1852 return [f for f in self._cache.keys() if
1852 return [f for f in self._cache.keys() if
1853 not self._cache[f]['exists'] and self._existsinparent(f)]
1853 not self._cache[f]['exists'] and self._existsinparent(f)]
1854
1854
1855 def isinmemory(self):
1855 def isinmemory(self):
1856 return True
1856 return True
1857
1857
1858 def filedate(self, path):
1858 def filedate(self, path):
1859 if self.isdirty(path):
1859 if self.isdirty(path):
1860 return self._cache[path]['date']
1860 return self._cache[path]['date']
1861 else:
1861 else:
1862 return self._wrappedctx[path].date()
1862 return self._wrappedctx[path].date()
1863
1863
1864 def markcopied(self, path, origin):
1864 def markcopied(self, path, origin):
1865 if self.isdirty(path):
1865 if self.isdirty(path):
1866 self._cache[path]['copied'] = origin
1866 self._cache[path]['copied'] = origin
1867 else:
1867 else:
1868 raise error.ProgrammingError('markcopied() called on clean context')
1868 raise error.ProgrammingError('markcopied() called on clean context')
1869
1869
1870 def copydata(self, path):
1870 def copydata(self, path):
1871 if self.isdirty(path):
1871 if self.isdirty(path):
1872 return self._cache[path]['copied']
1872 return self._cache[path]['copied']
1873 else:
1873 else:
1874 raise error.ProgrammingError('copydata() called on clean context')
1874 raise error.ProgrammingError('copydata() called on clean context')
1875
1875
1876 def flags(self, path):
1876 def flags(self, path):
1877 if self.isdirty(path):
1877 if self.isdirty(path):
1878 if self._cache[path]['exists']:
1878 if self._cache[path]['exists']:
1879 return self._cache[path]['flags']
1879 return self._cache[path]['flags']
1880 else:
1880 else:
1881 raise error.ProgrammingError("No such file or directory: %s" %
1881 raise error.ProgrammingError("No such file or directory: %s" %
1882 self._path)
1882 self._path)
1883 else:
1883 else:
1884 return self._wrappedctx[path].flags()
1884 return self._wrappedctx[path].flags()
1885
1885
1886 def _existsinparent(self, path):
1886 def _existsinparent(self, path):
1887 try:
1887 try:
1888 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1888 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
1889 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1889 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1890 # with an ``exists()`` function.
1890 # with an ``exists()`` function.
1891 self._wrappedctx[path]
1891 self._wrappedctx[path]
1892 return True
1892 return True
1893 except error.ManifestLookupError:
1893 except error.ManifestLookupError:
1894 return False
1894 return False
1895
1895
1896 def _auditconflicts(self, path):
1896 def _auditconflicts(self, path):
1897 """Replicates conflict checks done by wvfs.write().
1897 """Replicates conflict checks done by wvfs.write().
1898
1898
1899 Since we never write to the filesystem and never call `applyupdates` in
1899 Since we never write to the filesystem and never call `applyupdates` in
1900 IMM, we'll never check that a path is actually writable -- e.g., because
1900 IMM, we'll never check that a path is actually writable -- e.g., because
1901 it adds `a/foo`, but `a` is actually a file in the other commit.
1901 it adds `a/foo`, but `a` is actually a file in the other commit.
1902 """
1902 """
1903 def fail(path, component):
1903 def fail(path, component):
1904 # p1() is the base and we're receiving "writes" for p2()'s
1904 # p1() is the base and we're receiving "writes" for p2()'s
1905 # files.
1905 # files.
1906 if 'l' in self.p1()[component].flags():
1906 if 'l' in self.p1()[component].flags():
1907 raise error.Abort("error: %s conflicts with symlink %s "
1907 raise error.Abort("error: %s conflicts with symlink %s "
1908 "in %s." % (path, component,
1908 "in %s." % (path, component,
1909 self.p1().rev()))
1909 self.p1().rev()))
1910 else:
1910 else:
1911 raise error.Abort("error: '%s' conflicts with file '%s' in "
1911 raise error.Abort("error: '%s' conflicts with file '%s' in "
1912 "%s." % (path, component,
1912 "%s." % (path, component,
1913 self.p1().rev()))
1913 self.p1().rev()))
1914
1914
1915 # Test that each new directory to be created to write this path from p2
1915 # Test that each new directory to be created to write this path from p2
1916 # is not a file in p1.
1916 # is not a file in p1.
1917 components = path.split('/')
1917 components = path.split('/')
1918 for i in xrange(len(components)):
1918 for i in xrange(len(components)):
1919 component = "/".join(components[0:i])
1919 component = "/".join(components[0:i])
1920 if component in self.p1():
1920 if component in self.p1():
1921 fail(path, component)
1921 fail(path, component)
1922
1922
1923 # Test the other direction -- that this path from p2 isn't a directory
1923 # Test the other direction -- that this path from p2 isn't a directory
1924 # in p1 (test that p1 doesn't any paths matching `path/*`).
1924 # in p1 (test that p1 doesn't any paths matching `path/*`).
1925 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1925 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1926 matches = self.p1().manifest().matches(match)
1926 matches = self.p1().manifest().matches(match)
1927 if len(matches) > 0:
1927 if len(matches) > 0:
1928 if len(matches) == 1 and matches.keys()[0] == path:
1928 if len(matches) == 1 and matches.keys()[0] == path:
1929 return
1929 return
1930 raise error.Abort("error: file '%s' cannot be written because "
1930 raise error.Abort("error: file '%s' cannot be written because "
1931 " '%s/' is a folder in %s (containing %d "
1931 " '%s/' is a folder in %s (containing %d "
1932 "entries: %s)"
1932 "entries: %s)"
1933 % (path, path, self.p1(), len(matches),
1933 % (path, path, self.p1(), len(matches),
1934 ', '.join(matches.keys())))
1934 ', '.join(matches.keys())))
1935
1935
1936 def write(self, path, data, flags='', **kwargs):
1936 def write(self, path, data, flags='', **kwargs):
1937 if data is None:
1937 if data is None:
1938 raise error.ProgrammingError("data must be non-None")
1938 raise error.ProgrammingError("data must be non-None")
1939 self._auditconflicts(path)
1939 self._auditconflicts(path)
1940 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1940 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1941 flags=flags)
1941 flags=flags)
1942
1942
1943 def setflags(self, path, l, x):
1943 def setflags(self, path, l, x):
1944 self._markdirty(path, exists=True, date=dateutil.makedate(),
1944 self._markdirty(path, exists=True, date=dateutil.makedate(),
1945 flags=(l and 'l' or '') + (x and 'x' or ''))
1945 flags=(l and 'l' or '') + (x and 'x' or ''))
1946
1946
1947 def remove(self, path):
1947 def remove(self, path):
1948 self._markdirty(path, exists=False)
1948 self._markdirty(path, exists=False)
1949
1949
1950 def exists(self, path):
1950 def exists(self, path):
1951 """exists behaves like `lexists`, but needs to follow symlinks and
1951 """exists behaves like `lexists`, but needs to follow symlinks and
1952 return False if they are broken.
1952 return False if they are broken.
1953 """
1953 """
1954 if self.isdirty(path):
1954 if self.isdirty(path):
1955 # If this path exists and is a symlink, "follow" it by calling
1955 # If this path exists and is a symlink, "follow" it by calling
1956 # exists on the destination path.
1956 # exists on the destination path.
1957 if (self._cache[path]['exists'] and
1957 if (self._cache[path]['exists'] and
1958 'l' in self._cache[path]['flags']):
1958 'l' in self._cache[path]['flags']):
1959 return self.exists(self._cache[path]['data'].strip())
1959 return self.exists(self._cache[path]['data'].strip())
1960 else:
1960 else:
1961 return self._cache[path]['exists']
1961 return self._cache[path]['exists']
1962
1962
1963 return self._existsinparent(path)
1963 return self._existsinparent(path)
1964
1964
1965 def lexists(self, path):
1965 def lexists(self, path):
1966 """lexists returns True if the path exists"""
1966 """lexists returns True if the path exists"""
1967 if self.isdirty(path):
1967 if self.isdirty(path):
1968 return self._cache[path]['exists']
1968 return self._cache[path]['exists']
1969
1969
1970 return self._existsinparent(path)
1970 return self._existsinparent(path)
1971
1971
1972 def size(self, path):
1972 def size(self, path):
1973 if self.isdirty(path):
1973 if self.isdirty(path):
1974 if self._cache[path]['exists']:
1974 if self._cache[path]['exists']:
1975 return len(self._cache[path]['data'])
1975 return len(self._cache[path]['data'])
1976 else:
1976 else:
1977 raise error.ProgrammingError("No such file or directory: %s" %
1977 raise error.ProgrammingError("No such file or directory: %s" %
1978 self._path)
1978 self._path)
1979 return self._wrappedctx[path].size()
1979 return self._wrappedctx[path].size()
1980
1980
1981 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1981 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1982 user=None, editor=None):
1982 user=None, editor=None):
1983 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1983 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1984 committed.
1984 committed.
1985
1985
1986 ``text`` is the commit message.
1986 ``text`` is the commit message.
1987 ``parents`` (optional) are rev numbers.
1987 ``parents`` (optional) are rev numbers.
1988 """
1988 """
1989 # Default parents to the wrapped contexts' if not passed.
1989 # Default parents to the wrapped contexts' if not passed.
1990 if parents is None:
1990 if parents is None:
1991 parents = self._wrappedctx.parents()
1991 parents = self._wrappedctx.parents()
1992 if len(parents) == 1:
1992 if len(parents) == 1:
1993 parents = (parents[0], None)
1993 parents = (parents[0], None)
1994
1994
1995 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1995 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1996 if parents[1] is None:
1996 if parents[1] is None:
1997 parents = (self._repo[parents[0]], None)
1997 parents = (self._repo[parents[0]], None)
1998 else:
1998 else:
1999 parents = (self._repo[parents[0]], self._repo[parents[1]])
1999 parents = (self._repo[parents[0]], self._repo[parents[1]])
2000
2000
2001 files = self._cache.keys()
2001 files = self._cache.keys()
2002 def getfile(repo, memctx, path):
2002 def getfile(repo, memctx, path):
2003 if self._cache[path]['exists']:
2003 if self._cache[path]['exists']:
2004 return memfilectx(repo, memctx, path,
2004 return memfilectx(repo, memctx, path,
2005 self._cache[path]['data'],
2005 self._cache[path]['data'],
2006 'l' in self._cache[path]['flags'],
2006 'l' in self._cache[path]['flags'],
2007 'x' in self._cache[path]['flags'],
2007 'x' in self._cache[path]['flags'],
2008 self._cache[path]['copied'])
2008 self._cache[path]['copied'])
2009 else:
2009 else:
2010 # Returning None, but including the path in `files`, is
2010 # Returning None, but including the path in `files`, is
2011 # necessary for memctx to register a deletion.
2011 # necessary for memctx to register a deletion.
2012 return None
2012 return None
2013 return memctx(self._repo, parents, text, files, getfile, date=date,
2013 return memctx(self._repo, parents, text, files, getfile, date=date,
2014 extra=extra, user=user, branch=branch, editor=editor)
2014 extra=extra, user=user, branch=branch, editor=editor)
2015
2015
2016 def isdirty(self, path):
2016 def isdirty(self, path):
2017 return path in self._cache
2017 return path in self._cache
2018
2018
def isempty(self):
    """Tell whether this overlay would produce an empty commit.

    Entries that are actually clean must be evicted first, otherwise a
    no-op change would look like a real one.
    """
    self._compact()
    return not self._cache
2024
2024
def clean(self):
    """Forget every pending change by resetting the overlay cache."""
    self._cache = {}
2027
2027
2028 def _compact(self):
2028 def _compact(self):
2029 """Removes keys from the cache that are actually clean, by comparing
2029 """Removes keys from the cache that are actually clean, by comparing
2030 them with the underlying context.
2030 them with the underlying context.
2031
2031
2032 This can occur during the merge process, e.g. by passing --tool :local
2032 This can occur during the merge process, e.g. by passing --tool :local
2033 to resolve a conflict.
2033 to resolve a conflict.
2034 """
2034 """
2035 keys = []
2035 keys = []
2036 for path in self._cache.keys():
2036 for path in self._cache.keys():
2037 cache = self._cache[path]
2037 cache = self._cache[path]
2038 try:
2038 try:
2039 underlying = self._wrappedctx[path]
2039 underlying = self._wrappedctx[path]
2040 if (underlying.data() == cache['data'] and
2040 if (underlying.data() == cache['data'] and
2041 underlying.flags() == cache['flags']):
2041 underlying.flags() == cache['flags']):
2042 keys.append(path)
2042 keys.append(path)
2043 except error.ManifestLookupError:
2043 except error.ManifestLookupError:
2044 # Path not in the underlying manifest (created).
2044 # Path not in the underlying manifest (created).
2045 continue
2045 continue
2046
2046
2047 for path in keys:
2047 for path in keys:
2048 del self._cache[path]
2048 del self._cache[path]
2049 return keys
2049 return keys
2050
2050
2051 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2051 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2052 self._cache[path] = {
2052 self._cache[path] = {
2053 'exists': exists,
2053 'exists': exists,
2054 'data': data,
2054 'data': data,
2055 'date': date,
2055 'date': date,
2056 'flags': flags,
2056 'flags': flags,
2057 'copied': None,
2057 'copied': None,
2058 }
2058 }
2059
2059
def filectx(self, path, filelog=None):
    """Return a file context for ``path`` layered on this overlay."""
    return overlayworkingfilectx(
        self._repo, path, parent=self, filelog=filelog)
2063
2063
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx``, intercepting all writes into an in-memory
    cache that can later be flushed through by calling ``flush()``.

    Most operations simply delegate to the parent (overlay) context,
    keyed by this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # compare by content only; True means "different"
        return fctx.data() != self.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # symlinks are not followed specially here, so exists == lexists
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        source = self._parent.copydata(self._path)
        if not source:
            return None
        p1mf = self._changectx._parents[0]._manifest
        # nullid when the copy source is absent from p1's manifest
        return source, p1mf.get(source, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for an in-memory file
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no unknown files can exist in an in-memory overlay
        pass
2122
2122
class workingcommitctx(workingctx):
    """Convenient access to the data of the revision being committed.

    Working-directory changes that are not part of this commit are
    hidden from callers of this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # Deliberately names ``workingctx`` (not ``workingcommitctx``)
        # in super() so workingctx.__init__ is bypassed and the
        # grandparent initializer receives the precomputed ``changes``
        # status instead of consulting the dirstate.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return only the files from ``self._status`` selected by ``match``.

        Uncommitted files appear "clean" through this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        st = self._status
        return scmutil.status([f for f in st.modified if match(f)],
                              [f for f in st.added if match(f)],
                              [f for f in st.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """The set of files touched by this commit."""
        changed = set(self._status.modified)
        changed.update(self._status.added, self._status.removed)
        return changed
2158
2158
def makecachingfilectxfn(func):
    """Wrap ``func`` in a filectxfn that memoizes on the path argument.

    util.cachefunc cannot be used here because it would key on *all*
    arguments, and the repo/memctx arguments would create a reference
    cycle.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            value = func(repo, memctx, path)
            cache[path] = value
            return value

    return getfilectx
2174
2174
def memfilefromctx(ctx):
    """Return a filectxfn that reads file data out of ``ctx``.

    Convenience helper for building a memctx based on another context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # Only the source path of a rename is tracked, not its revision
        # (odd, but the tuple's second element is simply dropped here).
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx
2193
2193
def memfilefrompatch(patchstore):
    """Return a filectxfn backed by a patch store.

    Convenience helper for building a memctx based on a patchstore
    (e.g. a ``patch.filestore`` object).
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            # the patch removed this file
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx
2208
2208
class memctx(committablectx):
    """In-memory commit context, committed via localrepo.commitctx().

    Revision metadata is supplied at construction time, while file data
    is provided lazily through a callback.  ``repo`` is the current
    localrepo, ``parents`` is a sequence of two parent revision
    identifiers (pass None for a missing parent), ``text`` is the
    commit message, and ``files`` lists the names of the files touched
    by the revision (normalized, relative to the repository root).

    ``filectxfn(repo, memctx, path)`` is a callable receiving the
    repository, this memctx object and the normalized path of a
    requested file, relative to the repository root.  It is invoked by
    the commit machinery for every file in ``files``, in no particular
    order.  It returns a memfilectx for a file available in the
    revision being committed (updated or added), and None for a removed
    file in recent Mercurial.  A move is expressed by marking the
    source file removed and adding the target with copy information
    (see memfilectx).

    ``user`` receives the committer name and defaults to the current
    repository username; ``date`` is any format supported by
    dateutil.parsedate() and defaults to the current date; ``extra`` is
    an optional metadata dictionary.
    """

    # Mercurial <= 3.1 expects filectxfn to raise IOError for missing
    # files.  Extensions that must stay compatible across 3.1 can use
    # this field to decide what to do in their filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        p1, p2 = [(p or nullid) for p in parents]
        self._parents = [self._repo[p] for p in (p1, p2)]
        self._files = sorted(set(files))
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # a plain store: adapt it to the callback interface
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """Get a file context from the working directory.

        Returns None if the file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """Commit this context to the repository."""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """Generate a manifest from the return values of filectxfn."""
        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents()  # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
            if len(p) > 1:
                p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate the exact status from the ``files`` given at
        construction."""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" cannot be used to detect a second
        # parent: _parents is explicitly initialized as a list of
        # exactly two entries.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2331
2331
class memfilectx(committablefilectx):
    """An in-memory file, ready to be committed.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        flags = ''
        if islink:
            flags += 'l'
        if isexec:
            flags += 'x'
        self._flags = flags
        self._copied = (copied, nullid) if copied else None

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2364
2364
class overlayfilectx(committablefilectx):
    """Like memfilectx, but built from an original filectx plus optional
    overrides for parts of it.

    This is useful when fctx.data() is expensive (i.e. the flag
    processor is expensive) and the raw data, flags and filenode could
    be reused (e.g. rebase, or a mode-only amend of a REVIDX_EXTSTORED
    file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None, or a function overriding data (file content);
        it is a function so it can stay lazy.  path, flags, copied,
        ctx: None, or an overriding value.

        copied may be (path, rev) or False.  A bare path is converted
        to (path, nullid), which simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # rawdata, rawflags and filenode may be reused as long as data,
        # copied (which could affect data) and ctx (which could affect
        # filelog parents) are all unmodified
        # (repo._filecommit should double-check filelog parents).
        #
        # path and flags are hashed in the manifestlog, not the filelog,
        # so they do not influence reusability here.
        #
        # Overriding ctx or copied with a value equal to the original's
        # still counts as reusable.  originalfctx.renamed() may be a bit
        # expensive, so it is only called when necessary; datafunc is
        # assumed to always be expensive and is never called for this
        # "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # inherit the cached fields from originalfctx
            for name in ('rawdata', 'rawflags', '_filenode', '_filerev'):
                if util.safehasattr(originalfctx, name):
                    setattr(self, name, getattr(originalfctx, name))

    def data(self):
        return self._datafunc()
2435
2435
2436 class metadataonlyctx(committablectx):
2436 class metadataonlyctx(committablectx):
2437 """Like memctx but it's reusing the manifest of different commit.
2437 """Like memctx but it's reusing the manifest of different commit.
2438 Intended to be used by lightweight operations that are creating
2438 Intended to be used by lightweight operations that are creating
2439 metadata-only changes.
2439 metadata-only changes.
2440
2440
2441 Revision information is supplied at initialization time. 'repo' is the
2441 Revision information is supplied at initialization time. 'repo' is the
2442 current localrepo, 'ctx' is original revision which manifest we're reuisng
2442 current localrepo, 'ctx' is original revision which manifest we're reuisng
2443 'parents' is a sequence of two parent revisions identifiers (pass None for
2443 'parents' is a sequence of two parent revisions identifiers (pass None for
2444 every missing parent), 'text' is the commit.
2444 every missing parent), 'text' is the commit.
2445
2445
2446 user receives the committer name and defaults to current repository
2446 user receives the committer name and defaults to current repository
2447 username, date is the commit date in any format supported by
2447 username, date is the commit date in any format supported by
2448 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2448 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2449 metadata or is left empty.
2449 metadata or is left empty.
2450 """
2450 """
2451 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2451 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2452 date=None, extra=None, editor=False):
2452 date=None, extra=None, editor=False):
2453 if text is None:
2453 if text is None:
2454 text = originalctx.description()
2454 text = originalctx.description()
2455 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2455 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2456 self._rev = None
2456 self._rev = None
2457 self._node = None
2457 self._node = None
2458 self._originalctx = originalctx
2458 self._originalctx = originalctx
2459 self._manifestnode = originalctx.manifestnode()
2459 self._manifestnode = originalctx.manifestnode()
2460 if parents is None:
2460 if parents is None:
2461 parents = originalctx.parents()
2461 parents = originalctx.parents()
2462 else:
2462 else:
2463 parents = [repo[p] for p in parents if p is not None]
2463 parents = [repo[p] for p in parents if p is not None]
2464 parents = parents[:]
2464 parents = parents[:]
2465 while len(parents) < 2:
2465 while len(parents) < 2:
2466 parents.append(repo[nullid])
2466 parents.append(repo[nullid])
2467 p1, p2 = self._parents = parents
2467 p1, p2 = self._parents = parents
2468
2468
2469 # sanity check to ensure that the reused manifest parents are
2469 # sanity check to ensure that the reused manifest parents are
2470 # manifests of our commit parents
2470 # manifests of our commit parents
2471 mp1, mp2 = self.manifestctx().parents
2471 mp1, mp2 = self.manifestctx().parents
2472 if p1 != nullid and p1.manifestnode() != mp1:
2472 if p1 != nullid and p1.manifestnode() != mp1:
2473 raise RuntimeError('can\'t reuse the manifest: '
2473 raise RuntimeError('can\'t reuse the manifest: '
2474 'its p1 doesn\'t match the new ctx p1')
2474 'its p1 doesn\'t match the new ctx p1')
2475 if p2 != nullid and p2.manifestnode() != mp2:
2475 if p2 != nullid and p2.manifestnode() != mp2:
2476 raise RuntimeError('can\'t reuse the manifest: '
2476 raise RuntimeError('can\'t reuse the manifest: '
2477 'its p2 doesn\'t match the new ctx p2')
2477 'its p2 doesn\'t match the new ctx p2')
2478
2478
2479 self._files = originalctx.files()
2479 self._files = originalctx.files()
2480 self.substate = {}
2480 self.substate = {}
2481
2481
2482 if editor:
2482 if editor:
2483 self._text = editor(self._repo, self, [])
2483 self._text = editor(self._repo, self, [])
2484 self._repo.savecommitmessage(self._text)
2484 self._repo.savecommitmessage(self._text)
2485
2485
2486 def manifestnode(self):
2486 def manifestnode(self):
2487 return self._manifestnode
2487 return self._manifestnode
2488
2488
2489 @property
2489 @property
2490 def _manifestctx(self):
2490 def _manifestctx(self):
2491 return self._repo.manifestlog[self._manifestnode]
2491 return self._repo.manifestlog[self._manifestnode]
2492
2492
2493 def filectx(self, path, filelog=None):
2493 def filectx(self, path, filelog=None):
2494 return self._originalctx.filectx(path, filelog=filelog)
2494 return self._originalctx.filectx(path, filelog=filelog)
2495
2495
2496 def commit(self):
2496 def commit(self):
2497 """commit context to the repo"""
2497 """commit context to the repo"""
2498 return self._repo.commitctx(self)
2498 return self._repo.commitctx(self)
2499
2499
2500 @property
2500 @property
2501 def _manifest(self):
2501 def _manifest(self):
2502 return self._originalctx.manifest()
2502 return self._originalctx.manifest()
2503
2503
2504 @propertycache
2504 @propertycache
2505 def _status(self):
2505 def _status(self):
2506 """Calculate exact status from ``files`` specified in the ``origctx``
2506 """Calculate exact status from ``files`` specified in the ``origctx``
2507 and parents manifests.
2507 and parents manifests.
2508 """
2508 """
2509 man1 = self.p1().manifest()
2509 man1 = self.p1().manifest()
2510 p2 = self._parents[1]
2510 p2 = self._parents[1]
2511 # "1 < len(self._parents)" can't be used for checking
2511 # "1 < len(self._parents)" can't be used for checking
2512 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2512 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2513 # explicitly initialized by the list, of which length is 2.
2513 # explicitly initialized by the list, of which length is 2.
2514 if p2.node() != nullid:
2514 if p2.node() != nullid:
2515 man2 = p2.manifest()
2515 man2 = p2.manifest()
2516 managing = lambda f: f in man1 or f in man2
2516 managing = lambda f: f in man1 or f in man2
2517 else:
2517 else:
2518 managing = lambda f: f in man1
2518 managing = lambda f: f in man1
2519
2519
2520 modified, added, removed = [], [], []
2520 modified, added, removed = [], [], []
2521 for f in self._files:
2521 for f in self._files:
2522 if not managing(f):
2522 if not managing(f):
2523 added.append(f)
2523 added.append(f)
2524 elif f in self:
2524 elif f in self:
2525 modified.append(f)
2525 modified.append(f)
2526 else:
2526 else:
2527 removed.append(f)
2527 removed.append(f)
2528
2528
2529 return scmutil.status(modified, added, removed, [], [], [], [])
2529 return scmutil.status(modified, added, removed, [], [], [], [])
2530
2530
2531 class arbitraryfilectx(object):
2531 class arbitraryfilectx(object):
2532 """Allows you to use filectx-like functions on a file in an arbitrary
2532 """Allows you to use filectx-like functions on a file in an arbitrary
2533 location on disk, possibly not in the working directory.
2533 location on disk, possibly not in the working directory.
2534 """
2534 """
2535 def __init__(self, path, repo=None):
2535 def __init__(self, path, repo=None):
2536 # Repo is optional because contrib/simplemerge uses this class.
2536 # Repo is optional because contrib/simplemerge uses this class.
2537 self._repo = repo
2537 self._repo = repo
2538 self._path = path
2538 self._path = path
2539
2539
2540 def cmp(self, fctx):
2540 def cmp(self, fctx):
2541 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2541 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2542 # path if either side is a symlink.
2542 # path if either side is a symlink.
2543 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2543 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2544 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2544 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2545 # Add a fast-path for merge if both sides are disk-backed.
2545 # Add a fast-path for merge if both sides are disk-backed.
2546 # Note that filecmp uses the opposite return values (True if same)
2546 # Note that filecmp uses the opposite return values (True if same)
2547 # from our cmp functions (True if different).
2547 # from our cmp functions (True if different).
2548 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2548 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2549 return self.data() != fctx.data()
2549 return self.data() != fctx.data()
2550
2550
2551 def path(self):
2551 def path(self):
2552 return self._path
2552 return self._path
2553
2553
2554 def flags(self):
2554 def flags(self):
2555 return ''
2555 return ''
2556
2556
2557 def data(self):
2557 def data(self):
2558 return util.readfile(self._path)
2558 return util.readfile(self._path)
2559
2559
2560 def decodeddata(self):
2560 def decodeddata(self):
2561 with open(self._path, "rb") as f:
2561 with open(self._path, "rb") as f:
2562 return f.read()
2562 return f.read()
2563
2563
2564 def remove(self):
2564 def remove(self):
2565 util.unlink(self._path)
2565 util.unlink(self._path)
2566
2566
2567 def write(self, data, flags, **kwargs):
2567 def write(self, data, flags, **kwargs):
2568 assert not flags
2568 assert not flags
2569 with open(self._path, "w") as f:
2569 with open(self._path, "w") as f:
2570 f.write(data)
2570 f.write(data)
@@ -1,1489 +1,1498 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import re
14 import re
15 import socket
15 import socket
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 hex,
21 hex,
22 nullid,
22 nullid,
23 short,
23 short,
24 wdirid,
24 wdirid,
25 wdirrev,
25 wdirrev,
26 )
26 )
27
27
28 from . import (
28 from . import (
29 encoding,
29 encoding,
30 error,
30 error,
31 match as matchmod,
31 match as matchmod,
32 obsolete,
32 obsolete,
33 obsutil,
33 obsutil,
34 pathutil,
34 pathutil,
35 phases,
35 phases,
36 pycompat,
36 pycompat,
37 revsetlang,
37 revsetlang,
38 similar,
38 similar,
39 url,
39 url,
40 util,
40 util,
41 vfs,
41 vfs,
42 )
42 )
43
43
44 from .utils import (
44 from .utils import (
45 procutil,
45 procutil,
46 stringutil,
46 stringutil,
47 )
47 )
48
48
49 if pycompat.iswindows:
49 if pycompat.iswindows:
50 from . import scmwindows as scmplatform
50 from . import scmwindows as scmplatform
51 else:
51 else:
52 from . import scmposix as scmplatform
52 from . import scmposix as scmplatform
53
53
54 termsize = scmplatform.termsize
54 termsize = scmplatform.termsize
55
55
56 class status(tuple):
56 class status(tuple):
57 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
57 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
58 and 'ignored' properties are only relevant to the working copy.
58 and 'ignored' properties are only relevant to the working copy.
59 '''
59 '''
60
60
61 __slots__ = ()
61 __slots__ = ()
62
62
63 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
63 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
64 clean):
64 clean):
65 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
65 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
66 ignored, clean))
66 ignored, clean))
67
67
68 @property
68 @property
69 def modified(self):
69 def modified(self):
70 '''files that have been modified'''
70 '''files that have been modified'''
71 return self[0]
71 return self[0]
72
72
73 @property
73 @property
74 def added(self):
74 def added(self):
75 '''files that have been added'''
75 '''files that have been added'''
76 return self[1]
76 return self[1]
77
77
78 @property
78 @property
79 def removed(self):
79 def removed(self):
80 '''files that have been removed'''
80 '''files that have been removed'''
81 return self[2]
81 return self[2]
82
82
83 @property
83 @property
84 def deleted(self):
84 def deleted(self):
85 '''files that are in the dirstate, but have been deleted from the
85 '''files that are in the dirstate, but have been deleted from the
86 working copy (aka "missing")
86 working copy (aka "missing")
87 '''
87 '''
88 return self[3]
88 return self[3]
89
89
90 @property
90 @property
91 def unknown(self):
91 def unknown(self):
92 '''files not in the dirstate that are not ignored'''
92 '''files not in the dirstate that are not ignored'''
93 return self[4]
93 return self[4]
94
94
95 @property
95 @property
96 def ignored(self):
96 def ignored(self):
97 '''files not in the dirstate that are ignored (by _dirignore())'''
97 '''files not in the dirstate that are ignored (by _dirignore())'''
98 return self[5]
98 return self[5]
99
99
100 @property
100 @property
101 def clean(self):
101 def clean(self):
102 '''files that have not been modified'''
102 '''files that have not been modified'''
103 return self[6]
103 return self[6]
104
104
105 def __repr__(self, *args, **kwargs):
105 def __repr__(self, *args, **kwargs):
106 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
106 return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
107 'unknown=%r, ignored=%r, clean=%r>') % self)
107 'unknown=%r, ignored=%r, clean=%r>') % self)
108
108
109 def itersubrepos(ctx1, ctx2):
109 def itersubrepos(ctx1, ctx2):
110 """find subrepos in ctx1 or ctx2"""
110 """find subrepos in ctx1 or ctx2"""
111 # Create a (subpath, ctx) mapping where we prefer subpaths from
111 # Create a (subpath, ctx) mapping where we prefer subpaths from
112 # ctx1. The subpaths from ctx2 are important when the .hgsub file
112 # ctx1. The subpaths from ctx2 are important when the .hgsub file
113 # has been modified (in ctx2) but not yet committed (in ctx1).
113 # has been modified (in ctx2) but not yet committed (in ctx1).
114 subpaths = dict.fromkeys(ctx2.substate, ctx2)
114 subpaths = dict.fromkeys(ctx2.substate, ctx2)
115 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
115 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
116
116
117 missing = set()
117 missing = set()
118
118
119 for subpath in ctx2.substate:
119 for subpath in ctx2.substate:
120 if subpath not in ctx1.substate:
120 if subpath not in ctx1.substate:
121 del subpaths[subpath]
121 del subpaths[subpath]
122 missing.add(subpath)
122 missing.add(subpath)
123
123
124 for subpath, ctx in sorted(subpaths.iteritems()):
124 for subpath, ctx in sorted(subpaths.iteritems()):
125 yield subpath, ctx.sub(subpath)
125 yield subpath, ctx.sub(subpath)
126
126
127 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
127 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
128 # status and diff will have an accurate result when it does
128 # status and diff will have an accurate result when it does
129 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
129 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
130 # against itself.
130 # against itself.
131 for subpath in missing:
131 for subpath in missing:
132 yield subpath, ctx2.nullsub(subpath, ctx1)
132 yield subpath, ctx2.nullsub(subpath, ctx1)
133
133
134 def nochangesfound(ui, repo, excluded=None):
134 def nochangesfound(ui, repo, excluded=None):
135 '''Report no changes for push/pull, excluded is None or a list of
135 '''Report no changes for push/pull, excluded is None or a list of
136 nodes excluded from the push/pull.
136 nodes excluded from the push/pull.
137 '''
137 '''
138 secretlist = []
138 secretlist = []
139 if excluded:
139 if excluded:
140 for n in excluded:
140 for n in excluded:
141 ctx = repo[n]
141 ctx = repo[n]
142 if ctx.phase() >= phases.secret and not ctx.extinct():
142 if ctx.phase() >= phases.secret and not ctx.extinct():
143 secretlist.append(n)
143 secretlist.append(n)
144
144
145 if secretlist:
145 if secretlist:
146 ui.status(_("no changes found (ignored %d secret changesets)\n")
146 ui.status(_("no changes found (ignored %d secret changesets)\n")
147 % len(secretlist))
147 % len(secretlist))
148 else:
148 else:
149 ui.status(_("no changes found\n"))
149 ui.status(_("no changes found\n"))
150
150
151 def callcatch(ui, func):
151 def callcatch(ui, func):
152 """call func() with global exception handling
152 """call func() with global exception handling
153
153
154 return func() if no exception happens. otherwise do some error handling
154 return func() if no exception happens. otherwise do some error handling
155 and return an exit code accordingly. does not handle all exceptions.
155 and return an exit code accordingly. does not handle all exceptions.
156 """
156 """
157 try:
157 try:
158 try:
158 try:
159 return func()
159 return func()
160 except: # re-raises
160 except: # re-raises
161 ui.traceback()
161 ui.traceback()
162 raise
162 raise
163 # Global exception handling, alphabetically
163 # Global exception handling, alphabetically
164 # Mercurial-specific first, followed by built-in and library exceptions
164 # Mercurial-specific first, followed by built-in and library exceptions
165 except error.LockHeld as inst:
165 except error.LockHeld as inst:
166 if inst.errno == errno.ETIMEDOUT:
166 if inst.errno == errno.ETIMEDOUT:
167 reason = _('timed out waiting for lock held by %r') % inst.locker
167 reason = _('timed out waiting for lock held by %r') % inst.locker
168 else:
168 else:
169 reason = _('lock held by %r') % inst.locker
169 reason = _('lock held by %r') % inst.locker
170 ui.warn(_("abort: %s: %s\n")
170 ui.warn(_("abort: %s: %s\n")
171 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
171 % (inst.desc or stringutil.forcebytestr(inst.filename), reason))
172 if not inst.locker:
172 if not inst.locker:
173 ui.warn(_("(lock might be very busy)\n"))
173 ui.warn(_("(lock might be very busy)\n"))
174 except error.LockUnavailable as inst:
174 except error.LockUnavailable as inst:
175 ui.warn(_("abort: could not lock %s: %s\n") %
175 ui.warn(_("abort: could not lock %s: %s\n") %
176 (inst.desc or stringutil.forcebytestr(inst.filename),
176 (inst.desc or stringutil.forcebytestr(inst.filename),
177 encoding.strtolocal(inst.strerror)))
177 encoding.strtolocal(inst.strerror)))
178 except error.OutOfBandError as inst:
178 except error.OutOfBandError as inst:
179 if inst.args:
179 if inst.args:
180 msg = _("abort: remote error:\n")
180 msg = _("abort: remote error:\n")
181 else:
181 else:
182 msg = _("abort: remote error\n")
182 msg = _("abort: remote error\n")
183 ui.warn(msg)
183 ui.warn(msg)
184 if inst.args:
184 if inst.args:
185 ui.warn(''.join(inst.args))
185 ui.warn(''.join(inst.args))
186 if inst.hint:
186 if inst.hint:
187 ui.warn('(%s)\n' % inst.hint)
187 ui.warn('(%s)\n' % inst.hint)
188 except error.RepoError as inst:
188 except error.RepoError as inst:
189 ui.warn(_("abort: %s!\n") % inst)
189 ui.warn(_("abort: %s!\n") % inst)
190 if inst.hint:
190 if inst.hint:
191 ui.warn(_("(%s)\n") % inst.hint)
191 ui.warn(_("(%s)\n") % inst.hint)
192 except error.ResponseError as inst:
192 except error.ResponseError as inst:
193 ui.warn(_("abort: %s") % inst.args[0])
193 ui.warn(_("abort: %s") % inst.args[0])
194 msg = inst.args[1]
194 msg = inst.args[1]
195 if isinstance(msg, type(u'')):
195 if isinstance(msg, type(u'')):
196 msg = pycompat.sysbytes(msg)
196 msg = pycompat.sysbytes(msg)
197 if not isinstance(msg, bytes):
197 if not isinstance(msg, bytes):
198 ui.warn(" %r\n" % (msg,))
198 ui.warn(" %r\n" % (msg,))
199 elif not msg:
199 elif not msg:
200 ui.warn(_(" empty string\n"))
200 ui.warn(_(" empty string\n"))
201 else:
201 else:
202 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
202 ui.warn("\n%r\n" % stringutil.ellipsis(msg))
203 except error.CensoredNodeError as inst:
203 except error.CensoredNodeError as inst:
204 ui.warn(_("abort: file censored %s!\n") % inst)
204 ui.warn(_("abort: file censored %s!\n") % inst)
205 except error.RevlogError as inst:
205 except error.RevlogError as inst:
206 ui.warn(_("abort: %s!\n") % inst)
206 ui.warn(_("abort: %s!\n") % inst)
207 except error.InterventionRequired as inst:
207 except error.InterventionRequired as inst:
208 ui.warn("%s\n" % inst)
208 ui.warn("%s\n" % inst)
209 if inst.hint:
209 if inst.hint:
210 ui.warn(_("(%s)\n") % inst.hint)
210 ui.warn(_("(%s)\n") % inst.hint)
211 return 1
211 return 1
212 except error.WdirUnsupported:
212 except error.WdirUnsupported:
213 ui.warn(_("abort: working directory revision cannot be specified\n"))
213 ui.warn(_("abort: working directory revision cannot be specified\n"))
214 except error.Abort as inst:
214 except error.Abort as inst:
215 ui.warn(_("abort: %s\n") % inst)
215 ui.warn(_("abort: %s\n") % inst)
216 if inst.hint:
216 if inst.hint:
217 ui.warn(_("(%s)\n") % inst.hint)
217 ui.warn(_("(%s)\n") % inst.hint)
218 except ImportError as inst:
218 except ImportError as inst:
219 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
219 ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
220 m = stringutil.forcebytestr(inst).split()[-1]
220 m = stringutil.forcebytestr(inst).split()[-1]
221 if m in "mpatch bdiff".split():
221 if m in "mpatch bdiff".split():
222 ui.warn(_("(did you forget to compile extensions?)\n"))
222 ui.warn(_("(did you forget to compile extensions?)\n"))
223 elif m in "zlib".split():
223 elif m in "zlib".split():
224 ui.warn(_("(is your Python install correct?)\n"))
224 ui.warn(_("(is your Python install correct?)\n"))
225 except IOError as inst:
225 except IOError as inst:
226 if util.safehasattr(inst, "code"):
226 if util.safehasattr(inst, "code"):
227 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
227 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
228 elif util.safehasattr(inst, "reason"):
228 elif util.safehasattr(inst, "reason"):
229 try: # usually it is in the form (errno, strerror)
229 try: # usually it is in the form (errno, strerror)
230 reason = inst.reason.args[1]
230 reason = inst.reason.args[1]
231 except (AttributeError, IndexError):
231 except (AttributeError, IndexError):
232 # it might be anything, for example a string
232 # it might be anything, for example a string
233 reason = inst.reason
233 reason = inst.reason
234 if isinstance(reason, unicode):
234 if isinstance(reason, unicode):
235 # SSLError of Python 2.7.9 contains a unicode
235 # SSLError of Python 2.7.9 contains a unicode
236 reason = encoding.unitolocal(reason)
236 reason = encoding.unitolocal(reason)
237 ui.warn(_("abort: error: %s\n") % reason)
237 ui.warn(_("abort: error: %s\n") % reason)
238 elif (util.safehasattr(inst, "args")
238 elif (util.safehasattr(inst, "args")
239 and inst.args and inst.args[0] == errno.EPIPE):
239 and inst.args and inst.args[0] == errno.EPIPE):
240 pass
240 pass
241 elif getattr(inst, "strerror", None):
241 elif getattr(inst, "strerror", None):
242 if getattr(inst, "filename", None):
242 if getattr(inst, "filename", None):
243 ui.warn(_("abort: %s: %s\n") % (
243 ui.warn(_("abort: %s: %s\n") % (
244 encoding.strtolocal(inst.strerror),
244 encoding.strtolocal(inst.strerror),
245 stringutil.forcebytestr(inst.filename)))
245 stringutil.forcebytestr(inst.filename)))
246 else:
246 else:
247 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
247 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
248 else:
248 else:
249 raise
249 raise
250 except OSError as inst:
250 except OSError as inst:
251 if getattr(inst, "filename", None) is not None:
251 if getattr(inst, "filename", None) is not None:
252 ui.warn(_("abort: %s: '%s'\n") % (
252 ui.warn(_("abort: %s: '%s'\n") % (
253 encoding.strtolocal(inst.strerror),
253 encoding.strtolocal(inst.strerror),
254 stringutil.forcebytestr(inst.filename)))
254 stringutil.forcebytestr(inst.filename)))
255 else:
255 else:
256 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
256 ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
257 except MemoryError:
257 except MemoryError:
258 ui.warn(_("abort: out of memory\n"))
258 ui.warn(_("abort: out of memory\n"))
259 except SystemExit as inst:
259 except SystemExit as inst:
260 # Commands shouldn't sys.exit directly, but give a return code.
260 # Commands shouldn't sys.exit directly, but give a return code.
261 # Just in case catch this and and pass exit code to caller.
261 # Just in case catch this and and pass exit code to caller.
262 return inst.code
262 return inst.code
263 except socket.error as inst:
263 except socket.error as inst:
264 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
264 ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))
265
265
266 return -1
266 return -1
267
267
268 def checknewlabel(repo, lbl, kind):
268 def checknewlabel(repo, lbl, kind):
269 # Do not use the "kind" parameter in ui output.
269 # Do not use the "kind" parameter in ui output.
270 # It makes strings difficult to translate.
270 # It makes strings difficult to translate.
271 if lbl in ['tip', '.', 'null']:
271 if lbl in ['tip', '.', 'null']:
272 raise error.Abort(_("the name '%s' is reserved") % lbl)
272 raise error.Abort(_("the name '%s' is reserved") % lbl)
273 for c in (':', '\0', '\n', '\r'):
273 for c in (':', '\0', '\n', '\r'):
274 if c in lbl:
274 if c in lbl:
275 raise error.Abort(
275 raise error.Abort(
276 _("%r cannot be used in a name") % pycompat.bytestr(c))
276 _("%r cannot be used in a name") % pycompat.bytestr(c))
277 try:
277 try:
278 int(lbl)
278 int(lbl)
279 raise error.Abort(_("cannot use an integer as a name"))
279 raise error.Abort(_("cannot use an integer as a name"))
280 except ValueError:
280 except ValueError:
281 pass
281 pass
282 if lbl.strip() != lbl:
282 if lbl.strip() != lbl:
283 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
283 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
284
284
285 def checkfilename(f):
285 def checkfilename(f):
286 '''Check that the filename f is an acceptable filename for a tracked file'''
286 '''Check that the filename f is an acceptable filename for a tracked file'''
287 if '\r' in f or '\n' in f:
287 if '\r' in f or '\n' in f:
288 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
288 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)
289
289
290 def checkportable(ui, f):
290 def checkportable(ui, f):
291 '''Check if filename f is portable and warn or abort depending on config'''
291 '''Check if filename f is portable and warn or abort depending on config'''
292 checkfilename(f)
292 checkfilename(f)
293 abort, warn = checkportabilityalert(ui)
293 abort, warn = checkportabilityalert(ui)
294 if abort or warn:
294 if abort or warn:
295 msg = util.checkwinfilename(f)
295 msg = util.checkwinfilename(f)
296 if msg:
296 if msg:
297 msg = "%s: %s" % (msg, procutil.shellquote(f))
297 msg = "%s: %s" % (msg, procutil.shellquote(f))
298 if abort:
298 if abort:
299 raise error.Abort(msg)
299 raise error.Abort(msg)
300 ui.warn(_("warning: %s\n") % msg)
300 ui.warn(_("warning: %s\n") % msg)
301
301
302 def checkportabilityalert(ui):
302 def checkportabilityalert(ui):
303 '''check if the user's config requests nothing, a warning, or abort for
303 '''check if the user's config requests nothing, a warning, or abort for
304 non-portable filenames'''
304 non-portable filenames'''
305 val = ui.config('ui', 'portablefilenames')
305 val = ui.config('ui', 'portablefilenames')
306 lval = val.lower()
306 lval = val.lower()
307 bval = stringutil.parsebool(val)
307 bval = stringutil.parsebool(val)
308 abort = pycompat.iswindows or lval == 'abort'
308 abort = pycompat.iswindows or lval == 'abort'
309 warn = bval or lval == 'warn'
309 warn = bval or lval == 'warn'
310 if bval is None and not (warn or abort or lval == 'ignore'):
310 if bval is None and not (warn or abort or lval == 'ignore'):
311 raise error.ConfigError(
311 raise error.ConfigError(
312 _("ui.portablefilenames value is invalid ('%s')") % val)
312 _("ui.portablefilenames value is invalid ('%s')") % val)
313 return abort, warn
313 return abort, warn
314
314
315 class casecollisionauditor(object):
315 class casecollisionauditor(object):
316 def __init__(self, ui, abort, dirstate):
316 def __init__(self, ui, abort, dirstate):
317 self._ui = ui
317 self._ui = ui
318 self._abort = abort
318 self._abort = abort
319 allfiles = '\0'.join(dirstate._map)
319 allfiles = '\0'.join(dirstate._map)
320 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
320 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
321 self._dirstate = dirstate
321 self._dirstate = dirstate
322 # The purpose of _newfiles is so that we don't complain about
322 # The purpose of _newfiles is so that we don't complain about
323 # case collisions if someone were to call this object with the
323 # case collisions if someone were to call this object with the
324 # same filename twice.
324 # same filename twice.
325 self._newfiles = set()
325 self._newfiles = set()
326
326
327 def __call__(self, f):
327 def __call__(self, f):
328 if f in self._newfiles:
328 if f in self._newfiles:
329 return
329 return
330 fl = encoding.lower(f)
330 fl = encoding.lower(f)
331 if fl in self._loweredfiles and f not in self._dirstate:
331 if fl in self._loweredfiles and f not in self._dirstate:
332 msg = _('possible case-folding collision for %s') % f
332 msg = _('possible case-folding collision for %s') % f
333 if self._abort:
333 if self._abort:
334 raise error.Abort(msg)
334 raise error.Abort(msg)
335 self._ui.warn(_("warning: %s\n") % msg)
335 self._ui.warn(_("warning: %s\n") % msg)
336 self._loweredfiles.add(fl)
336 self._loweredfiles.add(fl)
337 self._newfiles.add(f)
337 self._newfiles.add(f)
338
338
339 def filteredhash(repo, maxrev):
339 def filteredhash(repo, maxrev):
340 """build hash of filtered revisions in the current repoview.
340 """build hash of filtered revisions in the current repoview.
341
341
342 Multiple caches perform up-to-date validation by checking that the
342 Multiple caches perform up-to-date validation by checking that the
343 tiprev and tipnode stored in the cache file match the current repository.
343 tiprev and tipnode stored in the cache file match the current repository.
344 However, this is not sufficient for validating repoviews because the set
344 However, this is not sufficient for validating repoviews because the set
345 of revisions in the view may change without the repository tiprev and
345 of revisions in the view may change without the repository tiprev and
346 tipnode changing.
346 tipnode changing.
347
347
348 This function hashes all the revs filtered from the view and returns
348 This function hashes all the revs filtered from the view and returns
349 that SHA-1 digest.
349 that SHA-1 digest.
350 """
350 """
351 cl = repo.changelog
351 cl = repo.changelog
352 if not cl.filteredrevs:
352 if not cl.filteredrevs:
353 return None
353 return None
354 key = None
354 key = None
355 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
355 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
356 if revs:
356 if revs:
357 s = hashlib.sha1()
357 s = hashlib.sha1()
358 for rev in revs:
358 for rev in revs:
359 s.update('%d;' % rev)
359 s.update('%d;' % rev)
360 key = s.digest()
360 key = s.digest()
361 return key
361 return key
362
362
363 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
363 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
364 '''yield every hg repository under path, always recursively.
364 '''yield every hg repository under path, always recursively.
365 The recurse flag will only control recursion into repo working dirs'''
365 The recurse flag will only control recursion into repo working dirs'''
366 def errhandler(err):
366 def errhandler(err):
367 if err.filename == path:
367 if err.filename == path:
368 raise err
368 raise err
369 samestat = getattr(os.path, 'samestat', None)
369 samestat = getattr(os.path, 'samestat', None)
370 if followsym and samestat is not None:
370 if followsym and samestat is not None:
371 def adddir(dirlst, dirname):
371 def adddir(dirlst, dirname):
372 dirstat = os.stat(dirname)
372 dirstat = os.stat(dirname)
373 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
373 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
374 if not match:
374 if not match:
375 dirlst.append(dirstat)
375 dirlst.append(dirstat)
376 return not match
376 return not match
377 else:
377 else:
378 followsym = False
378 followsym = False
379
379
380 if (seen_dirs is None) and followsym:
380 if (seen_dirs is None) and followsym:
381 seen_dirs = []
381 seen_dirs = []
382 adddir(seen_dirs, path)
382 adddir(seen_dirs, path)
383 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
383 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
384 dirs.sort()
384 dirs.sort()
385 if '.hg' in dirs:
385 if '.hg' in dirs:
386 yield root # found a repository
386 yield root # found a repository
387 qroot = os.path.join(root, '.hg', 'patches')
387 qroot = os.path.join(root, '.hg', 'patches')
388 if os.path.isdir(os.path.join(qroot, '.hg')):
388 if os.path.isdir(os.path.join(qroot, '.hg')):
389 yield qroot # we have a patch queue repo here
389 yield qroot # we have a patch queue repo here
390 if recurse:
390 if recurse:
391 # avoid recursing inside the .hg directory
391 # avoid recursing inside the .hg directory
392 dirs.remove('.hg')
392 dirs.remove('.hg')
393 else:
393 else:
394 dirs[:] = [] # don't descend further
394 dirs[:] = [] # don't descend further
395 elif followsym:
395 elif followsym:
396 newdirs = []
396 newdirs = []
397 for d in dirs:
397 for d in dirs:
398 fname = os.path.join(root, d)
398 fname = os.path.join(root, d)
399 if adddir(seen_dirs, fname):
399 if adddir(seen_dirs, fname):
400 if os.path.islink(fname):
400 if os.path.islink(fname):
401 for hgname in walkrepos(fname, True, seen_dirs):
401 for hgname in walkrepos(fname, True, seen_dirs):
402 yield hgname
402 yield hgname
403 else:
403 else:
404 newdirs.append(d)
404 newdirs.append(d)
405 dirs[:] = newdirs
405 dirs[:] = newdirs
406
406
407 def binnode(ctx):
407 def binnode(ctx):
408 """Return binary node id for a given basectx"""
408 """Return binary node id for a given basectx"""
409 node = ctx.node()
409 node = ctx.node()
410 if node is None:
410 if node is None:
411 return wdirid
411 return wdirid
412 return node
412 return node
413
413
414 def intrev(ctx):
414 def intrev(ctx):
415 """Return integer for a given basectx that can be used in comparison or
415 """Return integer for a given basectx that can be used in comparison or
416 arithmetic operation"""
416 arithmetic operation"""
417 rev = ctx.rev()
417 rev = ctx.rev()
418 if rev is None:
418 if rev is None:
419 return wdirrev
419 return wdirrev
420 return rev
420 return rev
421
421
def formatchangeid(ctx):
    """Render a changectx as '{rev}:{node|formatnode}', matching the default
    template of logcmdutil.changesettemplater."""
    ui = ctx.repo().ui
    return formatrevnode(ui, intrev(ctx), binnode(ctx))
427
427
def formatrevnode(ui, rev, node):
    """Render the given rev/node pair; full hash in debug mode, short
    hash otherwise."""
    hexfunc = hex if ui.debugflag else short
    return '%d:%s' % (rev, hexfunc(node))
435
435
def resolvepartialhexnodeid(repo, prefix):
    """Resolve a hex nodeid prefix to a full binary node, or None.

    Returns None when no node matches the prefix. The matched node must be
    visible in ``repo``; the rev() lookup below raises if it is filtered.
    Ambiguous prefixes presumably raise from _partialmatch — confirm against
    changelog implementation.
    """
    # Uses unfiltered repo because it's faster when the prefix is ambiguous.
    # This matches the "shortest" template function.
    node = repo.unfiltered().changelog._partialmatch(prefix)
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node
444
def isrevsymbol(repo, symbol):
    """Report whether ``symbol`` resolves to a revision in ``repo``."""
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True
442
451
def revsymbol(repo, symbol):
    """Return a context for the single revision named by ``symbol``.

    Like revsingle(), but only plain symbols are accepted: ".", "tip",
    "1234", "deadbeef", "my-bookmark" all work, whereas full revset
    expressions such as "max(public())" do not.
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    filteredexcs = (error.FilteredIndexError,
                    error.FilteredLookupError,
                    error.FilteredRepoLookupError)
    try:
        return repo[symbol]
    except filteredexcs:
        # Translate "filtered" failures into a friendlier lookup error.
        raise _filterederror(repo, symbol)
459
468
def _filterederror(repo, changeid):
    """Build the exception to raise for a filtered changeid.

    Kept as a separate function so extensions (e.g. evolve) can experiment
    with alternative message variants.
    """
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # Check whether the changeset is obsolete; if so, enrich the message
    # with the reason that made it invisible.
    ctx = revsymbol(repo.unfiltered(), changeid)
    if ctx.obsolete():
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(msg, hint=hint)
484
493
def revsingle(repo, revspec, default='.', localalias=None):
    """Return the context for the last revision matched by ``revspec``,
    falling back to ``default`` when no spec is given."""
    if not revspec and revspec != 0:
        return repo[default]

    revs = revrange(repo, [revspec], localalias=localalias)
    if not revs:
        raise error.Abort(_('empty revision set'))
    return repo[revs.last()]
493
502
def _pairspec(revspec):
    # True when the top-level revset operator is some form of range
    # expression (and hence must yield a pair of revisions).
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')
497
506
def revpairnodes(repo, revs):
    """Deprecated wrapper around revpair() returning raw nodes."""
    repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
    first, second = revpair(repo, revs)
    return first.node(), second.node()
502
511
def revpair(repo, revs):
    """Resolve revset specs ``revs`` into a (first, second) context pair.

    With no specs, returns (working directory parent, working directory).
    The second context is the working directory context when the set
    collapses to a single revision and the spec was not an explicit range.
    Raises Abort for an empty set or an empty side of a range.
    """
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    # Pick endpoints cheaply when the smartset knows its ordering;
    # otherwise fall back to iteration order.
    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    # first == second with several specs can mean one side of the range
    # resolved to nothing; re-evaluate each spec individually to detect it.
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]
532
541
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using
    user-specified config options, such as revset aliases.

    The revsets in ``specs`` are executed via a chained ``OR`` expression;
    an empty ``specs`` yields an empty result. Plain integers are treated
    as revision numbers.

    The revsets are assumed to be formatted already; expand arguments with
    ``revsetlang.formatspec()`` before passing them in. A single revset is
    allowed.

    Returns a ``revset.abstractsmartset``, a list-like interface over
    integer revisions.
    """
    allspecs = [revsetlang.formatspec('rev(%d)', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
560
569
def meaningfulparents(repo, ctx):
    """Return the list of meaningful (or all, in debug mode) parents of ctx.

    A merge (two non-nullrev parents) keeps both parents. A single parent
    is only meaningful when it is not the immediately preceding revision.
    """
    ps = ctx.parents()
    if len(ps) > 1:
        return ps
    if repo.ui.debugflag:
        # debug output always shows both parent slots
        return [ps[0], repo['null']]
    return [] if ps[0].rev() >= intrev(ctx) - 1 else ps
576
585
def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume the shell has already done it.'''
    if not util.expandglobs:
        return list(pats)

    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # explicit pattern kind: pass through untouched
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # no match: keep the original pattern
            ret.append(kindpat)
    return ret
595
604
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    # cmdutil callers pass ("",) for "no patterns"
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        # NOTE: closes over 'm', which is only bound further down; the
        # matcher invokes this callback after that assignment has happened.
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # an always-matcher means the patterns added nothing; report none used
    if m.always():
        pats = []
    return m, pats
620
629
def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m
625
634
def matchall(repo):
    '''Return a matcher that efficiently matches every file in the repo.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.always(root, cwd)
629
638
def matchfiles(repo, files, badfn=None):
    '''Return a matcher that efficiently matches exactly these files.'''
    root = repo.root
    cwd = repo.getcwd()
    return matchmod.exact(root, cwd, files, badfn=badfn)
633
642
def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in
    followlines logic.

    A plain (kind-less) pattern is canonicalized directly; otherwise the
    pattern must match exactly one file in ``repo[rev]`` or ParseError(msg)
    is raised.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]
647
656
def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        # (an ancestor directory name occupied by a file or symlink);
        # only the first conflict on the path needs removing.
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    # A directory at the backup destination itself also conflicts.
    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
683
692
684 class _containsnode(object):
693 class _containsnode(object):
685 """proxy __contains__(node) to container.__contains__ which accepts revs"""
694 """proxy __contains__(node) to container.__contains__ which accepts revs"""
686
695
687 def __init__(self, repo, revcontainer):
696 def __init__(self, repo, revcontainer):
688 self._torev = repo.changelog.rev
697 self._torev = repo.changelog.rev
689 self._revcontains = revcontainer.__contains__
698 self._revcontains = revcontainer.__contains__
690
699
691 def __contains__(self, node):
700 def __contains__(self, node):
692 return self._revcontains(self._torev(node))
701 return self._revcontains(self._torev(node))
693
702
def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            # an explicit entry in 'moves' wins
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obssolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the node in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
787
796
def addremove(repo, matcher, prefix, opts=None):
    """Add new files and remove missing files matched by ``matcher``,
    recursing into subrepos when requested; detect renames when a
    'similarity' option is given. Returns 1 on any failure, else 0.
    """
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    # store as a 0..1 ratio for _findrenames
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        # only explicitly-named files produce warnings; all bad files
        # are recorded so the final pass can return failure
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                   badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    # a bad file that was named explicitly means overall failure
    for f in rejected:
        if f in m.files():
            return 1
    return ret
847
856
def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    # NOTE: the badfn lambda closes over 'rejected', which is bound on the
    # next line; that is fine because the lambda only runs later.
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # similarity is expected as a 0..1 ratio here
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    # a rejected file that was explicitly requested means failure
    for f in rejected:
        if f in m.files():
            return 1
    return 0
876
885
def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.

    Returns (added, unknown, deleted, removed, forgotten) lists of repo-root
    relative paths, classified from the dirstate state character and whether
    the file currently exists on disk (``st``).
    '''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            # untracked and path passes the auditor
            unknown.append(abs)
        elif dstate != 'r' and not st:
            # tracked but missing from disk
            deleted.append(abs)
        elif dstate == 'r' and st:
            # marked removed but present on disk again
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten
905
914
906 def _findrenames(repo, matcher, added, removed, similarity):
915 def _findrenames(repo, matcher, added, removed, similarity):
907 '''Find renames from removed files to added ones.'''
916 '''Find renames from removed files to added ones.'''
908 renames = {}
917 renames = {}
909 if similarity > 0:
918 if similarity > 0:
910 for old, new, score in similar.findrenames(repo, added, removed,
919 for old, new, score in similar.findrenames(repo, added, removed,
911 similarity):
920 similarity):
912 if (repo.ui.verbose or not matcher.exact(old)
921 if (repo.ui.verbose or not matcher.exact(old)
913 or not matcher.exact(new)):
922 or not matcher.exact(new)):
914 repo.ui.status(_('recording removal of %s as rename to %s '
923 repo.ui.status(_('recording removal of %s as rename to %s '
915 '(%d%% similar)\n') %
924 '(%d%% similar)\n') %
916 (matcher.rel(old), matcher.rel(new),
925 (matcher.rel(old), matcher.rel(new),
917 score * 100))
926 score * 100))
918 renames[new] = old
927 renames[new] = old
919 return renames
928 return renames
920
929
def _markchanges(repo, unknown, deleted, renames):
    '''Record working-copy changes under the wlock: forget the deleted
    files, add the unknown ones, and mark each entry of renames
    ({new: old}) as a copy.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
930
939
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Record in the dirstate the intent of copying src to dst.

    Depending on the state of src and dst this may be recorded as a plain
    add, as a copy, or (for dry runs or copy-back situations) not at all.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc:
        # copying a copy back onto its original name: just make sure dst
        # is tracked normally again
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
        return
    if ds[origsrc] == 'a' and origsrc == src:
        # source is only added, not committed, so there is no revision to
        # record copy metadata against
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
949
958
def readrequires(opener, supported):
    '''Read and parse .hg/requires via opener, returning the set of entries.

    Raises error.RequirementError when the file is corrupt or lists a
    feature missing from the supported set.'''
    requirements = set(opener.read("requires").splitlines())
    missings = [r for r in requirements if r not in supported]
    for r in missings:
        # an empty or non-alphanumeric-leading entry means the file itself
        # is damaged, which deserves a different message
        if not r or not r[0:1].isalnum():
            raise error.RequirementError(_(".hg/requires file is corrupt"))
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(sorted(missings)),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements
968
977
def writerequires(opener, requirements):
    """Write requirements to .hg/requires via opener, one sorted entry
    per line."""
    with opener('requires', 'w') as fp:
        for requirement in sorted(requirements):
            fp.write("%s\n" % requirement)
973
982
class filecachesubentry(object):
    """Tracks stat information for a single file path.

    _cacheable is tri-state: True/False once determined, None while the
    answer is still unknown (in which case we optimistically assume yes).
    """

    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None
        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()

    def refresh(self):
        """Re-stat the file, unless it is known to be uncacheable."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat-based caching can be trusted for this path."""
        if self._cacheable is None:
            # unknown yet: assume it is for now
            return True
        return self._cacheable

    def changed(self):
        """True if the file changed since the last stat (or can't be
        cached at all)."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not have known whether it was cacheable; decide now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """util.cachestat(path), returning None when the file is missing."""
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
1028
1037
class filecacheentry(object):
    """Aggregates filecachesubentry objects for several paths."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        '''true if any entry has changed'''
        return any(e.changed() for e in self._entries)

    def refresh(self):
        for e in self._entries:
            e.refresh()
1045
1054
class filecache(object):
    '''A property like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fallback to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        # relative paths whose stat info controls cache invalidation;
        # resolved to real paths via join() at access time
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        # decorator protocol: remember the wrapped function and key the
        # cache entries by its (byte-string) name
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # fast path: value already materialized in the instance dict.
        # Invariant: presence in obj.__dict__ implies a matching entry in
        # obj._filecache (see __set__).
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            # existing entry: recompute only when a tracked file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            # stat=False: the assigned value was not derived from the files,
            # so don't pretend we have fresh stat data for them
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        # only the materialized value is dropped; the _filecache entry
        # (stat info) is kept and will be consulted on next access
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)
1124
1133
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    # the spec for a source named X lives in the [extdata] config section
    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            # each record is "<revspec>[ <value>]"; a missing value maps
            # to the empty string
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        # always reap the subprocess and close the stream, even when the
        # parse loop raised
        if proc:
            proc.communicate()
        if src:
            src.close()
    # report a non-zero exit only after the output has been consumed, so
    # partial data does not mask the failure message
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1179
1188
1180 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1189 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1181 if lock is None:
1190 if lock is None:
1182 raise error.LockInheritanceContractViolation(
1191 raise error.LockInheritanceContractViolation(
1183 'lock can only be inherited while held')
1192 'lock can only be inherited while held')
1184 if environ is None:
1193 if environ is None:
1185 environ = {}
1194 environ = {}
1186 with lock.inherit() as locker:
1195 with lock.inherit() as locker:
1187 environ[envvar] = locker
1196 environ[envvar] = locker
1188 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1197 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1189
1198
def wlocksub(repo, cmd, *args, **kwargs):
    """Run cmd in a subprocess that may inherit repo's working-dir lock.

    Must be called while the wlock is held.  Accepts the same arguments as
    ui.system and returns the subprocess's exit code.
    """
    wlock = repo.currentwlock()
    return _locksub(repo, wlock, 'HG_WLOCK_LOCKER', cmd, *args, **kwargs)
1198
1207
def gdinitconfig(ui):
    """Report whether new repositories should be created with generaldelta."""
    # experimental config: format.generaldelta
    if ui.configbool('format', 'generaldelta'):
        return True
    return ui.configbool('format', 'usegeneraldelta')
1205
1214
def gddeltaconfig(ui):
    """Report whether incoming deltas should be optimised for generaldelta."""
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
1211
1220
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # 'keys' is accepted for backwards compatibility but unused
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Parse the file into a {key: value} dict.

        When 'firstlinenonkeyval' is true the first line is not parsed as
        a key=value pair; it is returned verbatim (minus the trailing
        newline) under the __firstline key.  Raises error.CorruptedState
        on malformed input.
        """
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                raise error.CorruptedState(_("empty simplekeyvalue file"))
            # we don't want to include '\n' in the __firstline
            result[self.firstlinekey] = lines[0][:-1]
            lines = lines[1:]

        try:
            # 'if line.strip()' skips lines that contain only '\n', which
            # a plain 'if line' would let through
            parsed = dict(line[:-1].split('=', 1)
                          for line in lines if line.strip())
            if self.firstlinekey in parsed:
                msg = _("%r can't be used as a key")
                raise error.CorruptedState(msg % self.firstlinekey)
            result.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return result

    def write(self, data, firstline=None):
        """Serialize the dict 'data' as key=value lines.

        Keys must be alphanumerical and start with a letter; values must
        not contain newlines (error.ProgrammingError otherwise).  If
        'firstline' is not None it is written verbatim before everything
        else, not in key=value form.
        """
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                raise error.ProgrammingError(
                    "key name '%s' is reserved" % self.firstlinekey)
            if not k[0:1].isalpha():
                raise error.ProgrammingError(
                    "keys must start with a letter in a key-value file")
            if not k.isalnum():
                raise error.ProgrammingError(
                    "invalid key name in a simple key-value file")
            if '\n' in v:
                raise error.ProgrammingError(
                    "invalid value in a simple key-value file")
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1280
1289
# transaction-name prefixes for which registersummarycallback reports
# newly obsoleted changesets
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

# transaction-name prefixes for which registersummarycallback reports the
# range of new changesets
_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before the
# command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1301
1310
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed

    Which summaries are registered depends on 'txnname' (matched against
    _reportobsoletedsource / _reportnewcssource prefixes) and on the
    repo's evolution configuration.
    """
    def txmatch(sources):
        # does this transaction's name start with any of the given prefixes?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # number the categories so callbacks fire in registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        # (display name, revset name) pairs for each instability kind
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable revisions of each kind, ignoring filtered revs
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot the counts now; the callback reports only the increase
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)
1385
1394
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a one-line summary of *nodes* as short hashes.

    All nodes are listed when there are at most *maxnumnodes* of them or
    when the ui is verbose; otherwise only the first *maxnumnodes* are
    shown, followed by a count of the rest.
    """
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1391
1400
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    # strip/repair transactions may legitimately leave transient extra
    # heads, so the check is skipped for them
    if desc in ('strip', 'repair'):
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) <= 1:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1406
1415
def wrapconvertsink(sink):
    """Hook point letting extensions wrap a convcmd.convertsink() sink.

    Called on the sink before it is used, whether or not the convert
    extension was formally loaded.  The default implementation returns
    the sink unchanged.
    """
    return sink
1412
1421
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # direct access only applies to filtered repos with the feature enabled
    if not repo.filtername:
        return repo
    if not repo.ui.configbool('experimental', 'directaccess'):
        return repo
    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # gather every hash-like symbol appearing in any parsable spec
    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue
        symbols.update(revsetlang.gethashlikesymbols(tree))
    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)
    if not revs:
        return repo

    if hiddentype == 'warn':
        unfiltered = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfiltered[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use new filtername to separate branch/tags cache until we can
    # disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)
1455
1464
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of hidden
    changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            # Valid revision numbers are 0..tiprev-1. The previous check
            # ('n <= tiprev') accepted the nonexistent revision
            # len(unficl) and could pin it, so numbers at or past the tip
            # now fall through to hash-prefix matching below, just like
            # any other out-of-range number.
            if n < tiprev:
                if not allowrevnums:
                    # symbol is a plain revnum but direct access by
                    # revision number is disabled
                    continue
                else:
                    if n not in cl:
                        # revnum exists unfiltered but is hidden
                        revs.add(n)
                    continue
        except ValueError:
            # not a number; treat as a potential hash prefix below
            pass

        try:
            s = pmatch(s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                # node exists unfiltered but is hidden
                revs.add(rev)

    return revs
General Comments 0
You need to be logged in to leave comments. Login now