changectx: extract explicit computechangesetfilesremoved method from context...
marmoute
r42937:53c07f08 default
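
This change replaces the inline loop in changectx.filesremoved() with a call to a
new scmutil helper. Judging from the lines removed below, the extracted function
presumably looks like the following sketch (the docstring and its exact placement
in scmutil.py are assumptions):

    def computechangesetfilesremoved(ctx):
        """compute the list of files removed in a changeset"""
        removed = []
        for f in ctx.files():
            if f not in ctx:
                removed.append(f)
        return removed
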
@@ -1,2584 +1,2579 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from . import (
26 from . import (
27 copies,
27 copies,
28 dagop,
28 dagop,
29 encoding,
29 encoding,
30 error,
30 error,
31 fileset,
31 fileset,
32 match as matchmod,
32 match as matchmod,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 scmutil,
39 scmutil,
40 sparse,
40 sparse,
41 subrepo,
41 subrepo,
42 subrepoutil,
42 subrepoutil,
43 util,
43 util,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 dateutil,
46 dateutil,
47 stringutil,
47 stringutil,
48 )
48 )
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51
51
52 class basectx(object):
52 class basectx(object):
53 """A basectx object represents the common logic for its children:
53 """A basectx object represents the common logic for its children:
54 changectx: read-only context that is already present in the repo,
54 changectx: read-only context that is already present in the repo,
55 workingctx: a context that represents the working directory and can
55 workingctx: a context that represents the working directory and can
56 be committed,
56 be committed,
57 memctx: a context that represents changes in-memory and can also
57 memctx: a context that represents changes in-memory and can also
58 be committed."""
58 be committed."""
59
59
60 def __init__(self, repo):
60 def __init__(self, repo):
61 self._repo = repo
61 self._repo = repo
62
62
63 def __bytes__(self):
63 def __bytes__(self):
64 return short(self.node())
64 return short(self.node())
65
65
66 __str__ = encoding.strmethod(__bytes__)
66 __str__ = encoding.strmethod(__bytes__)
67
67
68 def __repr__(self):
68 def __repr__(self):
69 return r"<%s %s>" % (type(self).__name__, str(self))
69 return r"<%s %s>" % (type(self).__name__, str(self))
70
70
71 def __eq__(self, other):
71 def __eq__(self, other):
72 try:
72 try:
73 return type(self) == type(other) and self._rev == other._rev
73 return type(self) == type(other) and self._rev == other._rev
74 except AttributeError:
74 except AttributeError:
75 return False
75 return False
76
76
77 def __ne__(self, other):
77 def __ne__(self, other):
78 return not (self == other)
78 return not (self == other)
79
79
80 def __contains__(self, key):
80 def __contains__(self, key):
81 return key in self._manifest
81 return key in self._manifest
82
82
83 def __getitem__(self, key):
83 def __getitem__(self, key):
84 return self.filectx(key)
84 return self.filectx(key)
85
85
86 def __iter__(self):
86 def __iter__(self):
87 return iter(self._manifest)
87 return iter(self._manifest)
88
88
89 def _buildstatusmanifest(self, status):
89 def _buildstatusmanifest(self, status):
90 """Builds a manifest that includes the given status results, if this is
90 """Builds a manifest that includes the given status results, if this is
91 a working copy context. For non-working copy contexts, it just returns
91 a working copy context. For non-working copy contexts, it just returns
92 the normal manifest."""
92 the normal manifest."""
93 return self.manifest()
93 return self.manifest()
94
94
95 def _matchstatus(self, other, match):
95 def _matchstatus(self, other, match):
96 """This internal method provides a way for child objects to override the
96 """This internal method provides a way for child objects to override the
97 match operator.
97 match operator.
98 """
98 """
99 return match
99 return match
100
100
101 def _buildstatus(self, other, s, match, listignored, listclean,
101 def _buildstatus(self, other, s, match, listignored, listclean,
102 listunknown):
102 listunknown):
103 """build a status with respect to another context"""
103 """build a status with respect to another context"""
104 # Load earliest manifest first for caching reasons. More specifically,
104 # Load earliest manifest first for caching reasons. More specifically,
105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
105 # if you have revisions 1000 and 1001, 1001 is probably stored as a
106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
106 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
107 # 1000 and cache it so that when you read 1001, we just need to apply a
107 # 1000 and cache it so that when you read 1001, we just need to apply a
108 # delta to what's in the cache. So that's one full reconstruction + one
108 # delta to what's in the cache. So that's one full reconstruction + one
109 # delta application.
109 # delta application.
110 mf2 = None
110 mf2 = None
111 if self.rev() is not None and self.rev() < other.rev():
111 if self.rev() is not None and self.rev() < other.rev():
112 mf2 = self._buildstatusmanifest(s)
112 mf2 = self._buildstatusmanifest(s)
113 mf1 = other._buildstatusmanifest(s)
113 mf1 = other._buildstatusmanifest(s)
114 if mf2 is None:
114 if mf2 is None:
115 mf2 = self._buildstatusmanifest(s)
115 mf2 = self._buildstatusmanifest(s)
116
116
117 modified, added = [], []
117 modified, added = [], []
118 removed = []
118 removed = []
119 clean = []
119 clean = []
120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
120 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
121 deletedset = set(deleted)
121 deletedset = set(deleted)
122 d = mf1.diff(mf2, match=match, clean=listclean)
122 d = mf1.diff(mf2, match=match, clean=listclean)
123 for fn, value in d.iteritems():
123 for fn, value in d.iteritems():
124 if fn in deletedset:
124 if fn in deletedset:
125 continue
125 continue
126 if value is None:
126 if value is None:
127 clean.append(fn)
127 clean.append(fn)
128 continue
128 continue
129 (node1, flag1), (node2, flag2) = value
129 (node1, flag1), (node2, flag2) = value
130 if node1 is None:
130 if node1 is None:
131 added.append(fn)
131 added.append(fn)
132 elif node2 is None:
132 elif node2 is None:
133 removed.append(fn)
133 removed.append(fn)
134 elif flag1 != flag2:
134 elif flag1 != flag2:
135 modified.append(fn)
135 modified.append(fn)
136 elif node2 not in wdirfilenodeids:
136 elif node2 not in wdirfilenodeids:
137 # When comparing files between two commits, we save time by
137 # When comparing files between two commits, we save time by
138 # not comparing the file contents when the nodeids differ.
138 # not comparing the file contents when the nodeids differ.
139 # Note that this means we incorrectly report a reverted change
139 # Note that this means we incorrectly report a reverted change
140 # to a file as a modification.
140 # to a file as a modification.
141 modified.append(fn)
141 modified.append(fn)
142 elif self[fn].cmp(other[fn]):
142 elif self[fn].cmp(other[fn]):
143 modified.append(fn)
143 modified.append(fn)
144 else:
144 else:
145 clean.append(fn)
145 clean.append(fn)
146
146
147 if removed:
147 if removed:
148 # need to filter files if they are already reported as removed
148 # need to filter files if they are already reported as removed
149 unknown = [fn for fn in unknown if fn not in mf1 and
149 unknown = [fn for fn in unknown if fn not in mf1 and
150 (not match or match(fn))]
150 (not match or match(fn))]
151 ignored = [fn for fn in ignored if fn not in mf1 and
151 ignored = [fn for fn in ignored if fn not in mf1 and
152 (not match or match(fn))]
152 (not match or match(fn))]
153 # if they're deleted, don't report them as removed
153 # if they're deleted, don't report them as removed
154 removed = [fn for fn in removed if fn not in deletedset]
154 removed = [fn for fn in removed if fn not in deletedset]
155
155
156 return scmutil.status(modified, added, removed, deleted, unknown,
156 return scmutil.status(modified, added, removed, deleted, unknown,
157 ignored, clean)
157 ignored, clean)
158
158
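# Hedged illustration of the ordering comment above (not part of the diff;
# `mfl` is a manifestlog, node1000/node1001 are assumed manifest nodes):
#   m1000 = mfl[node1000].read()  # one full reconstruction, now cached
#   m1001 = mfl[node1001].read()  # applies a single delta on top of the cache
# Reading in ascending revision order therefore costs one reconstruction
# plus one delta application instead of two reconstructions.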
159 @propertycache
159 @propertycache
160 def substate(self):
160 def substate(self):
161 return subrepoutil.state(self, self._repo.ui)
161 return subrepoutil.state(self, self._repo.ui)
162
162
163 def subrev(self, subpath):
163 def subrev(self, subpath):
164 return self.substate[subpath][1]
164 return self.substate[subpath][1]
165
165
166 def rev(self):
166 def rev(self):
167 return self._rev
167 return self._rev
168 def node(self):
168 def node(self):
169 return self._node
169 return self._node
170 def hex(self):
170 def hex(self):
171 return hex(self.node())
171 return hex(self.node())
172 def manifest(self):
172 def manifest(self):
173 return self._manifest
173 return self._manifest
174 def manifestctx(self):
174 def manifestctx(self):
175 return self._manifestctx
175 return self._manifestctx
176 def repo(self):
176 def repo(self):
177 return self._repo
177 return self._repo
178 def phasestr(self):
178 def phasestr(self):
179 return phases.phasenames[self.phase()]
179 return phases.phasenames[self.phase()]
180 def mutable(self):
180 def mutable(self):
181 return self.phase() > phases.public
181 return self.phase() > phases.public
182
182
183 def matchfileset(self, expr, badfn=None):
183 def matchfileset(self, expr, badfn=None):
184 return fileset.match(self, expr, badfn=badfn)
184 return fileset.match(self, expr, badfn=badfn)
185
185
186 def obsolete(self):
186 def obsolete(self):
187 """True if the changeset is obsolete"""
187 """True if the changeset is obsolete"""
188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
188 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
189
189
190 def extinct(self):
190 def extinct(self):
191 """True if the changeset is extinct"""
191 """True if the changeset is extinct"""
192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
192 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
193
193
194 def orphan(self):
194 def orphan(self):
195 """True if the changeset is not obsolete, but its ancestor is"""
195 """True if the changeset is not obsolete, but its ancestor is"""
196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
196 return self.rev() in obsmod.getrevs(self._repo, 'orphan')
197
197
198 def phasedivergent(self):
198 def phasedivergent(self):
199 """True if the changeset tries to be a successor of a public changeset
199 """True if the changeset tries to be a successor of a public changeset
200
200
201 Only non-public and non-obsolete changesets may be phase-divergent.
201 Only non-public and non-obsolete changesets may be phase-divergent.
202 """
202 """
203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
203 return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')
204
204
205 def contentdivergent(self):
205 def contentdivergent(self):
206 """Is a successor of a changeset with multiple possible successor sets
206 """Is a successor of a changeset with multiple possible successor sets
207
207
208 Only non-public and non-obsolete changesets may be content-divergent.
208 Only non-public and non-obsolete changesets may be content-divergent.
209 """
209 """
210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
210 return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')
211
211
212 def isunstable(self):
212 def isunstable(self):
213 """True if the changeset is either orphan, phase-divergent or
213 """True if the changeset is either orphan, phase-divergent or
214 content-divergent"""
214 content-divergent"""
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
215 return self.orphan() or self.phasedivergent() or self.contentdivergent()
216
216
217 def instabilities(self):
217 def instabilities(self):
218 """return the list of instabilities affecting this changeset.
218 """return the list of instabilities affecting this changeset.
219
219
220 Instabilities are returned as strings. possible values are:
220 Instabilities are returned as strings. possible values are:
221 - orphan,
221 - orphan,
222 - phase-divergent,
222 - phase-divergent,
223 - content-divergent.
223 - content-divergent.
224 """
224 """
225 instabilities = []
225 instabilities = []
226 if self.orphan():
226 if self.orphan():
227 instabilities.append('orphan')
227 instabilities.append('orphan')
228 if self.phasedivergent():
228 if self.phasedivergent():
229 instabilities.append('phase-divergent')
229 instabilities.append('phase-divergent')
230 if self.contentdivergent():
230 if self.contentdivergent():
231 instabilities.append('content-divergent')
231 instabilities.append('content-divergent')
232 return instabilities
232 return instabilities
233
233
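# Hedged usage sketch (not part of the diff; `repo` is assumed to be an
# open repository object):
#   for inst in repo[b'tip'].instabilities():
#       repo.ui.warn(b'changeset is %s\n' % inst)
# yields b'orphan', b'phase-divergent' and/or b'content-divergent', per the
# docstring above.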
234 def parents(self):
234 def parents(self):
235 """return contexts for each parent changeset"""
235 """return contexts for each parent changeset"""
236 return self._parents
236 return self._parents
237
237
238 def p1(self):
238 def p1(self):
239 return self._parents[0]
239 return self._parents[0]
240
240
241 def p2(self):
241 def p2(self):
242 parents = self._parents
242 parents = self._parents
243 if len(parents) == 2:
243 if len(parents) == 2:
244 return parents[1]
244 return parents[1]
245 return self._repo[nullrev]
245 return self._repo[nullrev]
246
246
247 def _fileinfo(self, path):
247 def _fileinfo(self, path):
248 if r'_manifest' in self.__dict__:
248 if r'_manifest' in self.__dict__:
249 try:
249 try:
250 return self._manifest[path], self._manifest.flags(path)
250 return self._manifest[path], self._manifest.flags(path)
251 except KeyError:
251 except KeyError:
252 raise error.ManifestLookupError(self._node, path,
252 raise error.ManifestLookupError(self._node, path,
253 _('not found in manifest'))
253 _('not found in manifest'))
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
254 if r'_manifestdelta' in self.__dict__ or path in self.files():
255 if path in self._manifestdelta:
255 if path in self._manifestdelta:
256 return (self._manifestdelta[path],
256 return (self._manifestdelta[path],
257 self._manifestdelta.flags(path))
257 self._manifestdelta.flags(path))
258 mfl = self._repo.manifestlog
258 mfl = self._repo.manifestlog
259 try:
259 try:
260 node, flag = mfl[self._changeset.manifest].find(path)
260 node, flag = mfl[self._changeset.manifest].find(path)
261 except KeyError:
261 except KeyError:
262 raise error.ManifestLookupError(self._node, path,
262 raise error.ManifestLookupError(self._node, path,
263 _('not found in manifest'))
263 _('not found in manifest'))
264
264
265 return node, flag
265 return node, flag
266
266
267 def filenode(self, path):
267 def filenode(self, path):
268 return self._fileinfo(path)[0]
268 return self._fileinfo(path)[0]
269
269
270 def flags(self, path):
270 def flags(self, path):
271 try:
271 try:
272 return self._fileinfo(path)[1]
272 return self._fileinfo(path)[1]
273 except error.LookupError:
273 except error.LookupError:
274 return ''
274 return ''
275
275
276 @propertycache
276 @propertycache
277 def _copies(self):
277 def _copies(self):
278 return copies.computechangesetcopies(self)
278 return copies.computechangesetcopies(self)
279 def p1copies(self):
279 def p1copies(self):
280 return self._copies[0]
280 return self._copies[0]
281 def p2copies(self):
281 def p2copies(self):
282 return self._copies[1]
282 return self._copies[1]
283
283
284 def sub(self, path, allowcreate=True):
284 def sub(self, path, allowcreate=True):
285 '''return a subrepo for the stored revision of path, never wdir()'''
285 '''return a subrepo for the stored revision of path, never wdir()'''
286 return subrepo.subrepo(self, path, allowcreate=allowcreate)
286 return subrepo.subrepo(self, path, allowcreate=allowcreate)
287
287
288 def nullsub(self, path, pctx):
288 def nullsub(self, path, pctx):
289 return subrepo.nullsubrepo(self, path, pctx)
289 return subrepo.nullsubrepo(self, path, pctx)
290
290
291 def workingsub(self, path):
291 def workingsub(self, path):
292 '''return a subrepo for the stored revision, or wdir if this is a wdir
292 '''return a subrepo for the stored revision, or wdir if this is a wdir
293 context.
293 context.
294 '''
294 '''
295 return subrepo.subrepo(self, path, allowwdir=True)
295 return subrepo.subrepo(self, path, allowwdir=True)
296
296
297 def match(self, pats=None, include=None, exclude=None, default='glob',
297 def match(self, pats=None, include=None, exclude=None, default='glob',
298 listsubrepos=False, badfn=None):
298 listsubrepos=False, badfn=None):
299 r = self._repo
299 r = self._repo
300 return matchmod.match(r.root, r.getcwd(), pats,
300 return matchmod.match(r.root, r.getcwd(), pats,
301 include, exclude, default,
301 include, exclude, default,
302 auditor=r.nofsauditor, ctx=self,
302 auditor=r.nofsauditor, ctx=self,
303 listsubrepos=listsubrepos, badfn=badfn)
303 listsubrepos=listsubrepos, badfn=badfn)
304
304
305 def diff(self, ctx2=None, match=None, changes=None, opts=None,
305 def diff(self, ctx2=None, match=None, changes=None, opts=None,
306 losedatafn=None, pathfn=None, copy=None,
306 losedatafn=None, pathfn=None, copy=None,
307 copysourcematch=None, hunksfilterfn=None):
307 copysourcematch=None, hunksfilterfn=None):
308 """Returns a diff generator for the given contexts and matcher"""
308 """Returns a diff generator for the given contexts and matcher"""
309 if ctx2 is None:
309 if ctx2 is None:
310 ctx2 = self.p1()
310 ctx2 = self.p1()
311 if ctx2 is not None:
311 if ctx2 is not None:
312 ctx2 = self._repo[ctx2]
312 ctx2 = self._repo[ctx2]
313 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
313 return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
314 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
314 opts=opts, losedatafn=losedatafn, pathfn=pathfn,
315 copy=copy, copysourcematch=copysourcematch,
315 copy=copy, copysourcematch=copysourcematch,
316 hunksfilterfn=hunksfilterfn)
316 hunksfilterfn=hunksfilterfn)
317
317
318 def dirs(self):
318 def dirs(self):
319 return self._manifest.dirs()
319 return self._manifest.dirs()
320
320
321 def hasdir(self, dir):
321 def hasdir(self, dir):
322 return self._manifest.hasdir(dir)
322 return self._manifest.hasdir(dir)
323
323
324 def status(self, other=None, match=None, listignored=False,
324 def status(self, other=None, match=None, listignored=False,
325 listclean=False, listunknown=False, listsubrepos=False):
325 listclean=False, listunknown=False, listsubrepos=False):
326 """return status of files between two nodes or node and working
326 """return status of files between two nodes or node and working
327 directory.
327 directory.
328
328
329 If other is None, compare this node with working directory.
329 If other is None, compare this node with working directory.
330
330
331 returns (modified, added, removed, deleted, unknown, ignored, clean)
331 returns (modified, added, removed, deleted, unknown, ignored, clean)
332 """
332 """
333
333
334 ctx1 = self
334 ctx1 = self
335 ctx2 = self._repo[other]
335 ctx2 = self._repo[other]
336
336
337 # This next code block is, admittedly, fragile logic that tests for
337 # This next code block is, admittedly, fragile logic that tests for
338 # reversing the contexts and wouldn't need to exist if it weren't for
338 # reversing the contexts and wouldn't need to exist if it weren't for
339 # the fast (and common) code path of comparing the working directory
339 # the fast (and common) code path of comparing the working directory
340 # with its first parent.
340 # with its first parent.
341 #
341 #
342 # What we're aiming for here is the ability to call:
342 # What we're aiming for here is the ability to call:
343 #
343 #
344 # workingctx.status(parentctx)
344 # workingctx.status(parentctx)
345 #
345 #
346 # If we always built the manifest for each context and compared those,
346 # If we always built the manifest for each context and compared those,
347 # then we'd be done. But the special case of the above call means we
347 # then we'd be done. But the special case of the above call means we
348 # just copy the manifest of the parent.
348 # just copy the manifest of the parent.
349 reversed = False
349 reversed = False
350 if (not isinstance(ctx1, changectx)
350 if (not isinstance(ctx1, changectx)
351 and isinstance(ctx2, changectx)):
351 and isinstance(ctx2, changectx)):
352 reversed = True
352 reversed = True
353 ctx1, ctx2 = ctx2, ctx1
353 ctx1, ctx2 = ctx2, ctx1
354
354
355 match = self._repo.narrowmatch(match)
355 match = self._repo.narrowmatch(match)
356 match = ctx2._matchstatus(ctx1, match)
356 match = ctx2._matchstatus(ctx1, match)
357 r = scmutil.status([], [], [], [], [], [], [])
357 r = scmutil.status([], [], [], [], [], [], [])
358 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
358 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
359 listunknown)
359 listunknown)
360
360
361 if reversed:
361 if reversed:
362 # Reverse added and removed. Clear deleted, unknown and ignored as
362 # Reverse added and removed. Clear deleted, unknown and ignored as
363 # these make no sense to reverse.
363 # these make no sense to reverse.
364 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
364 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
365 r.clean)
365 r.clean)
366
366
367 if listsubrepos:
367 if listsubrepos:
368 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
368 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
369 try:
369 try:
370 rev2 = ctx2.subrev(subpath)
370 rev2 = ctx2.subrev(subpath)
371 except KeyError:
371 except KeyError:
372 # A subrepo that existed in node1 was deleted between
372 # A subrepo that existed in node1 was deleted between
373 # node1 and node2 (inclusive). Thus, ctx2's substate
373 # node1 and node2 (inclusive). Thus, ctx2's substate
374 # won't contain that subpath. The best we can do is ignore it.
374 # won't contain that subpath. The best we can do is ignore it.
375 rev2 = None
375 rev2 = None
376 submatch = matchmod.subdirmatcher(subpath, match)
376 submatch = matchmod.subdirmatcher(subpath, match)
377 s = sub.status(rev2, match=submatch, ignored=listignored,
377 s = sub.status(rev2, match=submatch, ignored=listignored,
378 clean=listclean, unknown=listunknown,
378 clean=listclean, unknown=listunknown,
379 listsubrepos=True)
379 listsubrepos=True)
380 for rfiles, sfiles in zip(r, s):
380 for rfiles, sfiles in zip(r, s):
381 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
381 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
382
382
383 for l in r:
383 for l in r:
384 l.sort()
384 l.sort()
385
385
386 return r
386 return r
387
387
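# Hedged usage sketch of basectx.status() (not part of the diff; `repo` is
# assumed to be an open repository object):
#   st = repo[b'.'].status()              # '.' against the working directory
#   st = repo[b'.'].status(repo[b'tip'])  # '.' against another changeset
#   for f in st.modified:
#       repo.ui.write(b'M %s\n' % f)
# st is the scmutil.status tuple documented above: (modified, added,
# removed, deleted, unknown, ignored, clean).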
388 class changectx(basectx):
388 class changectx(basectx):
389 """A changecontext object makes access to data related to a particular
389 """A changecontext object makes access to data related to a particular
390 changeset convenient. It represents a read-only context already present in
390 changeset convenient. It represents a read-only context already present in
391 the repo."""
391 the repo."""
392 def __init__(self, repo, rev, node):
392 def __init__(self, repo, rev, node):
393 super(changectx, self).__init__(repo)
393 super(changectx, self).__init__(repo)
394 self._rev = rev
394 self._rev = rev
395 self._node = node
395 self._node = node
396
396
397 def __hash__(self):
397 def __hash__(self):
398 try:
398 try:
399 return hash(self._rev)
399 return hash(self._rev)
400 except AttributeError:
400 except AttributeError:
401 return id(self)
401 return id(self)
402
402
403 def __nonzero__(self):
403 def __nonzero__(self):
404 return self._rev != nullrev
404 return self._rev != nullrev
405
405
406 __bool__ = __nonzero__
406 __bool__ = __nonzero__
407
407
408 @propertycache
408 @propertycache
409 def _changeset(self):
409 def _changeset(self):
410 return self._repo.changelog.changelogrevision(self.rev())
410 return self._repo.changelog.changelogrevision(self.rev())
411
411
412 @propertycache
412 @propertycache
413 def _manifest(self):
413 def _manifest(self):
414 return self._manifestctx.read()
414 return self._manifestctx.read()
415
415
416 @property
416 @property
417 def _manifestctx(self):
417 def _manifestctx(self):
418 return self._repo.manifestlog[self._changeset.manifest]
418 return self._repo.manifestlog[self._changeset.manifest]
419
419
420 @propertycache
420 @propertycache
421 def _manifestdelta(self):
421 def _manifestdelta(self):
422 return self._manifestctx.readdelta()
422 return self._manifestctx.readdelta()
423
423
424 @propertycache
424 @propertycache
425 def _parents(self):
425 def _parents(self):
426 repo = self._repo
426 repo = self._repo
427 p1, p2 = repo.changelog.parentrevs(self._rev)
427 p1, p2 = repo.changelog.parentrevs(self._rev)
428 if p2 == nullrev:
428 if p2 == nullrev:
429 return [repo[p1]]
429 return [repo[p1]]
430 return [repo[p1], repo[p2]]
430 return [repo[p1], repo[p2]]
431
431
432 def changeset(self):
432 def changeset(self):
433 c = self._changeset
433 c = self._changeset
434 return (
434 return (
435 c.manifest,
435 c.manifest,
436 c.user,
436 c.user,
437 c.date,
437 c.date,
438 c.files,
438 c.files,
439 c.description,
439 c.description,
440 c.extra,
440 c.extra,
441 )
441 )
442 def manifestnode(self):
442 def manifestnode(self):
443 return self._changeset.manifest
443 return self._changeset.manifest
444
444
445 def user(self):
445 def user(self):
446 return self._changeset.user
446 return self._changeset.user
447 def date(self):
447 def date(self):
448 return self._changeset.date
448 return self._changeset.date
449 def files(self):
449 def files(self):
450 return self._changeset.files
450 return self._changeset.files
451 def filesmodified(self):
451 def filesmodified(self):
452 modified = set(self.files())
452 modified = set(self.files())
453 modified.difference_update(self.filesadded())
453 modified.difference_update(self.filesadded())
454 modified.difference_update(self.filesremoved())
454 modified.difference_update(self.filesremoved())
455 return sorted(modified)
455 return sorted(modified)
456 def filesadded(self):
456 def filesadded(self):
457 source = self._repo.ui.config('experimental', 'copies.read-from')
457 source = self._repo.ui.config('experimental', 'copies.read-from')
458 if (source == 'changeset-only' or
458 if (source == 'changeset-only' or
459 (source == 'compatibility' and
459 (source == 'compatibility' and
460 self._changeset.filesadded is not None)):
460 self._changeset.filesadded is not None)):
461 return self._changeset.filesadded or []
461 return self._changeset.filesadded or []
462 return scmutil.computechangesetfilesadded(self)
462 return scmutil.computechangesetfilesadded(self)
463 def filesremoved(self):
463 def filesremoved(self):
464 source = self._repo.ui.config('experimental', 'copies.read-from')
464 source = self._repo.ui.config('experimental', 'copies.read-from')
465 if (source == 'changeset-only' or
465 if (source == 'changeset-only' or
466 (source == 'compatibility' and
466 (source == 'compatibility' and
467 self._changeset.filesremoved is not None)):
467 self._changeset.filesremoved is not None)):
468 return self._changeset.filesremoved or []
468 return self._changeset.filesremoved or []
469
469 return scmutil.computechangesetfilesremoved(self)
470 removed = []
471 for f in self.files():
472 if f not in self:
473 removed.append(f)
474 return removed
475
470
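# Hedged configuration note (not part of the diff): the 'copies.read-from'
# value read above selects the source for filesadded()/filesremoved(), e.g.
# in an hgrc:
#   [experimental]
#   copies.read-from = compatibility
# 'changeset-only' trusts the data recorded in the changeset;
# 'compatibility' uses it when present and otherwise recomputes, which is
# the code path this commit extracts into scmutil.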
476 @propertycache
471 @propertycache
477 def _copies(self):
472 def _copies(self):
478 source = self._repo.ui.config('experimental', 'copies.read-from')
473 source = self._repo.ui.config('experimental', 'copies.read-from')
479 p1copies = self._changeset.p1copies
474 p1copies = self._changeset.p1copies
480 p2copies = self._changeset.p2copies
475 p2copies = self._changeset.p2copies
481 # If config says to get copy metadata only from changeset, then return
476 # If config says to get copy metadata only from changeset, then return
482 # that, defaulting to {} if there was no copy metadata.
477 # that, defaulting to {} if there was no copy metadata.
483 # In compatibility mode, we return copy data from the changeset if
478 # In compatibility mode, we return copy data from the changeset if
484 # it was recorded there, and otherwise we fall back to getting it from
479 # it was recorded there, and otherwise we fall back to getting it from
485 # the filelogs (below).
480 # the filelogs (below).
486 if (source == 'changeset-only' or
481 if (source == 'changeset-only' or
487 (source == 'compatibility' and p1copies is not None)):
482 (source == 'compatibility' and p1copies is not None)):
488 return p1copies or {}, p2copies or {}
483 return p1copies or {}, p2copies or {}
489
484
490 # Otherwise (config said to read only from filelog, or we are in
485 # Otherwise (config said to read only from filelog, or we are in
491 # compatibility mode and there is no data in the changeset), we get
486 # compatibility mode and there is no data in the changeset), we get
492 # the copy metadata from the filelogs.
487 # the copy metadata from the filelogs.
493 return super(changectx, self)._copies
488 return super(changectx, self)._copies
494 def description(self):
489 def description(self):
495 return self._changeset.description
490 return self._changeset.description
496 def branch(self):
491 def branch(self):
497 return encoding.tolocal(self._changeset.extra.get("branch"))
492 return encoding.tolocal(self._changeset.extra.get("branch"))
498 def closesbranch(self):
493 def closesbranch(self):
499 return 'close' in self._changeset.extra
494 return 'close' in self._changeset.extra
500 def extra(self):
495 def extra(self):
501 """Return a dict of extra information."""
496 """Return a dict of extra information."""
502 return self._changeset.extra
497 return self._changeset.extra
503 def tags(self):
498 def tags(self):
504 """Return a list of byte tag names"""
499 """Return a list of byte tag names"""
505 return self._repo.nodetags(self._node)
500 return self._repo.nodetags(self._node)
506 def bookmarks(self):
501 def bookmarks(self):
507 """Return a list of byte bookmark names."""
502 """Return a list of byte bookmark names."""
508 return self._repo.nodebookmarks(self._node)
503 return self._repo.nodebookmarks(self._node)
509 def phase(self):
504 def phase(self):
510 return self._repo._phasecache.phase(self._repo, self._rev)
505 return self._repo._phasecache.phase(self._repo, self._rev)
511 def hidden(self):
506 def hidden(self):
512 return self._rev in repoview.filterrevs(self._repo, 'visible')
507 return self._rev in repoview.filterrevs(self._repo, 'visible')
513
508
514 def isinmemory(self):
509 def isinmemory(self):
515 return False
510 return False
516
511
517 def children(self):
512 def children(self):
518 """return list of changectx contexts for each child changeset.
513 """return list of changectx contexts for each child changeset.
519
514
520 This returns only the immediate child changesets. Use descendants() to
515 This returns only the immediate child changesets. Use descendants() to
521 recursively walk children.
516 recursively walk children.
522 """
517 """
523 c = self._repo.changelog.children(self._node)
518 c = self._repo.changelog.children(self._node)
524 return [self._repo[x] for x in c]
519 return [self._repo[x] for x in c]
525
520
526 def ancestors(self):
521 def ancestors(self):
527 for a in self._repo.changelog.ancestors([self._rev]):
522 for a in self._repo.changelog.ancestors([self._rev]):
528 yield self._repo[a]
523 yield self._repo[a]
529
524
530 def descendants(self):
525 def descendants(self):
531 """Recursively yield all children of the changeset.
526 """Recursively yield all children of the changeset.
532
527
533 For just the immediate children, use children()
528 For just the immediate children, use children()
534 """
529 """
535 for d in self._repo.changelog.descendants([self._rev]):
530 for d in self._repo.changelog.descendants([self._rev]):
536 yield self._repo[d]
531 yield self._repo[d]
537
532
538 def filectx(self, path, fileid=None, filelog=None):
533 def filectx(self, path, fileid=None, filelog=None):
539 """get a file context from this changeset"""
534 """get a file context from this changeset"""
540 if fileid is None:
535 if fileid is None:
541 fileid = self.filenode(path)
536 fileid = self.filenode(path)
542 return filectx(self._repo, path, fileid=fileid,
537 return filectx(self._repo, path, fileid=fileid,
543 changectx=self, filelog=filelog)
538 changectx=self, filelog=filelog)
544
539
545 def ancestor(self, c2, warn=False):
540 def ancestor(self, c2, warn=False):
546 """return the "best" ancestor context of self and c2
541 """return the "best" ancestor context of self and c2
547
542
548 If there are multiple candidates, it will show a message and check
543 If there are multiple candidates, it will show a message and check
549 merge.preferancestor configuration before falling back to the
544 merge.preferancestor configuration before falling back to the
550 revlog ancestor."""
545 revlog ancestor."""
551 # deal with workingctxs
546 # deal with workingctxs
552 n2 = c2._node
547 n2 = c2._node
553 if n2 is None:
548 if n2 is None:
554 n2 = c2._parents[0]._node
549 n2 = c2._parents[0]._node
555 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
550 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
556 if not cahs:
551 if not cahs:
557 anc = nullid
552 anc = nullid
558 elif len(cahs) == 1:
553 elif len(cahs) == 1:
559 anc = cahs[0]
554 anc = cahs[0]
560 else:
555 else:
561 # experimental config: merge.preferancestor
556 # experimental config: merge.preferancestor
562 for r in self._repo.ui.configlist('merge', 'preferancestor'):
557 for r in self._repo.ui.configlist('merge', 'preferancestor'):
563 try:
558 try:
564 ctx = scmutil.revsymbol(self._repo, r)
559 ctx = scmutil.revsymbol(self._repo, r)
565 except error.RepoLookupError:
560 except error.RepoLookupError:
566 continue
561 continue
567 anc = ctx.node()
562 anc = ctx.node()
568 if anc in cahs:
563 if anc in cahs:
569 break
564 break
570 else:
565 else:
571 anc = self._repo.changelog.ancestor(self._node, n2)
566 anc = self._repo.changelog.ancestor(self._node, n2)
572 if warn:
567 if warn:
573 self._repo.ui.status(
568 self._repo.ui.status(
574 (_("note: using %s as ancestor of %s and %s\n") %
569 (_("note: using %s as ancestor of %s and %s\n") %
575 (short(anc), short(self._node), short(n2))) +
570 (short(anc), short(self._node), short(n2))) +
576 ''.join(_(" alternatively, use --config "
571 ''.join(_(" alternatively, use --config "
577 "merge.preferancestor=%s\n") %
572 "merge.preferancestor=%s\n") %
578 short(n) for n in sorted(cahs) if n != anc))
573 short(n) for n in sorted(cahs) if n != anc))
579 return self._repo[anc]
574 return self._repo[anc]
580
575
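# Hedged configuration note (not part of the diff): the experimental
# 'merge.preferancestor' list consulted above can be set in an hgrc, e.g.:
#   [merge]
#   preferancestor = 1234abcd
# so that this revision wins when several common-ancestor heads exist.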
581 def isancestorof(self, other):
576 def isancestorof(self, other):
582 """True if this changeset is an ancestor of other"""
577 """True if this changeset is an ancestor of other"""
583 return self._repo.changelog.isancestorrev(self._rev, other._rev)
578 return self._repo.changelog.isancestorrev(self._rev, other._rev)
584
579
585 def walk(self, match):
580 def walk(self, match):
586 '''Generates matching file names.'''
581 '''Generates matching file names.'''
587
582
588 # Wrap match.bad method to have message with nodeid
583 # Wrap match.bad method to have message with nodeid
589 def bad(fn, msg):
584 def bad(fn, msg):
590 # The manifest doesn't know about subrepos, so don't complain about
585 # The manifest doesn't know about subrepos, so don't complain about
591 # paths into valid subrepos.
586 # paths into valid subrepos.
592 if any(fn == s or fn.startswith(s + '/')
587 if any(fn == s or fn.startswith(s + '/')
593 for s in self.substate):
588 for s in self.substate):
594 return
589 return
595 match.bad(fn, _('no such file in rev %s') % self)
590 match.bad(fn, _('no such file in rev %s') % self)
596
591
597 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
592 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
598 return self._manifest.walk(m)
593 return self._manifest.walk(m)
599
594
600 def matches(self, match):
595 def matches(self, match):
601 return self.walk(match)
596 return self.walk(match)
602
597
603 class basefilectx(object):
598 class basefilectx(object):
604 """A filecontext object represents the common logic for its children:
599 """A filecontext object represents the common logic for its children:
605 filectx: read-only access to a filerevision that is already present
600 filectx: read-only access to a filerevision that is already present
606 in the repo,
601 in the repo,
607 workingfilectx: a filecontext that represents files from the working
602 workingfilectx: a filecontext that represents files from the working
608 directory,
603 directory,
609 memfilectx: a filecontext that represents files in-memory,
604 memfilectx: a filecontext that represents files in-memory,
610 """
605 """
611 @propertycache
606 @propertycache
612 def _filelog(self):
607 def _filelog(self):
613 return self._repo.file(self._path)
608 return self._repo.file(self._path)
614
609
615 @propertycache
610 @propertycache
616 def _changeid(self):
611 def _changeid(self):
617 if r'_changectx' in self.__dict__:
612 if r'_changectx' in self.__dict__:
618 return self._changectx.rev()
613 return self._changectx.rev()
619 elif r'_descendantrev' in self.__dict__:
614 elif r'_descendantrev' in self.__dict__:
620 # this file context was created from a revision with a known
615 # this file context was created from a revision with a known
621 # descendant, we can (lazily) correct for linkrev aliases
616 # descendant, we can (lazily) correct for linkrev aliases
622 return self._adjustlinkrev(self._descendantrev)
617 return self._adjustlinkrev(self._descendantrev)
623 else:
618 else:
624 return self._filelog.linkrev(self._filerev)
619 return self._filelog.linkrev(self._filerev)
625
620
626 @propertycache
621 @propertycache
627 def _filenode(self):
622 def _filenode(self):
628 if r'_fileid' in self.__dict__:
623 if r'_fileid' in self.__dict__:
629 return self._filelog.lookup(self._fileid)
624 return self._filelog.lookup(self._fileid)
630 else:
625 else:
631 return self._changectx.filenode(self._path)
626 return self._changectx.filenode(self._path)
632
627
633 @propertycache
628 @propertycache
634 def _filerev(self):
629 def _filerev(self):
635 return self._filelog.rev(self._filenode)
630 return self._filelog.rev(self._filenode)
636
631
637 @propertycache
632 @propertycache
638 def _repopath(self):
633 def _repopath(self):
639 return self._path
634 return self._path
640
635
641 def __nonzero__(self):
636 def __nonzero__(self):
642 try:
637 try:
643 self._filenode
638 self._filenode
644 return True
639 return True
645 except error.LookupError:
640 except error.LookupError:
646 # file is missing
641 # file is missing
647 return False
642 return False
648
643
649 __bool__ = __nonzero__
644 __bool__ = __nonzero__
650
645
651 def __bytes__(self):
646 def __bytes__(self):
652 try:
647 try:
653 return "%s@%s" % (self.path(), self._changectx)
648 return "%s@%s" % (self.path(), self._changectx)
654 except error.LookupError:
649 except error.LookupError:
655 return "%s@???" % self.path()
650 return "%s@???" % self.path()
656
651
657 __str__ = encoding.strmethod(__bytes__)
652 __str__ = encoding.strmethod(__bytes__)
658
653
659 def __repr__(self):
654 def __repr__(self):
660 return r"<%s %s>" % (type(self).__name__, str(self))
655 return r"<%s %s>" % (type(self).__name__, str(self))
661
656
662 def __hash__(self):
657 def __hash__(self):
663 try:
658 try:
664 return hash((self._path, self._filenode))
659 return hash((self._path, self._filenode))
665 except AttributeError:
660 except AttributeError:
666 return id(self)
661 return id(self)
667
662
668 def __eq__(self, other):
663 def __eq__(self, other):
669 try:
664 try:
670 return (type(self) == type(other) and self._path == other._path
665 return (type(self) == type(other) and self._path == other._path
671 and self._filenode == other._filenode)
666 and self._filenode == other._filenode)
672 except AttributeError:
667 except AttributeError:
673 return False
668 return False
674
669
675 def __ne__(self, other):
670 def __ne__(self, other):
676 return not (self == other)
671 return not (self == other)
677
672
678 def filerev(self):
673 def filerev(self):
679 return self._filerev
674 return self._filerev
680 def filenode(self):
675 def filenode(self):
681 return self._filenode
676 return self._filenode
682 @propertycache
677 @propertycache
683 def _flags(self):
678 def _flags(self):
684 return self._changectx.flags(self._path)
679 return self._changectx.flags(self._path)
685 def flags(self):
680 def flags(self):
686 return self._flags
681 return self._flags
687 def filelog(self):
682 def filelog(self):
688 return self._filelog
683 return self._filelog
689 def rev(self):
684 def rev(self):
690 return self._changeid
685 return self._changeid
691 def linkrev(self):
686 def linkrev(self):
692 return self._filelog.linkrev(self._filerev)
687 return self._filelog.linkrev(self._filerev)
693 def node(self):
688 def node(self):
694 return self._changectx.node()
689 return self._changectx.node()
695 def hex(self):
690 def hex(self):
696 return self._changectx.hex()
691 return self._changectx.hex()
697 def user(self):
692 def user(self):
698 return self._changectx.user()
693 return self._changectx.user()
699 def date(self):
694 def date(self):
700 return self._changectx.date()
695 return self._changectx.date()
701 def files(self):
696 def files(self):
702 return self._changectx.files()
697 return self._changectx.files()
703 def description(self):
698 def description(self):
704 return self._changectx.description()
699 return self._changectx.description()
705 def branch(self):
700 def branch(self):
706 return self._changectx.branch()
701 return self._changectx.branch()
707 def extra(self):
702 def extra(self):
708 return self._changectx.extra()
703 return self._changectx.extra()
709 def phase(self):
704 def phase(self):
710 return self._changectx.phase()
705 return self._changectx.phase()
711 def phasestr(self):
706 def phasestr(self):
712 return self._changectx.phasestr()
707 return self._changectx.phasestr()
713 def obsolete(self):
708 def obsolete(self):
714 return self._changectx.obsolete()
709 return self._changectx.obsolete()
715 def instabilities(self):
710 def instabilities(self):
716 return self._changectx.instabilities()
711 return self._changectx.instabilities()
717 def manifest(self):
712 def manifest(self):
718 return self._changectx.manifest()
713 return self._changectx.manifest()
719 def changectx(self):
714 def changectx(self):
720 return self._changectx
715 return self._changectx
721 def renamed(self):
716 def renamed(self):
722 return self._copied
717 return self._copied
723 def copysource(self):
718 def copysource(self):
724 return self._copied and self._copied[0]
719 return self._copied and self._copied[0]
725 def repo(self):
720 def repo(self):
726 return self._repo
721 return self._repo
727 def size(self):
722 def size(self):
728 return len(self.data())
723 return len(self.data())
729
724
730 def path(self):
725 def path(self):
731 return self._path
726 return self._path
732
727
733 def isbinary(self):
728 def isbinary(self):
734 try:
729 try:
735 return stringutil.binary(self.data())
730 return stringutil.binary(self.data())
736 except IOError:
731 except IOError:
737 return False
732 return False
738 def isexec(self):
733 def isexec(self):
739 return 'x' in self.flags()
734 return 'x' in self.flags()
740 def islink(self):
735 def islink(self):
741 return 'l' in self.flags()
736 return 'l' in self.flags()
742
737
743 def isabsent(self):
738 def isabsent(self):
744 """whether this filectx represents a file not in self._changectx
739 """whether this filectx represents a file not in self._changectx
745
740
746 This is mainly for merge code to detect change/delete conflicts. This is
741 This is mainly for merge code to detect change/delete conflicts. This is
747 expected to be True for all subclasses of basectx."""
742 expected to be True for all subclasses of basectx."""
748 return False
743 return False
749
744
750 _customcmp = False
745 _customcmp = False
751 def cmp(self, fctx):
746 def cmp(self, fctx):
752 """compare with other file context
747 """compare with other file context
753
748
754 returns True if different than fctx.
749 returns True if different than fctx.
755 """
750 """
756 if fctx._customcmp:
751 if fctx._customcmp:
757 return fctx.cmp(self)
752 return fctx.cmp(self)
758
753
759 if self._filenode is None:
754 if self._filenode is None:
760 raise error.ProgrammingError(
755 raise error.ProgrammingError(
761 'filectx.cmp() must be reimplemented if not backed by revlog')
756 'filectx.cmp() must be reimplemented if not backed by revlog')
762
757
763 if fctx._filenode is None:
758 if fctx._filenode is None:
764 if self._repo._encodefilterpats:
759 if self._repo._encodefilterpats:
765 # can't rely on size() because wdir content may be decoded
760 # can't rely on size() because wdir content may be decoded
766 return self._filelog.cmp(self._filenode, fctx.data())
761 return self._filelog.cmp(self._filenode, fctx.data())
767 if self.size() - 4 == fctx.size():
762 if self.size() - 4 == fctx.size():
768 # size() can match:
763 # size() can match:
769 # if file data starts with '\1\n', empty metadata block is
764 # if file data starts with '\1\n', empty metadata block is
770 # prepended, which adds 4 bytes to filelog.size().
765 # prepended, which adds 4 bytes to filelog.size().
771 return self._filelog.cmp(self._filenode, fctx.data())
766 return self._filelog.cmp(self._filenode, fctx.data())
772 if self.size() == fctx.size():
767 if self.size() == fctx.size():
773 # size() matches: need to compare content
768 # size() matches: need to compare content
774 return self._filelog.cmp(self._filenode, fctx.data())
769 return self._filelog.cmp(self._filenode, fctx.data())
775
770
776 # size() differs
771 # size() differs
777 return True
772 return True
778
773
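# Hedged illustration of the "size() - 4" case above (the framing bytes are
# an assumption about the filelog metadata layout): copy metadata is
# delimited by '\1\n' markers, so an empty metadata block is
#   b'\x01\n\x01\n'   # exactly 4 bytes prepended to the file data
# which is why sizes that differ by exactly 4 still force a real content
# comparison.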
779 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
774 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
780 """return the first ancestor of <srcrev> introducing <fnode>
775 """return the first ancestor of <srcrev> introducing <fnode>
781
776
782 If the linkrev of the file revision does not point to an ancestor of
777 If the linkrev of the file revision does not point to an ancestor of
783 srcrev, we'll walk down the ancestors until we find one introducing
778 srcrev, we'll walk down the ancestors until we find one introducing
784 this file revision.
779 this file revision.
785
780
786 :srcrev: the changeset revision we search ancestors from
781 :srcrev: the changeset revision we search ancestors from
787 :inclusive: if true, the src revision will also be checked
782 :inclusive: if true, the src revision will also be checked
788 :stoprev: an optional revision to stop the walk at. If no introduction
783 :stoprev: an optional revision to stop the walk at. If no introduction
789 of this file content could be found before this floor
784 of this file content could be found before this floor
791 revision, the function will return "None" and stop its
786 revision, the function will return "None" and stop its
791 iteration.
786 iteration.
792 """
787 """
793 repo = self._repo
788 repo = self._repo
794 cl = repo.unfiltered().changelog
789 cl = repo.unfiltered().changelog
795 mfl = repo.manifestlog
790 mfl = repo.manifestlog
796 # fetch the linkrev
791 # fetch the linkrev
797 lkr = self.linkrev()
792 lkr = self.linkrev()
798 if srcrev == lkr:
793 if srcrev == lkr:
799 return lkr
794 return lkr
800 # hack to reuse ancestor computation when searching for renames
795 # hack to reuse ancestor computation when searching for renames
801 memberanc = getattr(self, '_ancestrycontext', None)
796 memberanc = getattr(self, '_ancestrycontext', None)
802 iteranc = None
797 iteranc = None
803 if srcrev is None:
798 if srcrev is None:
804 # wctx case, used by workingfilectx during mergecopy
799 # wctx case, used by workingfilectx during mergecopy
805 revs = [p.rev() for p in self._repo[None].parents()]
800 revs = [p.rev() for p in self._repo[None].parents()]
806 inclusive = True # we skipped the real (revless) source
801 inclusive = True # we skipped the real (revless) source
807 else:
802 else:
808 revs = [srcrev]
803 revs = [srcrev]
809 if memberanc is None:
804 if memberanc is None:
810 memberanc = iteranc = cl.ancestors(revs, lkr,
805 memberanc = iteranc = cl.ancestors(revs, lkr,
811 inclusive=inclusive)
806 inclusive=inclusive)
812 # check if this linkrev is an ancestor of srcrev
807 # check if this linkrev is an ancestor of srcrev
813 if lkr not in memberanc:
808 if lkr not in memberanc:
814 if iteranc is None:
809 if iteranc is None:
815 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
810 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
816 fnode = self._filenode
811 fnode = self._filenode
817 path = self._path
812 path = self._path
818 for a in iteranc:
813 for a in iteranc:
819 if stoprev is not None and a < stoprev:
814 if stoprev is not None and a < stoprev:
820 return None
815 return None
821 ac = cl.read(a) # get changeset data (we avoid object creation)
816 ac = cl.read(a) # get changeset data (we avoid object creation)
822 if path in ac[3]: # checking the 'files' field.
817 if path in ac[3]: # checking the 'files' field.
823 # The file has been touched, check if the content is
818 # The file has been touched, check if the content is
824 # similar to the one we search for.
819 # similar to the one we search for.
825 if fnode == mfl[ac[0]].readfast().get(path):
820 if fnode == mfl[ac[0]].readfast().get(path):
826 return a
821 return a
827 # In theory, we should never get out of that loop without a result.
822 # In theory, we should never get out of that loop without a result.
828 # But if the manifest uses a buggy file revision (not a child of the
823 # But if the manifest uses a buggy file revision (not a child of the
829 # one it replaces) we could. Such a buggy situation will likely
824 # one it replaces) we could. Such a buggy situation will likely
830 # result in a crash somewhere else at some point.
825 # result in a crash somewhere else at some point.
831 return lkr
826 return lkr
832
827
833 def isintroducedafter(self, changelogrev):
828 def isintroducedafter(self, changelogrev):
834 """True if a filectx has been introduced after a given floor revision
829 """True if a filectx has been introduced after a given floor revision
835 """
830 """
836 if self.linkrev() >= changelogrev:
831 if self.linkrev() >= changelogrev:
837 return True
832 return True
838 introrev = self._introrev(stoprev=changelogrev)
833 introrev = self._introrev(stoprev=changelogrev)
839 if introrev is None:
834 if introrev is None:
840 return False
835 return False
841 return introrev >= changelogrev
836 return introrev >= changelogrev
842
837
843 def introrev(self):
838 def introrev(self):
844 """return the rev of the changeset which introduced this file revision
839 """return the rev of the changeset which introduced this file revision
845
840
846 This method is different from linkrev because it takes into account the
841 This method is different from linkrev because it takes into account the
847 changeset the filectx was created from. It ensures the returned
842 changeset the filectx was created from. It ensures the returned
848 revision is one of its ancestors. This prevents bugs from
843 revision is one of its ancestors. This prevents bugs from
849 'linkrev-shadowing' when a file revision is used by multiple
844 'linkrev-shadowing' when a file revision is used by multiple
850 changesets.
845 changesets.
851 """
846 """
852 return self._introrev()
847 return self._introrev()
853
848
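# Hedged example of the 'linkrev-shadowing' mentioned above (revision
# numbers are illustrative): if revisions 2 and 5 introduce identical
# content for a file, the filelog stores a single file revision whose
# linkrev points at 2, the first user. For a filectx reached from
# revision 5 on another branch, linkrev() would answer 2 even when 2 is
# not an ancestor of 5; introrev() walks 5's ancestors and answers 5.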
854 def _introrev(self, stoprev=None):
849 def _introrev(self, stoprev=None):
855 """
850 """
856 Same as `introrev`, but with an extra argument to limit changelog
851 Same as `introrev`, but with an extra argument to limit changelog
857 iteration range in some internal use cases.
852 iteration range in some internal use cases.
858
853
859 If `stoprev` is set, the `introrev` will not be searched past that
854 If `stoprev` is set, the `introrev` will not be searched past that
860 `stoprev` revision and "None" might be returned. This is useful to
855 `stoprev` revision and "None" might be returned. This is useful to
861 limit the iteration range.
856 limit the iteration range.
862 """
857 """
863 toprev = None
858 toprev = None
864 attrs = vars(self)
859 attrs = vars(self)
865 if r'_changeid' in attrs:
860 if r'_changeid' in attrs:
866 # We have a cached value already
861 # We have a cached value already
867 toprev = self._changeid
862 toprev = self._changeid
868 elif r'_changectx' in attrs:
863 elif r'_changectx' in attrs:
869 # We know which changelog entry we are coming from
864 # We know which changelog entry we are coming from
870 toprev = self._changectx.rev()
865 toprev = self._changectx.rev()
871
866
872 if toprev is not None:
867 if toprev is not None:
873 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
868 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
874 elif r'_descendantrev' in attrs:
869 elif r'_descendantrev' in attrs:
875 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
870 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
876 # be nice and cache the result of the computation
871 # be nice and cache the result of the computation
877 if introrev is not None:
872 if introrev is not None:
878 self._changeid = introrev
873 self._changeid = introrev
879 return introrev
874 return introrev
880 else:
875 else:
881 return self.linkrev()
876 return self.linkrev()
882
877
883 def introfilectx(self):
878 def introfilectx(self):
884 """Return filectx having identical contents, but pointing to the
879 """Return filectx having identical contents, but pointing to the
885 changeset revision where this filectx was introduced"""
880 changeset revision where this filectx was introduced"""
886 introrev = self.introrev()
881 introrev = self.introrev()
887 if self.rev() == introrev:
882 if self.rev() == introrev:
888 return self
883 return self
889 return self.filectx(self.filenode(), changeid=introrev)
884 return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid and pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out by the previous
            # list comprehension, inserting at index 0 will always result in
            # "replacing the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
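    # For example (hypothetical history): if `b` was created by renaming `a`,
    # the filelog records the rename, so the single parent returned here is a
    # filectx for `a` rather than an earlier revision of `b`:
    #
    #   repo['tip']['b'].parents()   # -> [<filectx a@...>]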

    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if r'_filelog' not in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)
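    # Illustrative sketch (not part of the original module): each annotateline
    # pairs a line of data with the context that last touched it, so a simple
    # "hg annotate"-style listing could look like:
    #
    #   for line in repo['tip']['README'].annotate():
    #       print('%d: %s' % (line.fctx.rev(), line.text))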

    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c
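    # The generator above walks the file's history newest-first by always
    # popping the pending parent with the highest (linkrev, filenode) key:
    #
    #   for anc in repo['tip']['README'].ancestors():
    #       ...  # file revisions, most recent first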

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), (
                    "bad args: changeid=%r, fileid=%r, changectx=%r"
                    % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid
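    # A filectx can be anchored by changeset, by file revision, or by an
    # existing changectx; a hypothetical direct construction (roughly
    # equivalent to repo[0]['README']) would be:
    #
    #   fctx = filectx(repo, 'README', changeid=0)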

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report the copy for
        the changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None, branch=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        if not self._extra.get('branch'):
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)
    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        branch = None
        if not extra or 'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
        super(workingctx, self).__init__(repo, text, user, date, extra, changes,
                                         branch=branch)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"
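    # Note on the dirstate state characters used above and throughout this
    # class: 'n' = normal, 'a' = added, 'r' = removed, 'm' = merged,
    # '?' = untracked. Iterating a workingctx therefore yields every tracked
    # file not scheduled for removal, and membership follows suit:
    #
    #   'README' in repo[None]   # True for a tracked, non-removed file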
1270
1265
1271 def hex(self):
1266 def hex(self):
1272 return wdirhex
1267 return wdirhex
1273
1268
1274 @propertycache
1269 @propertycache
1275 def _parents(self):
1270 def _parents(self):
1276 p = self._repo.dirstate.parents()
1271 p = self._repo.dirstate.parents()
1277 if p[1] == nullid:
1272 if p[1] == nullid:
1278 p = p[:-1]
1273 p = p[:-1]
1279 # use unfiltered repo to delay/avoid loading obsmarkers
1274 # use unfiltered repo to delay/avoid loading obsmarkers
1280 unfi = self._repo.unfiltered()
1275 unfi = self._repo.unfiltered()
1281 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1276 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1282
1277
1283 def _fileinfo(self, path):
1278 def _fileinfo(self, path):
1284 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1279 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1285 self._manifest
1280 self._manifest
1286 return super(workingctx, self)._fileinfo(path)
1281 return super(workingctx, self)._fileinfo(path)
1287
1282
1288 def _buildflagfunc(self):
1283 def _buildflagfunc(self):
1289 # Create a fallback function for getting file flags when the
1284 # Create a fallback function for getting file flags when the
1290 # filesystem doesn't support them
1285 # filesystem doesn't support them
1291
1286
1292 copiesget = self._repo.dirstate.copies().get
1287 copiesget = self._repo.dirstate.copies().get
1293 parents = self.parents()
1288 parents = self.parents()
1294 if len(parents) < 2:
1289 if len(parents) < 2:
1295 # when we have one parent, it's easy: copy from parent
1290 # when we have one parent, it's easy: copy from parent
1296 man = parents[0].manifest()
1291 man = parents[0].manifest()
1297 def func(f):
1292 def func(f):
1298 f = copiesget(f, f)
1293 f = copiesget(f, f)
1299 return man.flags(f)
1294 return man.flags(f)
1300 else:
1295 else:
1301 # merges are tricky: we try to reconstruct the unstored
1296 # merges are tricky: we try to reconstruct the unstored
1302 # result from the merge (issue1802)
1297 # result from the merge (issue1802)
1303 p1, p2 = parents
1298 p1, p2 = parents
1304 pa = p1.ancestor(p2)
1299 pa = p1.ancestor(p2)
1305 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1300 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1306
1301
1307 def func(f):
1302 def func(f):
1308 f = copiesget(f, f) # may be wrong for merges with copies
1303 f = copiesget(f, f) # may be wrong for merges with copies
1309 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1304 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1310 if fl1 == fl2:
1305 if fl1 == fl2:
1311 return fl1
1306 return fl1
1312 if fl1 == fla:
1307 if fl1 == fla:
1313 return fl2
1308 return fl2
1314 if fl2 == fla:
1309 if fl2 == fla:
1315 return fl1
1310 return fl1
1316 return '' # punt for conflicts
1311 return '' # punt for conflicts
1317
1312
1318 return func
1313 return func
1319
1314
1320 @propertycache
1315 @propertycache
1321 def _flagfunc(self):
1316 def _flagfunc(self):
1322 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1317 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1323
1318
1324 def flags(self, path):
1319 def flags(self, path):
1325 if r'_manifest' in self.__dict__:
1320 if r'_manifest' in self.__dict__:
1326 try:
1321 try:
1327 return self._manifest.flags(path)
1322 return self._manifest.flags(path)
1328 except KeyError:
1323 except KeyError:
1329 return ''
1324 return ''
1330
1325
1331 try:
1326 try:
1332 return self._flagfunc(path)
1327 return self._flagfunc(path)
1333 except OSError:
1328 except OSError:
1334 return ''
1329 return ''
1335
1330
1336 def filectx(self, path, filelog=None):
1331 def filectx(self, path, filelog=None):
1337 """get a file context from the working directory"""
1332 """get a file context from the working directory"""
1338 return workingfilectx(self._repo, path, workingctx=self,
1333 return workingfilectx(self._repo, path, workingctx=self,
1339 filelog=filelog)
1334 filelog=filelog)
1340
1335
1341 def dirty(self, missing=False, merge=True, branch=True):
1336 def dirty(self, missing=False, merge=True, branch=True):
1342 "check whether a working directory is modified"
1337 "check whether a working directory is modified"
1343 # check subrepos first
1338 # check subrepos first
1344 for s in sorted(self.substate):
1339 for s in sorted(self.substate):
1345 if self.sub(s).dirty(missing=missing):
1340 if self.sub(s).dirty(missing=missing):
1346 return True
1341 return True
1347 # check current working dir
1342 # check current working dir
1348 return ((merge and self.p2()) or
1343 return ((merge and self.p2()) or
1349 (branch and self.branch() != self.p1().branch()) or
1344 (branch and self.branch() != self.p1().branch()) or
1350 self.modified() or self.added() or self.removed() or
1345 self.modified() or self.added() or self.removed() or
1351 (missing and self.deleted()))
1346 (missing and self.deleted()))
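    # Illustrative sketch (not part of the original module): repo[None] is
    # the workingctx, so a quick "anything uncommitted?" guard could be:
    #
    #   if repo[None].dirty(missing=True):
    #       ...  # refuse to run a destructive operation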

    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes('ui', 'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
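    # Note the state machine above: re-adding a file marked 'r' (removed)
    # simply resurrects its dirstate entry via normallookup(), while a
    # genuinely new file gets a fresh 'a' (added) entry. A hypothetical
    # direct call would be:
    #
    #   repo[None].add(['newfile.txt'])   # -> [] when nothing was rejected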

    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in ds:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] != 'a':
                    ds.remove(f)
                else:
                    ds.drop(f)
            return rejected

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in '?':
                    ds.add(dest)
                elif ds[dest] in 'r':
                    ds.normallookup(dest)
                ds.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case-insensitive filesystem needs magic to translate user
        # input to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)
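    # A matcher built here respects the working directory's cwd and the
    # filesystem's case sensitivity; a hypothetical use restricting an
    # operation to Python sources would be:
    #
    #   m = repo[None].match(pats=['glob:**.py'])
    #   m('mercurial/context.py')   # -> True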

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _copies(self):
        p1copies = {}
        p2copies = {}
        parents = self._repo.dirstate.parents()
        p1manifest = self._repo[parents[0]].manifest()
        p2manifest = self._repo[parents[1]].manifest()
        narrowmatch = self._repo.narrowmatch()
        for dst, src in self._repo.dirstate.copies().items():
            if not narrowmatch(dst):
                continue
            if src in p1manifest:
                p1copies[dst] = src
            elif src in p2manifest:
                p2copies[dst] = src
        return p1copies, p2copies
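    # Copies recorded in the dirstate are split by the parent that actually
    # contains the source; e.g. after "hg copy a b" with `a` present in p1,
    # the result is roughly:
    #
    #   p1copies, p2copies == {'b': 'a'}, {}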

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but we use special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(self._repo.narrowmatch(match),
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        match = self._repo.narrowmatch(match)
        ds = self._repo.dirstate
        return sorted(f for f in ds.matches(match) if ds[f] != 'r')

    def markcommitted(self, node):
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def copysource(self):
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def lstat(self):
        return self._repo.wvfs.lstat(self._path)
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)
1773
1768
1774 def remove(self, ignoremissing=False):
1769 def remove(self, ignoremissing=False):
1775 """wraps unlink for a repo's working directory"""
1770 """wraps unlink for a repo's working directory"""
1776 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1771 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1777 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1772 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1778 rmdir=rmdir)
1773 rmdir=rmdir)
1779
1774
1780 def write(self, data, flags, backgroundclose=False, **kwargs):
1775 def write(self, data, flags, backgroundclose=False, **kwargs):
1781 """wraps repo.wwrite"""
1776 """wraps repo.wwrite"""
1782 return self._repo.wwrite(self._path, data, flags,
1777 return self._repo.wwrite(self._path, data, flags,
1783 backgroundclose=backgroundclose,
1778 backgroundclose=backgroundclose,
1784 **kwargs)
1779 **kwargs)
1785
1780
1786 def markcopied(self, src):
1781 def markcopied(self, src):
1787 """marks this file a copy of `src`"""
1782 """marks this file a copy of `src`"""
1788 self._repo.dirstate.copy(src, self._path)
1783 self._repo.dirstate.copy(src, self._path)
1789
1784
1790 def clearunknown(self):
1785 def clearunknown(self):
1791 """Removes conflicting items in the working directory so that
1786 """Removes conflicting items in the working directory so that
1792 ``write()`` can be called successfully.
1787 ``write()`` can be called successfully.
1793 """
1788 """
1794 wvfs = self._repo.wvfs
1789 wvfs = self._repo.wvfs
1795 f = self._path
1790 f = self._path
1796 wvfs.audit(f)
1791 wvfs.audit(f)
1797 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1792 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1798 # remove files under the directory as they should already be
1793 # remove files under the directory as they should already be
1799 # warned and backed up
1794 # warned and backed up
1800 if wvfs.isdir(f) and not wvfs.islink(f):
1795 if wvfs.isdir(f) and not wvfs.islink(f):
1801 wvfs.rmtree(f, forcibly=True)
1796 wvfs.rmtree(f, forcibly=True)
1802 for p in reversed(list(util.finddirs(f))):
1797 for p in reversed(list(util.finddirs(f))):
1803 if wvfs.isfileorlink(p):
1798 if wvfs.isfileorlink(p):
1804 wvfs.unlink(p)
1799 wvfs.unlink(p)
1805 break
1800 break
1806 else:
1801 else:
1807 # don't remove files if path conflicts are not processed
1802 # don't remove files if path conflicts are not processed
1808 if wvfs.isdir(f) and not wvfs.islink(f):
1803 if wvfs.isdir(f) and not wvfs.islink(f):
1809 wvfs.removedirs(f)
1804 wvfs.removedirs(f)
1810
1805
1811 def setflags(self, l, x):
1806 def setflags(self, l, x):
1812 self._repo.wvfs.setflags(self._path, l, x)
1807 self._repo.wvfs.setflags(self._path, l, x)
1813
1808
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` and `date` must be non-None. If `exists` is
    False, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data'] is not None:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def p1copies(self):
        copies = self._wrappedctx.p1copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = self._wrappedctx.p2copies().copy()
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f]['copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        self._markdirty(path, exists=True, date=self.filedate(path),
                        flags=self.flags(path), copied=origin)

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            return None

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key]['exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does
            # not exist, unlike ``workingctx``, which returns a
            # ``workingfilectx`` with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates`
        in IMM, we'll never check that a path is actually writable -- e.g.,
        because it adds `a/foo`, but `a` is actually a file in the other
        commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: %s conflicts with symlink %s "
                                  "in %d." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%d." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from
        # p2 is not a file in p1.
        components = path.split('/')
        for i in pycompat.xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a
        # directory in p1 (test that p1 doesn't have any paths matching
        # `path/*`).
        match = self.match([path], default=b'path')
        matches = self.p1().manifest().matches(match)
        mfiles = matches.keys()
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              " '%s/' is a directory in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(mfiles),
                                 ', '.join(mfiles)))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data,
                        date=dateutil.makedate(), flags=flags)

    def setflags(self, path, l, x):
        flag = ''
        if l:
            flag = 'l'
        elif x:
            flag = 'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                    'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)
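    # Usage sketch (hypothetical flow, not part of the original module): an
    # in-memory rebase-style operation builds on ``setbase``, ``write`` and
    # ``tomemctx``:
    #
    #   wctx = overlayworkingctx(repo)
    #   wctx.setbase(repo['.'])
    #   wctx.write(b'a.txt', b'rebased contents\n')
    #   mctx = wctx.tomemctx(b'commit message', user=b'alice')
    #   newnode = mctx.commit()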

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the
        # empty commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(), [self.p1().rev()],
            scmutil.matchfiles(self.repo(), self._cache.keys()))

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                        underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags='',
                   copied=None):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get('data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': copied,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingcommitctx, self).__init__(repo, text, user, date, extra,
                                               changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx
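# Usage sketch (hypothetical callback names, not part of the original
# module): wrapping a filectxfn before handing it to memctx makes repeated
# lookups of the same path hit the per-path cache instead of recomputing:
#
#   def expensivefilectx(repo, memctx, path):
#       return memfilectx(repo, memctx, path, computedata(path))
#   cachedfn = makecachingfilectxfn(expensivefilectx)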

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copysource=copysource)

    return getfilectx

def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copysource=copysource)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to the repository root. It is fired by
    the commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    and extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra,
                                     branch=branch)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = modifiednodeid

        for f in self._status.added:
            man[f] = addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the existence
        # of the 2nd parent, because "memctx._parents" is explicitly
        # initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copysource=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        if islink:
            self._flags = 'l'
        elif isexec:
            self._flags = 'x'
        else:
            self._flags = ''
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data


class metadataonlyctx(committablectx):
    """Like memctx, but reuses the manifest of a different commit.
    Intended to be used by lightweight operations that create
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we are
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(r"can't reuse the manifest: its p1 "
                               r"doesn't match the new ctx p1")
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(r"can't reuse the manifest: "
                               r"its p2 doesn't match the new ctx p2")

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the
        ``originalctx`` and the parents' manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used to check for the existence
        # of the 2nd parent, because "metadataonlyctx._parents" is explicitly
        # initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "wb") as f:
            f.write(data)
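# Usage sketch for ``arbitraryfilectx`` (illustrative paths, not part of the
# original module): compare a file outside the repository against a
# working-directory file through the filectx-style ``cmp()`` API:
#
#   base = arbitraryfilectx(b'/tmp/merge-base.txt', repo=repo)
#   changed = base.cmp(repo[None][b'a.txt'])  # True if contents differ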
@@ -1,1995 +1,2004 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
import hashlib
import os
import posixpath
import re
import subprocess
import weakref

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
    short,
    wdirid,
    wdirrev,
)

from . import (
    copies as copiesmod,
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    policy,
    pycompat,
    revsetlang,
    similar,
    smartset,
    url,
    util,
    vfs,
)

from .utils import (
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

parsers = policy.importmod(r'parsers')

termsize = scmplatform.termsize

class status(tuple):
    '''Named tuple with a list of files per status. The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
                 r'unknown=%s, ignored=%s, clean=%s>') %
                tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))

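# Illustrative sketch (not part of this module's API): constructing a
# status object by hand and reading it back through the named properties.
# The file lists here are invented for demonstration.
def _demo_status():
    st = status(['modified.txt'], ['added.txt'], [], [], [], [], [])
    # positional tuple access and the named properties see the same data
    assert st[1] == st.added == ['added.txt']
    return st
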
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
    # status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull, excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % (
                pycompat.bytestr(inst.locker))
        else:
            reason = _('lock held by %r') % inst.locker
        ui.error(_("abort: %s: %s\n") % (
            inst.desc or stringutil.forcebytestr(inst.filename), reason))
        if not inst.locker:
            ui.error(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.error(_("abort: could not lock %s: %s\n") %
                 (inst.desc or stringutil.forcebytestr(inst.filename),
                  encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.error(msg)
        if inst.args:
            ui.error(''.join(inst.args))
        if inst.hint:
            ui.error('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.error(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.error(" %r\n" % (msg,))
        elif not msg:
            ui.error(_(" empty string\n"))
        else:
            ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
    except error.CensoredNodeError as inst:
        ui.error(_("abort: file censored %s!\n") % inst)
    except error.StorageError as inst:
        ui.error(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except error.InterventionRequired as inst:
        ui.error("%s\n" % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.error(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.error(_("abort: %s\n") % inst)
        if inst.hint:
            ui.error(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.error(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.error(_("(is your Python install correct?)\n"))
    except (IOError, OSError) as inst:
        if util.safehasattr(inst, "code"): # HTTPError
            ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"): # URLError or SSLError
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, pycompat.unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.error(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            pass
        elif getattr(inst, "strerror", None): # common IOError or OSError
            if getattr(inst, "filename", None) is not None:
                ui.error(_("abort: %s: '%s'\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else: # suspicious IOError
            raise
    except MemoryError:
        ui.error(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return inst.code

    return -1

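# Usage sketch (hypothetical, not part of the module): running a command
# body under callcatch() so Mercurial errors become an exit code instead
# of a traceback. `ui` is assumed to be a ui instance and `run` a callable
# returning an integer status.
def _demo_callcatch(ui, run):
    # returns run()'s result on success; otherwise an error code (usually
    # -1, or 1 for InterventionRequired) after printing a message
    return callcatch(ui, run)
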
def checknewlabel(repo, lbl, kind):
    # Do not use the "kind" parameter in ui output.
    # It makes strings difficult to translate.
    if lbl in ['tip', '.', 'null']:
        raise error.Abort(_("the name '%s' is reserved") % lbl)
    for c in (':', '\0', '\n', '\r'):
        if c in lbl:
            raise error.Abort(
                _("%r cannot be used in a name") % pycompat.bytestr(c))
    try:
        int(lbl)
        raise error.Abort(_("cannot use an integer as a name"))
    except ValueError:
        pass
    if lbl.strip() != lbl:
        raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)

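# Illustrative sketch (not part of the module): validating a user-supplied
# label before creating a bookmark or branch. The label values are made up.
def _demo_checknewlabel(repo):
    checknewlabel(repo, 'feature-x', 'bookmark')   # passes silently
    try:
        checknewlabel(repo, '123', 'bookmark')     # integers are rejected
    except error.Abort:
        pass
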
def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
                          % pycompat.bytestr(f))

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)

def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%d;' % rev)
        key = s.digest()
    return key

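# Illustrative sketch (not part of the module): how a cache might combine
# the tip and the filtered-revision hash into a validation key, in the
# spirit of filteredhash()'s docstring. The key layout here is invented
# for demonstration.
def _demo_cachekey(repo):
    tiprev = len(repo.changelog) - 1
    tipnode = repo.changelog.node(tiprev)
    filteredkey = filteredhash(repo, tiprev)  # None when nothing is filtered
    return (tiprev, tipnode, filteredkey)
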
def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag only controls recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

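# Usage sketch (hypothetical): listing every repository below a directory,
# following symlinks and recursing into working copies. The path is made up.
def _demo_walkrepos():
    for root in walkrepos('/srv/repos', followsym=True, recurse=True):
        print(root)
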
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node

def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev

def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))

def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return '%d:%s' % (rev, hexfunc(node))

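# Illustrative sketch (not part of the module): formatting a changeset the
# way the default log template does. binnode()/intrev() substitute stable
# placeholder values for the working-directory context, so formatting never
# has to special-case ctx.node() or ctx.rev() being None.
def _demo_formatchangeid(repo):
    ctx = repo['.']
    return formatchangeid(ctx)  # e.g. '42:deadbeefcafe' at normal verbosity
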
def resolvehexnodeidprefix(repo, prefix):
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous.
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node) # make sure node isn't filtered
    return node

def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
        # if we are a pure int, then starting with zero will not be
        # confused as a rev; or, obviously, if the int is larger
        # than the value of the tip rev. We still need to disambiguate if
        # prefix == '0', since that *is* a valid revnum.
        if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
            return False
        return True
    except ValueError:
        return False

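# Illustrative sketch (not part of the module): in a repo with, say, 100
# revisions, '10' could be either a revnum or a hash prefix, while '0123'
# (leading zero) and '4096' (beyond tip) can only be hash prefixes.
def _demo_mayberevnum(repo):
    # assuming len(repo) == 100:
    #   mayberevnum(repo, b'10')   -> True  (a valid revision number)
    #   mayberevnum(repo, b'0123') -> False (leading zero)
    #   mayberevnum(repo, b'4096') -> False (larger than the tip rev)
    return mayberevnum(repo, b'10')
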
def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength = max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

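# Usage sketch (hypothetical): shortening many nodes in a loop while
# sharing one cache dict, so the disambiguation revset and nodetree are
# computed at most once across calls.
def _demo_shortest(repo, nodes):
    cache = {}
    return [shortesthexnodeidprefix(repo, n, minlength=4, cache=cache)
            for n in nodes]
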
def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False

def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        try:
            r = int(symbol)
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)

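# Usage sketch (hypothetical): looking up a single user-supplied symbol.
# Note that revsymbol() requires bytes; checking with isrevsymbol() first
# avoids handling RepoLookupError at the call site.
def _demo_revsymbol(repo):
    if isrevsymbol(repo, b'tip'):
        ctx = revsymbol(repo, b'tip')
        return ctx.hex()
    return None
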
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

def revsingle(repo, revspec, default='.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        raise error.Abort(_('empty revision range'))

    first = l.first()
    second = l.last()

    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

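# Usage sketch (hypothetical): resolving the -r arguments of a diff-like
# command. revsingle() picks one context; revpair() picks the two ends of
# a comparison (the second being the working directory when only one
# revision is named without a range operator).
def _demo_revpair(repo, revs):
    ctx1, ctx2 = revpair(repo, revs)  # e.g. revs = [b'1.0::tip']
    return ctx1.rev(), ctx2.rev()
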
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec('%d', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

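# Usage sketch (hypothetical): expanding an argument safely with
# revsetlang.formatspec() before executing, as the docstring above
# recommends. The branch name is an assumed local variable.
def _demo_revrange(repo, branch):
    spec = revsetlang.formatspec('heads(branch(%s))', branch)
    revs = revrange(repo, [spec, 'draft()'])  # union of both revsets
    return list(revs)
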
def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents

def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produces paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config('ui', 'relative-paths')
        if config == 'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _("ui.relative-paths is not a boolean ('%s')") % config)

    if relative:
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    elif repo.ui.configbool('ui', 'slash'):
        return lambda f: f
    else:
        return util.localpath

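# Usage sketch (hypothetical): obtaining a path formatter once and reusing
# it for every file printed by a command, so ui.relative-paths is honored
# consistently.
def _demo_uipaths(repo, files):
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    return [uipathfn(f) for f in files]  # repo-relative or cwd-relative
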
def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    return lambda f: uipathfn(posixpath.join(subpath, f))

def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    return bool(pats or opts.get('include') or opts.get('exclude'))

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

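# Illustrative sketch (not part of the module): on Windows, a bare glob in
# a command-line pattern list is expanded here rather than by the shell;
# patterns with an explicit kind prefix pass through untouched.
def _demo_expandpats():
    # e.g. ['*.py', 'glob:*.txt'] -> ['a.py', 'b.py', 'glob:*.txt']
    return expandpats(['*.py', 'glob:*.txt'])
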
def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

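# Usage sketch (hypothetical): building a matcher from command-line
# patterns and walking the files of a context with it, as commands
# typically do with their (pats, opts) arguments.
def _demo_match(ctx, pats, opts):
    m = match(ctx, pats, opts)
    return [f for f in ctx.walk(m)]
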
def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always()

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(files, badfn=badfn)

def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]

def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return None
    return vfs.vfs(repo.wvfs.join(origbackuppath))

def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        return repo.wjoin(filepath + ".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepath))
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)

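# Illustrative sketch (not part of the module): computing where the .orig
# backup for a repo-relative file would go. With ui.origbackuppath unset
# this is simply the file path plus a '.orig' suffix inside the working
# copy; otherwise it lands under the configured backup directory.
def _demo_backuppath(ui, repo):
    return backuppath(ui, repo, 'dir/file.txt')
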
class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))

def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
                 fixphase=False, targetphase=None, backup=True):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarker if
    obsolescence is enabled.
    """
    assert fixphase or targetphase is None
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {(n,): () for n in replacements}
    else:
        # upgrading non tuple "source" to tuple ones for BC
        repls = {}
        for key, value in replacements.items():
            if not isinstance(key, tuple):
                key = (key,)
            repls[key] = value
        replacements = repls

    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    for oldnodes, newnodes in replacements.items():
        for oldnode in oldnodes:
            if oldnode in moves:
                continue
            if len(newnodes) > 1:
                # usually a split, take the one with biggest rev number
                newnode = next(unfi.set('max(%ln)', newnodes)).node()
            elif len(newnodes) == 0:
                # move bookmark backwards
                allreplaced = []
                for rep in replacements:
                    allreplaced.extend(rep)
                roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                      allreplaced))
                if roots:
                    newnode = roots[0].node()
                else:
                    newnode = nullid
            else:
                newnode = newnodes[0]
            moves[oldnode] = newnode

    allnewnodes = [n for ns in replacements.values() for n in ns]
    toretract = {}
    toadvance = {}
    if fixphase:
        precursors = {}
        for oldnodes, newnodes in replacements.items():
            for oldnode in oldnodes:
                for newnode in newnodes:
                    precursors.setdefault(newnode, []).append(oldnode)

        allnewnodes.sort(key=lambda n: unfi[n].rev())
        newphases = {}
        def phase(ctx):
            return newphases.get(ctx.node(), ctx.phase())
        for newnode in allnewnodes:
            ctx = unfi[newnode]
            parentphase = max(phase(p) for p in ctx.parents())
            if targetphase is None:
                oldphase = max(unfi[oldnode].phase()
                               for oldnode in precursors[newnode])
                newphase = max(oldphase, parentphase)
            else:
                newphase = max(targetphase, parentphase)
            newphases[newnode] = newphase
            if newphase > ctx.phase():
                toretract.setdefault(newphase, []).append(newnode)
            elif newphase < ctx.phase():
                toadvance.setdefault(newphase, []).append(newnode)

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
1005 allnewnodes, newnode, oldnode)
1006 deletenodes = _containsnode(repo, deleterevs)
1006 deletenodes = _containsnode(repo, deleterevs)
1007 for name in oldbmarks:
1007 for name in oldbmarks:
1008 bmarkchanges.append((name, newnode))
1008 bmarkchanges.append((name, newnode))
1009 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1009 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1010 bmarkchanges.append((b, None))
1010 bmarkchanges.append((b, None))
1011
1011
1012 if bmarkchanges:
1012 if bmarkchanges:
1013 bmarks.applychanges(repo, tr, bmarkchanges)
1013 bmarks.applychanges(repo, tr, bmarkchanges)
1014
1014
1015 for phase, nodes in toretract.items():
1015 for phase, nodes in toretract.items():
1016 phases.retractboundary(repo, tr, phase, nodes)
1016 phases.retractboundary(repo, tr, phase, nodes)
1017 for phase, nodes in toadvance.items():
1017 for phase, nodes in toadvance.items():
1018 phases.advanceboundary(repo, tr, phase, nodes)
1018 phases.advanceboundary(repo, tr, phase, nodes)
1019
1019
1020 mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
1020 mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
1021 # Obsolete or strip nodes
1021 # Obsolete or strip nodes
1022 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1022 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1023 # If a node is already obsoleted, and we want to obsolete it
1023 # If a node is already obsoleted, and we want to obsolete it
1024 # without a successor, skip that obssolete request since it's
1024 # without a successor, skip that obssolete request since it's
1025 # unnecessary. That's the "if s or not isobs(n)" check below.
1025 # unnecessary. That's the "if s or not isobs(n)" check below.
1026 # Also sort the node in topology order, that might be useful for
1026 # Also sort the node in topology order, that might be useful for
1027 # some obsstore logic.
1027 # some obsstore logic.
1028 # NOTE: the sorting might belong to createmarkers.
1028 # NOTE: the sorting might belong to createmarkers.
1029 torev = unfi.changelog.rev
1029 torev = unfi.changelog.rev
1030 sortfunc = lambda ns: torev(ns[0][0])
1030 sortfunc = lambda ns: torev(ns[0][0])
1031 rels = []
1031 rels = []
1032 for ns, s in sorted(replacements.items(), key=sortfunc):
1032 for ns, s in sorted(replacements.items(), key=sortfunc):
1033 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1033 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1034 rels.append(rel)
1034 rels.append(rel)
1035 if rels:
1035 if rels:
1036 obsolete.createmarkers(repo, rels, operation=operation,
1036 obsolete.createmarkers(repo, rels, operation=operation,
1037 metadata=metadata)
1037 metadata=metadata)
1038 elif phases.supportinternal(repo) and mayusearchived:
1038 elif phases.supportinternal(repo) and mayusearchived:
1039 # this assume we do not have "unstable" nodes above the cleaned ones
1039 # this assume we do not have "unstable" nodes above the cleaned ones
1040 allreplaced = set()
1040 allreplaced = set()
1041 for ns in replacements.keys():
1041 for ns in replacements.keys():
1042 allreplaced.update(ns)
1042 allreplaced.update(ns)
1043 if backup:
1043 if backup:
1044 from . import repair # avoid import cycle
1044 from . import repair # avoid import cycle
1045 node = min(allreplaced, key=repo.changelog.rev)
1045 node = min(allreplaced, key=repo.changelog.rev)
1046 repair.backupbundle(repo, allreplaced, allreplaced, node,
1046 repair.backupbundle(repo, allreplaced, allreplaced, node,
1047 operation)
1047 operation)
1048 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1048 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1049 else:
1049 else:
1050 from . import repair # avoid import cycle
1050 from . import repair # avoid import cycle
1051 tostrip = list(n for ns in replacements for n in ns)
1051 tostrip = list(n for ns in replacements for n in ns)
1052 if tostrip:
1052 if tostrip:
1053 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1053 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1054 backup=backup)
1054 backup=backup)
1055
1055
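# A minimal usage sketch for cleanupnodes(), assuming 'repo' is a local
# repository object and 'old'/'new' are changelog nodes where 'new'
# rewrites 'old' (the names are illustrative, not part of this module):
#
#     from mercurial import scmutil
#     scmutil.cleanupnodes(repo, {old: [new]}, operation='rebase')
#
# With obsolescence markers enabled this records an obsmarker from 'old'
# to 'new' and moves bookmarks; otherwise 'old' is stripped (with a backup
# bundle unless backup=False).
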
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = subdiruipathfn(subpath, uipathfn)
            try:
                if sub.addremove(submatch, subprefix, subuipathfn, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % uipathfn(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % uipathfn(abs)
                label = 'ui.addremove.added'
            else:
                status = _('removing %s\n') % uipathfn(abs)
                label = 'ui.addremove.removed'
            repo.ui.status(status, label=label)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

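# Sketch of consuming _interestingfiles(), assuming 'repo' and 'matcher'
# are valid (illustrative only):
#
#     added, unknown, deleted, removed, forgotten = \
#         _interestingfiles(repo, matcher)
#
# 'unknown' are candidates for adding, 'deleted' are candidates for
# removal, and 'removed'/'added' feed rename detection in _findrenames().
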
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                    similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (uipathfn(old), uipathfn(new),
                                score * 100))
            renames[new] = old
    return renames

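# Note that the returned mapping is {new: old}, matching the argument
# order of wctx.copy(old, new) in _markchanges() below; a similarity of
# 1.0 pairs only identical contents, lower values allow fuzzy matches.
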
def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def getrenamedfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):
        def getrenamed(fn, rev):
            ctx = repo[rev]
            p1copies = ctx.p1copies()
            if fn in p1copies:
                return p1copies[fn]
            p2copies = ctx.p2copies()
            if fn in p2copies:
                return p2copies[fn]
            return None
        return getrenamed

    rcache = {}
    if endrev is None:
        endrev = len(repo)

    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                rcache[fn][lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed

def getcopiesfn(repo, endrev=None):
    if copiesmod.usechangesetcentricalgo(repo):
        def copiesfn(ctx):
            if ctx.p2copies():
                allcopies = ctx.p1copies().copy()
                # There should be no overlap
                allcopies.update(ctx.p2copies())
                return sorted(allcopies.items())
            else:
                return sorted(ctx.p1copies().items())
    else:
        getrenamed = getrenamedfn(repo, endrev)
        def copiesfn(ctx):
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename))
            return copies

    return copiesfn

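# Illustrative use of getcopiesfn(), assuming 'repo' is a repository and
# 'rev' an existing revision (the names are hypothetical):
#
#     copiesfn = getcopiesfn(repo)
#     for dst, src in copiesfn(repo[rev]):
#         repo.ui.write('%s copied from %s\n' % (dst, src))
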
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo['.']
    ds = repo.dirstate
    ds.setparents(newctx.node(), nullid)
    copies = dict(ds.copies())
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == 'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == 'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != 'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == 'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != 'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    oldcopies.update(copies)
    copies = dict((dst, oldcopies.get(src, src))
                  for dst, src in oldcopies.iteritems())
    # Adjust the dirstate copies
    for dst, src in copies.iteritems():
        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
            src = None
        ds.copy(src, dst)

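# The state transitions above reinterpret the old working-copy changes
# relative to newctx: e.g. a file that differs from newctx but was marked
# removed ('r') stays removed, while a file newly appearing in the working
# copy is ds.add()ed unless it was already marked added ('a').
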
def writerequires(opener, requirements):
    with opener('requires', 'w', atomictemp=True) as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)

class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    """A property-like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are stashed
    away in a ``_filecache`` dict on the object whose method is decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use __class__.<attr>.set()
    instead of directly setting <attr>.

    When using the property API, the cached data is always used if available.
    No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g. they
    can populate an entry before the property's getter is called. In this case,
    entries in ``_filecache`` will be used during property operations,
    if available. If the underlying file changes, it is up to external callers
    to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached
    method result as well as possibly calling ``del obj._filecache[attr]`` to
    remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modifies the file between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x

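# A minimal subclass sketch showing the intended usage (the names
# 'repofilecache', 'repo' and 'parsebookmarks' are illustrative; the real
# subclasses live in other modules):
#
#     class repofilecache(filecache):
#         def join(self, obj, fname):
#             return obj.vfs.join(fname)
#
#     class repo(object):
#         def __init__(self):
#             self._filecache = {}
#
#         @repofilecache('bookmarks')
#         def bookmarks(self):
#             return parsebookmarks(self)  # hypothetical parser
#
# The first attribute access stats '.hg/bookmarks' and caches the parsed
# result; later accesses reuse obj.__dict__ until the entry is invalidated.
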
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data

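# Example configuration sketch ('[extdata]' is the real section name; the
# command itself is illustrative). With the following in an hgrc:
#
#     [extdata]
#     bugzilla = shell:cat .hg/bugzilla-data
#
# extdatasource(repo, 'bugzilla') runs the command from the repo root and
# returns {rev: value} for each line "<revspec> <value>" whose revision
# exists locally.
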
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

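# Illustrative call pattern, assuming the caller already holds the wlock
# (the command string is hypothetical):
#
#     with repo.wlock():
#         rc = wlocksub(repo, 'some-hook-command')
#
# The child process sees HG_WLOCK_LOCKER in its environment, letting a
# nested hg invocation reuse the already-held lock instead of deadlocking.
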
class progress(object):
    def __init__(self, ui, updatebar, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        self.debug = ui.configbool('progress', 'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item="", total=None):
        self.update(self.pos + step, item, total)

    def complete(self):
        self.pos = None
        self.unit = ""
        self.total = None
        self._updatebar(self.topic, self.pos, "", self.unit, self.total)

    def _printdebug(self, item):
        # default to empty strings so the debug output works even when no
        # unit or item was provided
        unit = ''
        if self.unit:
            unit = ' ' + self.unit
        if item:
            item = ' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
                          % (self.topic, item, self.pos, self.total, unit, pct))
        else:
            self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))

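# Typical use is via ui.makeprogress(), which constructs one of these as a
# context manager; a sketch assuming 'files' is a list of paths:
#
#     with ui.makeprogress(_('scanning'), unit=_('files'),
#                          total=len(files)) as prog:
#         for f in files:
#             prog.increment(item=f)
#
# complete() runs automatically on exit and clears the progress bar.
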
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta'))

def gddeltaconfig(ui):
    """helper function to know if incoming deltas should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

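# Round-trip sketch, assuming 'repo.vfs' is a vfs rooted at .hg (the file
# name 'example-state' is hypothetical):
#
#     skvf = simplekeyvaluefile(repo.vfs, 'example-state')
#     skvf.write({'version': '1'}, firstline='v1')
#     state = skvf.read(firstlinenonkeyval=True)
#     # state == {'__firstline': 'v1', 'version': '1'}
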
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if match:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)
    else:
        match = matchall(repo)

    fileprefetchhooks(repo, revs, match)

# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

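# Extensions register prefetch functions by adding to this hooks list; a
# sketch (the extension name and function body are illustrative):
#
#     def _prefetch(repo, revs, match):
#         # fetch the matched files for 'revs' up front
#         pass
#     scmutil.fileprefetchhooks.add('myextension', _prefetch)
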
1735 # A marker that tells the evolve extension to suppress its own reporting
1735 # A marker that tells the evolve extension to suppress its own reporting
1736 _reportstroubledchangesets = True
1736 _reportstroubledchangesets = True
1737
1737
1738 def registersummarycallback(repo, otr, txnname=''):
1738 def registersummarycallback(repo, otr, txnname=''):
1739 """register a callback to issue a summary after the transaction is closed
1739 """register a callback to issue a summary after the transaction is closed
1740 """
1740 """
1741 def txmatch(sources):
1741 def txmatch(sources):
1742 return any(txnname.startswith(source) for source in sources)
1742 return any(txnname.startswith(source) for source in sources)
1743
1743
1744 categories = []
1744 categories = []
1745
1745
1746 def reportsummary(func):
1746 def reportsummary(func):
1747 """decorator for report callbacks."""
1747 """decorator for report callbacks."""
1748 # The repoview life cycle is shorter than the one of the actual
1748 # The repoview life cycle is shorter than the one of the actual
1749 # underlying repository. So the filtered object can die before the
1749 # underlying repository. So the filtered object can die before the
1750 # weakref is used leading to troubles. We keep a reference to the
1750 # weakref is used leading to troubles. We keep a reference to the
1751 # unfiltered object and restore the filtering when retrieving the
1751 # unfiltered object and restore the filtering when retrieving the
1752 # repository through the weakref.
1752 # repository through the weakref.
1753 filtername = repo.filtername
1753 filtername = repo.filtername
1754 reporef = weakref.ref(repo.unfiltered())
1754 reporef = weakref.ref(repo.unfiltered())
1755 def wrapped(tr):
1755 def wrapped(tr):
1756 repo = reporef()
1756 repo = reporef()
1757 if filtername:
1757 if filtername:
1758 repo = repo.filtered(filtername)
1758 repo = repo.filtered(filtername)
1759 func(repo, tr)
1759 func(repo, tr)
1760 newcat = '%02i-txnreport' % len(categories)
1760 newcat = '%02i-txnreport' % len(categories)
1761 otr.addpostclose(newcat, wrapped)
1761 otr.addpostclose(newcat, wrapped)
1762 categories.append(newcat)
1762 categories.append(newcat)
1763 return wrapped
1763 return wrapped
1764
1764
1765 if txmatch(_reportobsoletedsource):
1765 if txmatch(_reportobsoletedsource):
1766 @reportsummary
1766 @reportsummary
1767 def reportobsoleted(repo, tr):
1767 def reportobsoleted(repo, tr):
1768 obsoleted = obsutil.getobsoleted(repo, tr)
1768 obsoleted = obsutil.getobsoleted(repo, tr)
1769 if obsoleted:
1769 if obsoleted:
1770 repo.ui.status(_('obsoleted %i changesets\n')
1770 repo.ui.status(_('obsoleted %i changesets\n')
1771 % len(obsoleted))
1771 % len(obsoleted))
1772
1772
1773 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1773 if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
1774 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1774 repo.ui.configbool('experimental', 'evolution.report-instabilities')):
1775 instabilitytypes = [
1775 instabilitytypes = [
1776 ('orphan', 'orphan'),
1776 ('orphan', 'orphan'),
1777 ('phase-divergent', 'phasedivergent'),
1777 ('phase-divergent', 'phasedivergent'),
1778 ('content-divergent', 'contentdivergent'),
1778 ('content-divergent', 'contentdivergent'),
1779 ]
1779 ]
1780
1780
1781 def getinstabilitycounts(repo):
1781 def getinstabilitycounts(repo):
1782 filtered = repo.changelog.filteredrevs
1782 filtered = repo.changelog.filteredrevs
1783 counts = {}
1783 counts = {}
1784 for instability, revset in instabilitytypes:
1784 for instability, revset in instabilitytypes:
1785 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1785 counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
1786 filtered)
1786 filtered)
1787 return counts
1787 return counts
1788
1788
1789 oldinstabilitycounts = getinstabilitycounts(repo)
1789 oldinstabilitycounts = getinstabilitycounts(repo)
1790 @reportsummary
1790 @reportsummary
1791 def reportnewinstabilities(repo, tr):
1791 def reportnewinstabilities(repo, tr):
1792 newinstabilitycounts = getinstabilitycounts(repo)
1792 newinstabilitycounts = getinstabilitycounts(repo)
1793 for instability, revset in instabilitytypes:
1793 for instability, revset in instabilitytypes:
1794 delta = (newinstabilitycounts[instability] -
1794 delta = (newinstabilitycounts[instability] -
1795 oldinstabilitycounts[instability])
1795 oldinstabilitycounts[instability])
1796 msg = getinstabilitymessage(delta, instability)
1796 msg = getinstabilitymessage(delta, instability)
1797 if msg:
1797 if msg:
1798 repo.ui.warn(msg)
1798 repo.ui.warn(msg)
1799
1799
    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of the new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

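            # Example of the rendered status line from the branches above
            # (hashes and counts are hypothetical):
            #
            #   new changesets 4a3c8bfe:9f1d2c7a (2 drafts, 1 secrets)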
            # Search for new changesets that arrived already obsolete.
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible;
                # we call them "extinct" internally, but the term has not
                # been exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
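            # Hypothetical shape of tr.changes['phases'] consumed above,
            # i.e. a mapping of rev -> (oldphase, newphase):
            #
            #   {12: (phases.draft, phases.public),
            #    13: (phases.draft, phases.public)}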

def getinstabilitymessage(delta, instability):
    """Return the warning message to show for new instabilities.

    Exists as a separate function so that extensions can wrap it to show
    more information, such as how to fix instabilities."""
    if delta > 0:
        return _('%i new %s changesets\n') % (delta, instability)

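# Illustrative calls (values hypothetical):
#
#   getinstabilitymessage(3, 'orphan')  -> '3 new orphan changesets\n'
#   getinstabilitymessage(0, 'orphan')  -> None (no warning is emitted)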
def nodesummaries(repo, nodes, maxnumnodes=4):
    """summarize a list of nodes as short hashes, eliding all but the first
    maxnumnodes entries unless the ui is verbose"""
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

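# Sketch of the elided form for six nodes with the default maxnumnodes=4
# (hashes are hypothetical):
#
#   nodesummaries(repo, nodes)
#   -> '26f78ed9 53c07f08 8d0d4cf0 4a3c8bfe and 2 others'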
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

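# Sketch of the abort the check above raises when a branch grows a second
# head (branch name and hashes are hypothetical):
#
#   abort: rejecting multiple heads on branch "default"
#   (2 heads: 26f78ed9 53c07f08)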
def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    return sink

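# Hedged sketch (not in the original source) of how an extension might hook
# this entry point with Mercurial's extensions.wrapfunction() API; the
# MyFilteringSink class is hypothetical:
#
#   from mercurial import extensions, scmutil
#
#   def mysink(orig, sink):
#       return MyFilteringSink(orig(sink))
#
#   def extsetup(ui):
#       extensions.wrapfunction(scmutil, 'wrapconvertsink', mysink)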
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable these caches when revisions are dynamically pinned.
    return repo.filtered('visible-hidden', revs)

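# This code path is only active with the experimental config knobs read
# above, e.g. in an hgrc:
#
#   [experimental]
#   directaccess = True
#   directaccess.revnums = True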
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of the
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = resolvehexnodeidprefix(unfi, s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs

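# Hypothetical illustration: with hidden revision 5 and a hidden node whose
# hash starts with 4a3c8bfe, _getrevsfromsymbols(repo, {'5', '4a3c8bfe'})
# returns {5, <rev of 4a3c8bfe>}; symbols naming visible changesets, or
# anything that resolves to nothing, are silently skipped.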
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    return repo.revs("ancestors(bookmark(%s)) - "
                     "ancestors(head() and not bookmark(%s)) - "
                     "ancestors(bookmark() and not bookmark(%s))",
                     mark, mark, mark)

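# Reading of the revset above (descriptive comment, not original): keep the
# ancestors of the given bookmark, minus history also reachable from
# non-bookmarked heads, minus history also reachable from other bookmarks;
# roughly, the changesets "owned" by that bookmark alone.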
def computechangesetfilesadded(ctx):
    """return the list of files added in a changeset
    """
    added = []
    for f in ctx.files():
        if not any(f in p for p in ctx.parents()):
            added.append(f)
    return added

def computechangesetfilesremoved(ctx):
    """return the list of files removed in a changeset
    """
    removed = []
    for f in ctx.files():
        if f not in ctx:
            removed.append(f)
    return removed
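
# Hedged sketch (hypothetical repository contents) contrasting the two
# helpers above, for a changeset ctx that adds 'b' and removes 'a':
#
#   ctx.files()                       -> ['a', 'b']
#   computechangesetfilesadded(ctx)   -> ['b']   # in no parent's manifest
#   computechangesetfilesremoved(ctx) -> ['a']   # no longer in ctx itself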