context: move handling of filtering error to revsymbol() (API)...
Martin von Zweigbergk - r37403:ecd3f690 default
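
For context: after this change, changectx.__init__ re-raises filtering errors
(error.FilteredIndexError, error.FilteredLookupError,
error.FilteredRepoLookupError) instead of translating them into a friendly
"hidden revision" message itself; the _filterederror() helper deleted in the
diff below is, per the commit message, expected to live on in the
scmutil.revsymbol() code path. A minimal sketch of the intended caller
pattern (the lookup() helper here is hypothetical, not part of this commit):

    from mercurial import error, scmutil

    def lookup(repo, symbol):
        # Hypothetical caller: resolve a user-supplied revision symbol.
        try:
            # revsymbol() is now where filtering errors get translated,
            # e.g. into "hidden revision '<symbol>'" with a --hidden hint.
            return scmutil.revsymbol(repo, symbol)
        except error.FilteredRepoLookupError:
            # Constructing changectx directly would instead propagate the
            # raw filtering error after this change.
            raise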
@@ -1,2595 +1,2569 @@
 # context.py - changeset and file context objects for mercurial
 #
 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import filecmp
 import os
 import re
 import stat

 from .i18n import _
 from .node import (
     addednodeid,
     bin,
     hex,
     modifiednodeid,
     nullid,
     nullrev,
     short,
     wdirid,
     wdirnodes,
     wdirrev,
 )
 from . import (
     dagop,
     encoding,
     error,
     fileset,
     match as matchmod,
     obsolete as obsmod,
-    obsutil,
     patch,
     pathutil,
     phases,
     pycompat,
     repoview,
     revlog,
     scmutil,
     sparse,
     subrepo,
     subrepoutil,
     util,
 )
 from .utils import (
     dateutil,
     stringutil,
 )

 propertycache = util.propertycache

 nonascii = re.compile(br'[^\x21-\x7f]').search

 class basectx(object):
     """A basectx object represents the common logic for its children:
     changectx: read-only context that is already present in the repo,
     workingctx: a context that represents the working directory and can
                 be committed,
     memctx: a context that represents changes in-memory and can also
             be committed."""

     def __init__(self, repo):
         self._repo = repo

     def __bytes__(self):
         return short(self.node())

     __str__ = encoding.strmethod(__bytes__)

     def __repr__(self):
         return r"<%s %s>" % (type(self).__name__, str(self))

     def __eq__(self, other):
         try:
             return type(self) == type(other) and self._rev == other._rev
         except AttributeError:
             return False

     def __ne__(self, other):
         return not (self == other)

     def __contains__(self, key):
         return key in self._manifest

     def __getitem__(self, key):
         return self.filectx(key)

     def __iter__(self):
         return iter(self._manifest)

     def _buildstatusmanifest(self, status):
         """Builds a manifest that includes the given status results, if this is
         a working copy context. For non-working copy contexts, it just returns
         the normal manifest."""
         return self.manifest()

     def _matchstatus(self, other, match):
         """This internal method provides a way for child objects to override the
         match operator.
         """
         return match

     def _buildstatus(self, other, s, match, listignored, listclean,
                      listunknown):
         """build a status with respect to another context"""
         # Load earliest manifest first for caching reasons. More specifically,
         # if you have revisions 1000 and 1001, 1001 is probably stored as a
         # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
         # 1000 and cache it so that when you read 1001, we just need to apply a
         # delta to what's in the cache. So that's one full reconstruction + one
         # delta application.
         mf2 = None
         if self.rev() is not None and self.rev() < other.rev():
             mf2 = self._buildstatusmanifest(s)
         mf1 = other._buildstatusmanifest(s)
         if mf2 is None:
             mf2 = self._buildstatusmanifest(s)

         modified, added = [], []
         removed = []
         clean = []
         deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
         deletedset = set(deleted)
         d = mf1.diff(mf2, match=match, clean=listclean)
         for fn, value in d.iteritems():
             if fn in deletedset:
                 continue
             if value is None:
                 clean.append(fn)
                 continue
             (node1, flag1), (node2, flag2) = value
             if node1 is None:
                 added.append(fn)
             elif node2 is None:
                 removed.append(fn)
             elif flag1 != flag2:
                 modified.append(fn)
             elif node2 not in wdirnodes:
                 # When comparing files between two commits, we save time by
                 # not comparing the file contents when the nodeids differ.
                 # Note that this means we incorrectly report a reverted change
                 # to a file as a modification.
                 modified.append(fn)
             elif self[fn].cmp(other[fn]):
                 modified.append(fn)
             else:
                 clean.append(fn)

         if removed:
             # need to filter files if they are already reported as removed
             unknown = [fn for fn in unknown if fn not in mf1 and
                        (not match or match(fn))]
             ignored = [fn for fn in ignored if fn not in mf1 and
                        (not match or match(fn))]
             # if they're deleted, don't report them as removed
             removed = [fn for fn in removed if fn not in deletedset]

         return scmutil.status(modified, added, removed, deleted, unknown,
                               ignored, clean)

     @propertycache
     def substate(self):
         return subrepoutil.state(self, self._repo.ui)

     def subrev(self, subpath):
         return self.substate[subpath][1]

     def rev(self):
         return self._rev
     def node(self):
         return self._node
     def hex(self):
         return hex(self.node())
     def manifest(self):
         return self._manifest
     def manifestctx(self):
         return self._manifestctx
     def repo(self):
         return self._repo
     def phasestr(self):
         return phases.phasenames[self.phase()]
     def mutable(self):
         return self.phase() > phases.public

     def getfileset(self, expr):
         return fileset.getfileset(self, expr)

     def obsolete(self):
         """True if the changeset is obsolete"""
         return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

     def extinct(self):
         """True if the changeset is extinct"""
         return self.rev() in obsmod.getrevs(self._repo, 'extinct')

     def orphan(self):
         """True if the changeset is not obsolete, but its ancestors are"""
         return self.rev() in obsmod.getrevs(self._repo, 'orphan')

     def phasedivergent(self):
         """True if the changeset tries to be a successor of a public changeset

         Only non-public and non-obsolete changesets may be bumped.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

     def contentdivergent(self):
         """Is a successor of a changeset with multiple possible successor sets

         Only non-public and non-obsolete changesets may be divergent.
         """
         return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

     def isunstable(self):
         """True if the changeset is either unstable, bumped or divergent"""
         return self.orphan() or self.phasedivergent() or self.contentdivergent()

     def instabilities(self):
         """return the list of instabilities affecting this changeset.

         Instabilities are returned as strings. possible values are:
         - orphan,
         - phase-divergent,
         - content-divergent.
         """
         instabilities = []
         if self.orphan():
             instabilities.append('orphan')
         if self.phasedivergent():
             instabilities.append('phase-divergent')
         if self.contentdivergent():
             instabilities.append('content-divergent')
         return instabilities

     def parents(self):
         """return contexts for each parent changeset"""
         return self._parents

     def p1(self):
         return self._parents[0]

     def p2(self):
         parents = self._parents
         if len(parents) == 2:
             return parents[1]
         return changectx(self._repo, nullrev)

     def _fileinfo(self, path):
         if r'_manifest' in self.__dict__:
             try:
                 return self._manifest[path], self._manifest.flags(path)
             except KeyError:
                 raise error.ManifestLookupError(self._node, path,
                                                 _('not found in manifest'))
         if r'_manifestdelta' in self.__dict__ or path in self.files():
             if path in self._manifestdelta:
                 return (self._manifestdelta[path],
                         self._manifestdelta.flags(path))
         mfl = self._repo.manifestlog
         try:
             node, flag = mfl[self._changeset.manifest].find(path)
         except KeyError:
             raise error.ManifestLookupError(self._node, path,
                                             _('not found in manifest'))

         return node, flag

     def filenode(self, path):
         return self._fileinfo(path)[0]

     def flags(self, path):
         try:
             return self._fileinfo(path)[1]
         except error.LookupError:
             return ''

     def sub(self, path, allowcreate=True):
         '''return a subrepo for the stored revision of path, never wdir()'''
         return subrepo.subrepo(self, path, allowcreate=allowcreate)

     def nullsub(self, path, pctx):
         return subrepo.nullsubrepo(self, path, pctx)

     def workingsub(self, path):
         '''return a subrepo for the stored revision, or wdir if this is a wdir
         context.
         '''
         return subrepo.subrepo(self, path, allowwdir=True)

     def match(self, pats=None, include=None, exclude=None, default='glob',
               listsubrepos=False, badfn=None):
         r = self._repo
         return matchmod.match(r.root, r.getcwd(), pats,
                               include, exclude, default,
                               auditor=r.nofsauditor, ctx=self,
                               listsubrepos=listsubrepos, badfn=badfn)

     def diff(self, ctx2=None, match=None, **opts):
         """Returns a diff generator for the given contexts and matcher"""
         if ctx2 is None:
             ctx2 = self.p1()
         if ctx2 is not None:
             ctx2 = self._repo[ctx2]
         diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts))
         return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)

     def dirs(self):
         return self._manifest.dirs()

     def hasdir(self, dir):
         return self._manifest.hasdir(dir)

     def status(self, other=None, match=None, listignored=False,
                listclean=False, listunknown=False, listsubrepos=False):
         """return status of files between two nodes or node and working
         directory.

         If other is None, compare this node with working directory.

         returns (modified, added, removed, deleted, unknown, ignored, clean)
         """

         ctx1 = self
         ctx2 = self._repo[other]

         # This next code block is, admittedly, fragile logic that tests for
         # reversing the contexts and wouldn't need to exist if it weren't for
         # the fast (and common) code path of comparing the working directory
         # with its first parent.
         #
         # What we're aiming for here is the ability to call:
         #
         # workingctx.status(parentctx)
         #
         # If we always built the manifest for each context and compared those,
         # then we'd be done. But the special case of the above call means we
         # just copy the manifest of the parent.
         reversed = False
         if (not isinstance(ctx1, changectx)
             and isinstance(ctx2, changectx)):
             reversed = True
             ctx1, ctx2 = ctx2, ctx1

         match = match or matchmod.always(self._repo.root, self._repo.getcwd())
         match = ctx2._matchstatus(ctx1, match)
         r = scmutil.status([], [], [], [], [], [], [])
         r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                               listunknown)

         if reversed:
             # Reverse added and removed. Clear deleted, unknown and ignored as
             # these make no sense to reverse.
             r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                                r.clean)

         if listsubrepos:
             for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                 try:
                     rev2 = ctx2.subrev(subpath)
                 except KeyError:
                     # A subrepo that existed in node1 was deleted between
                     # node1 and node2 (inclusive). Thus, ctx2's substate
366 # node1 and node2 (inclusive). Thus, ctx2's substate
365 # node1 and node2 (inclusive). Thus, ctx2's substate
367 # won't contain that subpath. The best we can do ignore it.
366 # won't contain that subpath. The best we can do ignore it.
368 rev2 = None
367 rev2 = None
369 submatch = matchmod.subdirmatcher(subpath, match)
368 submatch = matchmod.subdirmatcher(subpath, match)
370 s = sub.status(rev2, match=submatch, ignored=listignored,
369 s = sub.status(rev2, match=submatch, ignored=listignored,
371 clean=listclean, unknown=listunknown,
370 clean=listclean, unknown=listunknown,
372 listsubrepos=True)
371 listsubrepos=True)
373 for rfiles, sfiles in zip(r, s):
372 for rfiles, sfiles in zip(r, s):
374 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
373 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
375
374
376 for l in r:
375 for l in r:
377 l.sort()
376 l.sort()
378
377
379 return r
378 return r
380
379
-def _filterederror(repo, changeid):
-    """build an exception to be raised about a filtered changeid
-
-    This is extracted in a function to help extensions (eg: evolve) to
-    experiment with various message variants."""
-    if repo.filtername.startswith('visible'):
-
-        # Check if the changeset is obsolete
-        unfilteredrepo = repo.unfiltered()
-        ctx = unfilteredrepo[changeid]
-
-        # If the changeset is obsolete, enrich the message with the reason
-        # that made this changeset not visible
-        if ctx.obsolete():
-            msg = obsutil._getfilteredreason(repo, changeid, ctx)
-        else:
-            msg = _("hidden revision '%s'") % changeid
-
-        hint = _('use --hidden to access hidden revisions')
-
-        return error.FilteredRepoLookupError(msg, hint=hint)
-    msg = _("filtered revision '%s' (not in '%s' subset)")
-    msg %= (changeid, repo.filtername)
-    return error.FilteredRepoLookupError(msg)
-
 class changectx(basectx):
     """A changecontext object makes access to data related to a particular
     changeset convenient. It represents a read-only context already present in
     the repo."""
     def __init__(self, repo, changeid='.'):
         """changeid is a revision number, node, or tag"""
         super(changectx, self).__init__(repo)

         try:
             if isinstance(changeid, int):
                 self._node = repo.changelog.node(changeid)
                 self._rev = changeid
                 return
             if changeid == 'null':
                 self._node = nullid
                 self._rev = nullrev
                 return
             if changeid == 'tip':
                 self._node = repo.changelog.tip()
                 self._rev = repo.changelog.rev(self._node)
                 return
             if (changeid == '.'
                 or repo.local() and changeid == repo.dirstate.p1()):
                 # this is a hack to delay/avoid loading obsmarkers
                 # when we know that '.' won't be hidden
                 self._node = repo.dirstate.p1()
                 self._rev = repo.unfiltered().changelog.rev(self._node)
                 return
             if len(changeid) == 20:
                 try:
                     self._node = changeid
                     self._rev = repo.changelog.rev(changeid)
                     return
                 except error.FilteredRepoLookupError:
                     raise
                 except LookupError:
                     pass

             try:
                 r = int(changeid)
                 if '%d' % r != changeid:
                     raise ValueError
                 l = len(repo.changelog)
                 if r < 0:
                     r += l
                 if r < 0 or r >= l and r != wdirrev:
                     raise ValueError
                 self._rev = r
                 self._node = repo.changelog.node(r)
                 return
             except error.FilteredIndexError:
                 raise
             except (ValueError, OverflowError, IndexError):
                 pass

             if len(changeid) == 40:
                 try:
                     self._node = bin(changeid)
                     self._rev = repo.changelog.rev(self._node)
                     return
                 except error.FilteredLookupError:
                     raise
                 except (TypeError, LookupError):
                     pass

             # lookup bookmarks through the name interface
             try:
                 self._node = repo.names.singlenode(repo, changeid)
                 self._rev = repo.changelog.rev(self._node)
                 return
             except KeyError:
                 pass
             except error.FilteredRepoLookupError:
                 raise
             except error.RepoLookupError:
                 pass

             self._node = repo.unfiltered().changelog._partialmatch(changeid)
             if self._node is not None:
                 self._rev = repo.changelog.rev(self._node)
                 return

             # lookup failed
             # check if it might have come from damaged dirstate
             #
             # XXX we could avoid the unfiltered if we had a recognizable
             # exception for filtered changeset access
             if (repo.local()
                 and changeid in repo.unfiltered().dirstate.parents()):
                 msg = _("working directory has unknown parent '%s'!")
                 raise error.Abort(msg % short(changeid))
             try:
                 if len(changeid) == 20 and nonascii(changeid):
                     changeid = hex(changeid)
             except TypeError:
                 pass
         except (error.FilteredIndexError, error.FilteredLookupError,
                 error.FilteredRepoLookupError):
-            raise _filterederror(repo, changeid)
+            raise
         except IndexError:
             pass
         raise error.RepoLookupError(
             _("unknown revision '%s'") % changeid)

     def __hash__(self):
         try:
             return hash(self._rev)
         except AttributeError:
             return id(self)

     def __nonzero__(self):
         return self._rev != nullrev

     __bool__ = __nonzero__

     @propertycache
     def _changeset(self):
         return self._repo.changelog.changelogrevision(self.rev())

     @propertycache
     def _manifest(self):
         return self._manifestctx.read()

     @property
     def _manifestctx(self):
         return self._repo.manifestlog[self._changeset.manifest]

     @propertycache
     def _manifestdelta(self):
         return self._manifestctx.readdelta()

     @propertycache
     def _parents(self):
         repo = self._repo
         p1, p2 = repo.changelog.parentrevs(self._rev)
         if p2 == nullrev:
             return [changectx(repo, p1)]
         return [changectx(repo, p1), changectx(repo, p2)]

     def changeset(self):
         c = self._changeset
         return (
             c.manifest,
             c.user,
             c.date,
             c.files,
             c.description,
             c.extra,
         )
     def manifestnode(self):
         return self._changeset.manifest

     def user(self):
         return self._changeset.user
     def date(self):
         return self._changeset.date
     def files(self):
         return self._changeset.files
     def description(self):
         return self._changeset.description
     def branch(self):
         return encoding.tolocal(self._changeset.extra.get("branch"))
     def closesbranch(self):
         return 'close' in self._changeset.extra
     def extra(self):
         """Return a dict of extra information."""
         return self._changeset.extra
     def tags(self):
         """Return a list of byte tag names"""
         return self._repo.nodetags(self._node)
     def bookmarks(self):
         """Return a list of byte bookmark names."""
         return self._repo.nodebookmarks(self._node)
     def phase(self):
         return self._repo._phasecache.phase(self._repo, self._rev)
     def hidden(self):
         return self._rev in repoview.filterrevs(self._repo, 'visible')

     def isinmemory(self):
         return False

     def children(self):
         """return list of changectx contexts for each child changeset.

         This returns only the immediate child changesets. Use descendants() to
         recursively walk children.
         """
         c = self._repo.changelog.children(self._node)
         return [changectx(self._repo, x) for x in c]

     def ancestors(self):
         for a in self._repo.changelog.ancestors([self._rev]):
             yield changectx(self._repo, a)

     def descendants(self):
         """Recursively yield all children of the changeset.

         For just the immediate children, use children()
         """
         for d in self._repo.changelog.descendants([self._rev]):
             yield changectx(self._repo, d)

     def filectx(self, path, fileid=None, filelog=None):
         """get a file context from this changeset"""
         if fileid is None:
             fileid = self.filenode(path)
         return filectx(self._repo, path, fileid=fileid,
                        changectx=self, filelog=filelog)

     def ancestor(self, c2, warn=False):
         """return the "best" ancestor context of self and c2

         If there are multiple candidates, it will show a message and check
         merge.preferancestor configuration before falling back to the
         revlog ancestor."""
         # deal with workingctxs
         n2 = c2._node
         if n2 is None:
             n2 = c2._parents[0]._node
         cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
         if not cahs:
             anc = nullid
         elif len(cahs) == 1:
             anc = cahs[0]
         else:
             # experimental config: merge.preferancestor
             for r in self._repo.ui.configlist('merge', 'preferancestor'):
                 try:
                     ctx = scmutil.revsymbol(self._repo, r)
                 except error.RepoLookupError:
                     continue
                 anc = ctx.node()
                 if anc in cahs:
                     break
             else:
                 anc = self._repo.changelog.ancestor(self._node, n2)
             if warn:
                 self._repo.ui.status(
                     (_("note: using %s as ancestor of %s and %s\n") %
                      (short(anc), short(self._node), short(n2))) +
                     ''.join(_(" alternatively, use --config "
                               "merge.preferancestor=%s\n") %
                             short(n) for n in sorted(cahs) if n != anc))
         return changectx(self._repo, anc)

     def descendant(self, other):
         """True if other is descendant of this changeset"""
         return self._repo.changelog.descendant(self._rev, other._rev)

     def walk(self, match):
         '''Generates matching file names.'''

         # Wrap match.bad method to have message with nodeid
         def bad(fn, msg):
             # The manifest doesn't know about subrepos, so don't complain about
             # paths into valid subrepos.
             if any(fn == s or fn.startswith(s + '/')
                    for s in self.substate):
                 return
             match.bad(fn, _('no such file in rev %s') % self)

         m = matchmod.badmatch(match, bad)
         return self._manifest.walk(m)

     def matches(self, match):
         return self.walk(match)

 class basefilectx(object):
     """A filecontext object represents the common logic for its children:
     filectx: read-only access to a filerevision that is already present
              in the repo,
     workingfilectx: a filecontext that represents files from the working
                     directory,
     memfilectx: a filecontext that represents files in-memory,
     overlayfilectx: duplicate another filecontext with some fields overridden.
     """
     @propertycache
     def _filelog(self):
         return self._repo.file(self._path)

     @propertycache
     def _changeid(self):
         if r'_changeid' in self.__dict__:
             return self._changeid
         elif r'_changectx' in self.__dict__:
             return self._changectx.rev()
         elif r'_descendantrev' in self.__dict__:
             # this file context was created from a revision with a known
             # descendant, we can (lazily) correct for linkrev aliases
             return self._adjustlinkrev(self._descendantrev)
         else:
             return self._filelog.linkrev(self._filerev)

     @propertycache
     def _filenode(self):
         if r'_fileid' in self.__dict__:
             return self._filelog.lookup(self._fileid)
         else:
             return self._changectx.filenode(self._path)

     @propertycache
     def _filerev(self):
         return self._filelog.rev(self._filenode)

     @propertycache
     def _repopath(self):
         return self._path

     def __nonzero__(self):
         try:
             self._filenode
             return True
         except error.LookupError:
             # file is missing
             return False

     __bool__ = __nonzero__

     def __bytes__(self):
         try:
             return "%s@%s" % (self.path(), self._changectx)
         except error.LookupError:
             return "%s@???" % self.path()

     __str__ = encoding.strmethod(__bytes__)

     def __repr__(self):
         return r"<%s %s>" % (type(self).__name__, str(self))

     def __hash__(self):
         try:
             return hash((self._path, self._filenode))
         except AttributeError:
             return id(self)

     def __eq__(self, other):
         try:
             return (type(self) == type(other) and self._path == other._path
                     and self._filenode == other._filenode)
         except AttributeError:
             return False

     def __ne__(self, other):
         return not (self == other)

     def filerev(self):
         return self._filerev
     def filenode(self):
         return self._filenode
     @propertycache
     def _flags(self):
         return self._changectx.flags(self._path)
     def flags(self):
         return self._flags
     def filelog(self):
         return self._filelog
     def rev(self):
         return self._changeid
     def linkrev(self):
         return self._filelog.linkrev(self._filerev)
     def node(self):
         return self._changectx.node()
     def hex(self):
         return self._changectx.hex()
     def user(self):
         return self._changectx.user()
     def date(self):
         return self._changectx.date()
     def files(self):
         return self._changectx.files()
     def description(self):
         return self._changectx.description()
     def branch(self):
         return self._changectx.branch()
     def extra(self):
         return self._changectx.extra()
     def phase(self):
         return self._changectx.phase()
     def phasestr(self):
         return self._changectx.phasestr()
     def obsolete(self):
         return self._changectx.obsolete()
     def instabilities(self):
         return self._changectx.instabilities()
     def manifest(self):
         return self._changectx.manifest()
     def changectx(self):
         return self._changectx
     def renamed(self):
         return self._copied
     def repo(self):
         return self._repo
     def size(self):
         return len(self.data())

     def path(self):
         return self._path

     def isbinary(self):
         try:
             return stringutil.binary(self.data())
         except IOError:
             return False
     def isexec(self):
         return 'x' in self.flags()
     def islink(self):
         return 'l' in self.flags()

     def isabsent(self):
         """whether this filectx represents a file not in self._changectx

         This is mainly for merge code to detect change/delete conflicts. This is
         expected to be True for all subclasses of basectx."""
         return False

     _customcmp = False
     def cmp(self, fctx):
         """compare with other file context

         returns True if different than fctx.
         """
         if fctx._customcmp:
             return fctx.cmp(self)

         if (fctx._filenode is None
             and (self._repo._encodefilterpats
                  # if file data starts with '\1\n', empty metadata block is
                  # prepended, which adds 4 bytes to filelog.size().
                  or self.size() - 4 == fctx.size())
             or self.size() == fctx.size()):
             return self._filelog.cmp(self._filenode, fctx.data())

         return True

     def _adjustlinkrev(self, srcrev, inclusive=False):
         """return the first ancestor of <srcrev> introducing <fnode>

         If the linkrev of the file revision does not point to an ancestor of
         srcrev, we'll walk down the ancestors until we find one introducing
         this file revision.

         :srcrev: the changeset revision we search ancestors from
         :inclusive: if true, the src revision will also be checked
         """
         repo = self._repo
         cl = repo.unfiltered().changelog
         mfl = repo.manifestlog
         # fetch the linkrev
         lkr = self.linkrev()
         # hack to reuse ancestor computation when searching for renames
         memberanc = getattr(self, '_ancestrycontext', None)
         iteranc = None
         if srcrev is None:
             # wctx case, used by workingfilectx during mergecopy
             revs = [p.rev() for p in self._repo[None].parents()]
             inclusive = True # we skipped the real (revless) source
         else:
             revs = [srcrev]
         if memberanc is None:
             memberanc = iteranc = cl.ancestors(revs, lkr,
                                                inclusive=inclusive)
         # check if this linkrev is an ancestor of srcrev
         if lkr not in memberanc:
             if iteranc is None:
                 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
             fnode = self._filenode
             path = self._path
             for a in iteranc:
                 ac = cl.read(a) # get changeset data (we avoid object creation)
                 if path in ac[3]: # checking the 'files' field.
                     # The file has been touched, check if the content is
                     # similar to the one we search for.
                     if fnode == mfl[ac[0]].readfast().get(path):
                         return a
             # In theory, we should never get out of that loop without a result.
             # But if manifest uses a buggy file revision (not children of the
             # one it replaces) we could. Such a buggy situation will likely
             # result in a crash somewhere else at some point.
             return lkr

     def introrev(self):
         """return the rev of the changeset which introduced this file revision

         This method is different from linkrev because it takes into account the
         changeset the filectx was created from. It ensures the returned
         revision is one of its ancestors. This prevents bugs from
         'linkrev-shadowing' when a file revision is used by multiple
         changesets.
         """
         lkr = self.linkrev()
         attrs = vars(self)
         noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
         if noctx or self.rev() == lkr:
             return self.linkrev()
         return self._adjustlinkrev(self.rev(), inclusive=True)

     def introfilectx(self):
         """Return filectx having identical contents, but pointing to the
         changeset revision where this filectx was introduced"""
         introrev = self.introrev()
         if self.rev() == introrev:
             return self
         return self.filectx(self.filenode(), changeid=introrev)

     def _parentfilectx(self, path, fileid, filelog):
         """create parent filectx keeping ancestry info for _adjustlinkrev()"""
         fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
         if r'_changeid' in vars(self) or r'_changectx' in vars(self):
             # If self is associated with a changeset (probably explicitly
             # fed), ensure the created filectx is associated with a
             # changeset that is an ancestor of self.changectx.
             # This lets us later use _adjustlinkrev to get a correct link.
             fctx._descendantrev = self.rev()
             fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
         elif r'_descendantrev' in vars(self):
             # Otherwise propagate _descendantrev if we have one associated.
             fctx._descendantrev = self._descendantrev
             fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
         return fctx

     def parents(self):
         _path = self._path
         fl = self._filelog
         parents = self._filelog.parents(self._filenode)
         pl = [(_path, node, fl) for node in parents if node != nullid]

         r = fl.renamed(self._filenode)
         if r:
             # - In the simple rename case, both parents are nullid, pl is
             #   empty.
             # - In case of merge, only one of the parents is nullid and should
             #   be replaced with the rename information. This parent is
             #   -always- the first one.
             #
             # As nullid parents have always been filtered out in the previous
             # list comprehension, inserting at 0 will always result in
             # replacing the first nullid parent with the rename information.
942 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
916 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
943
917
944 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
918 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
945
919
946 def p1(self):
920 def p1(self):
947 return self.parents()[0]
921 return self.parents()[0]
948
922
949 def p2(self):
923 def p2(self):
950 p = self.parents()
924 p = self.parents()
951 if len(p) == 2:
925 if len(p) == 2:
952 return p[1]
926 return p[1]
953 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
927 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
954
928
955 def annotate(self, follow=False, skiprevs=None, diffopts=None):
929 def annotate(self, follow=False, skiprevs=None, diffopts=None):
956 """Returns a list of annotateline objects for each line in the file
930 """Returns a list of annotateline objects for each line in the file
957
931
958 - line.fctx is the filectx of the node where that line was last changed
932 - line.fctx is the filectx of the node where that line was last changed
959 - line.lineno is the line number at the first appearance in the managed
933 - line.lineno is the line number at the first appearance in the managed
960 file
934 file
961 - line.text is the data on that line (including newline character)
935 - line.text is the data on that line (including newline character)
962 """
936 """
963 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
937 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
964
938
965 def parents(f):
939 def parents(f):
966 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
940 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
967 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
941 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
968 # from the topmost introrev (= srcrev) down to p.linkrev() if it
942 # from the topmost introrev (= srcrev) down to p.linkrev() if it
969 # isn't an ancestor of the srcrev.
943 # isn't an ancestor of the srcrev.
970 f._changeid
944 f._changeid
971 pl = f.parents()
945 pl = f.parents()
972
946
973 # Don't return renamed parents if we aren't following.
947 # Don't return renamed parents if we aren't following.
974 if not follow:
948 if not follow:
975 pl = [p for p in pl if p.path() == f.path()]
949 pl = [p for p in pl if p.path() == f.path()]
976
950
977 # renamed filectx won't have a filelog yet, so set it
951 # renamed filectx won't have a filelog yet, so set it
978 # from the cache to save time
952 # from the cache to save time
979 for p in pl:
953 for p in pl:
980 if not r'_filelog' in p.__dict__:
954 if not r'_filelog' in p.__dict__:
981 p._filelog = getlog(p.path())
955 p._filelog = getlog(p.path())
982
956
983 return pl
957 return pl
984
958
985 # use linkrev to find the first changeset where self appeared
959 # use linkrev to find the first changeset where self appeared
986 base = self.introfilectx()
960 base = self.introfilectx()
987 if getattr(base, '_ancestrycontext', None) is None:
961 if getattr(base, '_ancestrycontext', None) is None:
988 cl = self._repo.changelog
962 cl = self._repo.changelog
989 if base.rev() is None:
963 if base.rev() is None:
990 # wctx is not inclusive, but works because _ancestrycontext
964 # wctx is not inclusive, but works because _ancestrycontext
991 # is used to test filelog revisions
965 # is used to test filelog revisions
992 ac = cl.ancestors([p.rev() for p in base.parents()],
966 ac = cl.ancestors([p.rev() for p in base.parents()],
993 inclusive=True)
967 inclusive=True)
994 else:
968 else:
995 ac = cl.ancestors([base.rev()], inclusive=True)
969 ac = cl.ancestors([base.rev()], inclusive=True)
996 base._ancestrycontext = ac
970 base._ancestrycontext = ac
997
971
998 return dagop.annotate(base, parents, skiprevs=skiprevs,
972 return dagop.annotate(base, parents, skiprevs=skiprevs,
999 diffopts=diffopts)
973 diffopts=diffopts)
1000
974
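    # Illustrative usage sketch (not part of this module): driving
    # annotate() from an already-open repo object. The repo setup and the
    # revision/file names here are assumptions for the example only.
    #
    #   fctx = repo['tip']['README']
    #   for line in fctx.annotate(follow=True):
    #       print(line.fctx.rev(), line.lineno, line.text)
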
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

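    # Sketch of the difference between data() and decodeddata(), assuming a
    # repository with decode filters (e.g. EOL conversion) configured:
    #
    #   raw = fctx.data()          # repository-internal form
    #   disk = fctx.decodeddata()  # after decode filters, roughly as it
    #                              # would be written to the working directory
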
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

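    # A filectx can be pinned either by changeset or by file revision; the
    # two constructions below reach the same file data (an illustrative
    # sketch only -- `repo` and the names used are assumptions):
    #
    #   fctx1 = filectx(repo, 'a.txt', changeid='tip')
    #   fctx2 = filectx(repo, 'a.txt', fileid=fctx1.filenode())
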
    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` objects from `filectx` are not used in complex
            # operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either, and "incorrect
            # behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when a solution to the linkrev issues is on the
            # table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

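    # The censor fallback above is configuration-driven; a minimal hgrc
    # sketch that makes reads of censored data return "" instead of aborting:
    #
    #   [censor]
    #   policy = ignore
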
    def size(self):
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If a rename is logged in the file revision, we report a copy for the
        changeset only if the file revision's linkrev points back to the
        changeset in question or both changeset parents contain different
        file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]

class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

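    # The merge branch of _buildflagfunc reduces to this table for a file's
    # flags in p1 (fl1), p2 (fl2) and their ancestor (fla), where 'x' marks
    # an executable file, 'l' a symlink and '' a plain file:
    #
    #   fl1  fl2  fla  -> result
    #   'x'  'x'  ''      'x'   (both sides agree)
    #   ''   'x'  ''      'x'   (only p2 changed the flag; take p2's)
    #   'x'  ''   'x'     ''    (only p2 changed the flag; take p2's)
    #   'x'  'l'  ''      ''    (conflicting changes; punt to plain file)
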
    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
                [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        return False

class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
              or None to use the repository status.
    """
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        super(workingctx, self).__init__(repo, text, user, date, extra, changes)

    def __iter__(self):
        d = self._repo.dirstate
        for f in d:
            if d[f] != 'r':
                yield f

    def __contains__(self, key):
        return self._repo.dirstate[key] not in "?r"

    def hex(self):
        return hex(wdirid)

    @propertycache
    def _parents(self):
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            p = p[:-1]
        return [changectx(self._repo, x) for x in p]

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(self._repo, path, workingctx=self,
                              filelog=filelog)

    def dirty(self, missing=False, merge=True, branch=True):
        "check whether a working directory is modified"
        # check subrepos first
        for s in sorted(self.substate):
            if self.sub(s).dirty(missing=missing):
                return True
        # check current working dir
        return ((merge and self.p2()) or
                (branch and self.branch() != self.p1().branch()) or
                self.modified() or self.added() or self.removed() or
                (missing and self.deleted()))

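    # Rough caller-side sketch (assumed usage, not part of this class); the
    # keyword arguments let callers ignore particular kinds of dirtiness:
    #
    #   wctx = repo[None]
    #   wctx.dirty()              # merge state, branch moves, file changes
    #   wctx.dirty(branch=False)  # ignore a bare branch change
    #   wctx.dirty(missing=True)  # additionally count deleted (missing) files
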
    def add(self, list, prefix=""):
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_("%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    ui.warn(_("%s: up to %d MB of RAM may be required "
                              "to manage this file\n"
                              "(use 'hg revert %s' to cancel the "
                              "pending addition)\n")
                            % (f, 3 * st.st_size // 1000000, uipath(f)))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(_("%s not added: only files and symlinks "
                              "supported currently\n") % uipath(f))
                    rejected.append(f)
                elif ds[f] in 'amn':
                    ui.warn(_("%s already tracked!\n") % uipath(f))
                elif ds[f] == 'r':
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected

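    # The warning in add() estimates peak memory as three times the file
    # size, truncated to whole MB. For example, for a 50,000,000 byte file
    # (over the 10,000,000 byte threshold), 3 * 50000000 // 1000000 == 150,
    # so the user is told "up to 150 MB of RAM may be required".
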
    def forget(self, files, prefix=""):
        with self._repo.wlock():
            ds = self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            for f in files:
                if f not in self._repo.dirstate:
                    self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
                    rejected.append(f)
                elif self._repo.dirstate[f] != 'a':
                    self._repo.dirstate.remove(f)
                else:
                    self._repo.dirstate.drop(f)
            return rejected

    def undelete(self, list):
        pctxs = self.parents()
        with self._repo.wlock():
            ds = self._repo.dirstate
            for f in list:
                if self._repo.dirstate[f] != 'r':
                    self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
                else:
                    fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
                    t = fctx.data()
                    self._repo.wwrite(f, t, fctx.flags())
                    self._repo.dirstate.normal(f)

    def copy(self, source, dest):
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(_("%s does not exist!\n")
                               % self._repo.dirstate.pathto(dest))
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(_("copy failed: %s is not a file or a "
                                 "symbolic link\n")
                               % self._repo.dirstate.pathto(dest))
        else:
            with self._repo.wlock():
                if self._repo.dirstate[dest] in '?':
                    self._repo.dirstate.add(dest)
                elif self._repo.dirstate[dest] in 'r':
                    self._repo.dirstate.normallookup(dest)
                self._repo.dirstate.copy(source, dest)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
                              default, auditor=r.auditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn,
                              icasefs=icasefs)

    def _filtersuspectsymlink(self, files):
        if not files or self._repo.dirstate._checklink:
            return files

        # Symlink placeholders may get non-symlink-like contents
        # via user error or dereferencing by NFS or Samba servers,
        # so we filter out any placeholders that don't look like a
        # symlink
        sane = []
        for f in files:
            if self.flags(f) == 'l':
                d = self[f].data()
                if (d == '' or len(d) >= 1024 or '\n' in d
                    or stringutil.binary(d)):
                    self._repo.ui.debug('ignoring suspect symlink placeholder'
                                        ' "%s"\n' % f)
                    continue
            sane.append(f)
        return sane

    def _checklookup(self, files):
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (f not in pctx or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file became inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup

    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean"""
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug('skip updating dirstate: '
                                            'identity mismatch\n')
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if '.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
                                            clean=clean, unknown=unknown)

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(s.modified, s.added, s.removed,
                                              s.deleted, [], [], [])
            else:
                self._status = s

        return s

    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifest
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results."""
        parents = self.parents()

        man = parents[0].manifest().copy()

        ff = self._flagfunc
        for i, l in ((addednodeid, status.added),
                     (modifiednodeid, status.modified)):
            for f in l:
                man[f] = i
                try:
                    man.setflag(f, ff(f))
                except OSError:
                    pass

        for f in status.deleted + status.removed:
            if f in man:
                del man[f]

        return man

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo['.']:
            s = super(workingctx, self)._buildstatus(other, s, match,
                                                     listignored, listclean,
                                                     listunknown)
        return s

    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo['.']:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn('%s: %s\n' %
                                       (self._repo.dirstate.pathto(f), msg))
            match.bad = bad
        return match

    def markcommitted(self, node):
        super(workingctx, self).markcommitted(node)

        sparse.aftercommit(self._repo, node)

class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""
    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        return [self._parentfilectx(p, fileid=n, filelog=l)
                for p, n, l in pl if n != nullid]

    def children(self):
        return []

class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        return self._repo.wread(self._path)
    def renamed(self):
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        self._repo.wwrite(self._path, data, flags,
                          backgroundclose=backgroundclose,
                          **kwargs)

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        if self._repo.dirstate[self._path] in "nma":
            self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if wvfs.isdir(f) and not wvfs.islink(f):
            wvfs.rmtree(f, forcibly=True)
        if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
            for p in reversed(list(util.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break

    def setflags(self, l, x):
        self._repo.wvfs.setflags(self._path, l, x)

class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

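    # An illustrative example of what self._cache might hold after one write
    # and one delete (the paths, dates and contents here are made up):
    #
    #   {
    #       'a.txt': {'exists': True, 'date': (1520000000, 0),
    #                 'data': 'new contents\n', 'flags': '',
    #                 'copied': None},
    #       'b.txt': {'exists': False, 'date': None, 'data': None,
    #                 'flags': '', 'copied': None},
    #   }
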
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        self.clean()

    def setbase(self, wrappedctx):
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, '_manifest')

    def data(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                if self._cache[path]['data']:
                    return self._cache[path]['data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        def f(path):
            return self._cache[path]['flags']
        return f

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                self._existsinparent(f)]

    def added(self):
        return [f for f in self._cache.keys() if self._cache[f]['exists'] and
                not self._existsinparent(f)]

    def removed(self):
        return [f for f in self._cache.keys() if
                not self._cache[f]['exists'] and self._existsinparent(f)]

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path]['date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        if self.isdirty(path):
            self._cache[path]['copied'] = origin
        else:
            raise error.ProgrammingError('markcopied() called on clean context')

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path]['copied']
        else:
            raise error.ProgrammingError('copydata() called on clean context')

    def flags(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return self._cache[path]['flags']
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        else:
            return self._wrappedctx[path].flags()

    def _existsinparent(self, path):
        try:
            # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g.,
        because it adds `a/foo`, but `a` is actually a file in the other
        commit.
        """
        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if 'l' in self.p1()[component].flags():
                raise error.Abort("error: '%s' conflicts with symlink '%s' "
                                  "in %s." % (path, component,
                                              self.p1().rev()))
            else:
                raise error.Abort("error: '%s' conflicts with file '%s' in "
                                  "%s." % (path, component,
                                           self.p1().rev()))

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split('/')
        for i in xrange(len(components)):
            component = "/".join(components[0:i])
            if component in self.p1():
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = matchmod.match('/', '', [path + '/'], default=b'relpath')
        matches = self.p1().manifest().matches(match)
        if len(matches) > 0:
            if len(matches) == 1 and matches.keys()[0] == path:
                return
            raise error.Abort("error: file '%s' cannot be written because "
                              "'%s/' is a folder in %s (containing %d "
                              "entries: %s)"
                              % (path, path, self.p1(), len(matches),
                                 ', '.join(matches.keys())))

    def write(self, path, data, flags='', **kwargs):
        if data is None:
            raise error.ProgrammingError("data must be non-None")
        self._auditconflicts(path)
        self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
                        flags=flags)

    def setflags(self, path, l, x):
        self._markdirty(path, exists=True, date=dateutil.makedate(),
                        flags=(l and 'l' or '') + (x and 'x' or ''))

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (self._cache[path]['exists'] and
                'l' in self._cache[path]['flags']):
                return self.exists(self._cache[path]['data'].strip())
            else:
                return self._cache[path]['exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path]['exists']

        return self._existsinparent(path)

    def size(self, path):
        if self.isdirty(path):
            if self._cache[path]['exists']:
                return len(self._cache[path]['data'])
            else:
                raise error.ProgrammingError("No such file or directory: %s" %
                                             path)
        return self._wrappedctx[path].size()

    def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
                 user=None, editor=None):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context's if not passed.
        if parents is None:
            parents = self._wrappedctx.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self._cache.keys()
        def getfile(repo, memctx, path):
            if self._cache[path]['exists']:
                return memfilectx(repo, memctx, path,
                                  self._cache[path]['data'],
                                  'l' in self._cache[path]['flags'],
                                  'x' in self._cache[path]['flags'],
                                  self._cache[path]['copied'])
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None
        return memctx(self._repo, parents, text, files, getfile, date=date,
                      extra=extra, user=user, branch=branch, editor=editor)

    def isdirty(self, path):
        return path in self._cache

    def isempty(self):
        # We need to discard any keys that are actually clean before the empty
        # commit check.
        self._compact()
        return len(self._cache) == 0

    def clean(self):
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (underlying.data() == cache['data'] and
                    underlying.flags() == cache['flags']):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(self, path, exists, data=None, date=None, flags=''):
        self._cache[path] = {
            'exists': exists,
            'data': data,
            'date': date,
            'flags': flags,
            'copied': None,
        }

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(self._repo, path, parent=self,
                                     filelog=filelog)

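# A minimal usage sketch of the class above, assuming ``repo`` is any
# localrepository. The helper name ``_overlayexample`` and the file names are
# hypothetical; the calls shown (setbase, write, remove, isdirty, tomemctx)
# are the methods defined above.
def _overlayexample(repo):
    wctx = overlayworkingctx(repo)
    wctx.setbase(repo['.'])  # wrap the working directory parent
    wctx.write('a.txt', b'new contents\n')  # cached, not written to disk
    wctx.remove('obsolete.txt')             # cached deletion
    assert wctx.isdirty('a.txt')            # reflects the cache only
    # Turn the cached writes into a committable memctx; parents are revs.
    mctx = wctx.tomemctx(b'example commit', parents=(repo['.'].rev(), None))
    return repo.commitctx(mctx)
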
class overlayworkingfilectx(committablefilectx):
    """Wraps a ``workingfilectx``, but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        path = self._parent.copydata(self._path)
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        pass

class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed

def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        if path not in cache:
            cache[path] = func(repo, memctx, path)
        return cache[path]

    return getfilectx

def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        copied = fctx.renamed()
        if copied:
            copied = copied[0]
        return memfilectx(repo, memctx, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied)

    return getfilectx

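# A minimal sketch, assuming ``repo`` is any localrepository: copying file
# contents from an existing changeset into a new in-memory commit. The
# function name is hypothetical, and ``files`` must name paths that exist in
# the source changeset.
def _memfilefromctxexample(repo, files):
    getfilectx = memfilefromctx(repo['.'])
    parents = (repo['.'].node(), None)
    return memctx(repo, parents, b'copied from .', files, getfilectx)
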
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        data, mode, copied = patchstore.getfile(path)
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(repo, memctx, path, data, islink=islink,
                          isexec=isexec, copied=copied)

    return getfilectx

class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while
    related file data is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revision identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    the requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but the call order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to the current date,
    extra is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

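# A minimal sketch of an in-memory commit built directly from the docstring
# above, assuming ``repo`` is any localrepository. The function name and file
# content are hypothetical; ``memfilectx`` is defined just below.
def _memctxexample(repo):
    def getfilectx(repo, memctx, path):
        if path == 'hello.txt':
            return memfilectx(repo, memctx, path, b'hello world\n')
        return None  # any other listed path is recorded as removed
    parents = (repo['.'].node(), None)  # None becomes the null revision
    ctx = memctx(repo, parents, b'in-memory commit', ['hello.txt'],
                 getfilectx, user=b'example user <user@example.com>')
    return repo.commitctx(ctx)
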
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, changectx, path, data, islink=False,
                 isexec=False, copied=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data

class overlayfilectx(committablefilectx):
    """Like memfilectx but takes an original filectx and optional parameters
    to override parts of it. This is useful when fctx.data() is expensive
    (i.e. the flag processor is expensive) and raw data, flags, and filenode
    could be reused (e.g. a rebase, or a mode-only amend of a REVIDX_EXTSTORED
    file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is
        a function so the data can be computed lazily. path, flags, copied,
        ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just
        path, and will be converted to (path, nullid). This simplifies some
        callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusability here.
        #
        # If ctx or copied is overridden to the same value as in originalfctx,
        # still consider it reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        return self._datafunc()

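# A minimal sketch of the intended use, assuming ``fctx`` is a filectx whose
# raw data we want to reuse. A mode-only change (here: marking the file
# executable) overrides ``flags`` but leaves data, copy source, and ctx
# alone, so rawdata/rawflags/filenode can be copied over unchanged.
def _overlayfilectxexample(fctx):
    return overlayfilectx(fctx, flags='x')
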
class metadataonlyctx(committablectx):
    """Like memctx, but it reuses the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revision identifiers (pass
    None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to the current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """
    def __init__(self, repo, originalctx, parents=None, text=None, user=None,
                 date=None, extra=None, editor=False):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p1 doesn\'t match the new ctx p1')
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError('can\'t reuse the manifest: '
                               'its p2 doesn\'t match the new ctx p2')

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized as a list whose length is always 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

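# A minimal sketch of a metadata-only rewrite, assuming ``repo`` is any
# localrepository and ``rev`` identifies the commit whose message should be
# replaced. The function name and arguments are hypothetical.
def _metadataonlyexample(repo, rev, newtext):
    origctx = repo[rev]
    # Keep the original parents so the reused manifest passes the sanity
    # check in __init__ above.
    ctx = metadataonlyctx(repo, origctx,
                          parents=[p.node() for p in origctx.parents()],
                          text=newtext)
    return ctx.commit()
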
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """
    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        symlinks = ('l' in self.flags() or 'l' in fctx.flags())
        if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        return ''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, "rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, "w") as f:
            f.write(data)
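
# A minimal sketch, assuming ``repo`` is any localrepository: comparing a
# file at an arbitrary on-disk location against a committed file. The paths
# are hypothetical.
def _arbitraryfilectxexample(repo):
    ondisk = arbitraryfilectx('/tmp/candidate.txt', repo=repo)
    committed = repo['.']['a.txt']
    return ondisk.cmp(committed)  # True if the contents differ
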
@@ -1,2330 +1,2331 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    peer,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

84 class _basefilecache(scmutil.filecache):
84 class _basefilecache(scmutil.filecache):
85 """All filecache usage on repo are done for logic that should be unfiltered
85 """All filecache usage on repo are done for logic that should be unfiltered
86 """
86 """
87 def __get__(self, repo, type=None):
87 def __get__(self, repo, type=None):
88 if repo is None:
88 if repo is None:
89 return self
89 return self
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
90 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
91 def __set__(self, repo, value):
91 def __set__(self, repo, value):
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
92 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
93 def __delete__(self, repo):
93 def __delete__(self, repo):
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
94 return super(_basefilecache, self).__delete__(repo.unfiltered())
95
95
class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

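# A minimal usage sketch: decorating a method turns it into a property whose
# value is recomputed only when the backing file changes on disk. This
# mirrors the real decorator uses further down in this module, e.g.:
#
#     @repofilecache('bookmarks', 'bookmarks.current')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)
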
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

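# Example (a sketch, assuming an existing repo object): peek at a cached
# property without forcing an expensive (re)load from disk:
#
#     bm, cached = isfilecached(repo, '_bookmarks')
#     if cached:
#         ...  # bm is the already-loaded bookmarks store
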
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on an unfiltered repo"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

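# Example (a sketch): a decorated method always sees the unfiltered repo,
# even when it is invoked on a filtered view:
#
#     @unfilteredmethod
#     def destroyed(self):
#         # 'self' here is repo.unfiltered()
#         ...
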
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def iterbatch(self):
        return peer.localiterbatcher(self)

    # End of peer interface.

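# A localpeer is normally obtained through localrepository.peer() (defined
# below) rather than constructed directly. A minimal sketch, assuming an
# existing repo object:
#
#     peer = repo.peer()     # wraps repo.filtered('served')
#     heads = peer.heads()   # wire-command-shaped access to the same repo
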
class locallegacypeer(repository.legacypeer, localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, basenodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=basenodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

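# Example registration from an extension (a sketch; 'exp-myfeature' is a
# hypothetical requirement the extension would teach the repo to open):
#
#     from mercurial import localrepo
#
#     def featuresetup(ui, supported):
#         supported.add('exp-myfeature')
#
#     def uisetup(ui):
#         localrepo.featuresetupfuncs.add(featuresetup)
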
@zi.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

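    # The knobs read above come from the user's configuration. A sketch of
    # the corresponding hgrc entries (the values shown are illustrative
    # only):
    #
    #     [format]
    #     chunkcachesize = 65536
    #     maxchainlen = 1000
    #
    #     [experimental]
    #     sparse-read = yes
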
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

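    # Example (a sketch): 'visible' and 'served' are standard repoview
    # filter names; the unfiltered repo exposes everything, including
    # hidden changesets:
    #
    #     visible = repo.filtered('visible')
    #     everything = repo.unfiltered()
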
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

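    # Indexing examples (a sketch; any identifier accepted by changectx
    # works):
    #
    #     repo[None]     # workingctx for the working directory
    #     repo['tip']    # changectx for the tip revision
    #     repo[0:3]      # list of changectxs; filtered revs are skipped
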
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except (error.RepoLookupError, error.FilteredIndexError,
                error.FilteredLookupError):
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

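    # Example (a sketch): %-formatting escapes user-provided values so they
    # cannot be misparsed as revset syntax:
    #
    #     for rev in repo.revs('branch(%s)', 'default'):
    #         ...  # rev is an integer revision number
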
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

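    # Example (a sketch): resolve user-supplied revsets while letting a
    # local alias take precedence over any user-configured one:
    #
    #     revs = repo.anyrevs(['mine'], user=True,
    #                         localalias={'mine': 'author(alice)'})
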
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

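    # Example (a sketch; 'myhook' is a hypothetical hook name an extension
    # might fire). The keyword arguments are exposed to shell hooks as
    # HG_* environment variables:
    #
    #     repo.hook('myhook', throw=True, node=hex(node))
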
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tag-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?


        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

951 def tagtype(self, tagname):
952 def tagtype(self, tagname):
952 '''
953 '''
953 return the type of the given tag. result can be:
954 return the type of the given tag. result can be:
954
955
955 'local' : a local tag
956 'local' : a local tag
956 'global' : a global tag
957 'global' : a global tag
957 None : tag does not exist
958 None : tag does not exist
958 '''
959 '''
959
960
960 return self._tagscache.tagtypes.get(tagname)
961 return self._tagscache.tagtypes.get(tagname)
961
962
962 def tagslist(self):
963 def tagslist(self):
963 '''return a list of tags ordered by revision'''
964 '''return a list of tags ordered by revision'''
964 if not self._tagscache.tagslist:
965 if not self._tagscache.tagslist:
965 l = []
966 l = []
966 for t, n in self.tags().iteritems():
967 for t, n in self.tags().iteritems():
967 l.append((self.changelog.rev(n), t, n))
968 l.append((self.changelog.rev(n), t, n))
968 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
969 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
969
970
970 return self._tagscache.tagslist
971 return self._tagscache.tagslist
971
972
972 def nodetags(self, node):
973 def nodetags(self, node):
973 '''return the tags associated with a node'''
974 '''return the tags associated with a node'''
974 if not self._tagscache.nodetagscache:
975 if not self._tagscache.nodetagscache:
975 nodetagscache = {}
976 nodetagscache = {}
976 for t, n in self._tagscache.tags.iteritems():
977 for t, n in self._tagscache.tags.iteritems():
977 nodetagscache.setdefault(n, []).append(t)
978 nodetagscache.setdefault(n, []).append(t)
978 for tags in nodetagscache.itervalues():
979 for tags in nodetagscache.itervalues():
979 tags.sort()
980 tags.sort()
980 self._tagscache.nodetagscache = nodetagscache
981 self._tagscache.nodetagscache = nodetagscache
981 return self._tagscache.nodetagscache.get(node, [])
982 return self._tagscache.nodetagscache.get(node, [])
982
983
983 def nodebookmarks(self, node):
984 def nodebookmarks(self, node):
984 """return the list of bookmarks pointing to the specified node"""
985 """return the list of bookmarks pointing to the specified node"""
985 marks = []
986 marks = []
986 for bookmark, n in self._bookmarks.iteritems():
987 for bookmark, n in self._bookmarks.iteritems():
987 if n == node:
988 if n == node:
988 marks.append(bookmark)
989 marks.append(bookmark)
989 return sorted(marks)
990 return sorted(marks)
990
991
991 def branchmap(self):
992 def branchmap(self):
992 '''returns a dictionary {branch: [branchheads]} with branchheads
993 '''returns a dictionary {branch: [branchheads]} with branchheads
993 ordered by increasing revision number'''
994 ordered by increasing revision number'''
994 branchmap.updatecache(self)
995 branchmap.updatecache(self)
995 return self._branchcaches[self.filtername]
996 return self._branchcaches[self.filtername]
996
997
997 @unfilteredmethod
998 @unfilteredmethod
998 def revbranchcache(self):
999 def revbranchcache(self):
999 if not self._revbranchcache:
1000 if not self._revbranchcache:
1000 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1001 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1001 return self._revbranchcache
1002 return self._revbranchcache
1002
1003
1003 def branchtip(self, branch, ignoremissing=False):
1004 def branchtip(self, branch, ignoremissing=False):
1004 '''return the tip node for a given branch
1005 '''return the tip node for a given branch
1005
1006
1006 If ignoremissing is True, then this method will not raise an error.
1007 If ignoremissing is True, then this method will not raise an error.
1007 This is helpful for callers that only expect None for a missing branch
1008 This is helpful for callers that only expect None for a missing branch
1008 (e.g. namespace).
1009 (e.g. namespace).
1009
1010
1010 '''
1011 '''
1011 try:
1012 try:
1012 return self.branchmap().branchtip(branch)
1013 return self.branchmap().branchtip(branch)
1013 except KeyError:
1014 except KeyError:
1014 if not ignoremissing:
1015 if not ignoremissing:
1015 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1016 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1016 else:
1017 else:
1017 pass
1018 pass
1018
1019
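    # Illustrative usage sketch (hypothetical caller code, not part of this
    # class; assumes ``repo`` is a localrepository):
    #
    #   repo.branchtip('default')                   # tip node, or raises
    #                                               # RepoLookupError
    #   repo.branchtip('gone', ignoremissing=True)  # returns None for a
    #                                               # missing branch
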
    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

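    # Illustrative hgrc sketch (an assumption about typical configuration,
    # not text from this file) of the [encode]/[decode] sections that
    # _loadfilter() reads via ui.configitems(); 'pipe:' is the default
    # filter driver:
    #
    #   [encode]
    #   # uncompress gzip files on checkin to improve delta compression
    #   *.gz = pipe: gunzip
    #
    #   [decode]
    #   # recompress gzip files when writing them to the working directory
    #   *.gz = pipe: gzip
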
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since this likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current focus
        # is on the behavior of the feature, so we disable it by default. The
        # flag will be removed when we are happy with the performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file is
        # outdated when running hooks. As fncache is used for streaming clones,
        # this is not expected to break anything that happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

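    # Illustrative usage sketch (hypothetical caller code, not part of this
    # class): per the lock() and wlock() docstrings below, 'wlock' must be
    # acquired before 'lock', and transaction() above requires the store
    # lock to be held:
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('some-operation'):
    #               ...   # modify the store
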
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

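    # Illustrative sketch (hypothetical, not part of this class): reading
    # back the "<oldlen>\n<desc>\n" format written by _writejournal() above,
    # mirroring the parsing that _rollback() performs on 'undo.desc'
    # (which may carry an optional third "detail" line):
    #
    #   args = self.vfs.read('journal.desc').splitlines()
    #   oldlen, desc = int(args[0]), args[1]
    #   detail = args[2] if len(args) >= 3 else None
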
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

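    # Illustrative usage sketch (hypothetical): schedule work to run once
    # every lock has been released, as txnclosehook() in transaction()
    # above does for the 'txnclose' hooks:
    #
    #   def callback():
    #       ...   # runs after the outermost lock is dropped
    #   repo._afterlock(callback)
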
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such an
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (e.g. issue4476). Instead, we
            # will warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

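    # Illustrative sketch (not part of the original module): the copy
    # metadata recorded above for a renamed file.  After `hg copy foo bar`
    # and a commit, the new filelog entry for `bar` carries roughly:
    #
    #   meta = {"copy": "foo",           # copy source path
    #           "copyrev": hex(crev)}    # foo's filenode in manifest1
    #   flog.add(text, meta, tr, linkrev, nullid, newfparent)
    #
    # with the null first parent signalling "look up the copy data".
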
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped already before the
            # hook is released
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

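    # Illustrative sketch (not part of the original module): a minimal
    # caller of commit().  `repo` is assumed to be a localrepository with
    # pending working-directory changes; commit() returns the new
    # changeset node, or None when there is nothing to commit.
    #
    #   node = repo.commit(text="fix encoding bug",
    #                      user="Jane Doe <jane@example.com>")
    #   if node is None:
    #       repo.ui.status("nothing changed\n")
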
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets: if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released.  Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated.  Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback.  That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

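    # Illustrative sketch (not part of the original module): comparing the
    # working directory against '.' and reading the scmutil.status fields
    # (see the status class in scmutil.py below).
    #
    #   st = repo.status(unknown=True)
    #   for f in st.modified:
    #       repo.ui.write("M %s\n" % f)
    #   for f in st.unknown:
    #       repo.ui.write("? %s\n" % f)
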
    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile.  Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

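    # Illustrative sketch (not part of the original module): how an
    # extension might register a post-dirstate-status callback before it
    # triggers a status run, matching the contract documented above.
    #
    #   def fixup(wctx, status):
    #       # runs with the wlock held; consult wctx.repo().dirstate
    #       # rather than a cached dirstate
    #       pass
    #
    #   repo.addpostdsstatus(fixup)
    #   repo.status()  # the callback list is emptied after this run
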
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose hooks are called with a pushop
        (carrying repo, remote and outgoing attributes) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

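    # Illustrative sketch (not part of the original module): pushkey data
    # is organized by namespace; listing the well-known 'bookmarks'
    # namespace yields a mapping of bookmark names to hex nodes.
    #
    #   marks = repo.listkeys('bookmarks')
    #   for name, hexnode in sorted(marks.items()):
    #       repo.ui.write("%s -> %s\n" % (name, hexnode))
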
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
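
# Illustrative sketch (not part of the original module): an extension
# adding its own requirement by wrapping newreporequirements(), as the
# docstring above suggests.  'exp-myextension' is a hypothetical
# requirement name.
#
#   from mercurial import extensions, localrepo
#
#   def _newreporequirements(orig, repo):
#       reqs = orig(repo)
#       reqs.add('exp-myextension')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements',
#                               _newreporequirements)
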
# scmutil.py - Mercurial core utility functions
#
#  Copyright Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import glob
import hashlib
import os
import re
import socket
import subprocess
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
    wdirid,
    wdirrev,
)

from . import (
    encoding,
    error,
    match as matchmod,
    obsolete,
    obsutil,
    pathutil,
    phases,
    pycompat,
    revsetlang,
    similar,
    url,
    util,
    vfs,
)

from .utils import (
    procutil,
    stringutil,
)

if pycompat.iswindows:
    from . import scmwindows as scmplatform
else:
    from . import scmposix as scmplatform

termsize = scmplatform.termsize

class status(tuple):
    '''Named tuple with a list of files per status.  The 'deleted', 'unknown'
    and 'ignored' properties are only relevant to the working copy.
    '''

    __slots__ = ()

    def __new__(cls, modified, added, removed, deleted, unknown, ignored,
                clean):
        return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
                                   ignored, clean))

    @property
    def modified(self):
        '''files that have been modified'''
        return self[0]

    @property
    def added(self):
        '''files that have been added'''
        return self[1]

    @property
    def removed(self):
        '''files that have been removed'''
        return self[2]

    @property
    def deleted(self):
        '''files that are in the dirstate, but have been deleted from the
        working copy (aka "missing")
        '''
        return self[3]

    @property
    def unknown(self):
        '''files not in the dirstate that are not ignored'''
        return self[4]

    @property
    def ignored(self):
        '''files not in the dirstate that are ignored (by _dirignore())'''
        return self[5]

    @property
    def clean(self):
        '''files that have not been modified'''
        return self[6]

    def __repr__(self, *args, **kwargs):
        return (('<status modified=%r, added=%r, removed=%r, deleted=%r, '
                 'unknown=%r, ignored=%r, clean=%r>') % self)

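# Illustrative sketch (not part of the original module): status is both a
# tuple and a named record, so it can be unpacked positionally or read by
# attribute.
#
#   st = status(['a.txt'], [], [], [], [], [], [])
#   modified, added = st[0], st[1]    # tuple access
#   assert st.modified == modified    # attribute access, same data
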
def itersubrepos(ctx1, ctx2):
    """find subrepos in ctx1 or ctx2"""
    # Create a (subpath, ctx) mapping where we prefer subpaths from
    # ctx1. The subpaths from ctx2 are important when the .hgsub file
    # has been modified (in ctx2) but not yet committed (in ctx1).
    subpaths = dict.fromkeys(ctx2.substate, ctx2)
    subpaths.update(dict.fromkeys(ctx1.substate, ctx1))

    missing = set()

    for subpath in ctx2.substate:
        if subpath not in ctx1.substate:
            del subpaths[subpath]
            missing.add(subpath)

    for subpath, ctx in sorted(subpaths.iteritems()):
        yield subpath, ctx.sub(subpath)

    # Yield an empty subrepo based on ctx1 for anything only in ctx2.  That
    # way, status and diff will have an accurate result when it does
    # 'sub.{status|diff}(rev2)'.  Otherwise, the ctx2 subrepo is compared
    # against itself.
    for subpath in missing:
        yield subpath, ctx2.nullsub(subpath, ctx1)

def nochangesfound(ui, repo, excluded=None):
    '''Report no changes for push/pull; excluded is None or a list of
    nodes excluded from the push/pull.
    '''
    secretlist = []
    if excluded:
        for n in excluded:
            ctx = repo[n]
            if ctx.phase() >= phases.secret and not ctx.extinct():
                secretlist.append(n)

    if secretlist:
        ui.status(_("no changes found (ignored %d secret changesets)\n")
                  % len(secretlist))
    else:
        ui.status(_("no changes found\n"))

def callcatch(ui, func):
    """call func() with global exception handling

    return func() if no exception happens. otherwise do some error handling
    and return an exit code accordingly. does not handle all exceptions.
    """
    try:
        try:
            return func()
        except: # re-raises
            ui.traceback()
            raise
    # Global exception handling, alphabetically
    # Mercurial-specific first, followed by built-in and library exceptions
    except error.LockHeld as inst:
        if inst.errno == errno.ETIMEDOUT:
            reason = _('timed out waiting for lock held by %r') % inst.locker
        else:
            reason = _('lock held by %r') % inst.locker
        ui.warn(_("abort: %s: %s\n")
                % (inst.desc or stringutil.forcebytestr(inst.filename),
                   reason))
        if not inst.locker:
            ui.warn(_("(lock might be very busy)\n"))
    except error.LockUnavailable as inst:
        ui.warn(_("abort: could not lock %s: %s\n") %
                (inst.desc or stringutil.forcebytestr(inst.filename),
                 encoding.strtolocal(inst.strerror)))
    except error.OutOfBandError as inst:
        if inst.args:
            msg = _("abort: remote error:\n")
        else:
            msg = _("abort: remote error\n")
        ui.warn(msg)
        if inst.args:
            ui.warn(''.join(inst.args))
        if inst.hint:
            ui.warn('(%s)\n' % inst.hint)
    except error.RepoError as inst:
        ui.warn(_("abort: %s!\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except error.ResponseError as inst:
        ui.warn(_("abort: %s") % inst.args[0])
        msg = inst.args[1]
        if isinstance(msg, type(u'')):
            msg = pycompat.sysbytes(msg)
        if not isinstance(msg, bytes):
            ui.warn(" %r\n" % (msg,))
        elif not msg:
            ui.warn(_(" empty string\n"))
        else:
            ui.warn("\n%r\n" % stringutil.ellipsis(msg))
    except error.CensoredNodeError as inst:
        ui.warn(_("abort: file censored %s!\n") % inst)
    except error.RevlogError as inst:
        ui.warn(_("abort: %s!\n") % inst)
    except error.InterventionRequired as inst:
        ui.warn("%s\n" % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
        return 1
    except error.WdirUnsupported:
        ui.warn(_("abort: working directory revision cannot be specified\n"))
    except error.Abort as inst:
        ui.warn(_("abort: %s\n") % inst)
        if inst.hint:
            ui.warn(_("(%s)\n") % inst.hint)
    except ImportError as inst:
        ui.warn(_("abort: %s!\n") % stringutil.forcebytestr(inst))
        m = stringutil.forcebytestr(inst).split()[-1]
        if m in "mpatch bdiff".split():
            ui.warn(_("(did you forget to compile extensions?)\n"))
        elif m in "zlib".split():
            ui.warn(_("(is your Python install correct?)\n"))
    except IOError as inst:
        if util.safehasattr(inst, "code"):
            ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
        elif util.safehasattr(inst, "reason"):
            try: # usually it is in the form (errno, strerror)
                reason = inst.reason.args[1]
            except (AttributeError, IndexError):
                # it might be anything, for example a string
                reason = inst.reason
            if isinstance(reason, unicode):
                # SSLError of Python 2.7.9 contains a unicode
                reason = encoding.unitolocal(reason)
            ui.warn(_("abort: error: %s\n") % reason)
        elif (util.safehasattr(inst, "args")
              and inst.args and inst.args[0] == errno.EPIPE):
            pass
        elif getattr(inst, "strerror", None):
            if getattr(inst, "filename", None):
                ui.warn(_("abort: %s: %s\n") % (
                    encoding.strtolocal(inst.strerror),
                    stringutil.forcebytestr(inst.filename)))
            else:
                ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
        else:
            raise
    except OSError as inst:
        if getattr(inst, "filename", None) is not None:
            ui.warn(_("abort: %s: '%s'\n") % (
                encoding.strtolocal(inst.strerror),
                stringutil.forcebytestr(inst.filename)))
        else:
            ui.warn(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
    except MemoryError:
        ui.warn(_("abort: out of memory\n"))
    except SystemExit as inst:
        # Commands shouldn't sys.exit directly, but give a return code.
        # Just in case, catch this and pass the exit code to the caller.
        return inst.code
    except socket.error as inst:
        ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst.args[-1]))

    return -1

268 def checknewlabel(repo, lbl, kind):
268 def checknewlabel(repo, lbl, kind):
269 # Do not use the "kind" parameter in ui output.
269 # Do not use the "kind" parameter in ui output.
270 # It makes strings difficult to translate.
270 # It makes strings difficult to translate.
271 if lbl in ['tip', '.', 'null']:
271 if lbl in ['tip', '.', 'null']:
272 raise error.Abort(_("the name '%s' is reserved") % lbl)
272 raise error.Abort(_("the name '%s' is reserved") % lbl)
273 for c in (':', '\0', '\n', '\r'):
273 for c in (':', '\0', '\n', '\r'):
274 if c in lbl:
274 if c in lbl:
275 raise error.Abort(
275 raise error.Abort(
276 _("%r cannot be used in a name") % pycompat.bytestr(c))
276 _("%r cannot be used in a name") % pycompat.bytestr(c))
277 try:
277 try:
278 int(lbl)
278 int(lbl)
279 raise error.Abort(_("cannot use an integer as a name"))
279 raise error.Abort(_("cannot use an integer as a name"))
280 except ValueError:
280 except ValueError:
281 pass
281 pass
282 if lbl.strip() != lbl:
282 if lbl.strip() != lbl:
283 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
283 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)

def checkfilename(f):
    '''Check that the filename f is an acceptable filename for a tracked file'''
    if '\r' in f or '\n' in f:
        raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") % f)

def checkportable(ui, f):
    '''Check if filename f is portable and warn or abort depending on config'''
    checkfilename(f)
    abort, warn = checkportabilityalert(ui)
    if abort or warn:
        msg = util.checkwinfilename(f)
        if msg:
            msg = "%s: %s" % (msg, procutil.shellquote(f))
            if abort:
                raise error.Abort(msg)
            ui.warn(_("warning: %s\n") % msg)

def checkportabilityalert(ui):
    '''check if the user's config requests nothing, a warning, or abort for
    non-portable filenames'''
    val = ui.config('ui', 'portablefilenames')
    lval = val.lower()
    bval = stringutil.parsebool(val)
    abort = pycompat.iswindows or lval == 'abort'
    warn = bval or lval == 'warn'
    if bval is None and not (warn or abort or lval == 'ignore'):
        raise error.ConfigError(
            _("ui.portablefilenames value is invalid ('%s')") % val)
    return abort, warn
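
# Configuration sketch (editorial note, not part of the original file):
# the values accepted by the option parsed above are booleans plus two
# keywords, e.g. in hgrc:
#
#   [ui]
#   portablefilenames = warn    # or: abort, ignore, true, false
#
# On Windows the check aborts regardless of this setting (abort is
# forced by pycompat.iswindows above).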

class casecollisionauditor(object):
    def __init__(self, ui, abort, dirstate):
        self._ui = ui
        self._abort = abort
        allfiles = '\0'.join(dirstate._map)
        self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
        self._dirstate = dirstate
        # The purpose of _newfiles is so that we don't complain about
        # case collisions if someone were to call this object with the
        # same filename twice.
        self._newfiles = set()

    def __call__(self, f):
        if f in self._newfiles:
            return
        fl = encoding.lower(f)
        if fl in self._loweredfiles and f not in self._dirstate:
            msg = _('possible case-folding collision for %s') % f
            if self._abort:
                raise error.Abort(msg)
            self._ui.warn(_("warning: %s\n") % msg)
        self._loweredfiles.add(fl)
        self._newfiles.add(f)
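
# Usage sketch (editorial illustration): one auditor is typically created
# per operation and called once per file being added; repeated calls with
# the same name are deliberately ignored:
#
#   audit = casecollisionauditor(ui, abort=False, dirstate=repo.dirstate)
#   audit('README')   # warns if e.g. 'readme' is already tracked
#   audit('README')   # no-op, already seen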

def filteredhash(repo, maxrev):
    """build hash of filtered revisions in the current repoview.

    Multiple caches perform up-to-date validation by checking that the
    tiprev and tipnode stored in the cache file match the current repository.
    However, this is not sufficient for validating repoviews because the set
    of revisions in the view may change without the repository tiprev and
    tipnode changing.

    This function hashes all the revs filtered from the view and returns
    that SHA-1 digest.
    """
    cl = repo.changelog
    if not cl.filteredrevs:
        return None
    key = None
    revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
    if revs:
        s = hashlib.sha1()
        for rev in revs:
            s.update('%d;' % rev)
        key = s.digest()
    return key
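
# Editorial sketch of the key computed above: with filtered revisions
# 2, 5 and 7 at or below maxrev, the result is
#
#   hashlib.sha1('2;5;7;').digest()
#
# i.e. the ';'-terminated decimal revs, concatenated in sorted order.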

def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
    '''yield every hg repository under path, always recursively.
    The recurse flag will only control recursion into repo working dirs'''
    def errhandler(err):
        if err.filename == path:
            raise err
    samestat = getattr(os.path, 'samestat', None)
    if followsym and samestat is not None:
        def adddir(dirlst, dirname):
            dirstat = os.stat(dirname)
            match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
            if not match:
                dirlst.append(dirstat)
            return not match
    else:
        followsym = False

    if (seen_dirs is None) and followsym:
        seen_dirs = []
        adddir(seen_dirs, path)
    for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
        dirs.sort()
        if '.hg' in dirs:
            yield root # found a repository
            qroot = os.path.join(root, '.hg', 'patches')
            if os.path.isdir(os.path.join(qroot, '.hg')):
                yield qroot # we have a patch queue repo here
            if recurse:
                # avoid recursing inside the .hg directory
                dirs.remove('.hg')
            else:
                dirs[:] = [] # don't descend further
        elif followsym:
            newdirs = []
            for d in dirs:
                fname = os.path.join(root, d)
                if adddir(seen_dirs, fname):
                    if os.path.islink(fname):
                        for hgname in walkrepos(fname, True, seen_dirs):
                            yield hgname
                    else:
                        newdirs.append(d)
            dirs[:] = newdirs

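# Usage sketch (editorial illustration): enumerating every repository,
# including mq patch-queue repos, under a directory tree:
#
#   for repopath in walkrepos('/srv/repos', followsym=True):
#       ui.write("%s\n" % repopath)
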
def binnode(ctx):
    """Return binary node id for a given basectx"""
    node = ctx.node()
    if node is None:
        return wdirid
    return node

def intrev(ctx):
    """Return integer for a given basectx that can be used in comparison or
    arithmetic operation"""
    rev = ctx.rev()
    if rev is None:
        return wdirrev
    return rev

def formatchangeid(ctx):
    """Format changectx as '{rev}:{node|formatnode}', which is the default
    template provided by logcmdutil.changesettemplater"""
    repo = ctx.repo()
    return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))

def formatrevnode(ui, rev, node):
    """Format given revision and node depending on the current verbosity"""
    if ui.debugflag:
        hexfunc = hex
    else:
        hexfunc = short
    return '%d:%s' % (rev, hexfunc(node))

def isrevsymbol(repo, symbol):
    try:
        revsymbol(repo, symbol)
        return True
    except error.RepoLookupError:
        return False

def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        return repo[symbol]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if repo.filtername.startswith('visible'):

        # Check if the changeset is obsolete
        unfilteredrepo = repo.unfiltered()
        ctx = revsymbol(unfilteredrepo, changeid)

        # If the changeset is obsolete, enrich the message with the reason
        # that made this changeset not visible
        if ctx.obsolete():
            msg = obsutil._getfilteredreason(repo, changeid, ctx)
        else:
            msg = _("hidden revision '%s'") % changeid

        hint = _('use --hidden to access hidden revisions')

        return error.FilteredRepoLookupError(msg, hint=hint)
    msg = _("filtered revision '%s' (not in '%s' subset)")
    msg %= (changeid, repo.filtername)
    return error.FilteredRepoLookupError(msg)

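# Editorial sketch of the behaviour this change introduces: callers no
# longer need to translate the Filtered* exceptions themselves, since
# revsymbol() now raises a user-facing lookup error directly. Note that
# FilteredRepoLookupError derives from RepoLookupError, so catch it first:
#
#   try:
#       ctx = revsymbol(repo, 'deadbeef')
#   except error.FilteredRepoLookupError as inst:
#       ui.warn(_("abort: %s\n") % stringutil.forcebytestr(inst))
#   except error.RepoLookupError:
#       ui.warn(_("abort: unknown revision\n"))
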
def revsingle(repo, revspec, default='.', localalias=None):
    if not revspec and revspec != 0:
        return repo[default]

    l = revrange(repo, [revspec], localalias=localalias)
    if not l:
        raise error.Abort(_('empty revision set'))
    return repo[l.last()]

def _pairspec(revspec):
    tree = revsetlang.parse(revspec)
    return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpairnodes(repo, revs):
    repo.ui.deprecwarn("revpairnodes is deprecated, please use revpair", "4.6")
    ctx1, ctx2 = revpair(repo, revs)
    return ctx1.node(), ctx2.node()

def revpair(repo, revs):
    if not revs:
        return repo['.'], repo[None]

    l = revrange(repo, revs)

    if not l:
        first = second = None
    elif l.isascending():
        first = l.min()
        second = l.max()
    elif l.isdescending():
        first = l.max()
        second = l.min()
    else:
        first = l.first()
        second = l.last()

    if first is None:
        raise error.Abort(_('empty revision range'))
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

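# Usage sketch (editorial illustration): revpair() backs two-revision
# commands such as 'hg diff -r A -r B'; with no revs it compares '.'
# against the working directory (repo[None]):
#
#   ctx1, ctx2 = revpair(repo, [])            # '.' vs. working directory
#   ctx1, ctx2 = revpair(repo, ['1.0:tip'])   # endpoints of a range
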
def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    allspecs = []
    for spec in specs:
        if isinstance(spec, int):
            spec = revsetlang.formatspec('rev(%d)', spec)
        allspecs.append(spec)
    return repo.anyrevs(allspecs, user=True, localalias=localalias)
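
# Editorial sketch of the formatspec() pattern recommended in the
# docstring above, safely quoting a user-supplied bookmark name:
#
#   spec = revsetlang.formatspec('ancestors(%s)', bookmarkname)
#   for rev in revrange(repo, [spec, 'not public()']):
#       ui.write("%d\n" % rev)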

def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        return parents
    if repo.ui.debugflag:
        return [parents[0], repo['null']]
    if parents[0].rev() >= intrev(ctx) - 1:
        return []
    return parents

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume it has already been done by sh.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is None:
            try:
                globbed = glob.glob(pat)
            except re.error:
                globbed = [pat]
            if globbed:
                ret.extend(globbed)
                continue
        ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if pats == ("",):
        pats = []
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    def bad(f, msg):
        ctx.repo().ui.warn("%s: %s\n" % (m.rel(f), msg))

    if badfn is None:
        badfn = bad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0]

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    return matchmod.always(repo.root, repo.getcwd())

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    return matchmod.exact(repo.root, repo.getcwd(), files, badfn=badfn)

def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.
    """
    if not matchmod.patkind(pat):
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)
    else:
        ctx = repo[rev]
        m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
        files = [f for f in ctx if m(f)]
        if len(files) != 1:
            raise error.ParseError(msg)
        return files[0]

def origpath(ui, repo, filepath):
    '''customize where .orig files are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified
    '''
    origbackuppath = ui.config('ui', 'origbackuppath')
    if not origbackuppath:
        return filepath + ".orig"

    # Convert filepath from an absolute path into a path inside the repo.
    filepathfromroot = util.normpath(os.path.relpath(filepath,
                                                     start=repo.root))

    origvfs = vfs.vfs(repo.wjoin(origbackuppath))
    origbackupdir = origvfs.dirname(filepathfromroot)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepathfromroot))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepathfromroot) and not origvfs.islink(filepathfromroot):
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepathfromroot))
        origvfs.rmtree(filepathfromroot, forcibly=True)

    return origvfs.join(filepathfromroot)
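
# Configuration sketch (editorial note): redirecting backup files out of
# the working copy, e.g. in hgrc:
#
#   [ui]
#   origbackuppath = .hg/origbackups
#
# With that setting, origpath() maps 'dir/file.txt' to a path under the
# backup directory instead of 'dir/file.txt.orig'.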

class _containsnode(object):
    """proxy __contains__(node) to container.__contains__ which accepts revs"""

    def __init__(self, repo, revcontainer):
        self._torev = repo.changelog.rev
        self._revcontains = revcontainer.__contains__

    def __contains__(self, node):
        return self._revcontains(self._torev(node))

def cleanupnodes(repo, replacements, operation, moves=None, metadata=None):
    """do common cleanups when old nodes are replaced by new nodes

    That includes writing obsmarkers or stripping nodes, and moving bookmarks.
    (we might also want to move working directory parent in the future)

    By default, bookmark moves are calculated automatically from 'replacements',
    but 'moves' can be used to override that. Also, 'moves' may include
    additional bookmark moves that should not have associated obsmarkers.

    replacements is {oldnode: [newnode]} or an iterable of nodes if they do not
    have replacements. operation is a string, like "rebase".

    metadata is a dictionary containing metadata to be stored in obsmarkers if
    obsolescence is enabled.
    """
    if not replacements and not moves:
        return

    # translate mapping's other forms
    if not util.safehasattr(replacements, 'items'):
        replacements = {n: () for n in replacements}

    # Calculate bookmark movements
    if moves is None:
        moves = {}
    # Unfiltered repo is needed since nodes in replacements might be hidden.
    unfi = repo.unfiltered()
    for oldnode, newnodes in replacements.items():
        if oldnode in moves:
            continue
        if len(newnodes) > 1:
            # usually a split, take the one with biggest rev number
            newnode = next(unfi.set('max(%ln)', newnodes)).node()
        elif len(newnodes) == 0:
            # move bookmark backwards
            roots = list(unfi.set('max((::%n) - %ln)', oldnode,
                                  list(replacements)))
            if roots:
                newnode = roots[0].node()
            else:
                newnode = nullid
        else:
            newnode = newnodes[0]
        moves[oldnode] = newnode

    with repo.transaction('cleanup') as tr:
        # Move bookmarks
        bmarks = repo._bookmarks
        bmarkchanges = []
        allnewnodes = [n for ns in replacements.values() for n in ns]
        for oldnode, newnode in moves.items():
            oldbmarks = repo.nodebookmarks(oldnode)
            if not oldbmarks:
                continue
            from . import bookmarks # avoid import cycle
            repo.ui.debug('moving bookmarks %r from %s to %s\n' %
                          (util.rapply(pycompat.maybebytestr, oldbmarks),
                           hex(oldnode), hex(newnode)))
            # Delete divergent bookmarks being parents of related newnodes
            deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
                                   allnewnodes, newnode, oldnode)
            deletenodes = _containsnode(repo, deleterevs)
            for name in oldbmarks:
                bmarkchanges.append((name, newnode))
                for b in bookmarks.divergent2delete(repo, deletenodes, name):
                    bmarkchanges.append((b, None))

        if bmarkchanges:
            bmarks.applychanges(repo, tr, bmarkchanges)

        # Obsolete or strip nodes
        if obsolete.isenabled(repo, obsolete.createmarkersopt):
            # If a node is already obsoleted, and we want to obsolete it
            # without a successor, skip that obsolete request since it's
            # unnecessary. That's the "if s or not isobs(n)" check below.
            # Also sort the nodes in topology order, that might be useful for
            # some obsstore logic.
            # NOTE: the filtering and sorting might belong to createmarkers.
            isobs = unfi.obsstore.successors.__contains__
            torev = unfi.changelog.rev
            sortfunc = lambda ns: torev(ns[0])
            rels = [(unfi[n], tuple(unfi[m] for m in s))
                    for n, s in sorted(replacements.items(), key=sortfunc)
                    if s or not isobs(n)]
            if rels:
                obsolete.createmarkers(repo, rels, operation=operation,
                                       metadata=metadata)
        else:
            from . import repair # avoid import cycle
            tostrip = list(replacements)
            if tostrip:
                repair.delayedstrip(repo.ui, repo, tostrip, operation)
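
# Usage sketch (editorial illustration): a history-rewriting command that
# rewrote old node 'a' into new node 'b' and dropped node 'c' entirely
# would finish with (node values hypothetical):
#
#   cleanupnodes(repo, {a: (b,), c: ()}, 'myrewrite')
#
# which moves bookmarks off the old nodes and then either obsoletes or
# strips them, depending on whether obsmarker creation is enabled.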

def addremove(repo, matcher, prefix, opts=None):
    if opts is None:
        opts = {}
    m = matcher
    dry_run = opts.get('dry_run')
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    ret = 0
    join = lambda f: os.path.join(prefix, f)

    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()):
            sub = wctx.sub(subpath)
            try:
                if sub.addremove(submatch, prefix, opts):
                    ret = 1
            except error.LookupError:
                repo.ui.status(_("skipping missing subrepository: %s\n")
                               % join(subpath))

    rejected = []
    def badfn(f, msg):
        if f in m.files():
            m.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(m, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(repo,
                                                                    badmatch)

    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for abs in sorted(toprint):
        if repo.ui.verbose or not m.exact(abs):
            if abs in unknownset:
                status = _('adding %s\n') % m.uipath(abs)
            else:
                status = _('removing %s\n') % m.uipath(abs)
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return ret

def marktouched(repo, files, similarity=0.0):
    '''Assert that files have somehow been operated upon. files are relative to
    the repo root.'''
    m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x))
    rejected = []

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        unknownset = set(unknown + forgotten)
        toprint = unknownset.copy()
        toprint.update(deleted)
        for abs in sorted(toprint):
            if abs in unknownset:
                status = _('adding %s\n') % abs
            else:
                status = _('removing %s\n') % abs
            repo.ui.status(status)

    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in m.files():
            return 1
    return 0

def _interestingfiles(repo, matcher):
    '''Walk dirstate with matcher, looking for files that addremove would care
    about.

    This is different from dirstate.status because it doesn't care about
    whether files are modified or clean.'''
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit_path = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                                unknown=True, ignored=False, full=False)
    for abs, st in walkresults.iteritems():
        dstate = dirstate[abs]
        if dstate == '?' and audit_path.check(abs):
            unknown.append(abs)
        elif dstate != 'r' and not st:
            deleted.append(abs)
        elif dstate == 'r' and st:
            forgotten.append(abs)
        # for finding renames
        elif dstate == 'r' and not st:
            removed.append(abs)
        elif dstate == 'a':
            added.append(abs)

    return added, unknown, deleted, removed, forgotten

def _findrenames(repo, matcher, added, removed, similarity):
    '''Find renames from removed files to added ones.'''
    renames = {}
    if similarity > 0:
        for old, new, score in similar.findrenames(repo, added, removed,
                                                   similarity):
            if (repo.ui.verbose or not matcher.exact(old)
                or not matcher.exact(new)):
                repo.ui.status(_('recording removal of %s as rename to %s '
                                 '(%d%% similar)\n') %
                               (matcher.rel(old), matcher.rel(new),
                                score * 100))
            renames[new] = old
    return renames

def _markchanges(repo, unknown, deleted, renames):
    '''Marks the files in unknown as added, the files in deleted as removed,
    and the files in renames as copied.'''
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for new, old in renames.iteritems():
            wctx.copy(old, new)

def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Update the dirstate to reflect the intent of copying src to dst. For
    different reasons it might not end with dst being marked as copied from src.
    """
    origsrc = repo.dirstate.copied(src) or src
    if dst == origsrc: # copying back a copy?
        if repo.dirstate[dst] not in 'mn' and not dryrun:
            repo.dirstate.normallookup(dst)
    else:
        if repo.dirstate[origsrc] == 'a' and origsrc == src:
            if not ui.quiet:
                ui.warn(_("%s has not been committed yet, so no copy "
                          "data will be stored for %s.\n")
                        % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
            if repo.dirstate[dst] in '?r' and not dryrun:
                wctx.add([dst])
        elif not dryrun:
            wctx.copy(origsrc, dst)

def readrequires(opener, supported):
    '''Reads and parses .hg/requires and checks if all entries found
    are in the list of supported features.'''
    requirements = set(opener.read("requires").splitlines())
    missings = []
    for r in requirements:
        if r not in supported:
            if not r or not r[0:1].isalnum():
                raise error.RequirementError(_(".hg/requires file is corrupt"))
            missings.append(r)
    missings.sort()
    if missings:
        raise error.RequirementError(
            _("repository requires features unknown to this Mercurial: %s")
            % " ".join(missings),
            hint=_("see https://mercurial-scm.org/wiki/MissingRequirement"
                   " for more information"))
    return requirements

def writerequires(opener, requirements):
    with opener('requires', 'w') as fp:
        for r in sorted(requirements):
            fp.write("%s\n" % r)
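
# Editorial sketch of the file format handled above: .hg/requires is a
# plain list of feature names, one per line, e.g.
#
#   dotencode
#   fncache
#   generaldelta
#   revlogv1
#   store
#
# Any entry not in 'supported' makes readrequires() raise RequirementError.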

class filecachesubentry(object):
    def __init__(self, path, stat):
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)

            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        if self._cacheable is not None:
            return self._cacheable

        # we don't know yet, assume it is for now
        return True

    def changed(self):
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

            # check again
            if not self._cacheable:
                return True

        if self.cachestat != newstat:
            self.cachestat = newstat
            return True
        else:
            return False

    @staticmethod
    def stat(path):
        try:
            return util.cachestat(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

class filecacheentry(object):
    def __init__(self, paths, stat=True):
        self._entries = []
        for path in paths:
            self._entries.append(filecachesubentry(path, stat))

    def changed(self):
        '''true if any entry has changed'''
        for entry in self._entries:
            if entry.changed():
                return True
        return False

    def refresh(self):
        for entry in self._entries:
            entry.refresh()

class filecache(object):
    '''A property-like decorator that tracks files under .hg/ for updates.

    Records stat info when called in _filecache.

    On subsequent calls, compares old stat info with new info, and recreates the
    object when any of the files changes, updating the new stat info in
    _filecache.

    Mercurial uses either atomic renames or appends for files under .hg,
    so to ensure the cache is reliable we need the filesystem to be able
    to tell us if a file has been replaced. If it can't, we fall back to
    recreating the object on every call (essentially the same behavior as
    propertycache).

    '''
    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of this
        function to call the appropriate join function on 'obj' (an instance
        of the class whose member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.name = func.__name__.encode('ascii')
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self
        # do we need to check if the file changed?
        if self.name in obj.__dict__:
            assert self.name in obj._filecache, self.name
            return obj.__dict__[self.name]

        entry = obj._filecache.get(self.name)

        if entry:
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie if
            # a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        obj.__dict__[self.name] = entry.obj
        return entry.obj

    def __set__(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.name] = value # update copy returned by obj.x

    def __delete__(self, obj):
        try:
            del obj.__dict__[self.name]
        except KeyError:
            raise AttributeError(self.name)

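# Editorial sketch of the subclassing pattern join() asks for; the
# 'repofilecache' name mirrors what localrepo.py defines:
#
#   class repofilecache(filecache):
#       def join(self, obj, fname):
#           return obj.vfs.join(fname)
#
#   class localrepository(object):
#       @repofilecache('bookmarks')
#       def _bookmarks(self):
#           return bookmarks.bmstore(self)
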
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE, cwd=repo.root)
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for l in src:
            if " " in l:
                k, v = l.strip().split(" ", 1)
            else:
                k, v = l.strip(), ""

            k = encoding.tolocal(k)
            try:
                data[revsingle(repo, k).rev()] = encoding.tolocal(v)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            proc.communicate()
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)[0]))

    return data
1150
1179
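# An illustrative example, not part of this module: given a hypothetical
# hgrc section such as
#
#     [extdata]
#     bugrefs = shell:cat extdata.txt
#
# where each line of extdata.txt reads '<revision specifier> <free text>',
# the source can be queried as:
#
#     data = extdatasource(repo, 'bugrefs')
#     # e.g. {0: 'fixed upstream', 5: 'needs backport'}
#
# Records naming revisions unknown to the local repository are silently
# skipped, per the LookupError/RepoLookupError handling above.
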
def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
    if lock is None:
        raise error.LockInheritanceContractViolation(
            'lock can only be inherited while held')
    if environ is None:
        environ = {}
    with lock.inherit() as locker:
        environ[envvar] = locker
        return repo.ui.system(cmd, environ=environ, *args, **kwargs)

def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args,
                    **kwargs)

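# An illustrative sketch, not part of this module, assuming the held wlock
# supports inheritance: a caller holding the wlock can delegate to a child
# hg process without deadlocking, because the child rejoins the lock via
# the HG_WLOCK_LOCKER environment variable. The command is hypothetical.
#
#     with repo.wlock():
#         rc = wlocksub(repo, 'hg update tip')
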
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    return (ui.configbool('format', 'generaldelta')
            or ui.configbool('format', 'usegeneraldelta'))

def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')

class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of the file
        should be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        d = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            d[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines, which only contain '\n' and therefore are not skipped
            # by 'if line'
            updatedict = dict(line[:-1].split('=', 1) for line in lines
                              if line.strip())
            if self.firstlinekey in updatedict:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            d.update(updatedict)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return d

    def write(self, data, firstline=None):
        """Write a key=>value mapping to a file

        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to the file before
        everything else, as is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for k, v in data.items():
            if k == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not k[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not k.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in v:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (k, v))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))

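# An illustrative round trip, not part of this module; 'somevfs' is a
# hypothetical vfs instance rooted wherever the state file should live.
#
#     f = simplekeyvaluefile(somevfs, 'mystate')
#     f.write({'version': '1', 'user': 'alice'}, firstline='v1')
#     f.read(firstlinenonkeyval=True)
#     # -> {'__firstline': 'v1', 'version': '1', 'user': 'alice'}
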
_reportobsoletedsource = [
    'debugobsolete',
    'pull',
    'push',
    'serve',
    'unbundle',
]

_reportnewcssource = [
    'pull',
    'unbundle',
]

# a list of (repo, ctx, files) functions called by various commands to allow
# extensions to ensure the corresponding files are available locally, before
# the command uses them.
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True

def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used, leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                if delta > 0:
                    repo.ui.warn(_('%i new %s changesets\n') %
                                 (delta, instability))

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            newrevs = tr.changes.get('revs', xrange(0, 0))
            if not newrevs:
                return

            # Compute the bounds of new revisions' range, excluding obsoletes.
            unfi = repo.unfiltered()
            revs = unfi.revs('%ld and not obsolete()', newrevs)
            if not revs:
                # Got only obsoletes.
                return
            minrev, maxrev = repo[revs.min()], repo[revs.max()]

            if minrev == maxrev:
                revrange = minrev
            else:
                revrange = '%s:%s' % (minrev, maxrev)
            repo.ui.status(_('new changesets %s\n') % revrange)

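# An illustrative sketch, not part of this module: the callback is expected
# to be registered right after a transaction is opened, with a transaction
# name matching one of the sources above (e.g. 'unbundle'), so the summary
# is printed when the transaction closes.
#
#     with repo.transaction('unbundle') as tr:
#         registersummarycallback(repo, tr, txnname='unbundle')
#         # ... apply incoming changesets ...
#     # on close, the callbacks may report e.g.
#     #     new changesets 3a2b1c0d:5e6f7a8b
#     #     obsoleted 2 changesets
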
def nodesummaries(repo, nodes, maxnumnodes=4):
    if len(nodes) <= maxnumnodes or repo.ui.verbose:
        return ' '.join(short(h) for h in nodes)
    first = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (first, len(nodes) - maxnumnodes)

def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    if desc in ('strip', 'repair'):
        # skip the logic during strip
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to the affected
    # branches only
    for name, heads in visible.branchmap().iteritems():
        if len(heads) > 1:
            msg = _('rejecting multiple heads on branch "%s"')
            msg %= name
            hint = _('%d heads: %s')
            hint %= (len(heads), nodesummaries(repo, heads))
            raise error.Abort(msg, hint=hint)

def wrapconvertsink(sink):
    """Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.
    """
    return sink

def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision
    number is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    if not repo.filtername or not repo.ui.configbool('experimental',
                                                     'directaccess'):
        return repo

    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    symbols = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError: # will be reported by scmutil.revrange()
            continue

        symbols.update(revsetlang.gethashlikesymbols(tree))

    if not symbols:
        return repo

    revs = _getrevsfromsymbols(repo, symbols)

    if not revs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate the branch/tags caches
    # until we can disable these caches when revisions are dynamically
    # pinned.
    return repo.filtered('visible-hidden', revs)

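# An illustrative example, not part of this module: with the experimental
# config
#
#     [experimental]
#     directaccess = True
#
# a command can pin hidden changesets named by hash in user-supplied revset
# specs before resolving them (the spec below is hypothetical):
#
#     repo = unhidehashlikerevs(repo, ['1a2b3c::'], 'warn')
#     # hidden changesets matching '1a2b3c' are now visible in the returned
#     # 'visible-hidden' filtered repo, with a warning printed
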
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and return a set of revision numbers of
    hidden changesets present in symbols"""
    revs = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    pmatch = unficl._partialmatch
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for s in symbols:
        try:
            n = int(s)
            if n <= tiprev:
                if not allowrevnums:
                    continue
                else:
                    if n not in cl:
                        revs.add(n)
                    continue
        except ValueError:
            pass

        try:
            s = pmatch(s)
        except (error.LookupError, error.WdirUnsupported):
            s = None

        if s is not None:
            rev = unficl.rev(s)
            if rev not in cl:
                revs.add(rev)

    return revs
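
# An illustrative note, not part of this module: given symbols collected
# from a user revset (values hypothetical),
#
#     _getrevsfromsymbols(repo, {'3a2b1c', '42'})
#
# returns the revision numbers of the entries that resolve in the unfiltered
# repository but are hidden in the current view; plain revision numbers like
# '42' are only considered when experimental.directaccess.revnums is set.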