repo: move unfiltered-repo optimization to workingctx...
Martin von Zweigbergk, changeset r39995:43d3b09b (branch: default)

# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirid,
)
from . import (
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache
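
# A minimal sketch (not part of the original file) of what util.propertycache
# provides: the decorated method runs once per instance and its result is
# stored in the instance __dict__ under the same name, so later lookups skip
# the function entirely. The class below is hypothetical, for illustration:
#
#   class demo(object):
#       @propertycache
#       def expensive(self):
#           print('computed once')
#           return 42
#
#   d = demo()
#   d.expensive   # prints 'computed once', returns 42
#   d.expensive   # cached; 'expensive' now lives in d.__dict__, which is
#                 # why code below tests r'_manifest' in self.__dict__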

class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override
        the match operator.
        """
        return match

    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)

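    # Illustrative sketch (not part of the original file): the heart of
    # _buildstatus is manifest.diff(), which maps each differing path to a
    # pair of (node, flags) tuples, one per side. Hypothetical shapes:
    #
    #   d = mf1.diff(mf2, clean=False)
    #   # {'a.txt': ((node1, ''), (node2, ''))}   -> modified
    #   # {'new.txt': ((None, ''), (node2, ''))}  -> added
    #   # {'old.txt': ((node1, ''), (None, ''))}  -> removed
    #
    # The wdirfilenodeids check exists because working-directory files carry
    # sentinel nodeids, so their contents must be compared explicitly.
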
    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        return phases.phasenames[self.phase()]
    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but one of its ancestors is"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. Possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, changes=None, opts=None,
             losedatafn=None, prefix='', relroot='', copy=None,
             hunksfilterfn=None):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(self._repo, ctx2, self, match=match, changes=changes,
                          opts=opts, losedatafn=losedatafn, prefix=prefix,
                          relroot=relroot, copy=copy,
                          hunksfilterfn=hunksfilterfn)

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        narrowmatch = self._repo.narrowmatch()
        if not narrowmatch.always():
            for l in r:
                l[:] = list(filter(narrowmatch, l))
        for l in r:
            l.sort()

        return r

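# Illustrative sketch (not part of the original file): status() is the common
# entry point for comparing contexts. Assuming an open repo object:
#
#   wctx = repo[None]                  # workingctx
#   st = wctx.status(repo['.'])        # working directory vs its parent
#   st.modified, st.added, st.removed  # sorted lists of file names
#
# The "reversed" dance above exists precisely so that
# workingctx.status(parentctx) can reuse the fast path that diffs against
# the working directory.
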
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""
    def __init__(self, repo, rev, node):
        """changeid is a revision number, node, or tag"""
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user
    def date(self):
        return self._changeset.date
    def files(self):
        return self._changeset.files
    def description(self):
        return self._changeset.description
    def branch(self):
        return encoding.tolocal(self._changeset.extra.get("branch"))
    def closesbranch(self):
        return 'close' in self._changeset.extra
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)
    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, 'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(self._repo, path, fileid=fileid,
                       changectx=self, filelog=filelog)

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist('merge', 'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (_("note: using %s as ancestor of %s and %s\n") %
                     (short(anc), short(self._node), short(n2))) +
                    ''.join(_(" alternatively, use --config "
                              "merge.preferancestor=%s\n") %
                            short(n) for n in sorted(cahs) if n != anc))
        return self._repo[anc]

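    # Illustrative sketch (not part of the original file): with criss-cross
    # merges there can be several "greatest" common ancestors, so
    # commonancestorsheads() may return more than one head. A hypothetical
    # session:
    #
    #   anc = repo['feature'].ancestor(repo['default'])
    #   # one candidate   -> returned directly
    #   # several         -> merge.preferancestor is consulted, else the
    #   #                    revlog's deterministic ancestor() choice is used
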
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + '/')
                   for s in self.substate):
                return
            match.bad(fn, _('no such file in rev %s') % self)

        m = matchmod.badmatch(match, bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)

class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """
    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if r'_changeid' in self.__dict__:
            return self._changeid
        elif r'_changectx' in self.__dict__:
            return self._changectx.rev()
        elif r'_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if r'_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return "%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return "%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (type(self) == type(other) and self._path == other._path
                    and self._filenode == other._filenode)
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev
    def filenode(self):
        return self._filenode
    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)
    def flags(self):
        return self._flags
    def filelog(self):
        return self._filelog
    def rev(self):
        return self._changeid
    def linkrev(self):
        return self._filelog.linkrev(self._filerev)
    def node(self):
        return self._changectx.node()
    def hex(self):
        return self._changectx.hex()
    def user(self):
        return self._changectx.user()
    def date(self):
        return self._changectx.date()
    def files(self):
        return self._changectx.files()
    def description(self):
        return self._changectx.description()
    def branch(self):
        return self._changectx.branch()
    def extra(self):
        return self._changectx.extra()
    def phase(self):
        return self._changectx.phase()
    def phasestr(self):
        return self._changectx.phasestr()
    def obsolete(self):
        return self._changectx.obsolete()
    def instabilities(self):
        return self._changectx.instabilities()
    def manifest(self):
        return self._changectx.manifest()
    def changectx(self):
        return self._changectx
    def renamed(self):
        return self._copied
    def repo(self):
        return self._repo
    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
    def isexec(self):
        return 'x' in self.flags()
    def islink(self):
        return 'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if (fctx._filenode is None
            and (self._repo._encodefilterpats
                 # if file data starts with '\1\n', empty metadata block is
                 # prepended, which adds 4 bytes to filelog.size().
                 or self.size() - 4 == fctx.size())
            or self.size() == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())

        return True

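    # Illustrative sketch (not part of the original file) of the size
    # shortcut above: file data that itself begins with '\1\n' gets an empty
    # 4-byte '\1\n\1\n' metadata block prepended in the filelog, so sizes can
    # differ by 4 even for identical contents. Hypothetical check:
    #
    #   fctx1 = repo['.']['a.txt']
    #   fctx2 = repo[None]['a.txt']
    #   fctx1.cmp(fctx2)   # True means "different", mirroring cmp(1)
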
    def _adjustlinkrev(self, srcrev, inclusive=False):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr,
                                               inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                ac = cl.read(a) # get changeset data (we avoid object creation)
                if path in ac[3]: # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces), we could. Such a buggy situation will
            # likely result in a crash somewhere else at some point.
        return lkr

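    # Illustrative sketch (not part of the original file) of why linkrevs
    # need adjusting: a filelog revision stores a single linkrev, the first
    # changeset that ever introduced that exact content. If identical content
    # is re-introduced on another branch, the filelog revision is reused and
    # the stored linkrev may point outside the history at hand:
    #
    #   0 -- 1 (adds a.txt)         linkrev(a.txt rev) == 1
    #    \
    #     2 (adds identical a.txt)  # seen from rev 2, linkrev 1 is not an
    #                               # ancestor, so _adjustlinkrev walks 2's
    #                               # ancestors for one that touched a.txt
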
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        lkr = self.linkrev()
        attrs = vars(self)
        noctx = not (r'_changeid' in attrs or r'_changectx' in attrs)
        if noctx or self.rev() == lkr:
            return self.linkrev()
        return self._adjustlinkrev(self.rev(), inclusive=True)

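    # Illustrative sketch (not part of the original file): linkrev() is the
    # cheap, stored answer; introrev() is the correct one relative to the
    # changeset this filectx was created from. A hypothetical comparison:
    #
    #   fctx = repo['tip']['a.txt']
    #   fctx.linkrev()    # first changeset ever to introduce this content
    #   fctx.introrev()   # first *ancestor of tip* that introduced it
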
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)

    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if r'_changeid' in vars(self) or r'_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif r'_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx

    def parents(self):
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parents are nullid, pl is
            #   empty.
            # - In case of merge, only one of the parents is nullid and should
            #   be replaced with the rename information. This parent is
            #   -always- the first one.
            #
            # As nullid parents have always been filtered out in the previous
            # list comprehension, inserting at 0 will always result in
            # "replacing the first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

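    # Illustrative sketch (not part of the original file): fl.renamed(node)
    # returns the copy source recorded in the filelog metadata, or a false
    # value when the revision is not a copy/rename:
    #
    #   r = fl.renamed(fctx.filenode())
    #   if r:
    #       srcpath, srcnode = r   # rename source lives in another filelog
    #
    # That tuple is what replaces the nullid parent above, so following
    # parents() across a rename transparently jumps between filelogs.
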
    def p1(self):
        return self.parents()[0]

    def p2(self):
        p = self.parents()
        if len(p) == 2:
            return p[1]
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)

    def annotate(self, follow=False, skiprevs=None, diffopts=None):
        """Returns a list of annotateline objects for each line in the file

        - line.fctx is the filectx of the node where that line was last changed
        - line.lineno is the line number at the first appearance in the managed
          file
        - line.text is the data on that line (including newline character)
        """
        getlog = util.lrucachefunc(lambda x: self._repo.file(x))

        def parents(f):
            # Cut _descendantrev here to mitigate the penalty of lazy linkrev
            # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
            # from the topmost introrev (= srcrev) down to p.linkrev() if it
            # isn't an ancestor of the srcrev.
            f._changeid
            pl = f.parents()

            # Don't return renamed parents if we aren't following.
            if not follow:
                pl = [p for p in pl if p.path() == f.path()]

            # renamed filectx won't have a filelog yet, so set it
            # from the cache to save time
            for p in pl:
                if not r'_filelog' in p.__dict__:
                    p._filelog = getlog(p.path())

            return pl

        # use linkrev to find the first changeset where self appeared
        base = self.introfilectx()
        if getattr(base, '_ancestrycontext', None) is None:
            cl = self._repo.changelog
            if base.rev() is None:
                # wctx is not inclusive, but works because _ancestrycontext
                # is used to test filelog revisions
                ac = cl.ancestors([p.rev() for p in base.parents()],
                                  inclusive=True)
            else:
                ac = cl.ancestors([base.rev()], inclusive=True)
            base._ancestrycontext = ac

        return dagop.annotate(base, parents, skiprevs=skiprevs,
                              diffopts=diffopts)

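    # Illustrative sketch (not part of the original file): consuming the
    # annotate() result, assuming an open repo object:
    #
    #   for line in repo['tip']['a.txt'].annotate(follow=True):
    #       print(line.fctx.rev(), line.lineno, line.text.rstrip())
    #
    # follow=True traces lines across renames via the copy metadata that
    # parents() exposes above.
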
    def ancestors(self, followfirst=False):
        visit = {}
        c = self
        if followfirst:
            cut = 1
        else:
            cut = None

        while True:
            for parent in c.parents()[:cut]:
                visit[(parent.linkrev(), parent.filenode())] = parent
            if not visit:
                break
            c = visit.pop(max(visit))
            yield c

    def decodeddata(self):
        """Returns `data()` after running repository decoding filters.

        This is often equivalent to how the data would be expressed on disk.
        """
        return self._repo.wwritedata(self.path(), self.data())

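# Illustrative sketch (not part of the original file): data() returns the raw
# repository bytes, while decodeddata() applies the configured decode filters
# (for example EOL conversion), approximating the on-disk form:
#
#   fctx = repo['tip']['a.txt']
#   fctx.data()          # bytes as stored in the filelog
#   fctx.decodeddata()   # bytes after decode filters, as for checkout
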
906 class filectx(basefilectx):
906 class filectx(basefilectx):
907 """A filecontext object makes access to data related to a particular
907 """A filecontext object makes access to data related to a particular
908 filerevision convenient."""
908 filerevision convenient."""
909 def __init__(self, repo, path, changeid=None, fileid=None,
909 def __init__(self, repo, path, changeid=None, fileid=None,
910 filelog=None, changectx=None):
910 filelog=None, changectx=None):
911 """changeid can be a changeset revision, node, or tag.
911 """changeid can be a changeset revision, node, or tag.
912 fileid can be a file revision or node."""
912 fileid can be a file revision or node."""
913 self._repo = repo
913 self._repo = repo
914 self._path = path
914 self._path = path
915
915
916 assert (changeid is not None
916 assert (changeid is not None
917 or fileid is not None
917 or fileid is not None
918 or changectx is not None), \
918 or changectx is not None), \
919 ("bad args: changeid=%r, fileid=%r, changectx=%r"
919 ("bad args: changeid=%r, fileid=%r, changectx=%r"
920 % (changeid, fileid, changectx))
920 % (changeid, fileid, changectx))
921
921
922 if filelog is not None:
922 if filelog is not None:
923 self._filelog = filelog
923 self._filelog = filelog
924
924
925 if changeid is not None:
925 if changeid is not None:
926 self._changeid = changeid
926 self._changeid = changeid
927 if changectx is not None:
927 if changectx is not None:
928 self._changectx = changectx
928 self._changectx = changectx
929 if fileid is not None:
929 if fileid is not None:
930 self._fileid = fileid
930 self._fileid = fileid
931
931
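# Construction sketch (``repo`` hypothetical): per the assertion above,
# any one of changeid, fileid, or changectx is enough to anchor the file
# revision:
#
#     fctx = filectx(repo, 'README', changeid='tip')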
932 @propertycache
932 @propertycache
933 def _changectx(self):
933 def _changectx(self):
934 try:
934 try:
935 return self._repo[self._changeid]
935 return self._repo[self._changeid]
936 except error.FilteredRepoLookupError:
936 except error.FilteredRepoLookupError:
937 # Linkrev may point to any revision in the repository. When the
937 # Linkrev may point to any revision in the repository. When the
938 # repository is filtered this may lead to `filectx` trying to build
938 # repository is filtered this may lead to `filectx` trying to build
939 # `changectx` for filtered revision. In such case we fallback to
939 # `changectx` for filtered revision. In such case we fallback to
940 # creating `changectx` on the unfiltered version of the repository.
940 # creating `changectx` on the unfiltered version of the repository.
941 # This fallback should not be an issue because `changectx` objects
941 # This fallback should not be an issue because `changectx` objects
942 # obtained from `filectx` are not used in complex operations that
942 # obtained from `filectx` are not used in complex operations that
943 # care about filtering.
943 # care about filtering.
944 #
944 #
945 # This fallback is a cheap and dirty fix that prevents several
945 # This fallback is a cheap and dirty fix that prevents several
946 # crashes. It does not ensure the behavior is correct. However the
946 # crashes. It does not ensure the behavior is correct. However the
947 # behavior was not correct before filtering either and "incorrect
947 # behavior was not correct before filtering either and "incorrect
948 # behavior" is seen as better than "crash"
948 # behavior" is seen as better than "crash"
949 #
949 #
950 # Linkrevs have several serious troubles with filtering that are
950 # Linkrevs have several serious troubles with filtering that are
951 # complicated to solve. Proper handling of the issue here should be
951 # complicated to solve. Proper handling of the issue here should be
952 # considered when solutions to the linkrev issues are on the table.
952 # considered when solutions to the linkrev issues are on the table.
953 return self._repo.unfiltered()[self._changeid]
953 return self._repo.unfiltered()[self._changeid]
954
954
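# The fallback above, reduced to its pattern (``repo`` and ``linkrev``
# hypothetical): try the filtered view first, and fall back to the
# unfiltered repository when the revision is hidden:
#
#     try:
#         ctx = repo[linkrev]
#     except error.FilteredRepoLookupError:
#         ctx = repo.unfiltered()[linkrev]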
955 def filectx(self, fileid, changeid=None):
955 def filectx(self, fileid, changeid=None):
956 '''opens an arbitrary revision of the file without
956 '''opens an arbitrary revision of the file without
957 opening a new filelog'''
957 opening a new filelog'''
958 return filectx(self._repo, self._path, fileid=fileid,
958 return filectx(self._repo, self._path, fileid=fileid,
959 filelog=self._filelog, changeid=changeid)
959 filelog=self._filelog, changeid=changeid)
960
960
961 def rawdata(self):
961 def rawdata(self):
962 return self._filelog.revision(self._filenode, raw=True)
962 return self._filelog.revision(self._filenode, raw=True)
963
963
964 def rawflags(self):
964 def rawflags(self):
965 """low-level revlog flags"""
965 """low-level revlog flags"""
966 return self._filelog.flags(self._filerev)
966 return self._filelog.flags(self._filerev)
967
967
968 def data(self):
968 def data(self):
969 try:
969 try:
970 return self._filelog.read(self._filenode)
970 return self._filelog.read(self._filenode)
971 except error.CensoredNodeError:
971 except error.CensoredNodeError:
972 if self._repo.ui.config("censor", "policy") == "ignore":
972 if self._repo.ui.config("censor", "policy") == "ignore":
973 return ""
973 return ""
974 raise error.Abort(_("censored node: %s") % short(self._filenode),
974 raise error.Abort(_("censored node: %s") % short(self._filenode),
975 hint=_("set censor.policy to ignore errors"))
975 hint=_("set censor.policy to ignore errors"))
976
976
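# Hedged sketch of the policy switch above (``repo`` and ``fctx``
# hypothetical): with censor.policy set to "ignore", data() returns an
# empty string for censored nodes instead of aborting:
#
#     repo.ui.setconfig('censor', 'policy', 'ignore')
#     data = fctx.data()   # '' if the node is censored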
977 def size(self):
977 def size(self):
978 return self._filelog.size(self._filerev)
978 return self._filelog.size(self._filerev)
979
979
980 @propertycache
980 @propertycache
981 def _copied(self):
981 def _copied(self):
982 """check if file was actually renamed in this changeset revision
982 """check if file was actually renamed in this changeset revision
983
983
984 If a rename is logged in the file revision, we report the copy for
984 If a rename is logged in the file revision, we report the copy for
985 the changeset only if the file revision's linkrev points back to
985 the changeset only if the file revision's linkrev points back to
986 the changeset in question or both changeset parents contain different file revisions.
986 the changeset in question or both changeset parents contain different file revisions.
987 """
987 """
988
988
989 renamed = self._filelog.renamed(self._filenode)
989 renamed = self._filelog.renamed(self._filenode)
990 if not renamed:
990 if not renamed:
991 return None
991 return None
992
992
993 if self.rev() == self.linkrev():
993 if self.rev() == self.linkrev():
994 return renamed
994 return renamed
995
995
996 name = self.path()
996 name = self.path()
997 fnode = self._filenode
997 fnode = self._filenode
998 for p in self._changectx.parents():
998 for p in self._changectx.parents():
999 try:
999 try:
1000 if fnode == p.filenode(name):
1000 if fnode == p.filenode(name):
1001 return None
1001 return None
1002 except error.LookupError:
1002 except error.LookupError:
1003 pass
1003 pass
1004 return renamed
1004 return renamed
1005
1005
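# Usage sketch (``fctx`` hypothetical): the cached value backs rename
# queries, yielding (oldpath, oldfilenode) for a copy and None when the
# file was not copied in this changeset:
#
#     src = fctx.renamed()
#     if src:
#         oldpath, oldnode = src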
1006 def children(self):
1006 def children(self):
1007 # hard for renames
1007 # hard for renames
1008 c = self._filelog.children(self._filenode)
1008 c = self._filelog.children(self._filenode)
1009 return [filectx(self._repo, self._path, fileid=x,
1009 return [filectx(self._repo, self._path, fileid=x,
1010 filelog=self._filelog) for x in c]
1010 filelog=self._filelog) for x in c]
1011
1011
1012 class committablectx(basectx):
1012 class committablectx(basectx):
1013 """A committablectx object provides common functionality for a context that
1013 """A committablectx object provides common functionality for a context that
1014 wants the ability to commit, e.g. workingctx or memctx."""
1014 wants the ability to commit, e.g. workingctx or memctx."""
1015 def __init__(self, repo, text="", user=None, date=None, extra=None,
1015 def __init__(self, repo, text="", user=None, date=None, extra=None,
1016 changes=None):
1016 changes=None):
1017 super(committablectx, self).__init__(repo)
1017 super(committablectx, self).__init__(repo)
1018 self._rev = None
1018 self._rev = None
1019 self._node = None
1019 self._node = None
1020 self._text = text
1020 self._text = text
1021 if date:
1021 if date:
1022 self._date = dateutil.parsedate(date)
1022 self._date = dateutil.parsedate(date)
1023 if user:
1023 if user:
1024 self._user = user
1024 self._user = user
1025 if changes:
1025 if changes:
1026 self._status = changes
1026 self._status = changes
1027
1027
1028 self._extra = {}
1028 self._extra = {}
1029 if extra:
1029 if extra:
1030 self._extra = extra.copy()
1030 self._extra = extra.copy()
1031 if 'branch' not in self._extra:
1031 if 'branch' not in self._extra:
1032 try:
1032 try:
1033 branch = encoding.fromlocal(self._repo.dirstate.branch())
1033 branch = encoding.fromlocal(self._repo.dirstate.branch())
1034 except UnicodeDecodeError:
1034 except UnicodeDecodeError:
1035 raise error.Abort(_('branch name not in UTF-8!'))
1035 raise error.Abort(_('branch name not in UTF-8!'))
1036 self._extra['branch'] = branch
1036 self._extra['branch'] = branch
1037 if self._extra['branch'] == '':
1037 if self._extra['branch'] == '':
1038 self._extra['branch'] = 'default'
1038 self._extra['branch'] = 'default'
1039
1039
1040 def __bytes__(self):
1040 def __bytes__(self):
1041 return bytes(self._parents[0]) + "+"
1041 return bytes(self._parents[0]) + "+"
1042
1042
1043 __str__ = encoding.strmethod(__bytes__)
1043 __str__ = encoding.strmethod(__bytes__)
1044
1044
1045 def __nonzero__(self):
1045 def __nonzero__(self):
1046 return True
1046 return True
1047
1047
1048 __bool__ = __nonzero__
1048 __bool__ = __nonzero__
1049
1049
1050 def _buildflagfunc(self):
1050 def _buildflagfunc(self):
1051 # Create a fallback function for getting file flags when the
1051 # Create a fallback function for getting file flags when the
1052 # filesystem doesn't support them
1052 # filesystem doesn't support them
1053
1053
1054 copiesget = self._repo.dirstate.copies().get
1054 copiesget = self._repo.dirstate.copies().get
1055 parents = self.parents()
1055 parents = self.parents()
1056 if len(parents) < 2:
1056 if len(parents) < 2:
1057 # when we have one parent, it's easy: copy from parent
1057 # when we have one parent, it's easy: copy from parent
1058 man = parents[0].manifest()
1058 man = parents[0].manifest()
1059 def func(f):
1059 def func(f):
1060 f = copiesget(f, f)
1060 f = copiesget(f, f)
1061 return man.flags(f)
1061 return man.flags(f)
1062 else:
1062 else:
1063 # merges are tricky: we try to reconstruct the unstored
1063 # merges are tricky: we try to reconstruct the unstored
1064 # result from the merge (issue1802)
1064 # result from the merge (issue1802)
1065 p1, p2 = parents
1065 p1, p2 = parents
1066 pa = p1.ancestor(p2)
1066 pa = p1.ancestor(p2)
1067 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1067 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1068
1068
1069 def func(f):
1069 def func(f):
1070 f = copiesget(f, f) # may be wrong for merges with copies
1070 f = copiesget(f, f) # may be wrong for merges with copies
1071 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1071 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1072 if fl1 == fl2:
1072 if fl1 == fl2:
1073 return fl1
1073 return fl1
1074 if fl1 == fla:
1074 if fl1 == fla:
1075 return fl2
1075 return fl2
1076 if fl2 == fla:
1076 if fl2 == fla:
1077 return fl1
1077 return fl1
1078 return '' # punt for conflicts
1078 return '' # punt for conflicts
1079
1079
1080 return func
1080 return func
1081
1081
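# The three-way flag resolution above, in isolation (a pure sketch of
# the same rule; the helper name is ours): keep a flag both sides agree
# on, otherwise take the side that changed relative to the ancestor,
# and punt on conflicts:
#
#     def mergeflags(fl1, fl2, fla):
#         if fl1 == fl2:
#             return fl1
#         return fl2 if fl1 == fla else (fl1 if fl2 == fla else '')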
1082 @propertycache
1082 @propertycache
1083 def _flagfunc(self):
1083 def _flagfunc(self):
1084 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1084 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1085
1085
1086 @propertycache
1086 @propertycache
1087 def _status(self):
1087 def _status(self):
1088 return self._repo.status()
1088 return self._repo.status()
1089
1089
1090 @propertycache
1090 @propertycache
1091 def _user(self):
1091 def _user(self):
1092 return self._repo.ui.username()
1092 return self._repo.ui.username()
1093
1093
1094 @propertycache
1094 @propertycache
1095 def _date(self):
1095 def _date(self):
1096 ui = self._repo.ui
1096 ui = self._repo.ui
1097 date = ui.configdate('devel', 'default-date')
1097 date = ui.configdate('devel', 'default-date')
1098 if date is None:
1098 if date is None:
1099 date = dateutil.makedate()
1099 date = dateutil.makedate()
1100 return date
1100 return date
1101
1101
1102 def subrev(self, subpath):
1102 def subrev(self, subpath):
1103 return None
1103 return None
1104
1104
1105 def manifestnode(self):
1105 def manifestnode(self):
1106 return None
1106 return None
1107 def user(self):
1107 def user(self):
1108 return self._user or self._repo.ui.username()
1108 return self._user or self._repo.ui.username()
1109 def date(self):
1109 def date(self):
1110 return self._date
1110 return self._date
1111 def description(self):
1111 def description(self):
1112 return self._text
1112 return self._text
1113 def files(self):
1113 def files(self):
1114 return sorted(self._status.modified + self._status.added +
1114 return sorted(self._status.modified + self._status.added +
1115 self._status.removed)
1115 self._status.removed)
1116
1116
1117 def modified(self):
1117 def modified(self):
1118 return self._status.modified
1118 return self._status.modified
1119 def added(self):
1119 def added(self):
1120 return self._status.added
1120 return self._status.added
1121 def removed(self):
1121 def removed(self):
1122 return self._status.removed
1122 return self._status.removed
1123 def deleted(self):
1123 def deleted(self):
1124 return self._status.deleted
1124 return self._status.deleted
1125 def branch(self):
1125 def branch(self):
1126 return encoding.tolocal(self._extra['branch'])
1126 return encoding.tolocal(self._extra['branch'])
1127 def closesbranch(self):
1127 def closesbranch(self):
1128 return 'close' in self._extra
1128 return 'close' in self._extra
1129 def extra(self):
1129 def extra(self):
1130 return self._extra
1130 return self._extra
1131
1131
1132 def isinmemory(self):
1132 def isinmemory(self):
1133 return False
1133 return False
1134
1134
1135 def tags(self):
1135 def tags(self):
1136 return []
1136 return []
1137
1137
1138 def bookmarks(self):
1138 def bookmarks(self):
1139 b = []
1139 b = []
1140 for p in self.parents():
1140 for p in self.parents():
1141 b.extend(p.bookmarks())
1141 b.extend(p.bookmarks())
1142 return b
1142 return b
1143
1143
1144 def phase(self):
1144 def phase(self):
1145 phase = phases.draft # default phase to draft
1145 phase = phases.draft # default phase to draft
1146 for p in self.parents():
1146 for p in self.parents():
1147 phase = max(phase, p.phase())
1147 phase = max(phase, p.phase())
1148 return phase
1148 return phase
1149
1149
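# Phases are ordered public < draft < secret, so the max() above makes a
# pending commit at least draft and never "less secret" than any parent:
#
#     max(phases.draft, phases.secret) == phases.secret   # True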
1150 def hidden(self):
1150 def hidden(self):
1151 return False
1151 return False
1152
1152
1153 def children(self):
1153 def children(self):
1154 return []
1154 return []
1155
1155
1156 def flags(self, path):
1156 def flags(self, path):
1157 if r'_manifest' in self.__dict__:
1157 if r'_manifest' in self.__dict__:
1158 try:
1158 try:
1159 return self._manifest.flags(path)
1159 return self._manifest.flags(path)
1160 except KeyError:
1160 except KeyError:
1161 return ''
1161 return ''
1162
1162
1163 try:
1163 try:
1164 return self._flagfunc(path)
1164 return self._flagfunc(path)
1165 except OSError:
1165 except OSError:
1166 return ''
1166 return ''
1167
1167
1168 def ancestor(self, c2):
1168 def ancestor(self, c2):
1169 """return the "best" ancestor context of self and c2"""
1169 """return the "best" ancestor context of self and c2"""
1170 return self._parents[0].ancestor(c2) # punt on two parents for now
1170 return self._parents[0].ancestor(c2) # punt on two parents for now
1171
1171
1172 def walk(self, match):
1172 def walk(self, match):
1173 '''Generates matching file names.'''
1173 '''Generates matching file names.'''
1174 return sorted(self._repo.dirstate.walk(match,
1174 return sorted(self._repo.dirstate.walk(match,
1175 subrepos=sorted(self.substate),
1175 subrepos=sorted(self.substate),
1176 unknown=True, ignored=False))
1176 unknown=True, ignored=False))
1177
1177
1178 def matches(self, match):
1178 def matches(self, match):
1179 ds = self._repo.dirstate
1179 ds = self._repo.dirstate
1180 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1180 return sorted(f for f in ds.matches(match) if ds[f] != 'r')
1181
1181
1182 def ancestors(self):
1182 def ancestors(self):
1183 for p in self._parents:
1183 for p in self._parents:
1184 yield p
1184 yield p
1185 for a in self._repo.changelog.ancestors(
1185 for a in self._repo.changelog.ancestors(
1186 [p.rev() for p in self._parents]):
1186 [p.rev() for p in self._parents]):
1187 yield self._repo[a]
1187 yield self._repo[a]
1188
1188
1189 def markcommitted(self, node):
1189 def markcommitted(self, node):
1190 """Perform post-commit cleanup necessary after committing this ctx
1190 """Perform post-commit cleanup necessary after committing this ctx
1191
1191
1192 Specifically, this updates the backing stores that this working
1192 Specifically, this updates the backing stores that this working
1193 context wraps so that they reflect that the changes in this
1193 context wraps so that they reflect that the changes in this
1194 workingctx have been committed. For example, it marks
1194 workingctx have been committed. For example, it marks
1195 modified and added files as normal in the dirstate.
1195 modified and added files as normal in the dirstate.
1196
1196
1197 """
1197 """
1198
1198
1199 with self._repo.dirstate.parentchange():
1199 with self._repo.dirstate.parentchange():
1200 for f in self.modified() + self.added():
1200 for f in self.modified() + self.added():
1201 self._repo.dirstate.normal(f)
1201 self._repo.dirstate.normal(f)
1202 for f in self.removed():
1202 for f in self.removed():
1203 self._repo.dirstate.drop(f)
1203 self._repo.dirstate.drop(f)
1204 self._repo.dirstate.setparents(node)
1204 self._repo.dirstate.setparents(node)
1205
1205
1206 # write changes out explicitly, because nesting wlock at
1206 # write changes out explicitly, because nesting wlock at
1207 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1207 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1208 # from immediately doing so for subsequent changing files
1208 # from immediately doing so for subsequent changing files
1209 self._repo.dirstate.write(self._repo.currenttransaction())
1209 self._repo.dirstate.write(self._repo.currenttransaction())
1210
1210
1211 def dirty(self, missing=False, merge=True, branch=True):
1211 def dirty(self, missing=False, merge=True, branch=True):
1212 return False
1212 return False
1213
1213
1214 class workingctx(committablectx):
1214 class workingctx(committablectx):
1215 """A workingctx object makes access to data related to
1215 """A workingctx object makes access to data related to
1216 the current working directory convenient.
1216 the current working directory convenient.
1217 date - any valid date string or (unixtime, offset), or None.
1217 date - any valid date string or (unixtime, offset), or None.
1218 user - username string, or None.
1218 user - username string, or None.
1219 extra - a dictionary of extra values, or None.
1219 extra - a dictionary of extra values, or None.
1220 changes - a list of file lists as returned by localrepo.status()
1220 changes - a list of file lists as returned by localrepo.status()
1221 or None to use the repository status.
1221 or None to use the repository status.
1222 """
1222 """
1223 def __init__(self, repo, text="", user=None, date=None, extra=None,
1223 def __init__(self, repo, text="", user=None, date=None, extra=None,
1224 changes=None):
1224 changes=None):
1225 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1225 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1226
1226
1227 def __iter__(self):
1227 def __iter__(self):
1228 d = self._repo.dirstate
1228 d = self._repo.dirstate
1229 for f in d:
1229 for f in d:
1230 if d[f] != 'r':
1230 if d[f] != 'r':
1231 yield f
1231 yield f
1232
1232
1233 def __contains__(self, key):
1233 def __contains__(self, key):
1234 return self._repo.dirstate[key] not in "?r"
1234 return self._repo.dirstate[key] not in "?r"
1235
1235
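# For reference, the dirstate state codes used throughout this class:
# 'n' (normal), 'a' (added), 'r' (removed), 'm' (merged), '?' (untracked).
# So the test above means "tracked in the working directory"
# (``wctx`` and the path are hypothetical):
#
#     'setup.py' in wctx   # True when the state is 'n', 'a', or 'm'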
1236 def hex(self):
1236 def hex(self):
1237 return hex(wdirid)
1237 return hex(wdirid)
1238
1238
1239 @propertycache
1239 @propertycache
1240 def _parents(self):
1240 def _parents(self):
1241 p = self._repo.dirstate.parents()
1241 p = self._repo.dirstate.parents()
1242 if p[1] == nullid:
1242 if p[1] == nullid:
1243 p = p[:-1]
1243 p = p[:-1]
1244 return [self._repo[x] for x in p]
1244 # use unfiltered repo to delay/avoid loading obsmarkers
1245 unfi = self._repo.unfiltered()
1246 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1245
1247
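# Why the replacement code above goes through the unfiltered repo
# (sketch, with ``repo`` and node ``n`` hypothetical): looking up
# repo[n] on a filtered view must first compute the hidden set, which
# can load obsmarkers; resolving the rev on the unfiltered changelog
# defers that work until a caller actually needs filtering:
#
#     unfi = repo.unfiltered()
#     ctx = changectx(repo, unfi.changelog.rev(n), n)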
1246 def _fileinfo(self, path):
1248 def _fileinfo(self, path):
1247 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1249 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1248 self._manifest
1250 self._manifest
1249 return super(workingctx, self)._fileinfo(path)
1251 return super(workingctx, self)._fileinfo(path)
1250
1252
1251 def filectx(self, path, filelog=None):
1253 def filectx(self, path, filelog=None):
1252 """get a file context from the working directory"""
1254 """get a file context from the working directory"""
1253 return workingfilectx(self._repo, path, workingctx=self,
1255 return workingfilectx(self._repo, path, workingctx=self,
1254 filelog=filelog)
1256 filelog=filelog)
1255
1257
1256 def dirty(self, missing=False, merge=True, branch=True):
1258 def dirty(self, missing=False, merge=True, branch=True):
1257 "check whether a working directory is modified"
1259 "check whether a working directory is modified"
1258 # check subrepos first
1260 # check subrepos first
1259 for s in sorted(self.substate):
1261 for s in sorted(self.substate):
1260 if self.sub(s).dirty(missing=missing):
1262 if self.sub(s).dirty(missing=missing):
1261 return True
1263 return True
1262 # check current working dir
1264 # check current working dir
1263 return ((merge and self.p2()) or
1265 return ((merge and self.p2()) or
1264 (branch and self.branch() != self.p1().branch()) or
1266 (branch and self.branch() != self.p1().branch()) or
1265 self.modified() or self.added() or self.removed() or
1267 self.modified() or self.added() or self.removed() or
1266 (missing and self.deleted()))
1268 (missing and self.deleted()))
1267
1269
1268 def add(self, list, prefix=""):
1270 def add(self, list, prefix=""):
1269 with self._repo.wlock():
1271 with self._repo.wlock():
1270 ui, ds = self._repo.ui, self._repo.dirstate
1272 ui, ds = self._repo.ui, self._repo.dirstate
1271 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1273 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1272 rejected = []
1274 rejected = []
1273 lstat = self._repo.wvfs.lstat
1275 lstat = self._repo.wvfs.lstat
1274 for f in list:
1276 for f in list:
1275 # ds.pathto() returns an absolute path when this is invoked from
1277 # ds.pathto() returns an absolute path when this is invoked from
1276 # the keyword extension. That gets flagged as non-portable on
1278 # the keyword extension. That gets flagged as non-portable on
1277 # Windows, since it contains the drive letter and colon.
1279 # Windows, since it contains the drive letter and colon.
1278 scmutil.checkportable(ui, os.path.join(prefix, f))
1280 scmutil.checkportable(ui, os.path.join(prefix, f))
1279 try:
1281 try:
1280 st = lstat(f)
1282 st = lstat(f)
1281 except OSError:
1283 except OSError:
1282 ui.warn(_("%s does not exist!\n") % uipath(f))
1284 ui.warn(_("%s does not exist!\n") % uipath(f))
1283 rejected.append(f)
1285 rejected.append(f)
1284 continue
1286 continue
1285 limit = ui.configbytes('ui', 'large-file-limit')
1287 limit = ui.configbytes('ui', 'large-file-limit')
1286 if limit != 0 and st.st_size > limit:
1288 if limit != 0 and st.st_size > limit:
1287 ui.warn(_("%s: up to %d MB of RAM may be required "
1289 ui.warn(_("%s: up to %d MB of RAM may be required "
1288 "to manage this file\n"
1290 "to manage this file\n"
1289 "(use 'hg revert %s' to cancel the "
1291 "(use 'hg revert %s' to cancel the "
1290 "pending addition)\n")
1292 "pending addition)\n")
1291 % (f, 3 * st.st_size // 1000000, uipath(f)))
1293 % (f, 3 * st.st_size // 1000000, uipath(f)))
1292 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1294 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1293 ui.warn(_("%s not added: only files and symlinks "
1295 ui.warn(_("%s not added: only files and symlinks "
1294 "supported currently\n") % uipath(f))
1296 "supported currently\n") % uipath(f))
1295 rejected.append(f)
1297 rejected.append(f)
1296 elif ds[f] in 'amn':
1298 elif ds[f] in 'amn':
1297 ui.warn(_("%s already tracked!\n") % uipath(f))
1299 ui.warn(_("%s already tracked!\n") % uipath(f))
1298 elif ds[f] == 'r':
1300 elif ds[f] == 'r':
1299 ds.normallookup(f)
1301 ds.normallookup(f)
1300 else:
1302 else:
1301 ds.add(f)
1303 ds.add(f)
1302 return rejected
1304 return rejected
1303
1305
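# The RAM estimate above in concrete numbers: for a 700,000,000-byte
# file, 3 * 700000000 // 1000000 == 2100, so the warning reports
# "up to 2100 MB of RAM may be required".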
1304 def forget(self, files, prefix=""):
1306 def forget(self, files, prefix=""):
1305 with self._repo.wlock():
1307 with self._repo.wlock():
1306 ds = self._repo.dirstate
1308 ds = self._repo.dirstate
1307 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1309 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1308 rejected = []
1310 rejected = []
1309 for f in files:
1311 for f in files:
1310 if f not in self._repo.dirstate:
1312 if f not in self._repo.dirstate:
1311 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1313 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1312 rejected.append(f)
1314 rejected.append(f)
1313 elif self._repo.dirstate[f] != 'a':
1315 elif self._repo.dirstate[f] != 'a':
1314 self._repo.dirstate.remove(f)
1316 self._repo.dirstate.remove(f)
1315 else:
1317 else:
1316 self._repo.dirstate.drop(f)
1318 self._repo.dirstate.drop(f)
1317 return rejected
1319 return rejected
1318
1320
1319 def undelete(self, list):
1321 def undelete(self, list):
1320 pctxs = self.parents()
1322 pctxs = self.parents()
1321 with self._repo.wlock():
1323 with self._repo.wlock():
1322 ds = self._repo.dirstate
1324 ds = self._repo.dirstate
1323 for f in list:
1325 for f in list:
1324 if self._repo.dirstate[f] != 'r':
1326 if self._repo.dirstate[f] != 'r':
1325 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1327 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1326 else:
1328 else:
1327 fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
1329 fctx = pctxs[0][f] if f in pctxs[0] else pctxs[1][f]
1328 t = fctx.data()
1330 t = fctx.data()
1329 self._repo.wwrite(f, t, fctx.flags())
1331 self._repo.wwrite(f, t, fctx.flags())
1330 self._repo.dirstate.normal(f)
1332 self._repo.dirstate.normal(f)
1331
1333
1332 def copy(self, source, dest):
1334 def copy(self, source, dest):
1333 try:
1335 try:
1334 st = self._repo.wvfs.lstat(dest)
1336 st = self._repo.wvfs.lstat(dest)
1335 except OSError as err:
1337 except OSError as err:
1336 if err.errno != errno.ENOENT:
1338 if err.errno != errno.ENOENT:
1337 raise
1339 raise
1338 self._repo.ui.warn(_("%s does not exist!\n")
1340 self._repo.ui.warn(_("%s does not exist!\n")
1339 % self._repo.dirstate.pathto(dest))
1341 % self._repo.dirstate.pathto(dest))
1340 return
1342 return
1341 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1343 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1342 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1344 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1343 "symbolic link\n")
1345 "symbolic link\n")
1344 % self._repo.dirstate.pathto(dest))
1346 % self._repo.dirstate.pathto(dest))
1345 else:
1347 else:
1346 with self._repo.wlock():
1348 with self._repo.wlock():
1347 if self._repo.dirstate[dest] in '?':
1349 if self._repo.dirstate[dest] in '?':
1348 self._repo.dirstate.add(dest)
1350 self._repo.dirstate.add(dest)
1349 elif self._repo.dirstate[dest] in 'r':
1351 elif self._repo.dirstate[dest] in 'r':
1350 self._repo.dirstate.normallookup(dest)
1352 self._repo.dirstate.normallookup(dest)
1351 self._repo.dirstate.copy(source, dest)
1353 self._repo.dirstate.copy(source, dest)
1352
1354
1353 def match(self, pats=None, include=None, exclude=None, default='glob',
1355 def match(self, pats=None, include=None, exclude=None, default='glob',
1354 listsubrepos=False, badfn=None):
1356 listsubrepos=False, badfn=None):
1355 r = self._repo
1357 r = self._repo
1356
1358
1357 # Only a case-insensitive filesystem needs magic to translate user input
1359 # Only a case-insensitive filesystem needs magic to translate user input
1358 # to actual case in the filesystem.
1360 # to actual case in the filesystem.
1359 icasefs = not util.fscasesensitive(r.root)
1361 icasefs = not util.fscasesensitive(r.root)
1360 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1362 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1361 default, auditor=r.auditor, ctx=self,
1363 default, auditor=r.auditor, ctx=self,
1362 listsubrepos=listsubrepos, badfn=badfn,
1364 listsubrepos=listsubrepos, badfn=badfn,
1363 icasefs=icasefs)
1365 icasefs=icasefs)
1364
1366
1365 def _filtersuspectsymlink(self, files):
1367 def _filtersuspectsymlink(self, files):
1366 if not files or self._repo.dirstate._checklink:
1368 if not files or self._repo.dirstate._checklink:
1367 return files
1369 return files
1368
1370
1369 # Symlink placeholders may get non-symlink-like contents
1371 # Symlink placeholders may get non-symlink-like contents
1370 # via user error or dereferencing by NFS or Samba servers,
1372 # via user error or dereferencing by NFS or Samba servers,
1371 # so we filter out any placeholders that don't look like a
1373 # so we filter out any placeholders that don't look like a
1372 # symlink
1374 # symlink
1373 sane = []
1375 sane = []
1374 for f in files:
1376 for f in files:
1375 if self.flags(f) == 'l':
1377 if self.flags(f) == 'l':
1376 d = self[f].data()
1378 d = self[f].data()
1377 if (d == '' or len(d) >= 1024 or '\n' in d
1379 if (d == '' or len(d) >= 1024 or '\n' in d
1378 or stringutil.binary(d)):
1380 or stringutil.binary(d)):
1379 self._repo.ui.debug('ignoring suspect symlink placeholder'
1381 self._repo.ui.debug('ignoring suspect symlink placeholder'
1380 ' "%s"\n' % f)
1382 ' "%s"\n' % f)
1381 continue
1383 continue
1382 sane.append(f)
1384 sane.append(f)
1383 return sane
1385 return sane
1384
1386
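# The placeholder heuristic above, restated as a predicate (sketch; the
# helper name is ours): a plausible symlink target is non-empty, under
# 1024 bytes, single-line, and not binary:
#
#     def lookslikesymlink(d):
#         return (d != '' and len(d) < 1024 and '\n' not in d
#                 and not stringutil.binary(d))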
1385 def _checklookup(self, files):
1387 def _checklookup(self, files):
1386 # check for any possibly clean files
1388 # check for any possibly clean files
1387 if not files:
1389 if not files:
1388 return [], [], []
1390 return [], [], []
1389
1391
1390 modified = []
1392 modified = []
1391 deleted = []
1393 deleted = []
1392 fixup = []
1394 fixup = []
1393 pctx = self._parents[0]
1395 pctx = self._parents[0]
1394 # do a full compare of any files that might have changed
1396 # do a full compare of any files that might have changed
1395 for f in sorted(files):
1397 for f in sorted(files):
1396 try:
1398 try:
1397 # This will return True for a file that got replaced by a
1399 # This will return True for a file that got replaced by a
1398 # directory in the interim, but fixing that is pretty hard.
1400 # directory in the interim, but fixing that is pretty hard.
1399 if (f not in pctx or self.flags(f) != pctx.flags(f)
1401 if (f not in pctx or self.flags(f) != pctx.flags(f)
1400 or pctx[f].cmp(self[f])):
1402 or pctx[f].cmp(self[f])):
1401 modified.append(f)
1403 modified.append(f)
1402 else:
1404 else:
1403 fixup.append(f)
1405 fixup.append(f)
1404 except (IOError, OSError):
1406 except (IOError, OSError):
1405 # A file became inaccessible in between? Mark it as deleted,
1407 # A file became inaccessible in between? Mark it as deleted,
1406 # matching dirstate behavior (issue5584).
1408 # matching dirstate behavior (issue5584).
1407 # The dirstate has more complex behavior around whether a
1409 # The dirstate has more complex behavior around whether a
1408 # missing file matches a directory, etc, but we don't need to
1410 # missing file matches a directory, etc, but we don't need to
1409 # bother with that: if f has made it to this point, we're sure
1411 # bother with that: if f has made it to this point, we're sure
1410 # it's in the dirstate.
1412 # it's in the dirstate.
1411 deleted.append(f)
1413 deleted.append(f)
1412
1414
1413 return modified, deleted, fixup
1415 return modified, deleted, fixup
1414
1416
1415 def _poststatusfixup(self, status, fixup):
1417 def _poststatusfixup(self, status, fixup):
1416 """update dirstate for files that are actually clean"""
1418 """update dirstate for files that are actually clean"""
1417 poststatus = self._repo.postdsstatus()
1419 poststatus = self._repo.postdsstatus()
1418 if fixup or poststatus:
1420 if fixup or poststatus:
1419 try:
1421 try:
1420 oldid = self._repo.dirstate.identity()
1422 oldid = self._repo.dirstate.identity()
1421
1423
1422 # updating the dirstate is optional
1424 # updating the dirstate is optional
1423 # so we don't wait on the lock
1425 # so we don't wait on the lock
1424 # wlock can invalidate the dirstate, so cache normal _after_
1426 # wlock can invalidate the dirstate, so cache normal _after_
1425 # taking the lock
1427 # taking the lock
1426 with self._repo.wlock(False):
1428 with self._repo.wlock(False):
1427 if self._repo.dirstate.identity() == oldid:
1429 if self._repo.dirstate.identity() == oldid:
1428 if fixup:
1430 if fixup:
1429 normal = self._repo.dirstate.normal
1431 normal = self._repo.dirstate.normal
1430 for f in fixup:
1432 for f in fixup:
1431 normal(f)
1433 normal(f)
1432 # write changes out explicitly, because nesting
1434 # write changes out explicitly, because nesting
1433 # wlock at runtime may prevent 'wlock.release()'
1435 # wlock at runtime may prevent 'wlock.release()'
1434 # after this block from doing so for subsequent
1436 # after this block from doing so for subsequent
1435 # changing files
1437 # changing files
1436 tr = self._repo.currenttransaction()
1438 tr = self._repo.currenttransaction()
1437 self._repo.dirstate.write(tr)
1439 self._repo.dirstate.write(tr)
1438
1440
1439 if poststatus:
1441 if poststatus:
1440 for ps in poststatus:
1442 for ps in poststatus:
1441 ps(self, status)
1443 ps(self, status)
1442 else:
1444 else:
1443 # in this case, writing changes out breaks
1445 # in this case, writing changes out breaks
1444 # consistency, because .hg/dirstate was
1446 # consistency, because .hg/dirstate was
1445 # already changed simultaneously after last
1447 # already changed simultaneously after last
1446 # caching (see also issue5584 for detail)
1448 # caching (see also issue5584 for detail)
1447 self._repo.ui.debug('skip updating dirstate: '
1449 self._repo.ui.debug('skip updating dirstate: '
1448 'identity mismatch\n')
1450 'identity mismatch\n')
1449 except error.LockError:
1451 except error.LockError:
1450 pass
1452 pass
1451 finally:
1453 finally:
1452 # Even if the wlock couldn't be grabbed, clear out the list.
1454 # Even if the wlock couldn't be grabbed, clear out the list.
1453 self._repo.clearpostdsstatus()
1455 self._repo.clearpostdsstatus()
1454
1456
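# The guard above, reduced to its pattern (``repo`` hypothetical): read
# the dirstate identity before locking, then write back only if nothing
# else rewrote the dirstate in between:
#
#     oldid = repo.dirstate.identity()
#     with repo.wlock(False):
#         if repo.dirstate.identity() == oldid:
#             repo.dirstate.write(repo.currenttransaction())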
1455 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1457 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1456 '''Gets the status from the dirstate -- internal use only.'''
1458 '''Gets the status from the dirstate -- internal use only.'''
1457 subrepos = []
1459 subrepos = []
1458 if '.hgsub' in self:
1460 if '.hgsub' in self:
1459 subrepos = sorted(self.substate)
1461 subrepos = sorted(self.substate)
1460 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1462 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1461 clean=clean, unknown=unknown)
1463 clean=clean, unknown=unknown)
1462
1464
1463 # check for any possibly clean files
1465 # check for any possibly clean files
1464 fixup = []
1466 fixup = []
1465 if cmp:
1467 if cmp:
1466 modified2, deleted2, fixup = self._checklookup(cmp)
1468 modified2, deleted2, fixup = self._checklookup(cmp)
1467 s.modified.extend(modified2)
1469 s.modified.extend(modified2)
1468 s.deleted.extend(deleted2)
1470 s.deleted.extend(deleted2)
1469
1471
1470 if fixup and clean:
1472 if fixup and clean:
1471 s.clean.extend(fixup)
1473 s.clean.extend(fixup)
1472
1474
1473 self._poststatusfixup(s, fixup)
1475 self._poststatusfixup(s, fixup)
1474
1476
1475 if match.always():
1477 if match.always():
1476 # cache for performance
1478 # cache for performance
1477 if s.unknown or s.ignored or s.clean:
1479 if s.unknown or s.ignored or s.clean:
1478 # "_status" is cached with list*=False in the normal route
1480 # "_status" is cached with list*=False in the normal route
1479 self._status = scmutil.status(s.modified, s.added, s.removed,
1481 self._status = scmutil.status(s.modified, s.added, s.removed,
1480 s.deleted, [], [], [])
1482 s.deleted, [], [], [])
1481 else:
1483 else:
1482 self._status = s
1484 self._status = s
1483
1485
1484 return s
1486 return s
1485
1487
1486 @propertycache
1488 @propertycache
1487 def _manifest(self):
1489 def _manifest(self):
1488 """generate a manifest corresponding to the values in self._status
1490 """generate a manifest corresponding to the values in self._status
1489
1491
1490 This reuses the file nodeids from the parent, but we use special node
1492 This reuses the file nodeids from the parent, but we use special node
1491 identifiers for added and modified files. This is used by manifest
1493 identifiers for added and modified files. This is used by manifest
1492 merging to see that files are different and by the update logic to avoid
1494 merging to see that files are different and by the update logic to avoid
1493 deleting newly added files.
1495 deleting newly added files.
1494 """
1496 """
1495 return self._buildstatusmanifest(self._status)
1497 return self._buildstatusmanifest(self._status)
1496
1498
1497 def _buildstatusmanifest(self, status):
1499 def _buildstatusmanifest(self, status):
1498 """Builds a manifest that includes the given status results."""
1500 """Builds a manifest that includes the given status results."""
1499 parents = self.parents()
1501 parents = self.parents()
1500
1502
1501 man = parents[0].manifest().copy()
1503 man = parents[0].manifest().copy()
1502
1504
1503 ff = self._flagfunc
1505 ff = self._flagfunc
1504 for i, l in ((addednodeid, status.added),
1506 for i, l in ((addednodeid, status.added),
1505 (modifiednodeid, status.modified)):
1507 (modifiednodeid, status.modified)):
1506 for f in l:
1508 for f in l:
1507 man[f] = i
1509 man[f] = i
1508 try:
1510 try:
1509 man.setflag(f, ff(f))
1511 man.setflag(f, ff(f))
1510 except OSError:
1512 except OSError:
1511 pass
1513 pass
1512
1514
1513 for f in status.deleted + status.removed:
1515 for f in status.deleted + status.removed:
1514 if f in man:
1516 if f in man:
1515 del man[f]
1517 del man[f]
1516
1518
1517 return man
1519 return man
1518
1520
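# What the sentinel nodeids buy us (sketch; the path is hypothetical):
# a working-copy entry for a modified file is modifiednodeid rather
# than the parent's real filenode, so manifest comparisons see the file
# as different even though no new filelog revision exists yet:
#
#     man['touched.txt'] = modifiednodeid   # never equals a real node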
1519 def _buildstatus(self, other, s, match, listignored, listclean,
1521 def _buildstatus(self, other, s, match, listignored, listclean,
1520 listunknown):
1522 listunknown):
1521 """build a status with respect to another context
1523 """build a status with respect to another context
1522
1524
1523 This includes logic for maintaining the fast path of status when
1525 This includes logic for maintaining the fast path of status when
1524 comparing the working directory against its parent: building a new
1526 comparing the working directory against its parent: building a new
1525 manifest is skipped when self (the working directory) is compared
1527 manifest is skipped when self (the working directory) is compared
1526 against its parent (repo['.']).
1528 against its parent (repo['.']).
1527 """
1529 """
1528 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1530 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1529 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1531 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1530 # might have accidentally ended up with the entire contents of the file
1532 # might have accidentally ended up with the entire contents of the file
1531 # they are supposed to be linking to.
1533 # they are supposed to be linking to.
1532 s.modified[:] = self._filtersuspectsymlink(s.modified)
1534 s.modified[:] = self._filtersuspectsymlink(s.modified)
1533 if other != self._repo['.']:
1535 if other != self._repo['.']:
1534 s = super(workingctx, self)._buildstatus(other, s, match,
1536 s = super(workingctx, self)._buildstatus(other, s, match,
1535 listignored, listclean,
1537 listignored, listclean,
1536 listunknown)
1538 listunknown)
1537 return s
1539 return s
1538
1540
1539 def _matchstatus(self, other, match):
1541 def _matchstatus(self, other, match):
1540 """override the match method with a filter for directory patterns
1542 """override the match method with a filter for directory patterns
1541
1543
1542 We override the match.bad method only for workingctx, since the
1544 We override the match.bad method only for workingctx, since the
1543 filter applies only when the working directory is compared against
1545 filter applies only when the working directory is compared against
1544 its parent changeset.
1546 its parent changeset.
1545
1547
1546 If we aren't comparing against the working directory's parent, then we
1548 If we aren't comparing against the working directory's parent, then we
1547 just use the default match object sent to us.
1549 just use the default match object sent to us.
1548 """
1550 """
1549 if other != self._repo['.']:
1551 if other != self._repo['.']:
1550 def bad(f, msg):
1552 def bad(f, msg):
1551 # 'f' may be a directory pattern from 'match.files()',
1553 # 'f' may be a directory pattern from 'match.files()',
1552 # so 'f not in ctx1' is not enough
1554 # so 'f not in ctx1' is not enough
1553 if f not in other and not other.hasdir(f):
1555 if f not in other and not other.hasdir(f):
1554 self._repo.ui.warn('%s: %s\n' %
1556 self._repo.ui.warn('%s: %s\n' %
1555 (self._repo.dirstate.pathto(f), msg))
1557 (self._repo.dirstate.pathto(f), msg))
1556 match.bad = bad
1558 match.bad = bad
1557 return match
1559 return match
1558
1560
1559 def markcommitted(self, node):
1561 def markcommitted(self, node):
1560 super(workingctx, self).markcommitted(node)
1562 super(workingctx, self).markcommitted(node)
1561
1563
1562 sparse.aftercommit(self._repo, node)
1564 sparse.aftercommit(self._repo, node)
1563
1565
1564 class committablefilectx(basefilectx):
1566 class committablefilectx(basefilectx):
1565 """A committablefilectx provides common functionality for a file context
1567 """A committablefilectx provides common functionality for a file context
1566 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1568 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1567 def __init__(self, repo, path, filelog=None, ctx=None):
1569 def __init__(self, repo, path, filelog=None, ctx=None):
1568 self._repo = repo
1570 self._repo = repo
1569 self._path = path
1571 self._path = path
1570 self._changeid = None
1572 self._changeid = None
1571 self._filerev = self._filenode = None
1573 self._filerev = self._filenode = None
1572
1574
1573 if filelog is not None:
1575 if filelog is not None:
1574 self._filelog = filelog
1576 self._filelog = filelog
1575 if ctx:
1577 if ctx:
1576 self._changectx = ctx
1578 self._changectx = ctx
1577
1579
1578 def __nonzero__(self):
1580 def __nonzero__(self):
1579 return True
1581 return True
1580
1582
1581 __bool__ = __nonzero__
1583 __bool__ = __nonzero__
1582
1584
1583 def linkrev(self):
1585 def linkrev(self):
1584 # linked to self._changectx no matter if file is modified or not
1586 # linked to self._changectx no matter if file is modified or not
1585 return self.rev()
1587 return self.rev()
1586
1588
1587 def parents(self):
1589 def parents(self):
1588 '''return parent filectxs, following copies if necessary'''
1590 '''return parent filectxs, following copies if necessary'''
1589 def filenode(ctx, path):
1591 def filenode(ctx, path):
1590 return ctx._manifest.get(path, nullid)
1592 return ctx._manifest.get(path, nullid)
1591
1593
1592 path = self._path
1594 path = self._path
1593 fl = self._filelog
1595 fl = self._filelog
1594 pcl = self._changectx._parents
1596 pcl = self._changectx._parents
1595 renamed = self.renamed()
1597 renamed = self.renamed()
1596
1598
1597 if renamed:
1599 if renamed:
1598 pl = [renamed + (None,)]
1600 pl = [renamed + (None,)]
1599 else:
1601 else:
1600 pl = [(path, filenode(pcl[0], path), fl)]
1602 pl = [(path, filenode(pcl[0], path), fl)]
1601
1603
1602 for pc in pcl[1:]:
1604 for pc in pcl[1:]:
1603 pl.append((path, filenode(pc, path), fl))
1605 pl.append((path, filenode(pc, path), fl))
1604
1606
1605 return [self._parentfilectx(p, fileid=n, filelog=l)
1607 return [self._parentfilectx(p, fileid=n, filelog=l)
1606 for p, n, l in pl if n != nullid]
1608 for p, n, l in pl if n != nullid]
1607
1609
1608 def children(self):
1610 def children(self):
1609 return []
1611 return []
1610
1612
1611 class workingfilectx(committablefilectx):
1613 class workingfilectx(committablefilectx):
1612 """A workingfilectx object makes access to data related to a particular
1614 """A workingfilectx object makes access to data related to a particular
1613 file in the working directory convenient."""
1615 file in the working directory convenient."""
1614 def __init__(self, repo, path, filelog=None, workingctx=None):
1616 def __init__(self, repo, path, filelog=None, workingctx=None):
1615 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1617 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1616
1618
1617 @propertycache
1619 @propertycache
1618 def _changectx(self):
1620 def _changectx(self):
1619 return workingctx(self._repo)
1621 return workingctx(self._repo)
1620
1622
1621 def data(self):
1623 def data(self):
1622 return self._repo.wread(self._path)
1624 return self._repo.wread(self._path)
1623 def renamed(self):
1625 def renamed(self):
1624 rp = self._repo.dirstate.copied(self._path)
1626 rp = self._repo.dirstate.copied(self._path)
1625 if not rp:
1627 if not rp:
1626 return None
1628 return None
1627 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1629 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1628
1630
1629 def size(self):
1631 def size(self):
1630 return self._repo.wvfs.lstat(self._path).st_size
1632 return self._repo.wvfs.lstat(self._path).st_size
1631 def date(self):
1633 def date(self):
1632 t, tz = self._changectx.date()
1634 t, tz = self._changectx.date()
1633 try:
1635 try:
1634 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1636 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
1635 except OSError as err:
1637 except OSError as err:
1636 if err.errno != errno.ENOENT:
1638 if err.errno != errno.ENOENT:
1637 raise
1639 raise
1638 return (t, tz)
1640 return (t, tz)
1639
1641
1640 def exists(self):
1642 def exists(self):
1641 return self._repo.wvfs.exists(self._path)
1643 return self._repo.wvfs.exists(self._path)
1642
1644
1643 def lexists(self):
1645 def lexists(self):
1644 return self._repo.wvfs.lexists(self._path)
1646 return self._repo.wvfs.lexists(self._path)
1645
1647
1646 def audit(self):
1648 def audit(self):
1647 return self._repo.wvfs.audit(self._path)
1649 return self._repo.wvfs.audit(self._path)
1648
1650
1649 def cmp(self, fctx):
1651 def cmp(self, fctx):
1650 """compare with other file context
1652 """compare with other file context
1651
1653
1652 returns True if different than fctx.
1654 returns True if different than fctx.
1653 """
1655 """
1654 # fctx should be a filectx (not a workingfilectx)
1656 # fctx should be a filectx (not a workingfilectx)
1655 # invert comparison to reuse the same code path
1657 # invert comparison to reuse the same code path
1656 return fctx.cmp(self)
1658 return fctx.cmp(self)
1657
1659
1658 def remove(self, ignoremissing=False):
1660 def remove(self, ignoremissing=False):
1659 """wraps unlink for a repo's working directory"""
1661 """wraps unlink for a repo's working directory"""
1660 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1662 rmdir = self._repo.ui.configbool('experimental', 'removeemptydirs')
1661 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1663 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing,
1662 rmdir=rmdir)
1664 rmdir=rmdir)
1663
1665
1664 def write(self, data, flags, backgroundclose=False, **kwargs):
1666 def write(self, data, flags, backgroundclose=False, **kwargs):
1665 """wraps repo.wwrite"""
1667 """wraps repo.wwrite"""
1666 self._repo.wwrite(self._path, data, flags,
1668 self._repo.wwrite(self._path, data, flags,
1667 backgroundclose=backgroundclose,
1669 backgroundclose=backgroundclose,
1668 **kwargs)
1670 **kwargs)
1669
1671
1670 def markcopied(self, src):
1672 def markcopied(self, src):
1671 """marks this file a copy of `src`"""
1673 """marks this file a copy of `src`"""
1672 if self._repo.dirstate[self._path] in "nma":
1674 if self._repo.dirstate[self._path] in "nma":
1673 self._repo.dirstate.copy(src, self._path)
1675 self._repo.dirstate.copy(src, self._path)
1674
1676
1675 def clearunknown(self):
1677 def clearunknown(self):
1676 """Removes conflicting items in the working directory so that
1678 """Removes conflicting items in the working directory so that
1677 ``write()`` can be called successfully.
1679 ``write()`` can be called successfully.
1678 """
1680 """
1679 wvfs = self._repo.wvfs
1681 wvfs = self._repo.wvfs
1680 f = self._path
1682 f = self._path
1681 wvfs.audit(f)
1683 wvfs.audit(f)
1682 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1684 if self._repo.ui.configbool('experimental', 'merge.checkpathconflicts'):
1683 # remove files under the directory as they should already be
1685 # remove files under the directory as they should already be
1684 # warned and backed up
1686 # warned and backed up
1685 if wvfs.isdir(f) and not wvfs.islink(f):
1687 if wvfs.isdir(f) and not wvfs.islink(f):
1686 wvfs.rmtree(f, forcibly=True)
1688 wvfs.rmtree(f, forcibly=True)
1687 for p in reversed(list(util.finddirs(f))):
1689 for p in reversed(list(util.finddirs(f))):
1688 if wvfs.isfileorlink(p):
1690 if wvfs.isfileorlink(p):
1689 wvfs.unlink(p)
1691 wvfs.unlink(p)
1690 break
1692 break
1691 else:
1693 else:
1692 # don't remove files if path conflicts are not processed
1694 # don't remove files if path conflicts are not processed
1693 if wvfs.isdir(f) and not wvfs.islink(f):
1695 if wvfs.isdir(f) and not wvfs.islink(f):
1694 wvfs.removedirs(f)
1696 wvfs.removedirs(f)
1695
1697
1696 def setflags(self, l, x):
1698 def setflags(self, l, x):
1697 self._repo.wvfs.setflags(self._path, l, x)
1699 self._repo.wvfs.setflags(self._path, l, x)
1698
1700
1699 class overlayworkingctx(committablectx):
1701 class overlayworkingctx(committablectx):
1700 """Wraps another mutable context with a write-back cache that can be
1702 """Wraps another mutable context with a write-back cache that can be
1701 converted into a commit context.
1703 converted into a commit context.
1702
1704
1703 self._cache[path] maps to a dict with keys: {
1705 self._cache[path] maps to a dict with keys: {
1704 'exists': bool?
1706 'exists': bool?
1705 'date': date?
1707 'date': date?
1706 'data': str?
1708 'data': str?
1707 'flags': str?
1709 'flags': str?
1708 'copied': str? (path or None)
1710 'copied': str? (path or None)
1709 }
1711 }
1710 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1712 If `exists` is True, `flags` must be non-None and `date` is non-None. If it
1711 is `False`, the file was deleted.
1713 is `False`, the file was deleted.
1712 """
1714 """
1713
1715
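# Illustrative cache entry for a file written in memory (values are
# examples only, following the schema documented above):
#
#     self._cache['foo.txt'] = {'exists': True, 'date': (0, 0),
#                               'data': 'new contents', 'flags': '',
#                               'copied': None}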
1714 def __init__(self, repo):
1716 def __init__(self, repo):
1715 super(overlayworkingctx, self).__init__(repo)
1717 super(overlayworkingctx, self).__init__(repo)
1716 self.clean()
1718 self.clean()
1717
1719
1718 def setbase(self, wrappedctx):
1720 def setbase(self, wrappedctx):
1719 self._wrappedctx = wrappedctx
1721 self._wrappedctx = wrappedctx
1720 self._parents = [wrappedctx]
1722 self._parents = [wrappedctx]
1721 # Drop old manifest cache as it is now out of date.
1723 # Drop old manifest cache as it is now out of date.
1722 # This is necessary when, e.g., rebasing several nodes with one
1724 # This is necessary when, e.g., rebasing several nodes with one
1723 # ``overlayworkingctx`` (e.g. with --collapse).
1725 # ``overlayworkingctx`` (e.g. with --collapse).
1724 util.clearcachedproperty(self, '_manifest')
1726 util.clearcachedproperty(self, '_manifest')
1725
1727
1726 def data(self, path):
1728 def data(self, path):
1727 if self.isdirty(path):
1729 if self.isdirty(path):
1728 if self._cache[path]['exists']:
1730 if self._cache[path]['exists']:
1729 if self._cache[path]['data']:
1731 if self._cache[path]['data']:
1730 return self._cache[path]['data']
1732 return self._cache[path]['data']
1731 else:
1733 else:
1732 # Must fall back here, too, because we only set flags.
1734 # Must fall back here, too, because we only set flags.
1733 return self._wrappedctx[path].data()
1735 return self._wrappedctx[path].data()
1734 else:
1736 else:
1735 raise error.ProgrammingError("No such file or directory: %s" %
1737 raise error.ProgrammingError("No such file or directory: %s" %
1736 path)
1738 path)
1737 else:
1739 else:
1738 return self._wrappedctx[path].data()
1740 return self._wrappedctx[path].data()
1739
1741
1740 @propertycache
1742 @propertycache
1741 def _manifest(self):
1743 def _manifest(self):
1742 parents = self.parents()
1744 parents = self.parents()
1743 man = parents[0].manifest().copy()
1745 man = parents[0].manifest().copy()
1744
1746
1745 flag = self._flagfunc
1747 flag = self._flagfunc
1746 for path in self.added():
1748 for path in self.added():
1747 man[path] = addednodeid
1749 man[path] = addednodeid
1748 man.setflag(path, flag(path))
1750 man.setflag(path, flag(path))
1749 for path in self.modified():
1751 for path in self.modified():
1750 man[path] = modifiednodeid
1752 man[path] = modifiednodeid
1751 man.setflag(path, flag(path))
1753 man.setflag(path, flag(path))
1752 for path in self.removed():
1754 for path in self.removed():
1753 del man[path]
1755 del man[path]
1754 return man
1756 return man
1755
1757
1756 @propertycache
1758 @propertycache
1757 def _flagfunc(self):
1759 def _flagfunc(self):
1758 def f(path):
1760 def f(path):
1759 return self._cache[path]['flags']
1761 return self._cache[path]['flags']
1760 return f
1762 return f
1761
1763
1762 def files(self):
1764 def files(self):
1763 return sorted(self.added() + self.modified() + self.removed())
1765 return sorted(self.added() + self.modified() + self.removed())
1764
1766
1765 def modified(self):
1767 def modified(self):
1766 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1768 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1767 self._existsinparent(f)]
1769 self._existsinparent(f)]
1768
1770
1769 def added(self):
1771 def added(self):
1770 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1772 return [f for f in self._cache.keys() if self._cache[f]['exists'] and
1771 not self._existsinparent(f)]
1773 not self._existsinparent(f)]
1772
1774
1773 def removed(self):
1775 def removed(self):
1774 return [f for f in self._cache.keys() if
1776 return [f for f in self._cache.keys() if
1775 not self._cache[f]['exists'] and self._existsinparent(f)]
1777 not self._cache[f]['exists'] and self._existsinparent(f)]
1776
1778
1777 def isinmemory(self):
1779 def isinmemory(self):
1778 return True
1780 return True
1779
1781
1780 def filedate(self, path):
1782 def filedate(self, path):
1781 if self.isdirty(path):
1783 if self.isdirty(path):
1782 return self._cache[path]['date']
1784 return self._cache[path]['date']
1783 else:
1785 else:
1784 return self._wrappedctx[path].date()
1786 return self._wrappedctx[path].date()
1785
1787
1786 def markcopied(self, path, origin):
1788 def markcopied(self, path, origin):
1787 if self.isdirty(path):
1789 if self.isdirty(path):
1788 self._cache[path]['copied'] = origin
1790 self._cache[path]['copied'] = origin
1789 else:
1791 else:
1790 raise error.ProgrammingError('markcopied() called on clean context')
1792 raise error.ProgrammingError('markcopied() called on clean context')
1791
1793
1792 def copydata(self, path):
1794 def copydata(self, path):
1793 if self.isdirty(path):
1795 if self.isdirty(path):
1794 return self._cache[path]['copied']
1796 return self._cache[path]['copied']
1795 else:
1797 else:
1796 raise error.ProgrammingError('copydata() called on clean context')
1798 raise error.ProgrammingError('copydata() called on clean context')
1797
1799
1798 def flags(self, path):
1800 def flags(self, path):
1799 if self.isdirty(path):
1801 if self.isdirty(path):
1800 if self._cache[path]['exists']:
1802 if self._cache[path]['exists']:
1801 return self._cache[path]['flags']
1803 return self._cache[path]['flags']
1802 else:
1804 else:
1803 raise error.ProgrammingError("No such file or directory: %s" %
1805 raise error.ProgrammingError("No such file or directory: %s" %
1804 path)
1806 path)
1805 else:
1807 else:
1806 return self._wrappedctx[path].flags()
1808 return self._wrappedctx[path].flags()
1807
1809
1808 def _existsinparent(self, path):
1810 def _existsinparent(self, path):
1809 try:
1811 try:
1810 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1812 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
1811 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1813 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
1812 # with an ``exists()`` function.
1814 # with an ``exists()`` function.
1813 self._wrappedctx[path]
1815 self._wrappedctx[path]
1814 return True
1816 return True
1815 except error.ManifestLookupError:
1817 except error.ManifestLookupError:
1816 return False
1818 return False
1817
1819
1818 def _auditconflicts(self, path):
1820 def _auditconflicts(self, path):
1819 """Replicates conflict checks done by wvfs.write().
1821 """Replicates conflict checks done by wvfs.write().
1820
1822
1821 Since we never write to the filesystem and never call `applyupdates` in
1823 Since we never write to the filesystem and never call `applyupdates` in
1822 IMM, we'll never check that a path is actually writable -- e.g., because
1824 IMM, we'll never check that a path is actually writable -- e.g., because
1823 it adds `a/foo`, but `a` is actually a file in the other commit.
1825 it adds `a/foo`, but `a` is actually a file in the other commit.
1824 """
1826 """
1825 def fail(path, component):
1827 def fail(path, component):
1826 # p1() is the base and we're receiving "writes" for p2()'s
1828 # p1() is the base and we're receiving "writes" for p2()'s
1827 # files.
1829 # files.
1828 if 'l' in self.p1()[component].flags():
1830 if 'l' in self.p1()[component].flags():
1829 raise error.Abort("error: %s conflicts with symlink %s "
1831 raise error.Abort("error: %s conflicts with symlink %s "
1830 "in %s." % (path, component,
1832 "in %s." % (path, component,
1831 self.p1().rev()))
1833 self.p1().rev()))
1832 else:
1834 else:
1833 raise error.Abort("error: '%s' conflicts with file '%s' in "
1835 raise error.Abort("error: '%s' conflicts with file '%s' in "
1834 "%s." % (path, component,
1836 "%s." % (path, component,
1835 self.p1().rev()))
1837 self.p1().rev()))
1836
1838
1837 # Test that each new directory to be created to write this path from p2
1839 # Test that each new directory to be created to write this path from p2
1838 # is not a file in p1.
1840 # is not a file in p1.
1839 components = path.split('/')
1841 components = path.split('/')
1840 for i in pycompat.xrange(len(components)):
1842 for i in pycompat.xrange(len(components)):
1841 component = "/".join(components[0:i])
1843 component = "/".join(components[0:i])
1842 if component in self.p1() and self._cache[component]['exists']:
1844 if component in self.p1() and self._cache[component]['exists']:
1843 fail(path, component)
1845 fail(path, component)
1844
1846
1845 # Test the other direction -- that this path from p2 isn't a directory
1847 # Test the other direction -- that this path from p2 isn't a directory
1846 # in p1 (test that p1 doesn't any paths matching `path/*`).
1848 # in p1 (test that p1 doesn't any paths matching `path/*`).
1847 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1849 match = matchmod.match('/', '', [path + '/'], default=b'relpath')
1848 matches = self.p1().manifest().matches(match)
1850 matches = self.p1().manifest().matches(match)
1849 mfiles = matches.keys()
1851 mfiles = matches.keys()
1850 if len(mfiles) > 0:
1852 if len(mfiles) > 0:
1851 if len(mfiles) == 1 and mfiles[0] == path:
1853 if len(mfiles) == 1 and mfiles[0] == path:
1852 return
1854 return
1853 # omit the files which are deleted in current IMM wctx
1855 # omit the files which are deleted in current IMM wctx
1854 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1856 mfiles = [m for m in mfiles if self._cache[m]['exists']]
1855 if not mfiles:
1857 if not mfiles:
1856 return
1858 return
1857 raise error.Abort("error: file '%s' cannot be written because "
1859 raise error.Abort("error: file '%s' cannot be written because "
1858 " '%s/' is a folder in %s (containing %d "
1860 " '%s/' is a folder in %s (containing %d "
1859 "entries: %s)"
1861 "entries: %s)"
1860 % (path, path, self.p1(), len(mfiles),
1862 % (path, path, self.p1(), len(mfiles),
1861 ', '.join(mfiles)))
1863 ', '.join(mfiles)))
1862
1864
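# Illustration of the two checks above, reduced to a plain set of manifest
# paths (hypothetical standalone sketch, not part of this module):
#
#   def auditconflicts(path, manifest):
#       # 1) no proper prefix of ``path`` may itself be a tracked file
#       parts = path.split('/')
#       for i in range(1, len(parts)):
#           prefix = '/'.join(parts[:i])
#           if prefix in manifest:
#               raise Abort('%s conflicts with file %s' % (path, prefix))
#       # 2) ``path`` itself may not be a directory, i.e. nothing tracked
#       # may live below it
#       if any(f.startswith(path + '/') for f in manifest):
#           raise Abort('%s is a directory in the other parent' % path)
#
#   auditconflicts('a/foo', {'a'})       # raises: 'a' is a file
#   auditconflicts('a', {'a/bar'})       # raises: 'a' is a directory
#   auditconflicts('a/foo', {'a/bar'})   # passes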
1863 def write(self, path, data, flags='', **kwargs):
1865 def write(self, path, data, flags='', **kwargs):
1864 if data is None:
1866 if data is None:
1865 raise error.ProgrammingError("data must be non-None")
1867 raise error.ProgrammingError("data must be non-None")
1866 self._auditconflicts(path)
1868 self._auditconflicts(path)
1867 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1869 self._markdirty(path, exists=True, data=data, date=dateutil.makedate(),
1868 flags=flags)
1870 flags=flags)
1869
1871
1870 def setflags(self, path, l, x):
1872 def setflags(self, path, l, x):
1871 flag = ''
1873 flag = ''
1872 if l:
1874 if l:
1873 flag = 'l'
1875 flag = 'l'
1874 elif x:
1876 elif x:
1875 flag = 'x'
1877 flag = 'x'
1876 self._markdirty(path, exists=True, date=dateutil.makedate(),
1878 self._markdirty(path, exists=True, date=dateutil.makedate(),
1877 flags=flag)
1879 flags=flag)
1878
1880
1879 def remove(self, path):
1881 def remove(self, path):
1880 self._markdirty(path, exists=False)
1882 self._markdirty(path, exists=False)
1881
1883
1882 def exists(self, path):
1884 def exists(self, path):
1883 """exists behaves like `lexists`, but needs to follow symlinks and
1885 """exists behaves like `lexists`, but needs to follow symlinks and
1884 return False if they are broken.
1886 return False if they are broken.
1885 """
1887 """
1886 if self.isdirty(path):
1888 if self.isdirty(path):
1887 # If this path exists and is a symlink, "follow" it by calling
1889 # If this path exists and is a symlink, "follow" it by calling
1888 # exists on the destination path.
1890 # exists on the destination path.
1889 if (self._cache[path]['exists'] and
1891 if (self._cache[path]['exists'] and
1890 'l' in self._cache[path]['flags']):
1892 'l' in self._cache[path]['flags']):
1891 return self.exists(self._cache[path]['data'].strip())
1893 return self.exists(self._cache[path]['data'].strip())
1892 else:
1894 else:
1893 return self._cache[path]['exists']
1895 return self._cache[path]['exists']
1894
1896
1895 return self._existsinparent(path)
1897 return self._existsinparent(path)
1896
1898
1897 def lexists(self, path):
1899 def lexists(self, path):
1898 """lexists returns True if the path exists"""
1900 """lexists returns True if the path exists"""
1899 if self.isdirty(path):
1901 if self.isdirty(path):
1900 return self._cache[path]['exists']
1902 return self._cache[path]['exists']
1901
1903
1902 return self._existsinparent(path)
1904 return self._existsinparent(path)
1903
1905
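# The difference between the two, sketched over the cache layout used here
# ({path: {'exists': bool, 'flags': str, 'data': str, ...}}; the entries
# below are illustrative):
#
#   cache = {
#       'link':     {'exists': True, 'flags': 'l', 'data': 'target'},
#       'dangling': {'exists': True, 'flags': 'l', 'data': 'missing'},
#       'target':   {'exists': True, 'flags': '',  'data': 'content'},
#   }
#
#   lexists('link')     -> True   (the link entry itself exists)
#   exists('link')      -> True   (follows the link to 'target')
#   lexists('dangling') -> True
#   exists('dangling')  -> False  (the link target is missing)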
1904 def size(self, path):
1906 def size(self, path):
1905 if self.isdirty(path):
1907 if self.isdirty(path):
1906 if self._cache[path]['exists']:
1908 if self._cache[path]['exists']:
1907 return len(self._cache[path]['data'])
1909 return len(self._cache[path]['data'])
1908 else:
1910 else:
1909 raise error.ProgrammingError("No such file or directory: %s" %
1911 raise error.ProgrammingError("No such file or directory: %s" %
1910 path)
1912 path)
1911 return self._wrappedctx[path].size()
1913 return self._wrappedctx[path].size()
1912
1914
1913 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1915 def tomemctx(self, text, branch=None, extra=None, date=None, parents=None,
1914 user=None, editor=None):
1916 user=None, editor=None):
1915 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1917 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
1916 committed.
1918 committed.
1917
1919
1918 ``text`` is the commit message.
1920 ``text`` is the commit message.
1919 ``parents`` (optional) are rev numbers.
1921 ``parents`` (optional) are rev numbers.
1920 """
1922 """
1921 # Default parents to the wrapped context's if not passed.
1923 # Default parents to the wrapped context's if not passed.
1922 if parents is None:
1924 if parents is None:
1923 parents = self._wrappedctx.parents()
1925 parents = self._wrappedctx.parents()
1924 if len(parents) == 1:
1926 if len(parents) == 1:
1925 parents = (parents[0], None)
1927 parents = (parents[0], None)
1926
1928
1927 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1929 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
1928 if parents[1] is None:
1930 if parents[1] is None:
1929 parents = (self._repo[parents[0]], None)
1931 parents = (self._repo[parents[0]], None)
1930 else:
1932 else:
1931 parents = (self._repo[parents[0]], self._repo[parents[1]])
1933 parents = (self._repo[parents[0]], self._repo[parents[1]])
1932
1934
1933 files = self._cache.keys()
1935 files = self._cache.keys()
1934 def getfile(repo, memctx, path):
1936 def getfile(repo, memctx, path):
1935 if self._cache[path]['exists']:
1937 if self._cache[path]['exists']:
1936 return memfilectx(repo, memctx, path,
1938 return memfilectx(repo, memctx, path,
1937 self._cache[path]['data'],
1939 self._cache[path]['data'],
1938 'l' in self._cache[path]['flags'],
1940 'l' in self._cache[path]['flags'],
1939 'x' in self._cache[path]['flags'],
1941 'x' in self._cache[path]['flags'],
1940 self._cache[path]['copied'])
1942 self._cache[path]['copied'])
1941 else:
1943 else:
1942 # Returning None, but including the path in `files`, is
1944 # Returning None, but including the path in `files`, is
1943 # necessary for memctx to register a deletion.
1945 # necessary for memctx to register a deletion.
1944 return None
1946 return None
1945 return memctx(self._repo, parents, text, files, getfile, date=date,
1947 return memctx(self._repo, parents, text, files, getfile, date=date,
1946 extra=extra, user=user, branch=branch, editor=editor)
1948 extra=extra, user=user, branch=branch, editor=editor)
1947
1949
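# Hedged usage sketch (assumes an open ``repo`` and an ``overlayworkingctx``
# named ``wctx``; the file names and user string are made up):
#
#   wctx.write('a.txt', 'new content\n')
#   wctx.remove('b.txt')
#   if not wctx.isempty():
#       mctx = wctx.tomemctx('rewrite a.txt, drop b.txt',
#                            user='someone <someone@example.com>')
#       node = mctx.commit()    # commits via localrepo.commitctx()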
1948 def isdirty(self, path):
1950 def isdirty(self, path):
1949 return path in self._cache
1951 return path in self._cache
1950
1952
1951 def isempty(self):
1953 def isempty(self):
1952 # We need to discard any keys that are actually clean before the empty
1954 # We need to discard any keys that are actually clean before the empty
1953 # commit check.
1955 # commit check.
1954 self._compact()
1956 self._compact()
1955 return len(self._cache) == 0
1957 return len(self._cache) == 0
1956
1958
1957 def clean(self):
1959 def clean(self):
1958 self._cache = {}
1960 self._cache = {}
1959
1961
1960 def _compact(self):
1962 def _compact(self):
1961 """Removes keys from the cache that are actually clean, by comparing
1963 """Removes keys from the cache that are actually clean, by comparing
1962 them with the underlying context.
1964 them with the underlying context.
1963
1965
1964 This can occur during the merge process, e.g. by passing --tool :local
1966 This can occur during the merge process, e.g. by passing --tool :local
1965 to resolve a conflict.
1967 to resolve a conflict.
1966 """
1968 """
1967 keys = []
1969 keys = []
1968 for path in self._cache.keys():
1970 for path in self._cache.keys():
1969 cache = self._cache[path]
1971 cache = self._cache[path]
1970 try:
1972 try:
1971 underlying = self._wrappedctx[path]
1973 underlying = self._wrappedctx[path]
1972 if (underlying.data() == cache['data'] and
1974 if (underlying.data() == cache['data'] and
1973 underlying.flags() == cache['flags']):
1975 underlying.flags() == cache['flags']):
1974 keys.append(path)
1976 keys.append(path)
1975 except error.ManifestLookupError:
1977 except error.ManifestLookupError:
1976 # Path not in the underlying manifest (created).
1978 # Path not in the underlying manifest (created).
1977 continue
1979 continue
1978
1980
1979 for path in keys:
1981 for path in keys:
1980 del self._cache[path]
1982 del self._cache[path]
1981 return keys
1983 return keys
1982
1984
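# The compaction rule in one line: an entry is "clean", and thus droppable,
# iff both its data and its flags match the wrapped context.  Sketch over
# plain dicts (illustrative values):
#
#   underlying = {'a': ('x\n', ''), 'b': ('y\n', 'x')}
#   cache = {'a': {'data': 'x\n', 'flags': ''},    # clean -> dropped
#            'b': {'data': 'y\n', 'flags': ''},    # flags differ -> kept
#            'c': {'data': 'z\n', 'flags': ''}}    # newly created -> kept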
1983 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1985 def _markdirty(self, path, exists, data=None, date=None, flags=''):
1984 # data not provided, let's see if we already have some; if not, let's
1986 # data not provided, let's see if we already have some; if not, let's
1985 # grab it from our underlying context, so that we always have data if
1987 # grab it from our underlying context, so that we always have data if
1986 # the file is marked as existing.
1988 # the file is marked as existing.
1987 if exists and data is None:
1989 if exists and data is None:
1988 oldentry = self._cache.get(path) or {}
1990 oldentry = self._cache.get(path) or {}
1989 data = oldentry.get('data') or self._wrappedctx[path].data()
1991 data = oldentry.get('data') or self._wrappedctx[path].data()
1990
1992
1991 self._cache[path] = {
1993 self._cache[path] = {
1992 'exists': exists,
1994 'exists': exists,
1993 'data': data,
1995 'data': data,
1994 'date': date,
1996 'date': date,
1995 'flags': flags,
1997 'flags': flags,
1996 'copied': None,
1998 'copied': None,
1997 }
1999 }
1998
2000
1999 def filectx(self, path, filelog=None):
2001 def filectx(self, path, filelog=None):
2000 return overlayworkingfilectx(self._repo, path, parent=self,
2002 return overlayworkingfilectx(self._repo, path, parent=self,
2001 filelog=filelog)
2003 filelog=filelog)
2002
2004
2003 class overlayworkingfilectx(committablefilectx):
2005 class overlayworkingfilectx(committablefilectx):
2004 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2006 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2005 cache, which can be flushed through later by calling ``flush()``."""
2007 cache, which can be flushed through later by calling ``flush()``."""
2006
2008
2007 def __init__(self, repo, path, filelog=None, parent=None):
2009 def __init__(self, repo, path, filelog=None, parent=None):
2008 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2010 super(overlayworkingfilectx, self).__init__(repo, path, filelog,
2009 parent)
2011 parent)
2010 self._repo = repo
2012 self._repo = repo
2011 self._parent = parent
2013 self._parent = parent
2012 self._path = path
2014 self._path = path
2013
2015
2014 def cmp(self, fctx):
2016 def cmp(self, fctx):
2015 return self.data() != fctx.data()
2017 return self.data() != fctx.data()
2016
2018
2017 def changectx(self):
2019 def changectx(self):
2018 return self._parent
2020 return self._parent
2019
2021
2020 def data(self):
2022 def data(self):
2021 return self._parent.data(self._path)
2023 return self._parent.data(self._path)
2022
2024
2023 def date(self):
2025 def date(self):
2024 return self._parent.filedate(self._path)
2026 return self._parent.filedate(self._path)
2025
2027
2026 def exists(self):
2028 def exists(self):
2027 return self.lexists()
2029 return self.lexists()
2028
2030
2029 def lexists(self):
2031 def lexists(self):
2030 return self._parent.exists(self._path)
2032 return self._parent.exists(self._path)
2031
2033
2032 def renamed(self):
2034 def renamed(self):
2033 path = self._parent.copydata(self._path)
2035 path = self._parent.copydata(self._path)
2034 if not path:
2036 if not path:
2035 return None
2037 return None
2036 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2038 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2037
2039
2038 def size(self):
2040 def size(self):
2039 return self._parent.size(self._path)
2041 return self._parent.size(self._path)
2040
2042
2041 def markcopied(self, origin):
2043 def markcopied(self, origin):
2042 self._parent.markcopied(self._path, origin)
2044 self._parent.markcopied(self._path, origin)
2043
2045
2044 def audit(self):
2046 def audit(self):
2045 pass
2047 pass
2046
2048
2047 def flags(self):
2049 def flags(self):
2048 return self._parent.flags(self._path)
2050 return self._parent.flags(self._path)
2049
2051
2050 def setflags(self, islink, isexec):
2052 def setflags(self, islink, isexec):
2051 return self._parent.setflags(self._path, islink, isexec)
2053 return self._parent.setflags(self._path, islink, isexec)
2052
2054
2053 def write(self, data, flags, backgroundclose=False, **kwargs):
2055 def write(self, data, flags, backgroundclose=False, **kwargs):
2054 return self._parent.write(self._path, data, flags, **kwargs)
2056 return self._parent.write(self._path, data, flags, **kwargs)
2055
2057
2056 def remove(self, ignoremissing=False):
2058 def remove(self, ignoremissing=False):
2057 return self._parent.remove(self._path)
2059 return self._parent.remove(self._path)
2058
2060
2059 def clearunknown(self):
2061 def clearunknown(self):
2060 pass
2062 pass
2061
2063
2062 class workingcommitctx(workingctx):
2064 class workingcommitctx(workingctx):
2063 """A workingcommitctx object makes access to data related to
2065 """A workingcommitctx object makes access to data related to
2064 the revision being committed convenient.
2066 the revision being committed convenient.
2065
2067
2066 This hides changes in the working directory, if they aren't
2068 This hides changes in the working directory, if they aren't
2067 committed in this context.
2069 committed in this context.
2068 """
2070 """
2069 def __init__(self, repo, changes,
2071 def __init__(self, repo, changes,
2070 text="", user=None, date=None, extra=None):
2072 text="", user=None, date=None, extra=None):
2071 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2073 super(workingcommitctx, self).__init__(repo, text, user, date, extra,
2072 changes)
2074 changes)
2073
2075
2074 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2076 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2075 """Return matched files only in ``self._status``
2077 """Return matched files only in ``self._status``
2076
2078
2077 Uncommitted files appear "clean" via this context, even if
2079 Uncommitted files appear "clean" via this context, even if
2078 they aren't actually so in the working directory.
2080 they aren't actually so in the working directory.
2079 """
2081 """
2080 if clean:
2082 if clean:
2081 clean = [f for f in self._manifest if f not in self._changedset]
2083 clean = [f for f in self._manifest if f not in self._changedset]
2082 else:
2084 else:
2083 clean = []
2085 clean = []
2084 return scmutil.status([f for f in self._status.modified if match(f)],
2086 return scmutil.status([f for f in self._status.modified if match(f)],
2085 [f for f in self._status.added if match(f)],
2087 [f for f in self._status.added if match(f)],
2086 [f for f in self._status.removed if match(f)],
2088 [f for f in self._status.removed if match(f)],
2087 [], [], [], clean)
2089 [], [], [], clean)
2088
2090
2089 @propertycache
2091 @propertycache
2090 def _changedset(self):
2092 def _changedset(self):
2091 """Return the set of files changed in this context
2093 """Return the set of files changed in this context
2092 """
2094 """
2093 changed = set(self._status.modified)
2095 changed = set(self._status.modified)
2094 changed.update(self._status.added)
2096 changed.update(self._status.added)
2095 changed.update(self._status.removed)
2097 changed.update(self._status.removed)
2096 return changed
2098 return changed
2097
2099
2098 def makecachingfilectxfn(func):
2100 def makecachingfilectxfn(func):
2099 """Create a filectxfn that caches based on the path.
2101 """Create a filectxfn that caches based on the path.
2100
2102
2101 We can't use util.cachefunc because it uses all arguments as the cache
2103 We can't use util.cachefunc because it uses all arguments as the cache
2102 key and this creates a cycle since the arguments include the repo and
2104 key and this creates a cycle since the arguments include the repo and
2103 memctx.
2105 memctx.
2104 """
2106 """
2105 cache = {}
2107 cache = {}
2106
2108
2107 def getfilectx(repo, memctx, path):
2109 def getfilectx(repo, memctx, path):
2108 if path not in cache:
2110 if path not in cache:
2109 cache[path] = func(repo, memctx, path)
2111 cache[path] = func(repo, memctx, path)
2110 return cache[path]
2112 return cache[path]
2111
2113
2112 return getfilectx
2114 return getfilectx
2113
2115
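# Keying the memo on the path alone (rather than on all arguments, as
# util.cachefunc would) avoids the repo/memctx reference cycle mentioned
# above.  Hedged sketch of the resulting behaviour:
#
#   calls = []
#   def fn(repo, memctx, path):
#       calls.append(path)
#       return 'fctx for %s' % path
#   getfilectx = makecachingfilectxfn(fn)
#   getfilectx(None, None, 'a')   # computes and caches
#   getfilectx(None, None, 'a')   # served from the path-keyed cache
#   assert calls == ['a']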
2114 def memfilefromctx(ctx):
2116 def memfilefromctx(ctx):
2115 """Given a context return a memfilectx for ctx[path]
2117 """Given a context return a memfilectx for ctx[path]
2116
2118
2117 This is a convenience method for building a memctx based on another
2119 This is a convenience method for building a memctx based on another
2118 context.
2120 context.
2119 """
2121 """
2120 def getfilectx(repo, memctx, path):
2122 def getfilectx(repo, memctx, path):
2121 fctx = ctx[path]
2123 fctx = ctx[path]
2122 # this is weird but apparently we only keep track of one parent
2124 # this is weird but apparently we only keep track of one parent
2123 # (why not only store that instead of a tuple?)
2125 # (why not only store that instead of a tuple?)
2124 copied = fctx.renamed()
2126 copied = fctx.renamed()
2125 if copied:
2127 if copied:
2126 copied = copied[0]
2128 copied = copied[0]
2127 return memfilectx(repo, memctx, path, fctx.data(),
2129 return memfilectx(repo, memctx, path, fctx.data(),
2128 islink=fctx.islink(), isexec=fctx.isexec(),
2130 islink=fctx.islink(), isexec=fctx.isexec(),
2129 copied=copied)
2131 copied=copied)
2130
2132
2131 return getfilectx
2133 return getfilectx
2132
2134
2133 def memfilefrompatch(patchstore):
2135 def memfilefrompatch(patchstore):
2134 """Given a patch (e.g. patchstore object) return a memfilectx
2136 """Given a patch (e.g. patchstore object) return a memfilectx
2135
2137
2136 This is a convenience method for building a memctx based on a patchstore.
2138 This is a convenience method for building a memctx based on a patchstore.
2137 """
2139 """
2138 def getfilectx(repo, memctx, path):
2140 def getfilectx(repo, memctx, path):
2139 data, mode, copied = patchstore.getfile(path)
2141 data, mode, copied = patchstore.getfile(path)
2140 if data is None:
2142 if data is None:
2141 return None
2143 return None
2142 islink, isexec = mode
2144 islink, isexec = mode
2143 return memfilectx(repo, memctx, path, data, islink=islink,
2145 return memfilectx(repo, memctx, path, data, islink=islink,
2144 isexec=isexec, copied=copied)
2146 isexec=isexec, copied=copied)
2145
2147
2146 return getfilectx
2148 return getfilectx
2147
2149
2148 class memctx(committablectx):
2150 class memctx(committablectx):
2149 """Use memctx to perform in-memory commits via localrepo.commitctx().
2151 """Use memctx to perform in-memory commits via localrepo.commitctx().
2150
2152
2151 Revision information is supplied at initialization time while
2153 Revision information is supplied at initialization time while
2152 related files data is made available through a callback
2154 related files data is made available through a callback
2153 mechanism. 'repo' is the current localrepo, 'parents' is a
2155 mechanism. 'repo' is the current localrepo, 'parents' is a
2154 sequence of two parent revision identifiers (pass None for every
2156 sequence of two parent revision identifiers (pass None for every
2155 missing parent), 'text' is the commit message and 'files' lists
2157 missing parent), 'text' is the commit message and 'files' lists
2156 names of files touched by the revision (normalized and relative to
2158 names of files touched by the revision (normalized and relative to
2157 repository root).
2159 repository root).
2158
2160
2159 filectxfn(repo, memctx, path) is a callable receiving the
2161 filectxfn(repo, memctx, path) is a callable receiving the
2160 repository, the current memctx object and the normalized path of
2162 repository, the current memctx object and the normalized path of
2161 requested file, relative to repository root. It is fired by the
2163 requested file, relative to repository root. It is fired by the
2162 commit function for every file in 'files', but calls order is
2164 commit function for every file in 'files', but calls order is
2163 undefined. If the file is available in the revision being
2165 undefined. If the file is available in the revision being
2164 committed (updated or added), filectxfn returns a memfilectx
2166 committed (updated or added), filectxfn returns a memfilectx
2165 object. If the file was removed, filectxfn return None for recent
2167 object. If the file was removed, filectxfn return None for recent
2166 Mercurial. Moved files are represented by marking the source file
2168 Mercurial. Moved files are represented by marking the source file
2167 removed and the new file added with copy information (see
2169 removed and the new file added with copy information (see
2168 memfilectx).
2170 memfilectx).
2169
2171
2170 user receives the committer name and defaults to current
2172 user receives the committer name and defaults to current
2171 repository username, date is the commit date in any format
2173 repository username, date is the commit date in any format
2172 supported by dateutil.parsedate() and defaults to current date, extra
2174 supported by dateutil.parsedate() and defaults to current date, extra
2173 is a dictionary of metadata or is left empty.
2175 is a dictionary of metadata or is left empty.
2174 """
2176 """
2175
2177
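# Hedged usage sketch (assumes an open ``repo``; the paths, data and
# message are made up):
#
#   def filectxfn(repo, memctx, path):
#       if path == 'removed.txt':
#           return None                        # registers a deletion
#       return memfilectx(repo, memctx, path, 'hello\n')
#
#   ctx = memctx(repo, (repo['.'].node(), None),
#                'commit built entirely in memory',
#                ['hello.txt', 'removed.txt'], filectxfn)
#   node = ctx.commit()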
2176 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2178 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2177 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2179 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2178 # this field to determine what to do in filectxfn.
2180 # this field to determine what to do in filectxfn.
2179 _returnnoneformissingfiles = True
2181 _returnnoneformissingfiles = True
2180
2182
2181 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2183 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2182 date=None, extra=None, branch=None, editor=False):
2184 date=None, extra=None, branch=None, editor=False):
2183 super(memctx, self).__init__(repo, text, user, date, extra)
2185 super(memctx, self).__init__(repo, text, user, date, extra)
2184 self._rev = None
2186 self._rev = None
2185 self._node = None
2187 self._node = None
2186 parents = [(p or nullid) for p in parents]
2188 parents = [(p or nullid) for p in parents]
2187 p1, p2 = parents
2189 p1, p2 = parents
2188 self._parents = [self._repo[p] for p in (p1, p2)]
2190 self._parents = [self._repo[p] for p in (p1, p2)]
2189 files = sorted(set(files))
2191 files = sorted(set(files))
2190 self._files = files
2192 self._files = files
2191 if branch is not None:
2193 if branch is not None:
2192 self._extra['branch'] = encoding.fromlocal(branch)
2194 self._extra['branch'] = encoding.fromlocal(branch)
2193 self.substate = {}
2195 self.substate = {}
2194
2196
2195 if isinstance(filectxfn, patch.filestore):
2197 if isinstance(filectxfn, patch.filestore):
2196 filectxfn = memfilefrompatch(filectxfn)
2198 filectxfn = memfilefrompatch(filectxfn)
2197 elif not callable(filectxfn):
2199 elif not callable(filectxfn):
2198 # if store is not callable, wrap it in a function
2200 # if store is not callable, wrap it in a function
2199 filectxfn = memfilefromctx(filectxfn)
2201 filectxfn = memfilefromctx(filectxfn)
2200
2202
2201 # memoizing increases performance for e.g. vcs convert scenarios.
2203 # memoizing increases performance for e.g. vcs convert scenarios.
2202 self._filectxfn = makecachingfilectxfn(filectxfn)
2204 self._filectxfn = makecachingfilectxfn(filectxfn)
2203
2205
2204 if editor:
2206 if editor:
2205 self._text = editor(self._repo, self, [])
2207 self._text = editor(self._repo, self, [])
2206 self._repo.savecommitmessage(self._text)
2208 self._repo.savecommitmessage(self._text)
2207
2209
2208 def filectx(self, path, filelog=None):
2210 def filectx(self, path, filelog=None):
2209 """get a file context from the working directory
2211 """get a file context from the working directory
2210
2212
2211 Returns None if file doesn't exist and should be removed."""
2213 Returns None if file doesn't exist and should be removed."""
2212 return self._filectxfn(self._repo, self, path)
2214 return self._filectxfn(self._repo, self, path)
2213
2215
2214 def commit(self):
2216 def commit(self):
2215 """commit context to the repo"""
2217 """commit context to the repo"""
2216 return self._repo.commitctx(self)
2218 return self._repo.commitctx(self)
2217
2219
2218 @propertycache
2220 @propertycache
2219 def _manifest(self):
2221 def _manifest(self):
2220 """generate a manifest based on the return values of filectxfn"""
2222 """generate a manifest based on the return values of filectxfn"""
2221
2223
2222 # keep this simple for now; just worry about p1
2224 # keep this simple for now; just worry about p1
2223 pctx = self._parents[0]
2225 pctx = self._parents[0]
2224 man = pctx.manifest().copy()
2226 man = pctx.manifest().copy()
2225
2227
2226 for f in self._status.modified:
2228 for f in self._status.modified:
2227 man[f] = modifiednodeid
2229 man[f] = modifiednodeid
2228
2230
2229 for f in self._status.added:
2231 for f in self._status.added:
2230 man[f] = addednodeid
2232 man[f] = addednodeid
2231
2233
2232 for f in self._status.removed:
2234 for f in self._status.removed:
2233 if f in man:
2235 if f in man:
2234 del man[f]
2236 del man[f]
2235
2237
2236 return man
2238 return man
2237
2239
2238 @propertycache
2240 @propertycache
2239 def _status(self):
2241 def _status(self):
2240 """Calculate exact status from ``files`` specified at construction
2242 """Calculate exact status from ``files`` specified at construction
2241 """
2243 """
2242 man1 = self.p1().manifest()
2244 man1 = self.p1().manifest()
2243 p2 = self._parents[1]
2245 p2 = self._parents[1]
2244 # "1 < len(self._parents)" can't be used for checking
2246 # "1 < len(self._parents)" can't be used for checking
2245 # existence of the 2nd parent, because "memctx._parents" is
2247 # existence of the 2nd parent, because "memctx._parents" is
2246 # explicitly initialized by the list, of which length is 2.
2248 # explicitly initialized by the list, of which length is 2.
2247 if p2.node() != nullid:
2249 if p2.node() != nullid:
2248 man2 = p2.manifest()
2250 man2 = p2.manifest()
2249 managing = lambda f: f in man1 or f in man2
2251 managing = lambda f: f in man1 or f in man2
2250 else:
2252 else:
2251 managing = lambda f: f in man1
2253 managing = lambda f: f in man1
2252
2254
2253 modified, added, removed = [], [], []
2255 modified, added, removed = [], [], []
2254 for f in self._files:
2256 for f in self._files:
2255 if not managing(f):
2257 if not managing(f):
2256 added.append(f)
2258 added.append(f)
2257 elif self[f]:
2259 elif self[f]:
2258 modified.append(f)
2260 modified.append(f)
2259 else:
2261 else:
2260 removed.append(f)
2262 removed.append(f)
2261
2263
2262 return scmutil.status(modified, added, removed, [], [], [], [])
2264 return scmutil.status(modified, added, removed, [], [], [], [])
2263
2265
2264 class memfilectx(committablefilectx):
2266 class memfilectx(committablefilectx):
2265 """memfilectx represents an in-memory file to commit.
2267 """memfilectx represents an in-memory file to commit.
2266
2268
2267 See memctx and committablefilectx for more details.
2269 See memctx and committablefilectx for more details.
2268 """
2270 """
2269 def __init__(self, repo, changectx, path, data, islink=False,
2271 def __init__(self, repo, changectx, path, data, islink=False,
2270 isexec=False, copied=None):
2272 isexec=False, copied=None):
2271 """
2273 """
2272 path is the normalized file path relative to repository root.
2274 path is the normalized file path relative to repository root.
2273 data is the file content as a string.
2275 data is the file content as a string.
2274 islink is True if the file is a symbolic link.
2276 islink is True if the file is a symbolic link.
2275 isexec is True if the file is executable.
2277 isexec is True if the file is executable.
2276 copied is the source file path if current file was copied in the
2278 copied is the source file path if current file was copied in the
2277 revision being committed, or None."""
2279 revision being committed, or None."""
2278 super(memfilectx, self).__init__(repo, path, None, changectx)
2280 super(memfilectx, self).__init__(repo, path, None, changectx)
2279 self._data = data
2281 self._data = data
2280 if islink:
2282 if islink:
2281 self._flags = 'l'
2283 self._flags = 'l'
2282 elif isexec:
2284 elif isexec:
2283 self._flags = 'x'
2285 self._flags = 'x'
2284 else:
2286 else:
2285 self._flags = ''
2287 self._flags = ''
2286 self._copied = None
2288 self._copied = None
2287 if copied:
2289 if copied:
2288 self._copied = (copied, nullid)
2290 self._copied = (copied, nullid)
2289
2291
2290 def data(self):
2292 def data(self):
2291 return self._data
2293 return self._data
2292
2294
2293 def remove(self, ignoremissing=False):
2295 def remove(self, ignoremissing=False):
2294 """wraps unlink for a repo's working directory"""
2296 """wraps unlink for a repo's working directory"""
2295 # need to figure out what to do here
2297 # need to figure out what to do here
2296 del self._changectx[self._path]
2298 del self._changectx[self._path]
2297
2299
2298 def write(self, data, flags, **kwargs):
2300 def write(self, data, flags, **kwargs):
2299 """wraps repo.wwrite"""
2301 """wraps repo.wwrite"""
2300 self._data = data
2302 self._data = data
2301
2303
2302
2304
2303 class metadataonlyctx(committablectx):
2305 class metadataonlyctx(committablectx):
2304 """Like memctx but it's reusing the manifest of different commit.
2306 """Like memctx but it's reusing the manifest of different commit.
2305 Intended to be used by lightweight operations that are creating
2307 Intended to be used by lightweight operations that are creating
2306 metadata-only changes.
2308 metadata-only changes.
2307
2309
2308 Revision information is supplied at initialization time. 'repo' is the
2310 Revision information is supplied at initialization time. 'repo' is the
2309 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2311 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2310 'parents' is a sequence of two parent revision identifiers (pass None for
2312 'parents' is a sequence of two parent revision identifiers (pass None for
2311 every missing parent), 'text' is the commit message.
2313 every missing parent), 'text' is the commit message.
2312
2314
2313 user receives the committer name and defaults to current repository
2315 user receives the committer name and defaults to current repository
2314 username, date is the commit date in any format supported by
2316 username, date is the commit date in any format supported by
2315 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2317 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2316 metadata or is left empty.
2318 metadata or is left empty.
2317 """
2319 """
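# Hedged usage sketch: rewrite only the commit message of the working
# directory parent, reusing its manifest untouched (assumes an open
# ``repo``):
#
#   old = repo['.']
#   new = metadataonlyctx(repo, old, text='a better commit message')
#   node = new.commit()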
2318 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2320 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2319 date=None, extra=None, editor=False):
2321 date=None, extra=None, editor=False):
2320 if text is None:
2322 if text is None:
2321 text = originalctx.description()
2323 text = originalctx.description()
2322 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2324 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2323 self._rev = None
2325 self._rev = None
2324 self._node = None
2326 self._node = None
2325 self._originalctx = originalctx
2327 self._originalctx = originalctx
2326 self._manifestnode = originalctx.manifestnode()
2328 self._manifestnode = originalctx.manifestnode()
2327 if parents is None:
2329 if parents is None:
2328 parents = originalctx.parents()
2330 parents = originalctx.parents()
2329 else:
2331 else:
2330 parents = [repo[p] for p in parents if p is not None]
2332 parents = [repo[p] for p in parents if p is not None]
2331 parents = parents[:]
2333 parents = parents[:]
2332 while len(parents) < 2:
2334 while len(parents) < 2:
2333 parents.append(repo[nullid])
2335 parents.append(repo[nullid])
2334 p1, p2 = self._parents = parents
2336 p1, p2 = self._parents = parents
2335
2337
2336 # sanity check to ensure that the reused manifest parents are
2338 # sanity check to ensure that the reused manifest parents are
2337 # manifests of our commit parents
2339 # manifests of our commit parents
2338 mp1, mp2 = self.manifestctx().parents
2340 mp1, mp2 = self.manifestctx().parents
2339 if p1 != nullid and p1.manifestnode() != mp1:
2341 if p1 != nullid and p1.manifestnode() != mp1:
2340 raise RuntimeError('can\'t reuse the manifest: '
2342 raise RuntimeError('can\'t reuse the manifest: '
2341 'its p1 doesn\'t match the new ctx p1')
2343 'its p1 doesn\'t match the new ctx p1')
2342 if p2 != nullid and p2.manifestnode() != mp2:
2344 if p2 != nullid and p2.manifestnode() != mp2:
2343 raise RuntimeError('can\'t reuse the manifest: '
2345 raise RuntimeError('can\'t reuse the manifest: '
2344 'its p2 doesn\'t match the new ctx p2')
2346 'its p2 doesn\'t match the new ctx p2')
2345
2347
2346 self._files = originalctx.files()
2348 self._files = originalctx.files()
2347 self.substate = {}
2349 self.substate = {}
2348
2350
2349 if editor:
2351 if editor:
2350 self._text = editor(self._repo, self, [])
2352 self._text = editor(self._repo, self, [])
2351 self._repo.savecommitmessage(self._text)
2353 self._repo.savecommitmessage(self._text)
2352
2354
2353 def manifestnode(self):
2355 def manifestnode(self):
2354 return self._manifestnode
2356 return self._manifestnode
2355
2357
2356 @property
2358 @property
2357 def _manifestctx(self):
2359 def _manifestctx(self):
2358 return self._repo.manifestlog[self._manifestnode]
2360 return self._repo.manifestlog[self._manifestnode]
2359
2361
2360 def filectx(self, path, filelog=None):
2362 def filectx(self, path, filelog=None):
2361 return self._originalctx.filectx(path, filelog=filelog)
2363 return self._originalctx.filectx(path, filelog=filelog)
2362
2364
2363 def commit(self):
2365 def commit(self):
2364 """commit context to the repo"""
2366 """commit context to the repo"""
2365 return self._repo.commitctx(self)
2367 return self._repo.commitctx(self)
2366
2368
2367 @property
2369 @property
2368 def _manifest(self):
2370 def _manifest(self):
2369 return self._originalctx.manifest()
2371 return self._originalctx.manifest()
2370
2372
2371 @propertycache
2373 @propertycache
2372 def _status(self):
2374 def _status(self):
2373 """Calculate exact status from ``files`` specified in the ``origctx``
2375 """Calculate exact status from ``files`` specified in the ``origctx``
2374 and parents manifests.
2376 and parents manifests.
2375 """
2377 """
2376 man1 = self.p1().manifest()
2378 man1 = self.p1().manifest()
2377 p2 = self._parents[1]
2379 p2 = self._parents[1]
2378 # "1 < len(self._parents)" can't be used for checking
2380 # "1 < len(self._parents)" can't be used for checking
2379 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2381 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2380 # explicitly initialized by the list, of which length is 2.
2382 # explicitly initialized by the list, of which length is 2.
2381 if p2.node() != nullid:
2383 if p2.node() != nullid:
2382 man2 = p2.manifest()
2384 man2 = p2.manifest()
2383 managing = lambda f: f in man1 or f in man2
2385 managing = lambda f: f in man1 or f in man2
2384 else:
2386 else:
2385 managing = lambda f: f in man1
2387 managing = lambda f: f in man1
2386
2388
2387 modified, added, removed = [], [], []
2389 modified, added, removed = [], [], []
2388 for f in self._files:
2390 for f in self._files:
2389 if not managing(f):
2391 if not managing(f):
2390 added.append(f)
2392 added.append(f)
2391 elif f in self:
2393 elif f in self:
2392 modified.append(f)
2394 modified.append(f)
2393 else:
2395 else:
2394 removed.append(f)
2396 removed.append(f)
2395
2397
2396 return scmutil.status(modified, added, removed, [], [], [], [])
2398 return scmutil.status(modified, added, removed, [], [], [], [])
2397
2399
2398 class arbitraryfilectx(object):
2400 class arbitraryfilectx(object):
2399 """Allows you to use filectx-like functions on a file in an arbitrary
2401 """Allows you to use filectx-like functions on a file in an arbitrary
2400 location on disk, possibly not in the working directory.
2402 location on disk, possibly not in the working directory.
2401 """
2403 """
2402 def __init__(self, path, repo=None):
2404 def __init__(self, path, repo=None):
2403 # Repo is optional because contrib/simplemerge uses this class.
2405 # Repo is optional because contrib/simplemerge uses this class.
2404 self._repo = repo
2406 self._repo = repo
2405 self._path = path
2407 self._path = path
2406
2408
2407 def cmp(self, fctx):
2409 def cmp(self, fctx):
2408 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2410 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2409 # path if either side is a symlink.
2411 # path if either side is a symlink.
2410 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2412 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2411 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2413 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2412 # Add a fast-path for merge if both sides are disk-backed.
2414 # Add a fast-path for merge if both sides are disk-backed.
2413 # Note that filecmp uses the opposite return values (True if same)
2415 # Note that filecmp uses the opposite return values (True if same)
2414 # from our cmp functions (True if different).
2416 # from our cmp functions (True if different).
2415 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2417 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2416 return self.data() != fctx.data()
2418 return self.data() != fctx.data()
2417
2419
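# Note the inverted conventions mentioned above: stdlib filecmp.cmp()
# returns True when the files are *equal*, while fctx-style cmp() returns
# True when they *differ*.  Sketch (paths illustrative):
#
#   import filecmp
#   same = filecmp.cmp('/tmp/one', '/tmp/two', shallow=False)
#   different = not same            # what cmp() reports here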
2418 def path(self):
2420 def path(self):
2419 return self._path
2421 return self._path
2420
2422
2421 def flags(self):
2423 def flags(self):
2422 return ''
2424 return ''
2423
2425
2424 def data(self):
2426 def data(self):
2425 return util.readfile(self._path)
2427 return util.readfile(self._path)
2426
2428
2427 def decodeddata(self):
2429 def decodeddata(self):
2428 with open(self._path, "rb") as f:
2430 with open(self._path, "rb") as f:
2429 return f.read()
2431 return f.read()
2430
2432
2431 def remove(self):
2433 def remove(self):
2432 util.unlink(self._path)
2434 util.unlink(self._path)
2433
2435
2434 def write(self, data, flags, **kwargs):
2436 def write(self, data, flags, **kwargs):
2435 assert not flags
2437 assert not flags
2436 with open(self._path, "w") as f:
2438 with open(self._path, "w") as f:
2437 f.write(data)
2439 f.write(data)
@@ -1,3001 +1,3000 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import os
12 import os
13 import random
13 import random
14 import sys
14 import sys
15 import time
15 import time
16 import weakref
16 import weakref
17
17
18 from .i18n import _
18 from .i18n import _
19 from .node import (
19 from .node import (
20 bin,
20 bin,
21 hex,
21 hex,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 )
25 )
26 from . import (
26 from . import (
27 bookmarks,
27 bookmarks,
28 branchmap,
28 branchmap,
29 bundle2,
29 bundle2,
30 changegroup,
30 changegroup,
31 changelog,
31 changelog,
32 color,
32 color,
33 context,
33 context,
34 dirstate,
34 dirstate,
35 dirstateguard,
35 dirstateguard,
36 discovery,
36 discovery,
37 encoding,
37 encoding,
38 error,
38 error,
39 exchange,
39 exchange,
40 extensions,
40 extensions,
41 filelog,
41 filelog,
42 hook,
42 hook,
43 lock as lockmod,
43 lock as lockmod,
44 manifest,
44 manifest,
45 match as matchmod,
45 match as matchmod,
46 merge as mergemod,
46 merge as mergemod,
47 mergeutil,
47 mergeutil,
48 namespaces,
48 namespaces,
49 narrowspec,
49 narrowspec,
50 obsolete,
50 obsolete,
51 pathutil,
51 pathutil,
52 phases,
52 phases,
53 pushkey,
53 pushkey,
54 pycompat,
54 pycompat,
55 repository,
55 repository,
56 repoview,
56 repoview,
57 revset,
57 revset,
58 revsetlang,
58 revsetlang,
59 scmutil,
59 scmutil,
60 sparse,
60 sparse,
61 store as storemod,
61 store as storemod,
62 subrepoutil,
62 subrepoutil,
63 tags as tagsmod,
63 tags as tagsmod,
64 transaction,
64 transaction,
65 txnutil,
65 txnutil,
66 util,
66 util,
67 vfs as vfsmod,
67 vfs as vfsmod,
68 )
68 )
69 from .utils import (
69 from .utils import (
70 interfaceutil,
70 interfaceutil,
71 procutil,
71 procutil,
72 stringutil,
72 stringutil,
73 )
73 )
74
74
75 from .revlogutils import (
75 from .revlogutils import (
76 constants as revlogconst,
76 constants as revlogconst,
77 )
77 )
78
78
79 release = lockmod.release
79 release = lockmod.release
80 urlerr = util.urlerr
80 urlerr = util.urlerr
81 urlreq = util.urlreq
81 urlreq = util.urlreq
82
82
83 # set of (path, vfs-location) tuples. vfs-location is:
83 # set of (path, vfs-location) tuples. vfs-location is:
84 # - 'plain' for vfs relative paths
84 # - 'plain' for vfs relative paths
85 # - '' for svfs relative paths
85 # - '' for svfs relative paths
86 _cachedfiles = set()
86 _cachedfiles = set()
87
87
88 class _basefilecache(scmutil.filecache):
88 class _basefilecache(scmutil.filecache):
89 """All filecache usage on repo are done for logic that should be unfiltered
89 """All filecache usage on repo are done for logic that should be unfiltered
90 """
90 """
91 def __get__(self, repo, type=None):
91 def __get__(self, repo, type=None):
92 if repo is None:
92 if repo is None:
93 return self
93 return self
94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
94 return super(_basefilecache, self).__get__(repo.unfiltered(), type)
95 def __set__(self, repo, value):
95 def __set__(self, repo, value):
96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
96 return super(_basefilecache, self).__set__(repo.unfiltered(), value)
97 def __delete__(self, repo):
97 def __delete__(self, repo):
98 return super(_basefilecache, self).__delete__(repo.unfiltered())
98 return super(_basefilecache, self).__delete__(repo.unfiltered())
99
99
100 class repofilecache(_basefilecache):
100 class repofilecache(_basefilecache):
101 """filecache for files in .hg but outside of .hg/store"""
101 """filecache for files in .hg but outside of .hg/store"""
102 def __init__(self, *paths):
102 def __init__(self, *paths):
103 super(repofilecache, self).__init__(*paths)
103 super(repofilecache, self).__init__(*paths)
104 for path in paths:
104 for path in paths:
105 _cachedfiles.add((path, 'plain'))
105 _cachedfiles.add((path, 'plain'))
106
106
107 def join(self, obj, fname):
107 def join(self, obj, fname):
108 return obj.vfs.join(fname)
108 return obj.vfs.join(fname)
109
109
110 class storecache(_basefilecache):
110 class storecache(_basefilecache):
111 """filecache for files in the store"""
111 """filecache for files in the store"""
112 def __init__(self, *paths):
112 def __init__(self, *paths):
113 super(storecache, self).__init__(*paths)
113 super(storecache, self).__init__(*paths)
114 for path in paths:
114 for path in paths:
115 _cachedfiles.add((path, ''))
115 _cachedfiles.add((path, ''))
116
116
117 def join(self, obj, fname):
117 def join(self, obj, fname):
118 return obj.sjoin(fname)
118 return obj.sjoin(fname)
119
119
120 def isfilecached(repo, name):
120 def isfilecached(repo, name):
121 """check if a repo has already cached "name" filecache-ed property
121 """check if a repo has already cached "name" filecache-ed property
122
122
123 This returns (cachedobj-or-None, iscached) tuple.
123 This returns (cachedobj-or-None, iscached) tuple.
124 """
124 """
125 cacheentry = repo.unfiltered()._filecache.get(name, None)
125 cacheentry = repo.unfiltered()._filecache.get(name, None)
126 if not cacheentry:
126 if not cacheentry:
127 return None, False
127 return None, False
128 return cacheentry.obj, True
128 return cacheentry.obj, True
129
129
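# Hedged usage sketch ('changelog' is one of the filecache-ed properties on
# localrepository):
#
#   cl, cached = isfilecached(repo, 'changelog')
#   if cached:
#       pass   # reuse ``cl`` without triggering a (re)load from disk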
130 class unfilteredpropertycache(util.propertycache):
130 class unfilteredpropertycache(util.propertycache):
131 """propertycache that apply to unfiltered repo only"""
131 """propertycache that apply to unfiltered repo only"""
132
132
133 def __get__(self, repo, type=None):
133 def __get__(self, repo, type=None):
134 unfi = repo.unfiltered()
134 unfi = repo.unfiltered()
135 if unfi is repo:
135 if unfi is repo:
136 return super(unfilteredpropertycache, self).__get__(unfi)
136 return super(unfilteredpropertycache, self).__get__(unfi)
137 return getattr(unfi, self.name)
137 return getattr(unfi, self.name)
138
138
139 class filteredpropertycache(util.propertycache):
139 class filteredpropertycache(util.propertycache):
140 """propertycache that must take filtering in account"""
140 """propertycache that must take filtering in account"""
141
141
142 def cachevalue(self, obj, value):
142 def cachevalue(self, obj, value):
143 object.__setattr__(obj, self.name, value)
143 object.__setattr__(obj, self.name, value)
144
144
145
145
146 def hasunfilteredcache(repo, name):
146 def hasunfilteredcache(repo, name):
147 """check if a repo has an unfilteredpropertycache value for <name>"""
147 """check if a repo has an unfilteredpropertycache value for <name>"""
148 return name in vars(repo.unfiltered())
148 return name in vars(repo.unfiltered())
149
149
150 def unfilteredmethod(orig):
150 def unfilteredmethod(orig):
151 """decorate method that always need to be run on unfiltered version"""
151 """decorate method that always need to be run on unfiltered version"""
152 def wrapper(repo, *args, **kwargs):
152 def wrapper(repo, *args, **kwargs):
153 return orig(repo.unfiltered(), *args, **kwargs)
153 return orig(repo.unfiltered(), *args, **kwargs)
154 return wrapper
154 return wrapper
155
155
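# Hedged sketch of the decorator in use (toy class, not a real repo;
# localrepository applies it to e.g. destroyed()):
#
#   class toyrepo(object):
#       def unfiltered(self):
#           return self      # a real repoview returns its unfiltered repo
#       @unfilteredmethod
#       def destroyed(self):
#           pass             # always sees the unfiltered repo as ``self``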
156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
156 moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
157 'unbundle'}
157 'unbundle'}
158 legacycaps = moderncaps.union({'changegroupsubset'})
158 legacycaps = moderncaps.union({'changegroupsubset'})
159
159
160 @interfaceutil.implementer(repository.ipeercommandexecutor)
160 @interfaceutil.implementer(repository.ipeercommandexecutor)
161 class localcommandexecutor(object):
161 class localcommandexecutor(object):
162 def __init__(self, peer):
162 def __init__(self, peer):
163 self._peer = peer
163 self._peer = peer
164 self._sent = False
164 self._sent = False
165 self._closed = False
165 self._closed = False
166
166
167 def __enter__(self):
167 def __enter__(self):
168 return self
168 return self
169
169
170 def __exit__(self, exctype, excvalue, exctb):
170 def __exit__(self, exctype, excvalue, exctb):
171 self.close()
171 self.close()
172
172
173 def callcommand(self, command, args):
173 def callcommand(self, command, args):
174 if self._sent:
174 if self._sent:
175 raise error.ProgrammingError('callcommand() cannot be used after '
175 raise error.ProgrammingError('callcommand() cannot be used after '
176 'sendcommands()')
176 'sendcommands()')
177
177
178 if self._closed:
178 if self._closed:
179 raise error.ProgrammingError('callcommand() cannot be used after '
179 raise error.ProgrammingError('callcommand() cannot be used after '
180 'close()')
180 'close()')
181
181
182 # We don't need to support anything fancy. Just call the named
182 # We don't need to support anything fancy. Just call the named
183 # method on the peer and return a resolved future.
183 # method on the peer and return a resolved future.
184 fn = getattr(self._peer, pycompat.sysstr(command))
184 fn = getattr(self._peer, pycompat.sysstr(command))
185
185
186 f = pycompat.futures.Future()
186 f = pycompat.futures.Future()
187
187
188 try:
188 try:
189 result = fn(**pycompat.strkwargs(args))
189 result = fn(**pycompat.strkwargs(args))
190 except Exception:
190 except Exception:
191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
191 pycompat.future_set_exception_info(f, sys.exc_info()[1:])
192 else:
192 else:
193 f.set_result(result)
193 f.set_result(result)
194
194
195 return f
195 return f
196
196
197 def sendcommands(self):
197 def sendcommands(self):
198 self._sent = True
198 self._sent = True
199
199
200 def close(self):
200 def close(self):
201 self._closed = True
201 self._closed = True
202
202
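# Hedged usage sketch of the executor protocol implemented above (``peer``
# is assumed to be a localpeer; 'heads' is one of its commands):
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand('heads', {})
#       e.sendcommands()
#   heads = f.result()   # the future is already resolved in the local case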
203 @interfaceutil.implementer(repository.ipeercommands)
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()
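
# A minimal sketch (hypothetical extension code; the requirement name is made
# up) of how an extension can hook into ``featuresetupfuncs``. The registered
# function receives the ui and the mutable set of supported requirement
# strings consulted by ``gathersupportedrequirements()`` below:
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, features):
#       # advertise that this extension knows how to open repositories
#       # carrying this (hypothetical) requirement
#       features.add(b'exp-myextension-storage')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)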

def makelocalrepository(baseui, path, intents=None):
    """Create a local repository object.

    Given arguments needed to construct a local repository, this function
    performs various early repository loading functionality (such as
    reading the ``.hg/requires`` and ``.hg/hgrc`` files), validates that
    the repository can be opened, derives a type suitable for representing
    that repository, and returns an instance of it.

    The returned object conforms to the ``repository.completelocalrepository``
    interface.

    The repository type is derived by calling a series of factory functions
    for each aspect/interface of the final repository. These are defined by
    ``REPO_INTERFACES``.

    Each factory function is called to produce a type implementing a specific
    interface. The cumulative list of returned types will be combined into a
    new type and that type will be instantiated to represent the local
    repository.

    The factory functions each receive various state that may be consulted
    as part of deriving a type.

    Extensions should wrap these factory functions to customize repository type
    creation. Note that an extension's wrapped function may be called even if
    that extension is not loaded for the repo being constructed. Extensions
    should check if their ``__name__`` appears in the
    ``extensionmodulenames`` set passed to the factory function and no-op if
    not.
    """
    ui = baseui.copy()
    # Prevent copying repo configuration.
    ui.copy = baseui.copy

    # Working directory VFS rooted at repository root.
    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    # Main VFS for .hg/ directory.
    hgpath = wdirvfs.join(b'.hg')
    hgvfs = vfsmod.vfs(hgpath, cacheaudited=True)

    # The .hg/ path should exist and should be a directory. All other
    # cases are errors.
    if not hgvfs.isdir():
        try:
            hgvfs.stat()
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise

        raise error.RepoError(_(b'repository %s not found') % path)

    # .hg/requires file contains a newline-delimited list of
    # features/capabilities the opener (us) must have in order to use
    # the repository. This file was introduced in Mercurial 0.9.2,
    # which means very old repositories may not have one. We assume
    # a missing file translates to no requirements.
    try:
        requirements = set(hgvfs.read(b'requires').splitlines())
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        requirements = set()

    # The .hg/hgrc file may load extensions or contain config options
    # that influence repository construction. Attempt to load it and
    # process any new extensions that it may have pulled in.
    try:
        ui.readconfig(hgvfs.join(b'hgrc'), root=wdirvfs.base)
        # Run this before extensions.loadall() so extensions can be
        # automatically enabled.
        afterhgrcload(ui, wdirvfs, hgvfs, requirements)
    except IOError:
        pass
    else:
        extensions.loadall(ui)

    # Set of module names of extensions loaded for this repository.
    extensionmodulenames = {m.__name__ for n, m in extensions.extensions(ui)}

    supportedrequirements = gathersupportedrequirements(ui)

    # We first validate the requirements are known.
    ensurerequirementsrecognized(requirements, supportedrequirements)

    # Then we validate that the known set is reasonable to use together.
    ensurerequirementscompatible(ui, requirements)

    # TODO there are unhandled edge cases related to opening repositories with
    # shared storage. If storage is shared, we should also test for requirements
    # compatibility in the pointed-to repo. This entails loading the .hg/hgrc in
    # that repo, as that repo may load extensions needed to open it. This is a
    # bit complicated because we don't want the other hgrc to overwrite settings
    # in this hgrc.
    #
    # This bug is somewhat mitigated by the fact that we copy the .hg/requires
    # file when sharing repos. But if a requirement is added after the share is
    # performed, thereby introducing a new requirement for the opener, we may
    # not see that and could encounter a run-time error interacting with
    # that shared store since it has an unknown-to-us requirement.

    # At this point, we know we should be capable of opening the repository.
    # Now get on with doing that.

    features = set()

    # The "store" part of the repository holds versioned data. How it is
    # accessed is determined by various requirements. The ``shared`` or
    # ``relshared`` requirements indicate the store lives in the path contained
    # in the ``.hg/sharedpath`` file. This is an absolute path for
    # ``shared`` and relative to ``.hg/`` for ``relshared``.
    if b'shared' in requirements or b'relshared' in requirements:
        sharedpath = hgvfs.read(b'sharedpath').rstrip(b'\n')
        if b'relshared' in requirements:
            sharedpath = hgvfs.join(sharedpath)

        sharedvfs = vfsmod.vfs(sharedpath, realpath=True)

        if not sharedvfs.exists():
            raise error.RepoError(_(b'.hg/sharedpath points to nonexistent '
                                    b'directory %s') % sharedvfs.base)

        features.add(repository.REPO_FEATURE_SHARED_STORAGE)

        storebasepath = sharedvfs.base
        cachepath = sharedvfs.join(b'cache')
    else:
        storebasepath = hgvfs.base
        cachepath = hgvfs.join(b'cache')

    # The store has changed over time and the exact layout is dictated by
    # requirements. The store interface abstracts differences across all
    # of them.
    store = makestore(requirements, storebasepath,
                      lambda base: vfsmod.vfs(base, cacheaudited=True))
    hgvfs.createmode = store.createmode

    storevfs = store.vfs
    storevfs.options = resolvestorevfsoptions(ui, requirements, features)

    # The cache vfs is used to manage cache files.
    cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
    cachevfs.createmode = store.createmode

    # Now resolve the type for the repository object. We do this by repeatedly
    # calling a factory function to produce types for specific aspects of the
    # repo's operation. The aggregate returned types are used as base classes
    # for a dynamically-derived type, which will represent our new repository.

    bases = []
    extrastate = {}

    for iface, fn in REPO_INTERFACES:
        # We pass all potentially useful state to give extensions tons of
        # flexibility.
        typ = fn(ui=ui,
                 intents=intents,
                 requirements=requirements,
                 features=features,
                 wdirvfs=wdirvfs,
                 hgvfs=hgvfs,
                 store=store,
                 storevfs=storevfs,
                 storeoptions=storevfs.options,
                 cachevfs=cachevfs,
                 extensionmodulenames=extensionmodulenames,
                 extrastate=extrastate,
                 baseclasses=bases)

        if not isinstance(typ, type):
            raise error.ProgrammingError('unable to construct type for %s' %
                                         iface)

        bases.append(typ)

    # type() allows you to use characters in type names that wouldn't be
    # recognized as Python symbols in source code. We abuse that to add
    # rich information about our constructed repo.
    name = pycompat.sysstr(b'derivedrepo:%s<%s>' % (
        wdirvfs.base,
        b','.join(sorted(requirements))))

    cls = type(name, tuple(bases), {})

    return cls(
        baseui=baseui,
        ui=ui,
        origroot=path,
        wdirvfs=wdirvfs,
        hgvfs=hgvfs,
        requirements=requirements,
        supportedrequirements=supportedrequirements,
        sharedpath=storebasepath,
        store=store,
        cachevfs=cachevfs,
        features=features,
        intents=intents)

def afterhgrcload(ui, wdirvfs, hgvfs, requirements):
    """Perform additional actions after .hg/hgrc is loaded.

    This function is called during repository loading immediately after
    the .hg/hgrc file is loaded and before per-repo extensions are loaded.

    The function can be used to validate configs, automatically add
    options (including extensions) based on requirements, etc.
    """

    # Map of requirements to lists of extensions to load automatically when
    # the requirement is present.
    autoextensions = {
        b'largefiles': [b'largefiles'],
        b'lfs': [b'lfs'],
    }

    for requirement, names in sorted(autoextensions.items()):
        if requirement not in requirements:
            continue

        for name in names:
            if not ui.hasconfig(b'extensions', name):
                ui.setconfig(b'extensions', name, b'', source='autoload')
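
# For example (a sketch of the effect, not additional behavior): a repository
# whose .hg/requires lists b'lfs' will, per the map above, have the
# equivalent of the following applied before extensions.loadall() runs,
# unless the user already configured the extension explicitly:
#
#   ui.setconfig(b'extensions', b'lfs', b'', source='autoload')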

def gathersupportedrequirements(ui):
    """Determine the complete set of recognized requirements."""
    # Start with all requirements supported by this file.
    supported = set(localrepository._basesupported)

    # Execute ``featuresetupfuncs`` entries if they belong to an extension
    # relevant to this ui instance.
    modules = {m.__name__ for n, m in extensions.extensions(ui)}

    for fn in featuresetupfuncs:
        if fn.__module__ in modules:
            fn(ui, supported)

    # Add derived requirements from registered compression engines.
    for name in util.compengines:
        engine = util.compengines[name]
        if engine.revlogheader():
            supported.add(b'exp-compression-%s' % name)

    return supported

def ensurerequirementsrecognized(requirements, supported):
    """Validate that a set of local requirements is recognized.

    Receives a set of requirements. Raises an ``error.RepoError`` if there
    exists any requirement in that set that currently loaded code doesn't
    recognize.

    Returns a set of supported requirements.
    """
    missing = set()

    for requirement in requirements:
        if requirement in supported:
            continue

        if not requirement or not requirement[0:1].isalnum():
            raise error.RequirementError(_(b'.hg/requires file is corrupt'))

        missing.add(requirement)

    if missing:
        raise error.RequirementError(
            _(b'repository requires features unknown to this Mercurial: %s') %
            b' '.join(sorted(missing)),
            hint=_(b'see https://mercurial-scm.org/wiki/MissingRequirement '
                   b'for more information'))

def ensurerequirementscompatible(ui, requirements):
    """Validates that a set of recognized requirements is mutually compatible.

    Some requirements may not be compatible with others or require
    config options that aren't enabled. This function is called during
    repository opening to ensure that the set of requirements needed
    to open a repository is sane and compatible with config options.

    Extensions can monkeypatch this function to perform additional
    checking.

    ``error.RepoError`` should be raised on failure.
    """
    if b'exp-sparse' in requirements and not sparse.enabled:
        raise error.RepoError(_(b'repository is using sparse feature but '
                                b'sparse is not enabled; enable the '
                                b'"sparse" extension to access'))

def makestore(requirements, path, vfstype):
    """Construct a storage object for a repository."""
    if b'store' in requirements:
        if b'fncache' in requirements:
            return storemod.fncachestore(path, vfstype,
                                         b'dotencode' in requirements)

        return storemod.encodedstore(path, vfstype)

    return storemod.basicstore(path, vfstype)

def resolvestorevfsoptions(ui, requirements, features):
    """Resolve the options to pass to the store vfs opener.

    The returned dict is used to influence behavior of the storage layer.
    """
    options = {}

    if b'treemanifest' in requirements:
        options[b'treemanifest'] = True

    # experimental config: format.manifestcachesize
    manifestcachesize = ui.configint(b'format', b'manifestcachesize')
    if manifestcachesize is not None:
        options[b'manifestcachesize'] = manifestcachesize

    # In the absence of another requirement superseding a revlog-related
    # requirement, we have to assume the repo is using revlog version 0.
    # This revlog format is super old and we don't bother trying to parse
    # opener options for it because those options wouldn't do anything
    # meaningful on such old repos.
    if b'revlogv1' in requirements or REVLOGV2_REQUIREMENT in requirements:
        options.update(resolverevlogstorevfsoptions(ui, requirements, features))

    return options
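
# A small illustration (a sketch; exact values depend on config defaults) of
# how the resolver above behaves for a typical modern repository:
#
#   requirements = {b'revlogv1', b'generaldelta', b'store', b'fncache'}
#   options = resolvestorevfsoptions(ui, requirements, set())
#   # options now contains b'revlogv1': True and b'generaldelta': True,
#   # plus the chunk cache, delta, and sparse-read settings resolved from
#   # config by resolverevlogstorevfsoptions() below.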

def resolverevlogstorevfsoptions(ui, requirements, features):
    """Resolve opener options specific to revlogs."""

    options = {}

    if b'revlogv1' in requirements:
        options[b'revlogv1'] = True
    if REVLOGV2_REQUIREMENT in requirements:
        options[b'revlogv2'] = True

    if b'generaldelta' in requirements:
        options[b'generaldelta'] = True

    # experimental config: format.chunkcachesize
    chunkcachesize = ui.configint(b'format', b'chunkcachesize')
    if chunkcachesize is not None:
        options[b'chunkcachesize'] = chunkcachesize

    deltabothparents = ui.configbool(b'storage',
                                     b'revlog.optimize-delta-parent-choice')
    options[b'deltabothparents'] = deltabothparents

    options[b'lazydeltabase'] = not scmutil.gddeltaconfig(ui)

    chainspan = ui.configbytes(b'experimental', b'maxdeltachainspan')
    if 0 <= chainspan:
        options[b'maxdeltachainspan'] = chainspan

    mmapindexthreshold = ui.configbytes(b'experimental',
                                        b'mmapindexthreshold')
    if mmapindexthreshold is not None:
        options[b'mmapindexthreshold'] = mmapindexthreshold

    withsparseread = ui.configbool(b'experimental', b'sparse-read')
    srdensitythres = float(ui.config(b'experimental',
                                     b'sparse-read.density-threshold'))
    srmingapsize = ui.configbytes(b'experimental',
                                  b'sparse-read.min-gap-size')
    options[b'with-sparse-read'] = withsparseread
    options[b'sparse-read-density-threshold'] = srdensitythres
    options[b'sparse-read-min-gap-size'] = srmingapsize

    sparserevlog = SPARSEREVLOG_REQUIREMENT in requirements
    options[b'sparse-revlog'] = sparserevlog
    if sparserevlog:
        options[b'generaldelta'] = True

    maxchainlen = None
    if sparserevlog:
        maxchainlen = revlogconst.SPARSE_REVLOG_MAX_CHAIN_LENGTH
    # experimental config: format.maxchainlen
    maxchainlen = ui.configint(b'format', b'maxchainlen', maxchainlen)
    if maxchainlen is not None:
        options[b'maxchainlen'] = maxchainlen

    for r in requirements:
        if r.startswith(b'exp-compression-'):
            options[b'compengine'] = r[len(b'exp-compression-'):]

    if repository.NARROW_REQUIREMENT in requirements:
        options[b'enableellipsis'] = True

    return options

def makemain(**kwargs):
    """Produce a type conforming to ``ilocalrepositorymain``."""
    return localrepository

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlogfilestorage(object):
    """File storage when using revlogs."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.filelog(self.svfs, path)

@interfaceutil.implementer(repository.ilocalrepositoryfilestorage)
class revlognarrowfilestorage(object):
    """File storage when using revlogs and narrow files."""

    def file(self, path):
        if path[0] == b'/':
            path = path[1:]

        return filelog.narrowfilelog(self.svfs, path, self.narrowmatch())

def makefilestorage(requirements, features, **kwargs):
    """Produce a type conforming to ``ilocalrepositoryfilestorage``."""
    features.add(repository.REPO_FEATURE_REVLOG_FILE_STORAGE)

    if repository.NARROW_REQUIREMENT in requirements:
        return revlognarrowfilestorage
    else:
        return revlogfilestorage

# List of repository interfaces and factory functions for them. Each
# will be called in order during ``makelocalrepository()`` to iteratively
# derive the final type for a local repository instance.
REPO_INTERFACES = [
    (repository.ilocalrepositorymain, makemain),
    (repository.ilocalrepositoryfilestorage, makefilestorage),
]

@interfaceutil.implementer(repository.ilocalrepositorymain)
class localrepository(object):
    """Main class for representing local repositories.

    All local repositories are instances of this class.

    Constructed on its own, instances of this class are not usable as
    repository objects. To obtain a usable repository object, call
    ``hg.repository()``, ``localrepo.instance()``, or
    ``localrepo.makelocalrepository()``. The latter is the lowest-level.
    ``instance()`` adds support for creating new repositories.
    ``hg.repository()`` adds more extension integration, including calling
    ``reposetup()``. Generally speaking, ``hg.repository()`` should be
    used.
    """

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
        SPARSEREVLOG_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
        'internal-phase'
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, ui, origroot, wdirvfs, hgvfs, requirements,
                 supportedrequirements, sharedpath, store, cachevfs,
                 features, intents=None):
        """Create a new local repository instance.

        Most callers should use ``hg.repository()``, ``localrepo.instance()``,
        or ``localrepo.makelocalrepository()`` for obtaining a new repository
        object.

        Arguments:

        baseui
           ``ui.ui`` instance that ``ui`` argument was based off of.

        ui
           ``ui.ui`` instance for use by the repository.

        origroot
           ``bytes`` path to working directory root of this repository.

        wdirvfs
           ``vfs.vfs`` rooted at the working directory.

        hgvfs
           ``vfs.vfs`` rooted at .hg/

        requirements
           ``set`` of bytestrings representing repository opening requirements.

        supportedrequirements
           ``set`` of bytestrings representing repository requirements that we
           know how to open. May be a superset of ``requirements``.

        sharedpath
           ``bytes`` Defining path to storage base directory. Points to a
           ``.hg/`` directory somewhere.

        store
           ``store.basicstore`` (or derived) instance providing access to
           versioned storage.

        cachevfs
           ``vfs.vfs`` used for cache files.

        features
           ``set`` of bytestrings defining features/capabilities of this
           instance.

        intents
           ``set`` of system strings indicating what this repo will be used
           for.
        """
        self.baseui = baseui
        self.ui = ui
        self.origroot = origroot
        # vfs rooted at working directory.
        self.wvfs = wdirvfs
        self.root = wdirvfs.base
        # vfs rooted at .hg/. Used to access most non-store paths.
        self.vfs = hgvfs
        self.path = hgvfs.base
        self.requirements = requirements
        self.supported = supportedrequirements
        self.sharedpath = sharedpath
        self.store = store
        self.cachevfs = cachevfs
        self.features = features

        self.filtername = None

        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []

        color.setup(self.ui)

        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    # Don't cache auditor/nofsauditor, or you'll end up with reference cycle:
    # self -> auditor -> self._checknested -> self

    @property
    def auditor(self):
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested)

    @property
    def nofsauditor(self):
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        return pathutil.pathauditor(self.root, callback=self._checknested,
                                    realfs=False, cached=True)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
1114 sub = ctx.sub(prefix)
1115 return sub.checknested(subpath[len(prefix) + 1:])
1115 return sub.checknested(subpath[len(prefix) + 1:])
1116 else:
1116 else:
1117 parts.pop()
1117 parts.pop()
1118 return False
1118 return False
1119
1119
1120 def peer(self):
1120 def peer(self):
1121 return localpeer(self) # not cached to avoid reference cycle
1121 return localpeer(self) # not cached to avoid reference cycle
1122
1122
1123 def unfiltered(self):
1123 def unfiltered(self):
1124 """Return unfiltered version of the repository
1124 """Return unfiltered version of the repository
1125
1125
1126 Intended to be overwritten by filtered repo."""
1126 Intended to be overwritten by filtered repo."""
1127 return self
1127 return self
1128
1128
1129 def filtered(self, name, visibilityexceptions=None):
1129 def filtered(self, name, visibilityexceptions=None):
1130 """Return a filtered version of a repository"""
1130 """Return a filtered version of a repository"""
1131 cls = repoview.newtype(self.unfiltered().__class__)
1131 cls = repoview.newtype(self.unfiltered().__class__)
1132 return cls(self, name, visibilityexceptions)
1132 return cls(self, name, visibilityexceptions)
1133
1133
1134 @repofilecache('bookmarks', 'bookmarks.current')
1134 @repofilecache('bookmarks', 'bookmarks.current')
1135 def _bookmarks(self):
1135 def _bookmarks(self):
1136 return bookmarks.bmstore(self)
1136 return bookmarks.bmstore(self)
1137
1137
1138 @property
1138 @property
1139 def _activebookmark(self):
1139 def _activebookmark(self):
1140 return self._bookmarks.active
1140 return self._bookmarks.active
1141
1141
1142 # _phasesets depend on changelog. what we need is to call
1142 # _phasesets depend on changelog. what we need is to call
1143 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1143 # _phasecache.invalidate() if '00changelog.i' was changed, but it
1144 # can't be easily expressed in filecache mechanism.
1144 # can't be easily expressed in filecache mechanism.
1145 @storecache('phaseroots', '00changelog.i')
1145 @storecache('phaseroots', '00changelog.i')
1146 def _phasecache(self):
1146 def _phasecache(self):
1147 return phases.phasecache(self, self._phasedefaults)
1147 return phases.phasecache(self, self._phasedefaults)
1148
1148
1149 @storecache('obsstore')
1149 @storecache('obsstore')
1150 def obsstore(self):
1150 def obsstore(self):
1151 return obsolete.makestore(self.ui, self)
1151 return obsolete.makestore(self.ui, self)
1152
1152
1153 @storecache('00changelog.i')
1153 @storecache('00changelog.i')
1154 def changelog(self):
1154 def changelog(self):
1155 return changelog.changelog(self.svfs,
1155 return changelog.changelog(self.svfs,
1156 trypending=txnutil.mayhavepending(self.root))
1156 trypending=txnutil.mayhavepending(self.root))
1157
1157
1158 @storecache('00manifest.i')
1158 @storecache('00manifest.i')
1159 def manifestlog(self):
1159 def manifestlog(self):
1160 rootstore = manifest.manifestrevlog(self.svfs)
1160 rootstore = manifest.manifestrevlog(self.svfs)
1161 return manifest.manifestlog(self.svfs, self, rootstore)
1161 return manifest.manifestlog(self.svfs, self, rootstore)
1162
1162
1163 @repofilecache('dirstate')
1163 @repofilecache('dirstate')
1164 def dirstate(self):
1164 def dirstate(self):
1165 return self._makedirstate()
1165 return self._makedirstate()
1166
1166
1167 def _makedirstate(self):
1167 def _makedirstate(self):
1168 """Extension point for wrapping the dirstate per-repo."""
1168 """Extension point for wrapping the dirstate per-repo."""
1169 sparsematchfn = lambda: sparse.matcher(self)
1169 sparsematchfn = lambda: sparse.matcher(self)
1170
1170
1171 return dirstate.dirstate(self.vfs, self.ui, self.root,
1171 return dirstate.dirstate(self.vfs, self.ui, self.root,
1172 self._dirstatevalidate, sparsematchfn)
1172 self._dirstatevalidate, sparsematchfn)
1173
1173
1174 def _dirstatevalidate(self, node):
1174 def _dirstatevalidate(self, node):
1175 try:
1175 try:
1176 self.changelog.rev(node)
1176 self.changelog.rev(node)
1177 return node
1177 return node
1178 except error.LookupError:
1178 except error.LookupError:
1179 if not self._dirstatevalidatewarned:
1179 if not self._dirstatevalidatewarned:
1180 self._dirstatevalidatewarned = True
1180 self._dirstatevalidatewarned = True
1181 self.ui.warn(_("warning: ignoring unknown"
1181 self.ui.warn(_("warning: ignoring unknown"
1182 " working parent %s!\n") % short(node))
1182 " working parent %s!\n") % short(node))
1183 return nullid
1183 return nullid
1184
1184
1185 @storecache(narrowspec.FILENAME)
1185 @storecache(narrowspec.FILENAME)
1186 def narrowpats(self):
1186 def narrowpats(self):
1187 """matcher patterns for this repository's narrowspec
1187 """matcher patterns for this repository's narrowspec
1188
1188
1189 A tuple of (includes, excludes).
1189 A tuple of (includes, excludes).
1190 """
1190 """
1191 return narrowspec.load(self)
1191 return narrowspec.load(self)
1192
1192
1193 @storecache(narrowspec.FILENAME)
1193 @storecache(narrowspec.FILENAME)
1194 def _narrowmatch(self):
1194 def _narrowmatch(self):
1195 if repository.NARROW_REQUIREMENT not in self.requirements:
1195 if repository.NARROW_REQUIREMENT not in self.requirements:
1196 return matchmod.always(self.root, '')
1196 return matchmod.always(self.root, '')
1197 include, exclude = self.narrowpats
1197 include, exclude = self.narrowpats
1198 return narrowspec.match(self.root, include=include, exclude=exclude)
1198 return narrowspec.match(self.root, include=include, exclude=exclude)
1199
1199
1200 # TODO(martinvonz): make this property-like instead?
1200 # TODO(martinvonz): make this property-like instead?
1201 def narrowmatch(self):
1201 def narrowmatch(self):
1202 return self._narrowmatch
1202 return self._narrowmatch
1203
1203
1204 def setnarrowpats(self, newincludes, newexcludes):
1204 def setnarrowpats(self, newincludes, newexcludes):
1205 narrowspec.save(self, newincludes, newexcludes)
1205 narrowspec.save(self, newincludes, newexcludes)
1206 self.invalidate(clearfilecache=True)
1206 self.invalidate(clearfilecache=True)
1207
1207
    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [self[i]
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            if isinstance(changeid, int):
                node = self.changelog.node(changeid)
                rev = changeid
                return context.changectx(self, rev, node)
            elif changeid == 'null':
                node = nullid
                rev = nullrev
                return context.changectx(self, rev, node)
            elif changeid == 'tip':
                node = self.changelog.tip()
                rev = self.changelog.rev(node)
                return context.changectx(self, rev, node)
-            elif (changeid == '.'
-                  or self.local() and changeid == self.dirstate.p1()):
+            elif changeid == '.':
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                node = self.dirstate.p1()
                rev = self.unfiltered().changelog.rev(node)
                return context.changectx(self, rev, node)
            elif len(changeid) == 20:
                try:
                    node = changeid
                    rev = self.changelog.rev(changeid)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    changeid = hex(changeid) # for the error message
                    raise
                except LookupError:
                    # check if it might have come from damaged dirstate
                    #
                    # XXX we could avoid the unfiltered if we had a recognizable
                    # exception for filtered changeset access
                    if (self.local()
                        and changeid in self.unfiltered().dirstate.parents()):
                        msg = _("working directory has unknown parent '%s'!")
                        raise error.Abort(msg % short(changeid))
                    changeid = hex(changeid) # for the error message

            elif len(changeid) == 40:
                try:
                    node = bin(changeid)
                    rev = self.changelog.rev(node)
                    return context.changectx(self, rev, node)
                except error.FilteredLookupError:
                    raise
                except LookupError:
                    pass
            else:
                raise error.ProgrammingError(
                    "unsupported changeid '%s' of type %s" %
                    (changeid, type(changeid)))

        except (error.FilteredIndexError, error.FilteredLookupError):
            raise error.FilteredRepoLookupError(_("filtered revision '%s'")
                                                % pycompat.bytestr(changeid))
        except IndexError:
            pass
        except error.WdirUnsupported:
            return context.workingctx(self)
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)

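    # For illustration, the lookup forms accepted above (each maps to one of
    # the branches in __getitem__; the example values are invented):
    #
    #   repo[None]         -> workingctx for the working directory
    #   repo[42]           -> changectx for integer revision 42
    #   repo['null']       -> changectx for the null revision
    #   repo['tip']        -> changectx for the repository tip
    #   repo['.']          -> changectx for the working directory parent
    #   repo[binnode]      -> changectx for a 20-byte binary node
    #   repo[hexnode]      -> changectx for a 40-character hex nodeid
    #
    # Unknown revisions raise error.RepoLookupError; unsupported changeid
    # types raise error.ProgrammingError.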
    def __contains__(self, changeid):
        """True if the given changeid exists

        error.AmbiguousPrefixLookupError is raised if an ambiguous node is
        specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

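    # A sketch of typical usage (example values invented): per
    # ``revsetlang.formatspec``, ``%d`` escapes a single integer revision
    # and ``%ld`` a list of them, e.g.:
    #
    #   repo.revs('%d::', 42)
    #   repo.revs('heads(%ld)', [5, 7, 11])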
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).
        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

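    # For illustration: given one node present in the changelog and one that
    # is not, ``repo.known([presentnode, missingnode])`` returns
    # ``[True, False]``; nodes of filtered revisions likewise report False.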
    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

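    # A hypothetical hgrc configuration read by _loadfilter (the ``filter``
    # argument names the config section, 'encode' or 'decode' in practice):
    #
    #   [encode]
    #   **.txt = tempfile: unix2dos -n INFILE OUTFILE
    #
    # Patterns mapped to '!' are skipped; a command prefix matching a filter
    # registered via adddatafilter() selects that filter, and anything else
    # falls through to procutil.filter().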
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        This returns length of written (maybe decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
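        # the resulting id is 'TXN:' plus 40 hex digits, since ``ha`` above
        # is the hex-encoded SHA-1 digest of the random idbase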
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite hard
        # to track these movements from a code perspective. So we fall back to
        # tracking at the repository level. One could envision tracking changes
        # to the '.hgtags' file through changegroup apply, but that fails to
        # cope with cases where a transaction exposes new heads without a
        # changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with performance impacts. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new or changed or deleted tags). In addition the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file as it
        # might exist from a previous transaction even if no tags were touched
        # in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the transaction
                        # because we need it to still exist when the
                        # transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                narrowspec.restorebackup(self, 'journal.narrowspec')
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['origrepolen'] = len(self)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clone, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr
1866
1865
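    # Illustrative sketch (not part of the original module): how an
    # in-process hook might consume the 'txnclose-bookmark' arguments
    # prepared above. The function name and the hgrc wiring are
    # hypothetical; the keyword names are assumed to mirror what
    # bookmarks.preparehookargs() supplies.
    #
    #     # in an extension, enabled via:
    #     #   [hooks]
    #     #   txnclose-bookmark.log = python:myext.logbookmark
    #     def logbookmark(ui, repo, hooktype, txnname=None, bookmark=None,
    #                     node=None, oldnode=None, **kwargs):
    #         ui.status('bookmark %s moved during %s\n' % (bookmark, txnname))
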
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        narrowspec.savebackup(self, 'journal.narrowspec')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

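    # Illustrative sketch (not part of the original module): rollback() is
    # the programmatic counterpart of `hg rollback`. A dry run reports what
    # would be undone without touching the store; `repo` is assumed to be
    # an open localrepository instance.
    #
    #     if repo.rollback(dryrun=True) == 0:
    #         repo.rollback(dryrun=False, force=False)
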
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            narrowspec.restorebackup(self, 'undo.narrowspec')
            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during transaction to build the callback updating cache

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['origrepolen'] < len(self):
            # updating the unfiltered branchmap should refresh all the others.
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

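    # Illustrative sketch (not part of the original module): an extension
    # maintaining its own cache could piggy-back on updatecaches() by
    # wrapping it. `warmmycache` is hypothetical; extensions.wrapfunction
    # is the usual wrapping helper.
    #
    #     from mercurial import extensions, localrepo
    #
    #     def warmmycache(orig, repo, tr=None, full=False):
    #         orig(repo, tr=tr, full=full)
    #         # refresh the extension's own cache here
    #
    #     def uisetup(ui):
    #         extensions.wrapfunction(localrepo.localrepository,
    #                                 'updatecaches', warmmycache)
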
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

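    # Illustrative sketch (not part of the original module): deferring work
    # until every lock is dropped. The callback name and body are
    # hypothetical; the pattern mirrors how commit() schedules its 'commit'
    # hook further below.
    #
    #     def schedulenotification(repo):
    #         def callback():
    #             repo.ui.status('all locks released\n')
    #         repo._afterlock(callback)
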
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

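    # Illustrative sketch (not part of the original module): the documented
    # lock-ordering rule in practice -- always take wlock before lock. This
    # assumes the returned lock and transaction objects are used as context
    # managers.
    #
    #     with repo.wlock():
    #         with repo.lock():
    #             with repo.transaction('example') as tr:
    #                 pass # mutate dirstate and store here
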
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #  \       /      merging rev3 and rev4 should use bar@rev2
            #   \- 2 --- 4    as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

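    # Illustrative sketch (not part of the original module): the shape of
    # the copy metadata recorded above. For a commit that renamed foo to
    # bar, the new filelog revision of bar would carry roughly:
    #
    #     meta = {
    #         "copy": "foo",          # source path of the rename
    #         "copyrev": "1f0dee...", # hex filelog node of foo (40 hex
    #     }                           # digits in reality, elided here)
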
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

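    # Illustrative sketch (not part of the original module): committing a
    # single file programmatically. The matcher construction follows the
    # matchmod.exact(root, cwd, files) signature of this codebase; commit()
    # takes its own wlock and lock, so no locking is needed at the call
    # site. File name, message and user are placeholders.
    #
    #     m = matchmod.exact(repo.root, '', ['hello.txt'])
    #     node = repo.commit(text='update hello', user='alice', match=m)
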
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        ctx.files() should list all files involved in this commit, i.e.
        modified/added/removed files. On merge, it may be wider than the
        ctx.files() to be committed, since any file nodes derived directly
        from p1 or p2 are excluded from the committed ctx.files().
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                self.ui.debug('reusing known manifest\n')
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                files = changed + removed
                md = None
                if not files:
                    # if no "files" actually changed in terms of the changelog,
                    # try hard to detect unmodified manifest entry so that the
                    # exact same commit can be reproduced later on convert.
                    md = m1.diff(m, scmutil.matchfiles(self, ctx.files()))
                if not files and md:
                    self.ui.debug('not reusing manifest (no file change in '
                                  'changelog, but manifest differs)\n')
                if files or md:
                    self.ui.note(_("committing manifest\n"))
                    # we're using narrowmatch here since it's already applied at
                    # other stages (such as dirstate.walk), so we're already
                    # ignoring things outside of narrowspec in most cases. The
                    # one case where we might have files outside the narrowspec
                    # at this point is merges, and we already error out in the
                    # case where the merge has files outside of the narrowspec,
                    # so this is safe.
                    mn = mctx.write(trp, linkrev,
                                    p1.manifestnode(), p2.manifestnode(),
                                    added, drop, match=self.narrowmatch())
                else:
                    self.ui.debug('reusing manifest from p1 (listed files '
                                  'actually unchanged)\n')
                    mn = p1.manifestnode()
            else:
                self.ui.debug('reusing manifest from p1 (no file change)\n')
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the phase boundary does not alter the parent
                # changesets; if a parent has a higher phase, the resulting
                # phase will be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

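    # Illustrative sketch (not part of the original module): a
    # post-dirstate-status callback as consumed by workingctx. The callback
    # name is hypothetical; note it must be re-registered before each
    # status run, since the list is cleared afterwards.
    #
    #     def reportfixups(wctx, status):
    #         wctx.repo().ui.debug('%d files modified after fixups\n'
    #                              % len(status.modified))
    #
    #     repo.addpostdsstatus(reportfixups)
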
    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

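    # An illustrative sketch: repo.branchheads('default') returns the head
    # nodes of the 'default' branch, newest first; branchheads('default',
    # closed=True) would also include heads closed with 'hg commit
    # --close-branch'.
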
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            # follow first parents from each node until a merge or the root
            # is reached, recording the linear run as (tip, base, p1, p2)
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        # for each (top, bottom) pair, walk the first-parent chain from top
        # towards bottom and sample the nodes at exponentially growing
        # distances (1, 2, 4, 8, ...); the legacy wire protocol's 'between'
        # command uses this to bisect the range between known nodes
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consisting of a pushop with repo, remote,
        outgoing methods, which are called before pushing changesets.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

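    # A hedged usage sketch: pushkey namespaces (e.g. 'bookmarks', 'phases')
    # are registered in the pushkey module, and keys/values travel as
    # strings. Moving a bookmark amounts to something like the following
    # (the hex values here are hypothetical):
    #
    #   repo.pushkey('bookmarks', 'mybook', oldhex, newhex)
    #
    # This fires the prepushkey hook, performs the update, and schedules
    # the pushkey hook to run once the lock is released.
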
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

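# An illustrative sketch (the paths and vfs are hypothetical): aftertrans()
# defers the journal-to-undo renames until the transaction has closed, and
# undoname() computes the matching undo name:
#
#   undoname('/repo/.hg/journal.bookmarks')  # -> '/repo/.hg/undo.bookmarks'
#
#   after = aftertrans([(vfs, 'journal.bookmarks', 'undo.bookmarks')])
#   after()  # performs vfs.rename('journal.bookmarks', 'undo.bookmarks')
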
def instance(ui, path, create, intents=None, createopts=None):
    localpath = util.urllocalpath(path)
    if create:
        createrepository(ui, localpath, createopts=createopts)

    return makelocalrepository(ui, localpath, intents=intents)

def islocal(path):
    return True

def newreporequirements(ui, createopts=None):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    createopts = createopts or {}

    # If the repo is being created from a shared repository, we copy
    # its requirements.
    if 'sharedrepo' in createopts:
        requirements = set(createopts['sharedrepo'].requirements)
        if createopts.get('sharedrelative'):
            requirements.add('relshared')
        else:
            requirements.add('shared')

        return requirements

    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)
    # experimental config: format.internal-phase
    if ui.configbool('format', 'internal-phase'):
        requirements.add('internal-phase')

    if createopts.get('narrowfiles'):
        requirements.add(repository.NARROW_REQUIREMENT)

    return requirements

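# With stock defaults (usestore, usefncache, dotencode and generaldelta all
# enabled, zlib compression, no createopts), the function above should yield
# roughly:
#
#   {'revlogv1', 'store', 'fncache', 'dotencode', 'generaldelta'}
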
def filterknowncreateopts(ui, createopts):
    """Filters a dict of repo creation options against options that are known.

    Receives a dict of repo creation options and returns a dict of those
    options that we don't know how to handle.

    This function is called as part of repository creation. If the
    returned dict contains any items, repository creation will not
    be allowed, as it means there was a request to create a repository
    with options not recognized by loaded code.

    Extensions can wrap this function to filter out creation options
    they know how to handle.
    """
    known = {
        'narrowfiles',
        'sharedrepo',
        'sharedrelative',
        'shareditems',
    }

    return {k: v for k, v in createopts.items() if k not in known}

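# An illustrative sketch (the option name is hypothetical): an extension
# that knows how to handle a 'myopt' creation option could claim it by
# wrapping this function from its extsetup():
#
#   from mercurial import extensions, localrepo
#
#   def _filtercreateopts(orig, ui, createopts):
#       unknown = orig(ui, createopts)
#       unknown.pop('myopt', None)  # we know how to handle this one
#       return unknown
#
#   def extsetup(ui):
#       extensions.wrapfunction(localrepo, 'filterknowncreateopts',
#                               _filtercreateopts)
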
def createrepository(ui, path, createopts=None):
    """Create a new repository in a vfs.

    ``path`` path to the new repo's working directory.
    ``createopts`` options for the new repository.

    The following keys for ``createopts`` are recognized:

    narrowfiles
       Set up repository to support narrow file storage.
    sharedrepo
       Repository object from which storage should be shared.
    sharedrelative
       Boolean indicating if the path to the shared repo should be
       stored as relative. By default, the pointer to the "parent" repo
       is stored as an absolute path.
    shareditems
       Set of items to share to the new repository (in addition to storage).
    """
    createopts = createopts or {}

    unknownopts = filterknowncreateopts(ui, createopts)

    if not isinstance(unknownopts, dict):
        raise error.ProgrammingError('filterknowncreateopts() did not return '
                                     'a dict')

    if unknownopts:
        raise error.Abort(_('unable to create repository because of unknown '
                            'creation option: %s') %
                          ', '.join(sorted(unknownopts)),
                          hint=_('is a required extension not loaded?'))

    requirements = newreporequirements(ui, createopts=createopts)

    wdirvfs = vfsmod.vfs(path, expandpath=True, realpath=True)

    hgvfs = vfsmod.vfs(wdirvfs.join(b'.hg'))
    if hgvfs.exists():
        raise error.RepoError(_('repository %s already exists') % path)

    if 'sharedrepo' in createopts:
        sharedpath = createopts['sharedrepo'].sharedpath

        if createopts.get('sharedrelative'):
            try:
                sharedpath = os.path.relpath(sharedpath, hgvfs.base)
            except (IOError, ValueError) as e:
                # ValueError is raised on Windows if the drive letters differ
                # on each path.
                raise error.Abort(_('cannot calculate relative path'),
                                  hint=stringutil.forcebytestr(e))

    if not wdirvfs.exists():
        wdirvfs.makedirs()

    hgvfs.makedir(notindexed=True)

    if b'store' in requirements and 'sharedrepo' not in createopts:
        hgvfs.mkdir(b'store')

        # We create an invalid changelog outside the store so very old
        # Mercurial versions (which didn't know about the requirements
        # file) encounter an error on reading the changelog. This
        # effectively locks out old clients and prevents them from
        # mucking with a repo in an unknown format.
        #
        # The revlog header has version 2, which won't be recognized by
        # such old clients.
        hgvfs.append(b'00changelog.i',
                     b'\0\0\0\2 dummy changelog to prevent using the old repo '
                     b'layout')

    scmutil.writerequires(hgvfs, requirements)

    # Write out file telling readers where to find the shared store.
    if 'sharedrepo' in createopts:
        hgvfs.write(b'sharedpath', sharedpath)

    if createopts.get('shareditems'):
        shared = b'\n'.join(sorted(createopts['shareditems'])) + b'\n'
        hgvfs.write(b'shared', shared)

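# A minimal sketch of how creation and instantiation fit together (the path
# is hypothetical, and 'ui' is assumed to be a ui.ui instance):
#
#   createrepository(ui, b'/path/to/repo')
#   repo = makelocalrepository(ui, b'/path/to/repo')
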
def poisonrepository(repo):
    """Poison a repository instance so it can no longer be used."""
    # Perform any cleanup on the instance.
    repo.close()

    # Our strategy is to replace the type of the object with one that
    # makes all attribute lookups result in an error.
    #
    # But we have to allow the close() method because some constructors
    # of repos call close() on repo references.
    class poisonedrepository(object):
        def __getattribute__(self, item):
            if item == r'close':
                return object.__getattribute__(self, item)

            raise error.ProgrammingError('repo instances should not be used '
                                         'after unshare')

        def close(self):
            pass

    # We may have a repoview, which intercepts __setattr__. So be sure
    # we operate at the lowest level possible.
    object.__setattr__(repo, r'__class__', poisonedrepository)
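
# After poisoning, anything other than close() fails loudly, e.g.:
#
#   poisonrepository(repo)
#   repo.close()    # still allowed
#   repo.changelog  # raises error.ProgrammingError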
@@ -1,580 +1,579 b''
  $ . "$TESTDIR/histedit-helpers.sh"

Enable obsolete

  $ cat >> $HGRCPATH << EOF
  > [ui]
  > logtemplate= {rev}:{node|short} {desc|firstline}
  > [phases]
  > publish=False
  > [experimental]
  > evolution.createmarkers=True
  > evolution.allowunstable=True
  > [extensions]
  > histedit=
  > rebase=
  > EOF

Test that histedit learns about obsolescence not stored in histedit state
  $ hg init boo
  $ cd boo
  $ echo a > a
  $ hg ci -Am a
  adding a
  $ echo a > b
  $ echo a > c
  $ echo a > c
  $ hg ci -Am b
  adding b
  adding c
  $ echo a > d
  $ hg ci -Am c
  adding d
  $ echo "pick `hg log -r 0 -T '{node|short}'`" > plan
  $ echo "pick `hg log -r 2 -T '{node|short}'`" >> plan
  $ echo "edit `hg log -r 1 -T '{node|short}'`" >> plan
  $ hg histedit -r 'all()' --commands plan
  Editing (1b2d564fad96), you may commit or record as needed now.
  (hg histedit --continue to resume)
  [1]
  $ hg st
  A b
  A c
  ? plan
  $ hg commit --amend b
  $ hg histedit --continue
  $ hg log -G
  @ 5:46abc7c4d873 b
  |
  o 4:49d44ab2be1b c
  |
  o 0:cb9a9f314b8b a

  $ hg debugobsolete
  e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
  1b2d564fad96311b45362f17c2aa855150efb35f 46abc7c4d8738e8563e577f7889e1b6db3da4199 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
  114f4176969ef342759a8a57e6bccefc4234829b 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}

With some node gone missing during the edit.

  $ echo "pick `hg log -r 0 -T '{node|short}'`" > plan
  $ echo "pick `hg log -r 5 -T '{node|short}'`" >> plan
  $ echo "edit `hg log -r 4 -T '{node|short}'`" >> plan
  $ hg histedit -r 'all()' --commands plan
  Editing (49d44ab2be1b), you may commit or record as needed now.
  (hg histedit --continue to resume)
  [1]
  $ hg st
  A b
  A d
  ? plan
  $ hg commit --amend -X . -m XXXXXX
  $ hg commit --amend -X . -m b2
  $ hg --hidden --config extensions.strip= strip 'desc(XXXXXX)' --no-backup
  $ hg histedit --continue
  $ hg log -G
  @ 8:273c1f3b8626 c
  |
  o 7:aba7da937030 b2
  |
  o 0:cb9a9f314b8b a

  $ hg debugobsolete
  e72d22b19f8ecf4150ab4f91d0973fd9955d3ddf 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '8', 'operation': 'amend', 'user': 'test'}
  1b2d564fad96311b45362f17c2aa855150efb35f 46abc7c4d8738e8563e577f7889e1b6db3da4199 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
  114f4176969ef342759a8a57e6bccefc4234829b 49d44ab2be1b67a79127568a67c9c99430633b48 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '12', 'operation': 'histedit', 'user': 'test'}
  76f72745eac0643d16530e56e2f86e36e40631f1 2ca853e48edbd6453a0674dc0fe28a0974c51b9c 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
  2ca853e48edbd6453a0674dc0fe28a0974c51b9c aba7da93703075eec9fb1dbaf143ff2bc1c49d46 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '1', 'operation': 'amend', 'user': 'test'}
  49d44ab2be1b67a79127568a67c9c99430633b48 273c1f3b86267ed3ec684bb13af1fa4d6ba56e02 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
  46abc7c4d8738e8563e577f7889e1b6db3da4199 aba7da93703075eec9fb1dbaf143ff2bc1c49d46 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '5', 'operation': 'histedit', 'user': 'test'}
  $ cd ..

Base setup for the rest of the testing
======================================

  $ hg init base
  $ cd base

  $ for x in a b c d e f ; do
  > echo $x > $x
  > hg add $x
  > hg ci -m $x
  > done

  $ hg log --graph
  @ 5:652413bf663e f
  |
  o 4:e860deea161a e
  |
  o 3:055a42cdd887 d
  |
  o 2:177f92b77385 c
  |
  o 1:d2ae7f538514 b
  |
  o 0:cb9a9f314b8b a


  $ HGEDITOR=cat hg histedit 1
  pick d2ae7f538514 1 b
  pick 177f92b77385 2 c
  pick 055a42cdd887 3 d
  pick e860deea161a 4 e
  pick 652413bf663e 5 f

  # Edit history between d2ae7f538514 and 652413bf663e
  #
  # Commits are listed from least to most recent
  #
  # You can reorder changesets by reordering the lines
  #
  # Commands:
  #
  # e, edit = use commit, but stop for amending
  # m, mess = edit commit message without changing commit content
  # p, pick = use commit
  # b, base = checkout changeset and apply further changesets from there
  # d, drop = remove commit from history
  # f, fold = use commit, but combine it with the one above
  # r, roll = like fold, but discard this commit's description and date
  #
  $ hg histedit 1 --commands - --verbose <<EOF | grep histedit
  > pick 177f92b77385 2 c
  > drop d2ae7f538514 1 b
  > pick 055a42cdd887 3 d
  > fold e860deea161a 4 e
  > pick 652413bf663e 5 f
  > EOF
  [1]
  $ hg log --graph --hidden
  @ 10:cacdfd884a93 f
  |
  o 9:59d9f330561f d
  |
  | x 8:b558abc46d09 fold-temp-revision e860deea161a
  | |
  | x 7:96e494a2d553 d
  |/
  o 6:b346ab9a313d c
  |
  | x 5:652413bf663e f
  | |
  | x 4:e860deea161a e
  | |
  | x 3:055a42cdd887 d
  | |
  | x 2:177f92b77385 c
  | |
  | x 1:d2ae7f538514 b
  |/
  o 0:cb9a9f314b8b a

  $ hg debugobsolete
  d2ae7f538514cd87c17547b0de4cea71fe1af9fb 0 {cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'histedit', 'user': 'test'}
  177f92b773850b59254aa5e923436f921b55483b b346ab9a313db8537ecf96fca3ca3ca984ef3bd7 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
  055a42cdd88768532f9cf79daa407fc8d138de9b 59d9f330561fd6c88b1a6b32f0e45034d88db784 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'histedit', 'user': 'test'}
  e860deea161a2f77de56603b340ebbb4536308ae 59d9f330561fd6c88b1a6b32f0e45034d88db784 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '13', 'operation': 'histedit', 'user': 'test'}
  652413bf663ef2a641cab26574e46d5f5a64a55a cacdfd884a9321ec4e1de275ef3949fa953a1f83 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
  96e494a2d553dd05902ba1cee1d94d4cb7b8faed 0 {b346ab9a313db8537ecf96fca3ca3ca984ef3bd7} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'histedit', 'user': 'test'}
  b558abc46d09c30f57ac31e85a8a3d64d2e906e4 0 {96e494a2d553dd05902ba1cee1d94d4cb7b8faed} (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '0', 'operation': 'histedit', 'user': 'test'}


Ensure hidden revision does not prevent histedit
-------------------------------------------------

create a hidden revision

  $ hg histedit 6 --commands - << EOF
  > pick b346ab9a313d 6 c
  > drop 59d9f330561f 7 d
  > pick cacdfd884a93 8 f
  > EOF
  $ hg log --graph
  @ 11:c13eb81022ca f
  |
  o 6:b346ab9a313d c
  |
  o 0:cb9a9f314b8b a

check that hidden revisions are ignored (6 has hidden children 7 and 8)

  $ hg histedit 6 --commands - << EOF
  > pick b346ab9a313d 6 c
  > pick c13eb81022ca 8 f
  > EOF



Test that rewriting that leaves instability behind is allowed
---------------------------------------------------------------------

  $ hg up '.^'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg log -r 'children(.)'
  11:c13eb81022ca f (no-eol)
  $ hg histedit -r '.' --commands - <<EOF
  > edit b346ab9a313d 6 c
  > EOF
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  adding c
  Editing (b346ab9a313d), you may commit or record as needed now.
  (hg histedit --continue to resume)
  [1]
  $ echo c >> c
  $ hg histedit --continue
  1 new orphan changesets

  $ hg log -r 'orphan()'
  11:c13eb81022ca f (no-eol)

stabilise

  $ hg rebase -r 'orphan()' -d .
  rebasing 11:c13eb81022ca "f"
  $ hg up tip -q

Test dropping of a changeset at the top of the stack
-------------------------------------------------------

Nothing is rewritten below; the working directory parent must be changed for
the dropped changeset to be hidden.

  $ cd ..
  $ hg clone base droplast
  updating to branch default
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd droplast
  $ hg histedit -r '40db8afa467b' --commands - << EOF
  > pick 40db8afa467b 10 c
  > drop b449568bf7fc 11 f
  > EOF
  $ hg log -G
  @ 12:40db8afa467b c
  |
  o 0:cb9a9f314b8b a


With rewritten ancestors

  $ echo e > e
  $ hg add e
  $ hg commit -m g
  $ echo f > f
  $ hg add f
  $ hg commit -m h
  $ hg histedit -r '40db8afa467b' --commands - << EOF
  > pick 47a8561c0449 12 g
  > pick 40db8afa467b 10 c
  > drop 1b3b05f35ff0 13 h
  > EOF
  $ hg log -G
  @ 17:ee6544123ab8 c
  |
  o 16:269e713e9eae g
  |
  o 0:cb9a9f314b8b a

  $ cd ../base



Test phases support
===========================================

Check that histedit respects immutability
-------------------------------------------

  $ cat >> $HGRCPATH << EOF
  > [ui]
  > logtemplate= {rev}:{node|short} ({phase}) {desc|firstline}\n
  > EOF

  $ hg ph -pv '.^'
  phase changed for 2 changesets
  $ hg log -G
  @ 13:b449568bf7fc (draft) f
  |
  o 12:40db8afa467b (public) c
  |
  o 0:cb9a9f314b8b (public) a

  $ hg histedit -r '.~2'
  abort: cannot edit public changeset: cb9a9f314b8b
  (see 'hg help phases' for details)
  [255]


Prepare further testing
-------------------------------------------

  $ for x in g h i j k ; do
  > echo $x > $x
  > hg add $x
  > hg ci -m $x
  > done
  $ hg phase --force --secret .~2
  $ hg log -G
  @ 18:ee118ab9fa44 (secret) k
  |
  o 17:3a6c53ee7f3d (secret) j
  |
  o 16:b605fb7503f2 (secret) i
  |
  o 15:7395e1ff83bd (draft) h
  |
  o 14:6b70183d2492 (draft) g
  |
  o 13:b449568bf7fc (draft) f
  |
  o 12:40db8afa467b (public) c
  |
  o 0:cb9a9f314b8b (public) a

  $ cd ..

simple phase conservation
-------------------------------------------

Resulting changesets should conserve the phase of the original ones, whatever
the phases.new-commit option is.

New-commit as draft (default)

  $ cp -R base simple-draft
  $ cd simple-draft
  $ hg histedit -r 'b449568bf7fc' --commands - << EOF
  > edit b449568bf7fc 11 f
  > pick 6b70183d2492 12 g
  > pick 7395e1ff83bd 13 h
  > pick b605fb7503f2 14 i
  > pick 3a6c53ee7f3d 15 j
  > pick ee118ab9fa44 16 k
  > EOF
  0 files updated, 0 files merged, 6 files removed, 0 files unresolved
  adding f
  Editing (b449568bf7fc), you may commit or record as needed now.
  (hg histedit --continue to resume)
  [1]
  $ echo f >> f
  $ hg histedit --continue
  $ hg log -G
  @ 24:12e89af74238 (secret) k
  |
  o 23:636a8687b22e (secret) j
  |
  o 22:ccaf0a38653f (secret) i
  |
  o 21:11a89d1c2613 (draft) h
  |
  o 20:c1dec7ca82ea (draft) g
  |
  o 19:087281e68428 (draft) f
  |
  o 12:40db8afa467b (public) c
  |
  o 0:cb9a9f314b8b (public) a

  $ cd ..


New-commit as secret (config)

  $ cp -R base simple-secret
  $ cd simple-secret
  $ cat >> .hg/hgrc << EOF
  > [phases]
  > new-commit=secret
  > EOF
  $ hg histedit -r 'b449568bf7fc' --commands - << EOF
  > edit b449568bf7fc 11 f
  > pick 6b70183d2492 12 g
  > pick 7395e1ff83bd 13 h
  > pick b605fb7503f2 14 i
  > pick 3a6c53ee7f3d 15 j
  > pick ee118ab9fa44 16 k
  > EOF
  0 files updated, 0 files merged, 6 files removed, 0 files unresolved
  adding f
  Editing (b449568bf7fc), you may commit or record as needed now.
  (hg histedit --continue to resume)
  [1]
  $ echo f >> f
  $ hg histedit --continue
  $ hg log -G
  @ 24:12e89af74238 (secret) k
  |
  o 23:636a8687b22e (secret) j
  |
  o 22:ccaf0a38653f (secret) i
  |
  o 21:11a89d1c2613 (draft) h
  |
  o 20:c1dec7ca82ea (draft) g
  |
  o 19:087281e68428 (draft) f
  |
  o 12:40db8afa467b (public) c
  |
  o 0:cb9a9f314b8b (public) a

  $ cd ..


Changeset reordering
-------------------------------------------

If a secret changeset is put before a draft one, all descendants should be
secret. It seems more important to preserve the secret phase.

  $ cp -R base reorder
  $ cd reorder
  $ hg histedit -r 'b449568bf7fc' --commands - << EOF
  > pick b449568bf7fc 11 f
  > pick 3a6c53ee7f3d 15 j
  > pick 6b70183d2492 12 g
  > pick b605fb7503f2 14 i
  > pick 7395e1ff83bd 13 h
  > pick ee118ab9fa44 16 k
  > EOF
  $ hg log -G
  @ 23:558246857888 (secret) k
  |
  o 22:28bd44768535 (secret) h
  |
  o 21:d5395202aeb9 (secret) i
  |
  o 20:21edda8e341b (secret) g
  |
  o 19:5ab64f3a4832 (secret) j
  |
  o 13:b449568bf7fc (draft) f
  |
  o 12:40db8afa467b (public) c
  |
  o 0:cb9a9f314b8b (public) a

  $ cd ..

Changeset folding
-------------------------------------------

Folding a secret changeset with a draft one turns the result secret (again,
better safe than sorry). Folding between same-phase changesets still works.

Note that there is some reordering in this series, for more extensive testing.

  $ cp -R base folding
  $ cd folding
  $ cat >> .hg/hgrc << EOF
  > [phases]
  > new-commit=secret
  > EOF
  $ hg histedit -r 'b449568bf7fc' --commands - << EOF
  > pick 7395e1ff83bd 13 h
  > fold b449568bf7fc 11 f
  > pick 6b70183d2492 12 g
  > fold 3a6c53ee7f3d 15 j
  > pick b605fb7503f2 14 i
  > fold ee118ab9fa44 16 k
  > EOF
  $ hg log -G
  @ 27:f9daec13fb98 (secret) i
  |
  o 24:49807617f46a (secret) g
  |
  o 21:050280826e04 (draft) h
  |
  o 12:40db8afa467b (public) c
  |
  o 0:cb9a9f314b8b (public) a

  $ hg co 49807617f46a
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ echo wat >> wat
  $ hg add wat
  $ hg ci -m 'add wat'
  created new head
  $ hg merge f9daec13fb98
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m 'merge'
  $ echo not wat > wat
  $ hg ci -m 'modify wat'
  $ hg histedit 050280826e04
  abort: cannot edit history that contains merges
  [255]
  $ cd ..

Check abort behavior
-------------------------------------------

We check that abort properly cleans the repository so the same histedit can
be attempted later.

515 $ cp -R base abort
514 $ cp -R base abort
516 $ cd abort
515 $ cd abort
517 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
516 $ hg histedit -r 'b449568bf7fc' --commands - << EOF
518 > pick b449568bf7fc 13 f
517 > pick b449568bf7fc 13 f
519 > pick 7395e1ff83bd 15 h
518 > pick 7395e1ff83bd 15 h
520 > pick 6b70183d2492 14 g
519 > pick 6b70183d2492 14 g
521 > pick b605fb7503f2 16 i
520 > pick b605fb7503f2 16 i
522 > roll 3a6c53ee7f3d 17 j
521 > roll 3a6c53ee7f3d 17 j
523 > edit ee118ab9fa44 18 k
522 > edit ee118ab9fa44 18 k
524 > EOF
523 > EOF
525 Editing (ee118ab9fa44), you may commit or record as needed now.
524 Editing (ee118ab9fa44), you may commit or record as needed now.
526 (hg histedit --continue to resume)
525 (hg histedit --continue to resume)
527 [1]
526 [1]
528
527
529 $ hg histedit --abort
528 $ hg histedit --abort
530 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
529 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
531 saved backup bundle to $TESTTMP/abort/.hg/strip-backup/4dc06258baa6-dff4ef05-backup.hg
530 saved backup bundle to $TESTTMP/abort/.hg/strip-backup/4dc06258baa6-dff4ef05-backup.hg

  $ hg log -G
  @  18:ee118ab9fa44 (secret) k
  |
  o  17:3a6c53ee7f3d (secret) j
  |
  o  16:b605fb7503f2 (secret) i
  |
  o  15:7395e1ff83bd (draft) h
  |
  o  14:6b70183d2492 (draft) g
  |
  o  13:b449568bf7fc (draft) f
  |
  o  12:40db8afa467b (public) c
  |
  o  0:cb9a9f314b8b (public) a

  $ hg histedit -r 'b449568bf7fc' --commands - << EOF --config experimental.evolution.track-operation=1
  > pick b449568bf7fc 13 f
  > pick 7395e1ff83bd 15 h
  > pick 6b70183d2492 14 g
  > pick b605fb7503f2 16 i
  > pick 3a6c53ee7f3d 17 j
  > edit ee118ab9fa44 18 k
  > EOF
  Editing (ee118ab9fa44), you may commit or record as needed now.
  (hg histedit --continue to resume)
  [1]
  $ hg histedit --continue --config experimental.evolution.track-operation=1
  $ hg log -G
  @  23:175d6b286a22 (secret) k
  |
  o  22:44ca09d59ae4 (secret) j
  |
  o  21:31747692a644 (secret) i
  |
  o  20:9985cd4f21fa (draft) g
  |
  o  19:4dc06258baa6 (draft) h
  |
  o  13:b449568bf7fc (draft) f
  |
  o  12:40db8afa467b (public) c
  |
  o  0:cb9a9f314b8b (public) a

  $ hg debugobsolete --rev .
  ee118ab9fa44ebb86be85996548b5517a39e5093 175d6b286a224c23f192e79a581ce83131a53fa2 0 (Thu Jan 01 00:00:00 1970 +0000) {'ef1': '4', 'operation': 'histedit', 'user': 'test'}
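
Each marker line above reads, left to right: precursor node, successor node,
a flags field, the marker's date, and a metadata dictionary ('ef1' is the
effect-flag, 'operation' names the command that created the marker). With the
third-party evolve extension enabled, the same rewrite history could be
browsed as a graph (a sketch; obslog is provided by evolve, not by core
Mercurial):

  $ hg obslog ee118ab9fa44 --all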
@@ -1,204 +1,203 b''
=====================
Test workflow options
=====================

  $ . "$TESTDIR/testlib/obsmarker-common.sh"

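The `mkcommit` helper used throughout comes from `testlib/obsmarker-common.sh`.
Roughly (a sketch, not the verbatim definition), it creates a file named after
its argument and commits it under the same name:

  $ mkcommit() {
  >   echo "$1" > "$1"    # file name and content both come from the argument
  >   hg add "$1"
  >   hg commit -m "$1"   # ...and so does the commit message
  > }
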
Test single head enforcing - Setup
=============================================

  $ cat << EOF >> $HGRCPATH
  > [experimental]
  > evolution = all
  > EOF
  $ hg init single-head-server
  $ cd single-head-server
  $ cat <<EOF >> .hg/hgrc
  > [phases]
  > publish = no
  > [experimental]
  > single-head-per-branch = yes
  > EOF
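
With `single-head-per-branch = yes` the repository refuses any transaction
that would leave a named branch with more than one visible head, and
`publish = no` keeps incoming changesets draft so they remain rewritable.
A client can check in advance whether a branch already has several heads
(a sketch using standard revsets; output elided):

  $ hg log -r 'head() and branch(default)' -T '{rev}:{node|short}\n'
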
  $ mkcommit ROOT
  $ mkcommit c_dA0
  $ cd ..

  $ hg clone single-head-server client
  updating to branch default
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved

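Note that clone does not copy the server's `.hg/hgrc`, so the
`single-head-per-branch` setting stays server-side; everything below is
enforced at push time, not at local commit time. A quick check (a sketch;
grep exits non-zero because the setting is absent from the clone):

  $ grep single-head client/.hg/hgrc
  [1]
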
Test single head enforcing - with branch only
---------------------------------------------

  $ cd client

continuing on the current default branch

  $ mkcommit c_dB0
  $ hg push
  pushing to $TESTTMP/single-head-server
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files

creating a new branch

  $ hg up 'desc("ROOT")'
  0 files updated, 0 files merged, 2 files removed, 0 files unresolved
  $ hg branch branch_A
  marked working directory as branch branch_A
  (branches are permanent and global, did you want a bookmark?)
  $ mkcommit c_aC0
  $ hg push --new-branch
  pushing to $TESTTMP/single-head-server
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
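
This push is accepted despite the "(+1 heads)": the constraint is one head
per named branch, and branch_A arrives with a single head of its own. The
`--new-branch` flag is only needed because plain push refuses to create
branches unknown to the remote; a sketch of that default behaviour (exact
wording may vary between versions):

  $ hg push
  abort: push creates new remote branches: branch_A!
  (use 'hg push --new-branch' to create new remote branches)
  [255]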

Create a new head on the default branch

  $ hg up 'desc("c_dA0")'
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit c_dD0
  created new head
  $ hg push -f
  pushing to $TESTTMP/single-head-server
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files (+1 heads)
  transaction abort!
  rollback completed
  abort: rejecting multiple heads on branch "default"
  (2 heads: 286d02a6e2a2 9bf953aa81f6)
  [255]
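
Even `-f` cannot override the check: the incoming changesets are applied
inside a transaction, the pending state is validated before the transaction
closes, and on rejection the whole transaction is rolled back, leaving the
server untouched. The client still has both heads locally, which is what
makes the merge below possible (a sketch for listing them; output elided):

  $ hg heads default -T '{rev}:{node|short} {desc}\n'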

merge the two heads instead

  $ hg merge
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ mkcommit c_dE0
  $ hg push
  pushing to $TESTTMP/single-head-server
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 2 changes to 2 files

Test single head enforcing - after rewrite
------------------------------------------

  $ mkcommit c_dF0
  $ hg push
  pushing to $TESTTMP/single-head-server
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 1 changes to 1 files
  $ hg commit --amend -m c_dF1
  $ hg push
  pushing to $TESTTMP/single-head-server
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 0 changes to 1 files (+1 heads)
  1 new obsolescence markers
  obsoleted 1 changesets
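
The amended commit is topologically a second head, yet the push succeeds:
`--amend` records a marker obsoleting c_dF0, so c_dF1 is the only *visible*
head, and the single-head check counts visible heads only. This can be
verified locally (a sketch; output elided):

  $ hg heads default -T '{desc}\n'                  # only c_dF1
  $ hg log --hidden -r 'obsolete()' -T '{desc}\n'   # c_dF0 is hidden, not gone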

Check it does not interfere with strip
--------------------------------------

setup

  $ hg branch branch_A --force
  marked working directory as branch branch_A
  $ mkcommit c_aG0
  created new head
  $ hg update 'desc("c_dF1")'
  0 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ mkcommit c_dH0
  $ hg update 'desc("c_aG0")'
  1 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ hg merge
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ mkcommit c_aI0
  $ hg log -G
  @    changeset:   10:49003e504178
  |\   branch:      branch_A
  | |  tag:         tip
  | |  parent:      8:a33fb808fb4b
  | |  parent:      3:840af1c6bc88
  | |  user:        test
  | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | |  summary:     c_aI0
  | |
  | | o  changeset:   9:fe47ea669cea
  | | |  parent:      7:99a2dc242c5d
  | | |  user:        test
  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | | |  summary:     c_dH0
  | | |
  | o |  changeset:   8:a33fb808fb4b
  | |/   branch:      branch_A
  | |    user:        test
  | |    date:        Thu Jan 01 00:00:00 1970 +0000
  | |    summary:     c_aG0
  | |
  | o  changeset:   7:99a2dc242c5d
  | |  parent:      5:6ed1df20edb1
  | |  user:        test
  | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | |  summary:     c_dF1
  | |
  | o    changeset:   5:6ed1df20edb1
  | |\   parent:      4:9bf953aa81f6
  | | |  parent:      2:286d02a6e2a2
  | | |  user:        test
  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | | |  summary:     c_dE0
  | | |
  | | o  changeset:   4:9bf953aa81f6
  | | |  parent:      1:134bc3852ad2
  | | |  user:        test
  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | | |  summary:     c_dD0
  | | |
  o | |  changeset:   3:840af1c6bc88
  | | |  branch:      branch_A
  | | |  parent:      0:ea207398892e
  | | |  user:        test
  | | |  date:        Thu Jan 01 00:00:00 1970 +0000
  | | |  summary:     c_aC0
  | | |
  | o |  changeset:   2:286d02a6e2a2
  | |/   user:        test
  | |    date:        Thu Jan 01 00:00:00 1970 +0000
  | |    summary:     c_dB0
  | |
  | o  changeset:   1:134bc3852ad2
  |/   user:        test
  |    date:        Thu Jan 01 00:00:00 1970 +0000
  |    summary:     c_dA0
  |
  o  changeset:   0:ea207398892e
     user:        test
     date:        Thu Jan 01 00:00:00 1970 +0000
     summary:     ROOT

actual stripping

  $ hg strip --config extensions.strip= --rev 'desc("c_dH0")'
  saved backup bundle to $TESTTMP/client/.hg/strip-backup/fe47ea669cea-a41bf5a9-backup.hg
-  warning: ignoring unknown working parent 49003e504178!
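
As an aside, `strip` lives in a bundled extension rather than in core, which
is why the command above enables it for a single run with
`--config extensions.strip=`. The equivalent permanent configuration would be
(a config sketch):

  [extensions]
  strip =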