##// END OF EJS Templates
statichttprepo: prevent loading dirstate over HTTP on node lookup (issue5717)...
Yuya Nishihara -
r34927:f7e4d6c2 stable
parent child Browse files
Show More
@@ -1,2602 +1,2604 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import re
13 import re
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullid,
22 nullid,
23 nullrev,
23 nullrev,
24 short,
24 short,
25 wdirid,
25 wdirid,
26 wdirnodes,
26 wdirnodes,
27 wdirrev,
27 wdirrev,
28 )
28 )
29 from .thirdparty import (
29 from .thirdparty import (
30 attr,
30 attr,
31 )
31 )
32 from . import (
32 from . import (
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 mdiff,
37 mdiff,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 revlog,
44 revlog,
45 scmutil,
45 scmutil,
46 sparse,
46 sparse,
47 subrepo,
47 subrepo,
48 util,
48 util,
49 )
49 )
50
50
# Lazily-computed, cached attribute decorator shared by the context classes.
propertycache = util.propertycache

# Predicate: does the string contain any byte outside the printable ASCII
# range \x21-\x7f?  Used to decide whether a 20-byte changeid is binary
# before echoing it in an error message.
nonascii = re.compile(r'[^\x21-\x7f]').search
54
54
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
    def __new__(cls, repo, changeid='', *args, **kwargs):
        # Passing an existing context through is a no-op: hand the same
        # object back instead of constructing a new one.
        if isinstance(changeid, basectx):
            return changeid

        o = super(basectx, cls).__new__(cls)

        # Default to the null revision; subclass __init__ methods
        # overwrite _rev/_node with the resolved revision.
        o._repo = repo
        o._rev = nullrev
        o._node = nullid

        return o
73
73
    def __bytes__(self):
        # Short (abbreviated hex) form of the node hash.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __int__(self):
        return self.rev()

    def __repr__(self):
        return r"<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are the same concrete type and
        # point at the same revision; anything else compares unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # 'path in ctx' tests membership in this revision's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # 'ctx[path]' returns the file context for that path.
        return self.filectx(key)

    def __iter__(self):
        # Iterating a context walks the file names in its manifest.
        return iter(self._manifest)
102
102
    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        # Overridden by workingctx; the base implementation ignores
        # 'status' entirely.
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        # Base implementation: pass the matcher through unchanged.
        return match
114
114
    def _buildstatus(self, other, s, match, listignored, listclean,
                     listunknown):
        """build a status with respect to another context

        's' is a pre-computed scmutil.status (deleted/unknown/ignored come
        from it); the manifest diff between 'other' and 'self' supplies
        modified/added/removed/clean.  Returns a new scmutil.status.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.iteritems():
            # Files already known deleted must not be re-reported.
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirnodes:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [fn for fn in unknown if fn not in mf1 and
                       (not match or match(fn))]
            ignored = [fn for fn in ignored if fn not in mf1 and
                       (not match or match(fn))]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(modified, added, removed, deleted, unknown,
                              ignored, clean)
172
172
    @propertycache
    def substate(self):
        # Lazily parsed subrepo state for this revision.
        return subrepo.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for the given subrepo path in the substate.
        return self.substate[subpath][1]

    # Plain accessors for the context's identity and backing objects.
    def rev(self):
        return self._rev
    def node(self):
        return self._node
    def hex(self):
        return hex(self.node())
    def manifest(self):
        return self._manifest
    def manifestctx(self):
        return self._manifestctx
    def repo(self):
        return self._repo
    def phasestr(self):
        # Human-readable name of this changeset's phase.
        return phases.phasenames[self.phase()]
    def mutable(self):
        # Anything above the public phase may still be rewritten.
        return self.phase() > phases.public

    def getfileset(self, expr):
        # Evaluate a fileset expression against this context.
        return fileset.getfileset(self, expr)
199
199
    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, 'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, 'extinct')

    def unstable(self):
        # Deprecated alias kept for extensions; use orphan().
        msg = ("'context.unstable' is deprecated, "
               "use 'context.orphan'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.orphan()

    def orphan(self):
        """True if the changeset is not obsolete but its ancestors are"""
        return self.rev() in obsmod.getrevs(self._repo, 'orphan')

    def bumped(self):
        # Deprecated alias kept for extensions; use phasedivergent().
        msg = ("'context.bumped' is deprecated, "
               "use 'context.phasedivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.phasedivergent()

    def phasedivergent(self):
        """True if the changeset try to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be bumped.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent')

    def divergent(self):
        # Deprecated alias kept for extensions; use contentdivergent().
        msg = ("'context.divergent' is deprecated, "
               "use 'context.contentdivergent'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.contentdivergent()

    def contentdivergent(self):
        """Is a successors of a changeset with multiple possible successors set

        Only non-public and non-obsolete changesets may be divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent')

    def troubled(self):
        # Deprecated alias kept for extensions; use isunstable().
        msg = ("'context.troubled' is deprecated, "
               "use 'context.isunstable'")
        self._repo.ui.deprecwarn(msg, '4.4')
        return self.isunstable()

    def isunstable(self):
        """True if the changeset is either unstable, bumped or divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()
253
253
    def troubles(self):
        """Keep the old version around in order to avoid breaking extensions
        about different return values.
        """
        msg = ("'context.troubles' is deprecated, "
               "use 'context.instabilities'")
        self._repo.ui.deprecwarn(msg, '4.4')

        # Note: returns the OLD names ('bumped', 'divergent'), unlike
        # instabilities() below.
        troubles = []
        if self.orphan():
            troubles.append('orphan')
        if self.phasedivergent():
            troubles.append('bumped')
        if self.contentdivergent():
            troubles.append('divergent')
        return troubles

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append('orphan')
        if self.phasedivergent():
            instabilities.append('phase-divergent')
        if self.contentdivergent():
            instabilities.append('content-divergent')
        return instabilities
287
287
    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        # First parent; always present.
        return self._parents[0]

    def p2(self):
        # Second parent, or a null changectx when there is only one parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return changectx(self._repo, nullrev)
300
300
    def _fileinfo(self, path):
        """Return (filenode, flags) for *path* in this revision.

        Raises error.ManifestLookupError if the path is not in the
        manifest.  Tries progressively cheaper sources: a fully built
        manifest, then a cached manifest delta, then the manifest log.
        """
        # Fast path: the full manifest is already materialized.
        if r'_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(self._node, path,
                                                _('not found in manifest'))
        # A manifest delta may already mention the file, avoiding a full
        # manifest read.
        if r'_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (self._manifestdelta[path],
                        self._manifestdelta.flags(path))
        # Fall back to a targeted lookup through the manifest log.
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(self._node, path,
                                            _('not found in manifest'))

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # Missing files report empty flags instead of raising.
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return ''
329
329
    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        # Subrepo placeholder representing an empty (null) revision.
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)
342
342
    def match(self, pats=None, include=None, exclude=None, default='glob',
              listsubrepos=False, badfn=None):
        """Return a matcher for the given patterns, rooted at this repo."""
        r = self._repo
        return matchmod.match(r.root, r.getcwd(), pats,
                              include, exclude, default,
                              auditor=r.nofsauditor, ctx=self,
                              listsubrepos=listsubrepos, badfn=badfn)

    def diff(self, ctx2=None, match=None, **opts):
        """Returns a diff generator for the given contexts and matcher"""
        # Default to diffing against the first parent.
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        diffopts = patch.diffopts(self._repo.ui, opts)
        return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
359
359
    def dirs(self):
        # All directories present in this revision's manifest.
        return self._manifest.dirs()

    def hasdir(self, dir):
        # True if the manifest contains any file under 'dir'.
        return self._manifest.hasdir(dir)
365
365
    def status(self, other=None, match=None, listignored=False,
               listclean=False, listunknown=False, listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if (not isinstance(ctx1, changectx)
            and isinstance(ctx2, changectx)):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = match or matchmod.always(self._repo.root, self._repo.getcwd())
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
                              listunknown)

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
                               r.clean)

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(rev2, match=submatch, ignored=listignored,
                               clean=listclean, unknown=listunknown,
                               listsubrepos=True)
                # Fold subrepo results in, prefixed with the subrepo path.
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
429
429
def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    filtername = repo.filtername
    # Non-"visible" filters get the generic "filtered revision" wording.
    if not filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        return error.FilteredRepoLookupError(msg % (changeid, filtername))
    # Revisions hidden by the "visible*" filters get a dedicated message
    # plus a hint about --hidden.
    hint = _('use --hidden to access hidden revisions')
    return error.FilteredRepoLookupError(
        _("hidden revision '%s'") % changeid, hint=hint)
442
442
443 class changectx(basectx):
443 class changectx(basectx):
444 """A changecontext object makes access to data related to a particular
444 """A changecontext object makes access to data related to a particular
445 changeset convenient. It represents a read-only context already present in
445 changeset convenient. It represents a read-only context already present in
446 the repo."""
446 the repo."""
    def __init__(self, repo, changeid=''):
        """changeid is a revision number, node, or tag"""

        # since basectx.__new__ already took care of copying the object, we
        # don't need to do anything in __init__, so we just exit here
        if isinstance(changeid, basectx):
            return

        if changeid == '':
            changeid = '.'
        self._repo = repo

        try:
            if isinstance(changeid, int):
                self._node = repo.changelog.node(changeid)
                self._rev = changeid
                return
            if not pycompat.ispy3 and isinstance(changeid, long):
                changeid = str(changeid)
            if changeid == 'null':
                self._node = nullid
                self._rev = nullrev
                return
            if changeid == 'tip':
                self._node = repo.changelog.tip()
                self._rev = repo.changelog.rev(self._node)
                return
            # The repo.local() guard keeps non-local repos (which have no
            # usable dirstate) from touching it here (issue5717).
            if (changeid == '.'
                or repo.local() and changeid == repo.dirstate.p1()):
                # this is a hack to delay/avoid loading obsmarkers
                # when we know that '.' won't be hidden
                self._node = repo.dirstate.p1()
                self._rev = repo.unfiltered().changelog.rev(self._node)
                return
            # 20 bytes: possibly a binary node id.
            if len(changeid) == 20:
                try:
                    self._node = changeid
                    self._rev = repo.changelog.rev(changeid)
                    return
                except error.FilteredRepoLookupError:
                    raise
                except LookupError:
                    pass

            # Decimal revision number (possibly negative, counted from tip).
            try:
                r = int(changeid)
                if '%d' % r != changeid:
                    raise ValueError
                l = len(repo.changelog)
                if r < 0:
                    r += l
                if r < 0 or r >= l and r != wdirrev:
                    raise ValueError
                self._rev = r
                self._node = repo.changelog.node(r)
                return
            except error.FilteredIndexError:
                raise
            except (ValueError, OverflowError, IndexError):
                pass

            # 40 characters: a full hex node id.
            if len(changeid) == 40:
                try:
                    self._node = bin(changeid)
                    self._rev = repo.changelog.rev(self._node)
                    return
                except error.FilteredLookupError:
                    raise
                except (TypeError, LookupError):
                    pass

            # lookup bookmarks through the name interface
            try:
                self._node = repo.names.singlenode(repo, changeid)
                self._rev = repo.changelog.rev(self._node)
                return
            except KeyError:
                pass
            except error.FilteredRepoLookupError:
                raise
            except error.RepoLookupError:
                pass

            # Last resort: unambiguous hex prefix match.
            self._node = repo.unfiltered().changelog._partialmatch(changeid)
            if self._node is not None:
                self._rev = repo.changelog.rev(self._node)
                return

            # lookup failed
            # check if it might have come from damaged dirstate
            #
            # XXX we could avoid the unfiltered if we had a recognizable
            # exception for filtered changeset access
            # (repo.local() again avoids dirstate access on non-local
            # repos -- issue5717)
            if (repo.local()
                and changeid in repo.unfiltered().dirstate.parents()):
                msg = _("working directory has unknown parent '%s'!")
                raise error.Abort(msg % short(changeid))
            try:
                # Hex-encode binary ids so the error message is readable.
                if len(changeid) == 20 and nonascii(changeid):
                    changeid = hex(changeid)
            except TypeError:
                pass
        except (error.FilteredIndexError, error.FilteredLookupError,
                error.FilteredRepoLookupError):
            raise _filterederror(repo, changeid)
        except IndexError:
            pass
        raise error.RepoLookupError(
            _("unknown revision '%s'") % changeid)
554
556
def __hash__(self):
    """Hash on the revision number when one is assigned; otherwise
    fall back to object identity (context not yet resolved)."""
    try:
        rev = self._rev
    except AttributeError:
        return id(self)
    return hash(rev)

def __nonzero__(self):
    """A changectx is falsy only when it denotes the null revision."""
    return nullrev != self._rev

__bool__ = __nonzero__
565
567
@propertycache
def _changeset(self):
    """Parsed changelog entry for this revision (lazily cached)."""
    clog = self._repo.changelog
    return clog.changelogrevision(self.rev())

@propertycache
def _manifest(self):
    """Manifest content for this changeset (cached on first read)."""
    return self._manifestctx.read()

@property
def _manifestctx(self):
    """Manifest context addressed by this changeset's manifest node."""
    mfnode = self._changeset.manifest
    return self._repo.manifestlog[mfnode]

@propertycache
def _manifestdelta(self):
    """Manifest read as a delta against its parent (cached)."""
    return self._manifestctx.readdelta()
581
583
@propertycache
def _parents(self):
    """Parent changectxs; a null second parent is not included."""
    repo = self._repo
    rev1, rev2 = repo.changelog.parentrevs(self._rev)
    parents = [changectx(repo, rev1)]
    if rev2 != nullrev:
        parents.append(changectx(repo, rev2))
    return parents
589
591
def changeset(self):
    """Return the raw changelog fields as a tuple:
    (manifest, user, date, files, description, extra)."""
    cs = self._changeset
    return (cs.manifest, cs.user, cs.date, cs.files, cs.description,
            cs.extra)

def manifestnode(self):
    """Node id of the manifest recorded by this changeset."""
    return self._changeset.manifest

def user(self):
    """Committer of this changeset."""
    return self._changeset.user

def date(self):
    """Commit date as stored in the changelog entry."""
    return self._changeset.date

def files(self):
    """Files touched by this changeset."""
    return self._changeset.files

def description(self):
    """Commit message."""
    return self._changeset.description

def branch(self):
    """Named branch, converted to the local encoding."""
    name = self._changeset.extra.get("branch")
    return encoding.tolocal(name)

def closesbranch(self):
    """True if this changeset closes its branch."""
    return 'close' in self._changeset.extra

def extra(self):
    """Extra metadata dict stored with the changeset."""
    return self._changeset.extra

def tags(self):
    """Tags pointing at this changeset."""
    return self._repo.nodetags(self._node)

def bookmarks(self):
    """Bookmarks pointing at this changeset."""
    return self._repo.nodebookmarks(self._node)

def phase(self):
    """Phase of this changeset per the repository's phase cache."""
    return self._repo._phasecache.phase(self._repo, self._rev)

def hidden(self):
    """True if this revision is filtered out of the 'visible' view."""
    hiddenrevs = repoview.filterrevs(self._repo, 'visible')
    return self._rev in hiddenrevs
625
627
def isinmemory(self):
    """Committed contexts are never in-memory only."""
    return False

def children(self):
    """return contexts for each child changeset"""
    repo = self._repo
    childnodes = repo.changelog.children(self._node)
    return [changectx(repo, node) for node in childnodes]

def ancestors(self):
    """Iterate over ancestor contexts of this revision."""
    repo = self._repo
    for rev in repo.changelog.ancestors([self._rev]):
        yield changectx(repo, rev)

def descendants(self):
    """Iterate over descendant contexts of this revision."""
    repo = self._repo
    for rev in repo.changelog.descendants([self._rev]):
        yield changectx(repo, rev)
641
643
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    if fileid is None:
        # resolve through this changeset's manifest when no explicit
        # file id was given
        fileid = self.filenode(path)
    return filectx(self._repo, path, fileid=fileid, changectx=self,
                   filelog=filelog)
648
650
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # c2 may be a workingctx with no node; use its first parent then
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        anc = nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # several candidate heads: honor merge.preferancestor if one
        # of the configured revisions is among them
        # (experimental config: merge.preferancestor)
        for pref in self._repo.ui.configlist('merge', 'preferancestor'):
            try:
                ctx = changectx(self._repo, pref)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        else:
            # no preference matched: fall back to the revlog ancestor
            anc = self._repo.changelog.ancestor(self._node, n2)
        if warn:
            self._repo.ui.status(
                (_("note: using %s as ancestor of %s and %s\n") %
                 (short(anc), short(self._node), short(n2))) +
                ''.join(_(" alternatively, use --config "
                          "merge.preferancestor=%s\n") %
                        short(n) for n in sorted(cahs) if n != anc))
    return changectx(self._repo, anc)
684
686
def descendant(self, other):
    """True if other is descendant of this changeset"""
    return self._repo.changelog.descendant(self._rev, other._rev)

def walk(self, match):
    '''Generates matching file names.'''

    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain
        # about paths into valid subrepos.
        for sub in self.substate:
            if fn == sub or fn.startswith(sub + '/'):
                return
        match.bad(fn, _('no such file in rev %s') % self)

    # wrap match.bad so error messages carry this changeset's id
    return self._manifest.walk(matchmod.badmatch(match, bad))

def matches(self, match):
    """Alias of walk(): yields names of files matching *match*."""
    return self.walk(match)
706
708
707 class basefilectx(object):
709 class basefilectx(object):
708 """A filecontext object represents the common logic for its children:
710 """A filecontext object represents the common logic for its children:
709 filectx: read-only access to a filerevision that is already present
711 filectx: read-only access to a filerevision that is already present
710 in the repo,
712 in the repo,
711 workingfilectx: a filecontext that represents files from the working
713 workingfilectx: a filecontext that represents files from the working
712 directory,
714 directory,
713 memfilectx: a filecontext that represents files in-memory,
715 memfilectx: a filecontext that represents files in-memory,
714 overlayfilectx: duplicate another filecontext with some fields overridden.
716 overlayfilectx: duplicate another filecontext with some fields overridden.
715 """
717 """
@propertycache
def _filelog(self):
    """Filelog for this context's path."""
    return self._repo.file(self._path)

@propertycache
def _changeid(self):
    """Changelog revision this file revision is attached to."""
    if r'_changeid' in self.__dict__:
        return self._changeid
    if r'_changectx' in self.__dict__:
        return self._changectx.rev()
    if r'_descendantrev' in self.__dict__:
        # this file context was created from a revision with a known
        # descendant: we can (lazily) correct for linkrev aliases
        return self._adjustlinkrev(self._descendantrev)
    return self._filelog.linkrev(self._filerev)

@propertycache
def _filenode(self):
    """Node id of this file revision."""
    if r'_fileid' in self.__dict__:
        return self._filelog.lookup(self._fileid)
    return self._changectx.filenode(self._path)

@propertycache
def _filerev(self):
    """Filelog revision number of this file revision."""
    return self._filelog.rev(self._filenode)

@propertycache
def _repopath(self):
    """Repository-relative path (same as _path here)."""
    return self._path
747
749
def __nonzero__(self):
    """False when the file is absent from its changeset."""
    try:
        self._filenode
    except error.LookupError:
        # file is missing
        return False
    return True

__bool__ = __nonzero__

def __bytes__(self):
    """'path@changeset', or 'path@???' if the file can't be looked up."""
    try:
        return "%s@%s" % (self.path(), self._changectx)
    except error.LookupError:
        return "%s@???" % self.path()

__str__ = encoding.strmethod(__bytes__)

def __repr__(self):
    return "<%s %s>" % (type(self).__name__, str(self))

def __hash__(self):
    """Hash on (path, filenode); identity when those aren't resolvable."""
    try:
        key = (self._path, self._filenode)
    except AttributeError:
        return id(self)
    return hash(key)

def __eq__(self, other):
    # exact type match is intentional: different filectx flavors with
    # the same path/node are not interchangeable
    if type(self) != type(other):
        return False
    try:
        return (self._path == other._path
                and self._filenode == other._filenode)
    except AttributeError:
        return False

def __ne__(self, other):
    return not (self == other)
784
786
def filerev(self):
    """Filelog revision number."""
    return self._filerev

def filenode(self):
    """Filelog node id."""
    return self._filenode

@propertycache
def _flags(self):
    # flags come from the owning changeset's manifest
    return self._changectx.flags(self._path)

def flags(self):
    """Flags string for this file (may contain 'x' and/or 'l')."""
    return self._flags

def filelog(self):
    """Filelog object backing this file."""
    return self._filelog

def rev(self):
    """Changelog revision this file revision belongs to."""
    return self._changeid

def linkrev(self):
    """Raw linkrev recorded in the filelog (may be aliased)."""
    return self._filelog.linkrev(self._filerev)

def node(self):
    return self._changectx.node()

def hex(self):
    return self._changectx.hex()

def user(self):
    return self._changectx.user()

def date(self):
    return self._changectx.date()

def files(self):
    return self._changectx.files()

def description(self):
    return self._changectx.description()

def branch(self):
    return self._changectx.branch()

def extra(self):
    return self._changectx.extra()

def phase(self):
    return self._changectx.phase()

def phasestr(self):
    return self._changectx.phasestr()

def manifest(self):
    return self._changectx.manifest()

def changectx(self):
    """The owning changeset context."""
    return self._changectx

def renamed(self):
    # copy/rename source recorded for this file revision, if any
    # (presumably set by the concrete subclass -- confirm against
    # callers before relying on its exact shape)
    return self._copied

def repo(self):
    return self._repo

def size(self):
    """Length of the file data in bytes."""
    return len(self.data())

def path(self):
    """Repository-relative path of this file."""
    return self._path
833
835
def isbinary(self):
    """True if the file data looks binary; unreadable files are not."""
    try:
        return util.binary(self.data())
    except IOError:
        return False

def isexec(self):
    """True if the executable flag is set."""
    return 'x' in self.flags()

def islink(self):
    """True if the symlink flag is set."""
    return 'l' in self.flags()

def isabsent(self):
    """whether this filectx represents a file not in self._changectx

    This is mainly for merge code to detect change/delete conflicts. This is
    expected to be True for all subclasses of basectx."""
    return False
850
852
_customcmp = False

def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    if fctx._customcmp:
        # the other side knows best how to compare (custom comparator)
        return fctx.cmp(self)

    # Only fall back to a real content comparison when the sizes could
    # plausibly match; otherwise report "different" cheaply.
    if fctx._filenode is None:
        if (self._repo._encodefilterpats
            # if file data starts with '\1\n', empty metadata block is
            # prepended, which adds 4 bytes to filelog.size().
            or self.size() - 4 == fctx.size()):
            return self._filelog.cmp(self._filenode, fctx.data())
    if self.size() == fctx.size():
        return self._filelog.cmp(self._filenode, fctx.data())

    return True
869
871
def _adjustlinkrev(self, srcrev, inclusive=False):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing
    this file revision.

    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    """
    repo = self._repo
    cl = repo.unfiltered().changelog
    mfl = repo.manifestlog
    # fetch the linkrev
    lkr = self.linkrev()
    # hack to reuse ancestor computation when searching for renames
    memberanc = getattr(self, '_ancestrycontext', None)
    iteranc = None
    if srcrev is None:
        # wctx case, used by workingfilectx during mergecopy
        revs = [p.rev() for p in self._repo[None].parents()]
        inclusive = True  # we skipped the real (revless) source
    else:
        revs = [srcrev]
    if memberanc is None:
        memberanc = iteranc = cl.ancestors(revs, lkr,
                                           inclusive=inclusive)
    # check if this linkrev is an ancestor of srcrev
    if lkr not in memberanc:
        if iteranc is None:
            iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        fnode = self._filenode
        path = self._path
        for a in iteranc:
            # get changeset data (we avoid object creation)
            ac = cl.read(a)
            if path in ac[3]:  # checking the 'files' field.
                # The file has been touched, check if the content is
                # similar to the one we search for.
                if fnode == mfl[ac[0]].readfast().get(path):
                    return a
        # In theory, we should never get out of that loop without a
        # result. But if the manifest uses a buggy file revision (not a
        # child of the one it replaces) we could. Such a buggy situation
        # will likely result in a crash somewhere else at some point.
    return lkr
915
917
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it take into account the
    changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    lkr = self.linkrev()
    attrs = vars(self)
    # without an associated changeset, the raw linkrev is all we have
    noctx = not ('_changeid' in attrs or '_changectx' in attrs)
    if noctx or self.rev() == lkr:
        return self.linkrev()
    return self._adjustlinkrev(self.rev(), inclusive=True)

def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()"""
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    if '_changeid' in vars(self) or '_changectx' in vars(self):
        # If self is associated with a changeset (probably explicitly
        # fed), ensure the created filectx is associated with a
        # changeset that is an ancestor of self.changectx.
        # This lets us later use _adjustlinkrev to get a correct link.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in vars(self):
        # Otherwise propagate _descendantrev if we have one associated.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
947
949
def parents(self):
    """Parent file contexts, with the rename source substituted in."""
    path = self._path
    fl = self._filelog
    pl = [(path, node, fl)
          for node in fl.parents(self._filenode) if node != nullid]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parents are nullid, so pl
        #   is empty.
        # - In case of merge, only one of the parents is nullid and it
        #   is -always- the first one; it should be replaced with the
        #   rename information.
        # As nullid parents have been filtered out above, inserting at
        # index 0 always replaces the first nullid parent with the
        # rename information.
        srcpath, srcnode = r[0], r[1]
        pl.insert(0, (srcpath, srcnode, self._repo.file(srcpath)))

    return [self._parentfilectx(p, n, l) for p, n, l in pl]

def p1(self):
    """First parent file context."""
    return self.parents()[0]

def p2(self):
    """Second parent file context, or a null filectx if there is none."""
    ps = self.parents()
    if len(ps) == 2:
        return ps[1]
    return filectx(self._repo, self._path, fileid=-1,
                   filelog=self._filelog)
976
978
977 def annotate(self, follow=False, linenumber=False, skiprevs=None,
979 def annotate(self, follow=False, linenumber=False, skiprevs=None,
978 diffopts=None):
980 diffopts=None):
979 '''returns a list of tuples of ((ctx, number), line) for each line
981 '''returns a list of tuples of ((ctx, number), line) for each line
980 in the file, where ctx is the filectx of the node where
982 in the file, where ctx is the filectx of the node where
981 that line was last changed; if linenumber parameter is true, number is
983 that line was last changed; if linenumber parameter is true, number is
982 the line number at the first appearance in the managed file, otherwise,
984 the line number at the first appearance in the managed file, otherwise,
983 number has a fixed value of False.
985 number has a fixed value of False.
984 '''
986 '''
985
987
986 def lines(text):
988 def lines(text):
987 if text.endswith("\n"):
989 if text.endswith("\n"):
988 return text.count("\n")
990 return text.count("\n")
989 return text.count("\n") + int(bool(text))
991 return text.count("\n") + int(bool(text))
990
992
991 if linenumber:
993 if linenumber:
992 def decorate(text, rev):
994 def decorate(text, rev):
993 return ([annotateline(fctx=rev, lineno=i)
995 return ([annotateline(fctx=rev, lineno=i)
994 for i in xrange(1, lines(text) + 1)], text)
996 for i in xrange(1, lines(text) + 1)], text)
995 else:
997 else:
996 def decorate(text, rev):
998 def decorate(text, rev):
997 return ([annotateline(fctx=rev)] * lines(text), text)
999 return ([annotateline(fctx=rev)] * lines(text), text)
998
1000
999 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1001 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1000
1002
1001 def parents(f):
1003 def parents(f):
1002 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1004 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1003 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1005 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1004 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1006 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1005 # isn't an ancestor of the srcrev.
1007 # isn't an ancestor of the srcrev.
1006 f._changeid
1008 f._changeid
1007 pl = f.parents()
1009 pl = f.parents()
1008
1010
1009 # Don't return renamed parents if we aren't following.
1011 # Don't return renamed parents if we aren't following.
1010 if not follow:
1012 if not follow:
1011 pl = [p for p in pl if p.path() == f.path()]
1013 pl = [p for p in pl if p.path() == f.path()]
1012
1014
1013 # renamed filectx won't have a filelog yet, so set it
1015 # renamed filectx won't have a filelog yet, so set it
1014 # from the cache to save time
1016 # from the cache to save time
1015 for p in pl:
1017 for p in pl:
1016 if not '_filelog' in p.__dict__:
1018 if not '_filelog' in p.__dict__:
1017 p._filelog = getlog(p.path())
1019 p._filelog = getlog(p.path())
1018
1020
1019 return pl
1021 return pl
1020
1022
1021 # use linkrev to find the first changeset where self appeared
1023 # use linkrev to find the first changeset where self appeared
1022 base = self
1024 base = self
1023 introrev = self.introrev()
1025 introrev = self.introrev()
1024 if self.rev() != introrev:
1026 if self.rev() != introrev:
1025 base = self.filectx(self.filenode(), changeid=introrev)
1027 base = self.filectx(self.filenode(), changeid=introrev)
1026 if getattr(base, '_ancestrycontext', None) is None:
1028 if getattr(base, '_ancestrycontext', None) is None:
1027 cl = self._repo.changelog
1029 cl = self._repo.changelog
1028 if introrev is None:
1030 if introrev is None:
1029 # wctx is not inclusive, but works because _ancestrycontext
1031 # wctx is not inclusive, but works because _ancestrycontext
1030 # is used to test filelog revisions
1032 # is used to test filelog revisions
1031 ac = cl.ancestors([p.rev() for p in base.parents()],
1033 ac = cl.ancestors([p.rev() for p in base.parents()],
1032 inclusive=True)
1034 inclusive=True)
1033 else:
1035 else:
1034 ac = cl.ancestors([introrev], inclusive=True)
1036 ac = cl.ancestors([introrev], inclusive=True)
1035 base._ancestrycontext = ac
1037 base._ancestrycontext = ac
1036
1038
1037 # This algorithm would prefer to be recursive, but Python is a
1039 # This algorithm would prefer to be recursive, but Python is a
1038 # bit recursion-hostile. Instead we do an iterative
1040 # bit recursion-hostile. Instead we do an iterative
1039 # depth-first search.
1041 # depth-first search.
1040
1042
1041 # 1st DFS pre-calculates pcache and needed
1043 # 1st DFS pre-calculates pcache and needed
1042 visit = [base]
1044 visit = [base]
1043 pcache = {}
1045 pcache = {}
1044 needed = {base: 1}
1046 needed = {base: 1}
1045 while visit:
1047 while visit:
1046 f = visit.pop()
1048 f = visit.pop()
1047 if f in pcache:
1049 if f in pcache:
1048 continue
1050 continue
1049 pl = parents(f)
1051 pl = parents(f)
1050 pcache[f] = pl
1052 pcache[f] = pl
1051 for p in pl:
1053 for p in pl:
1052 needed[p] = needed.get(p, 0) + 1
1054 needed[p] = needed.get(p, 0) + 1
1053 if p not in pcache:
1055 if p not in pcache:
1054 visit.append(p)
1056 visit.append(p)
1055
1057
1056 # 2nd DFS does the actual annotate
1058 # 2nd DFS does the actual annotate
1057 visit[:] = [base]
1059 visit[:] = [base]
1058 hist = {}
1060 hist = {}
1059 while visit:
1061 while visit:
1060 f = visit[-1]
1062 f = visit[-1]
1061 if f in hist:
1063 if f in hist:
1062 visit.pop()
1064 visit.pop()
1063 continue
1065 continue
1064
1066
1065 ready = True
1067 ready = True
1066 pl = pcache[f]
1068 pl = pcache[f]
1067 for p in pl:
1069 for p in pl:
1068 if p not in hist:
1070 if p not in hist:
1069 ready = False
1071 ready = False
1070 visit.append(p)
1072 visit.append(p)
1071 if ready:
1073 if ready:
1072 visit.pop()
1074 visit.pop()
1073 curr = decorate(f.data(), f)
1075 curr = decorate(f.data(), f)
1074 skipchild = False
1076 skipchild = False
1075 if skiprevs is not None:
1077 if skiprevs is not None:
1076 skipchild = f._changeid in skiprevs
1078 skipchild = f._changeid in skiprevs
1077 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1079 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1078 diffopts)
1080 diffopts)
1079 for p in pl:
1081 for p in pl:
1080 if needed[p] == 1:
1082 if needed[p] == 1:
1081 del hist[p]
1083 del hist[p]
1082 del needed[p]
1084 del needed[p]
1083 else:
1085 else:
1084 needed[p] -= 1
1086 needed[p] -= 1
1085
1087
1086 hist[f] = curr
1088 hist[f] = curr
1087 del pcache[f]
1089 del pcache[f]
1088
1090
1089 return zip(hist[base][0], hist[base][1].splitlines(True))
1091 return zip(hist[base][0], hist[base][1].splitlines(True))
1090
1092
def ancestors(self, followfirst=False):
    """Yield the ancestor file contexts of this file context.

    Ancestors are produced best-first: at each step the pending
    candidate with the greatest (linkrev, filenode) key is emitted.
    If followfirst is true, only the first parent of each visited
    context is followed.
    """
    # slice limit for parents(): first parent only when followfirst
    limit = 1 if followfirst else None
    pending = {}
    current = self
    while True:
        for anc in current.parents()[:limit]:
            pending[(anc.linkrev(), anc.filenode())] = anc
        if not pending:
            return
        current = pending.pop(max(pending))
        yield current
1106
1108
def decodeddata(self):
    """Returns `data()` after running repository decoding filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    # feed the raw file content through the repo's write filters,
    # keyed on this file's path
    path = self.path()
    raw = self.data()
    return self._repo.wwritedata(path, raw)
1113
1115
@attr.s(slots=True, frozen=True)
class annotateline(object):
    """Immutable attrs record tying one annotated line to its origin."""
    # file context that introduced the line
    fctx = attr.ib()
    # line number within fctx
    # NOTE(review): the default is False rather than None/0 — looks
    # deliberate (distinguishes "untracked") but confirm with callers
    lineno = attr.ib(default=False)
    # Whether this annotation was the result of a skip-annotate.
    skip = attr.ib(default=False)
1120
1122
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
    r'''
    Given parent and child fctxes and annotate data for parents, for all lines
    in either parent that match the child, annotate the child with the parent's
    data.

    Additionally, if `skipchild` is True, replace all other lines with parent
    annotate data as well such that child is never blamed for any lines.

    Each of `parents[i]` and `child` is a pair: element [0] is a list of
    annotateline records (one per line) and element [1] is the file text
    those records annotate.  `child` is mutated in place and also returned.

    See test-annotate.py for unit tests.
    '''
    # diff every parent's text against the child's text once
    pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
               for parent in parents]

    if skipchild:
        # Need to iterate over the blocks twice -- make it a list
        pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
    # Mercurial currently prefers p2 over p1 for annotate.
    # TODO: change this?
    for parent, blocks in pblocks:
        for (a1, a2, b1, b2), t in blocks:
            # Changed blocks ('!') or blocks made only of blank lines ('~')
            # belong to the child.
            if t == '=':
                child[0][b1:b2] = parent[0][a1:a2]

    if skipchild:
        # Now try and match up anything that couldn't be matched,
        # Reversing pblocks maintains bias towards p2, matching above
        # behavior.
        pblocks.reverse()

        # The heuristics are:
        # * Work on blocks of changed lines (effectively diff hunks with -U0).
        #   This could potentially be smarter but works well enough.
        # * For a non-matching section, do a best-effort fit. Match lines in
        #   diff hunks 1:1, dropping lines as necessary.
        # * Repeat the last line as a last resort.

        # First, replace as much as possible without repeating the last line.
        remaining = [(parent, []) for parent, _blocks in pblocks]
        for idx, (parent, blocks) in enumerate(pblocks):
            for (a1, a2, b1, b2), _t in blocks:
                if a2 - a1 >= b2 - b1:
                    # parent side is at least as long: every child line in
                    # the hunk gets a distinct parent line (capped at a2-1)
                    for bk in xrange(b1, b2):
                        if child[0][bk].fctx == childfctx:
                            ak = min(a1 + (bk - b1), a2 - 1)
                            child[0][bk] = attr.evolve(parent[0][ak], skip=True)
                else:
                    # parent hunk is shorter: defer to the second pass
                    remaining[idx][1].append((a1, a2, b1, b2))

        # Then, look at anything left, which might involve repeating the last
        # line.
        for parent, blocks in remaining:
            for a1, a2, b1, b2 in blocks:
                for bk in xrange(b1, b2):
                    if child[0][bk].fctx == childfctx:
                        ak = min(a1 + (bk - b1), a2 - 1)
                        child[0][bk] = attr.evolve(parent[0][ak], skip=True)
    return child
1181
1183
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""
    def __init__(self, repo, path, changeid=None, fileid=None,
                 filelog=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning down the revision must be provided
        assert (changeid is not None
                or fileid is not None
                or changectx is not None), \
                ("bad args: changeid=%r, fileid=%r, changectx=%r"
                 % (changeid, fileid, changectx))

        if filelog is not None:
            self._filelog = filelog

        # only seed the caches that were supplied; the rest are filled
        # lazily by propertycache
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return changectx(self._repo, self._changeid)
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return changectx(self._repo.unfiltered(), self._changeid)

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._path, fileid=fileid,
                       filelog=self._filelog, changeid=changeid)

    def rawdata(self):
        """revision data with no flag processing applied (raw=True)"""
        return self._filelog.revision(self._filenode, raw=True)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """file content for this revision

        Aborts on a censored node unless censor.policy is 'ignore',
        in which case an empty string is returned instead.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config("censor", "policy") == "ignore":
                return ""
            raise error.Abort(_("censored node: %s") % short(self._filenode),
                              hint=_("set censor.policy to ignore errors"))

    def size(self):
        """size of this file revision as reported by the filelog"""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return renamed

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # a parent already has this exact file revision: the
                # rename happened earlier, not in this changeset
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file does not exist in this parent; keep looking
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [filectx(self._repo, self._path, fileid=x,
                        filelog=self._filelog) for x in c]
1287
1289
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""
    def __init__(self, repo, text="", user=None, date=None, extra=None,
                 changes=None):
        """Initialize an uncommitted context.

        text/user/date/extra mirror changelog metadata; changes, if
        given, pre-seeds the status that would otherwise come from
        repo.status().  Unset values fall back to the propertycaches
        below.
        """
        self._repo = repo
        # no revision number or node yet: nothing has been committed
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = util.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if 'branch' not in self._extra:
            try:
                branch = encoding.fromlocal(self._repo.dirstate.branch())
            except UnicodeDecodeError:
                raise error.Abort(_('branch name not in UTF-8!'))
            self._extra['branch'] = branch
        if self._extra['branch'] == '':
            self._extra['branch'] = 'default'

    def __bytes__(self):
        # render as "<first parent>+" to mark the uncommitted state
        return bytes(self._parents[0]) + "+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()
            def func(f):
                f = copiesget(f, f)
                return man.flags(f)
        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f) # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return '' # punt for conflicts

        return func

    @propertycache
    def _flagfunc(self):
        return self._repo.dirstate.flagfunc(self._buildflagfunc)

    @propertycache
    def _status(self):
        # default status when no precomputed 'changes' was supplied
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin a deterministic commit date
        date = ui.configdate('devel', 'default-date')
        if date is None:
            date = util.makedate()
        return date

    def subrev(self, subpath):
        # uncommitted contexts have no recorded subrepo revision
        return None

    def manifestnode(self):
        return None
    def user(self):
        return self._user or self._repo.ui.username()
    def date(self):
        return self._date
    def description(self):
        return self._text
    def files(self):
        """sorted list of files touched (modified, added or removed)"""
        return sorted(self._status.modified + self._status.added +
                      self._status.removed)

    def modified(self):
        return self._status.modified
    def added(self):
        return self._status.added
    def removed(self):
        return self._status.removed
    def deleted(self):
        return self._status.deleted
    def branch(self):
        return encoding.tolocal(self._extra['branch'])
    def closesbranch(self):
        return 'close' in self._extra
    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """bookmarks of all parents, concatenated"""
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft # default phase to draft
        for p in self.parents():
            # a commit can never be in a "more public" phase than a parent
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """flags ('l', 'x' or '') for path, from manifest or dirstate"""
        if r'_manifest' in self.__dict__:
            # a manifest has already been built (memctx case) -- use it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return ''

        try:
            return self._flagfunc(path)
        except OSError:
            return ''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2) # punt on two parents for now

    def walk(self, match):
        '''Generates matching file names.'''
        return sorted(self._repo.dirstate.walk(match,
                                               subrepos=sorted(self.substate),
                                               unknown=True, ignored=False))

    def matches(self, match):
        return sorted(self._repo.dirstate.matches(match))

    def ancestors(self):
        """yield parent contexts, then all changelog ancestors"""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]):
            yield changectx(self._repo, a)

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

    def dirty(self, missing=False, merge=True, branch=True):
        # subclasses that track real state override this
        return False
1488
1490
1489 class workingctx(committablectx):
1491 class workingctx(committablectx):
1490 """A workingctx object makes access to data related to
1492 """A workingctx object makes access to data related to
1491 the current working directory convenient.
1493 the current working directory convenient.
1492 date - any valid date string or (unixtime, offset), or None.
1494 date - any valid date string or (unixtime, offset), or None.
1493 user - username string, or None.
1495 user - username string, or None.
1494 extra - a dictionary of extra values, or None.
1496 extra - a dictionary of extra values, or None.
1495 changes - a list of file lists as returned by localrepo.status()
1497 changes - a list of file lists as returned by localrepo.status()
1496 or None to use the repository status.
1498 or None to use the repository status.
1497 """
1499 """
def __init__(self, repo, text="", user=None, date=None, extra=None,
             changes=None):
    # all state handling lives in committablectx; workingctx only
    # specializes behavior (dirstate-backed parents, iteration, ...)
    super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1501
1503
def __iter__(self):
    """Iterate over tracked files: every dirstate entry whose state
    is not 'r' (removed)."""
    ds = self._repo.dirstate
    for path in ds:
        if ds[path] != 'r':
            yield path
1507
1509
def __contains__(self, key):
    """True if *key* is tracked, i.e. its dirstate entry is neither
    unknown ('?') nor removed ('r')."""
    state = self._repo.dirstate[key]
    return state not in "?r"
1510
1512
def hex(self):
    # 'hex' in the body resolves to the module-level node.hex (the
    # method name only shadows it at class scope); the working
    # directory is identified by the magic wdirid
    return hex(wdirid)
1513
1515
@propertycache
def _parents(self):
    """Working-directory parent changectxs read from the dirstate.

    The null second parent is dropped, so the list holds a single
    entry unless a merge is in progress.
    """
    p = self._repo.dirstate.parents()
    if p[1] == nullid:
        p = p[:-1]
    return [changectx(self._repo, x) for x in p]
1520
1522
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    # an already-open filelog may be passed in to avoid reopening it
    return workingfilectx(self._repo, path, workingctx=self,
                          filelog=filelog)
1525
1527
def dirty(self, missing=False, merge=True, branch=True):
    """check whether a working directory is modified"""
    # a dirty subrepo dirties the whole working directory
    for name in sorted(self.substate):
        if self.sub(name).dirty(missing=missing):
            return True
    # then inspect the working directory itself; note that the raw
    # truthy value (e.g. the p2 context or a list of files) is
    # propagated to the caller, not a normalized bool
    return ((merge and self.p2()) or
            (branch and self.branch() != self.p1().branch()) or
            self.modified() or self.added() or self.removed() or
            (missing and self.deleted()))
1537
1539
def add(self, list, prefix=""):
    """Schedule files for addition in the dirstate.

    'list' (shadows the builtin; kept as-is for interface
    compatibility with existing callers) holds repo-relative paths;
    'prefix' is joined in front of each path for portability checks
    and user messages.  Returns the sublist of rejected files.
    """
    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
        rejected = []
        lstat = self._repo.wvfs.lstat
        for f in list:
            # ds.pathto() returns an absolute file when this is invoked from
            # the keyword extension. That gets flagged as non-portable on
            # Windows, since it contains the drive letter and colon.
            scmutil.checkportable(ui, os.path.join(prefix, f))
            try:
                st = lstat(f)
            except OSError:
                # missing on disk: warn and reject, keep processing
                ui.warn(_("%s does not exist!\n") % uipath(f))
                rejected.append(f)
                continue
            if st.st_size > 10000000:
                # warn (but still add) for very large files
                ui.warn(_("%s: up to %d MB of RAM may be required "
                          "to manage this file\n"
                          "(use 'hg revert %s' to cancel the "
                          "pending addition)\n")
                        % (f, 3 * st.st_size // 1000000, uipath(f)))
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(_("%s not added: only files and symlinks "
                          "supported currently\n") % uipath(f))
                rejected.append(f)
            elif ds[f] in 'amn':
                # already added, merged or normal: nothing to do
                ui.warn(_("%s already tracked!\n") % uipath(f))
            elif ds[f] == 'r':
                # re-adding a removed file: resurrect instead of add
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1572
1574
1573 def forget(self, files, prefix=""):
1575 def forget(self, files, prefix=""):
1574 with self._repo.wlock():
1576 with self._repo.wlock():
1575 ds = self._repo.dirstate
1577 ds = self._repo.dirstate
1576 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1578 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1577 rejected = []
1579 rejected = []
1578 for f in files:
1580 for f in files:
1579 if f not in self._repo.dirstate:
1581 if f not in self._repo.dirstate:
1580 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1582 self._repo.ui.warn(_("%s not tracked!\n") % uipath(f))
1581 rejected.append(f)
1583 rejected.append(f)
1582 elif self._repo.dirstate[f] != 'a':
1584 elif self._repo.dirstate[f] != 'a':
1583 self._repo.dirstate.remove(f)
1585 self._repo.dirstate.remove(f)
1584 else:
1586 else:
1585 self._repo.dirstate.drop(f)
1587 self._repo.dirstate.drop(f)
1586 return rejected
1588 return rejected
1587
1589
1588 def undelete(self, list):
1590 def undelete(self, list):
1589 pctxs = self.parents()
1591 pctxs = self.parents()
1590 with self._repo.wlock():
1592 with self._repo.wlock():
1591 ds = self._repo.dirstate
1593 ds = self._repo.dirstate
1592 for f in list:
1594 for f in list:
1593 if self._repo.dirstate[f] != 'r':
1595 if self._repo.dirstate[f] != 'r':
1594 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1596 self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f))
1595 else:
1597 else:
1596 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1598 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1597 t = fctx.data()
1599 t = fctx.data()
1598 self._repo.wwrite(f, t, fctx.flags())
1600 self._repo.wwrite(f, t, fctx.flags())
1599 self._repo.dirstate.normal(f)
1601 self._repo.dirstate.normal(f)
1600
1602
1601 def copy(self, source, dest):
1603 def copy(self, source, dest):
1602 try:
1604 try:
1603 st = self._repo.wvfs.lstat(dest)
1605 st = self._repo.wvfs.lstat(dest)
1604 except OSError as err:
1606 except OSError as err:
1605 if err.errno != errno.ENOENT:
1607 if err.errno != errno.ENOENT:
1606 raise
1608 raise
1607 self._repo.ui.warn(_("%s does not exist!\n")
1609 self._repo.ui.warn(_("%s does not exist!\n")
1608 % self._repo.dirstate.pathto(dest))
1610 % self._repo.dirstate.pathto(dest))
1609 return
1611 return
1610 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1612 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1611 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1613 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1612 "symbolic link\n")
1614 "symbolic link\n")
1613 % self._repo.dirstate.pathto(dest))
1615 % self._repo.dirstate.pathto(dest))
1614 else:
1616 else:
1615 with self._repo.wlock():
1617 with self._repo.wlock():
1616 if self._repo.dirstate[dest] in '?':
1618 if self._repo.dirstate[dest] in '?':
1617 self._repo.dirstate.add(dest)
1619 self._repo.dirstate.add(dest)
1618 elif self._repo.dirstate[dest] in 'r':
1620 elif self._repo.dirstate[dest] in 'r':
1619 self._repo.dirstate.normallookup(dest)
1621 self._repo.dirstate.normallookup(dest)
1620 self._repo.dirstate.copy(source, dest)
1622 self._repo.dirstate.copy(source, dest)
1621
1623
1622 def match(self, pats=None, include=None, exclude=None, default='glob',
1624 def match(self, pats=None, include=None, exclude=None, default='glob',
1623 listsubrepos=False, badfn=None):
1625 listsubrepos=False, badfn=None):
1624 r = self._repo
1626 r = self._repo
1625
1627
1626 # Only a case insensitive filesystem needs magic to translate user input
1628 # Only a case insensitive filesystem needs magic to translate user input
1627 # to actual case in the filesystem.
1629 # to actual case in the filesystem.
1628 icasefs = not util.fscasesensitive(r.root)
1630 icasefs = not util.fscasesensitive(r.root)
1629 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1631 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1630 default, auditor=r.auditor, ctx=self,
1632 default, auditor=r.auditor, ctx=self,
1631 listsubrepos=listsubrepos, badfn=badfn,
1633 listsubrepos=listsubrepos, badfn=badfn,
1632 icasefs=icasefs)
1634 icasefs=icasefs)
1633
1635
1634 def flushall(self):
1636 def flushall(self):
1635 pass # For overlayworkingfilectx compatibility.
1637 pass # For overlayworkingfilectx compatibility.
1636
1638
1637 def _filtersuspectsymlink(self, files):
1639 def _filtersuspectsymlink(self, files):
1638 if not files or self._repo.dirstate._checklink:
1640 if not files or self._repo.dirstate._checklink:
1639 return files
1641 return files
1640
1642
1641 # Symlink placeholders may get non-symlink-like contents
1643 # Symlink placeholders may get non-symlink-like contents
1642 # via user error or dereferencing by NFS or Samba servers,
1644 # via user error or dereferencing by NFS or Samba servers,
1643 # so we filter out any placeholders that don't look like a
1645 # so we filter out any placeholders that don't look like a
1644 # symlink
1646 # symlink
1645 sane = []
1647 sane = []
1646 for f in files:
1648 for f in files:
1647 if self.flags(f) == 'l':
1649 if self.flags(f) == 'l':
1648 d = self[f].data()
1650 d = self[f].data()
1649 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1651 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1650 self._repo.ui.debug('ignoring suspect symlink placeholder'
1652 self._repo.ui.debug('ignoring suspect symlink placeholder'
1651 ' "%s"\n' % f)
1653 ' "%s"\n' % f)
1652 continue
1654 continue
1653 sane.append(f)
1655 sane.append(f)
1654 return sane
1656 return sane
1655
1657
1656 def _checklookup(self, files):
1658 def _checklookup(self, files):
1657 # check for any possibly clean files
1659 # check for any possibly clean files
1658 if not files:
1660 if not files:
1659 return [], [], []
1661 return [], [], []
1660
1662
1661 modified = []
1663 modified = []
1662 deleted = []
1664 deleted = []
1663 fixup = []
1665 fixup = []
1664 pctx = self._parents[0]
1666 pctx = self._parents[0]
1665 # do a full compare of any files that might have changed
1667 # do a full compare of any files that might have changed
1666 for f in sorted(files):
1668 for f in sorted(files):
1667 try:
1669 try:
1668 # This will return True for a file that got replaced by a
1670 # This will return True for a file that got replaced by a
1669 # directory in the interim, but fixing that is pretty hard.
1671 # directory in the interim, but fixing that is pretty hard.
1670 if (f not in pctx or self.flags(f) != pctx.flags(f)
1672 if (f not in pctx or self.flags(f) != pctx.flags(f)
1671 or pctx[f].cmp(self[f])):
1673 or pctx[f].cmp(self[f])):
1672 modified.append(f)
1674 modified.append(f)
1673 else:
1675 else:
1674 fixup.append(f)
1676 fixup.append(f)
1675 except (IOError, OSError):
1677 except (IOError, OSError):
1676 # A file become inaccessible in between? Mark it as deleted,
1678 # A file become inaccessible in between? Mark it as deleted,
1677 # matching dirstate behavior (issue5584).
1679 # matching dirstate behavior (issue5584).
1678 # The dirstate has more complex behavior around whether a
1680 # The dirstate has more complex behavior around whether a
1679 # missing file matches a directory, etc, but we don't need to
1681 # missing file matches a directory, etc, but we don't need to
1680 # bother with that: if f has made it to this point, we're sure
1682 # bother with that: if f has made it to this point, we're sure
1681 # it's in the dirstate.
1683 # it's in the dirstate.
1682 deleted.append(f)
1684 deleted.append(f)
1683
1685
1684 return modified, deleted, fixup
1686 return modified, deleted, fixup
1685
1687
1686 def _poststatusfixup(self, status, fixup):
1688 def _poststatusfixup(self, status, fixup):
1687 """update dirstate for files that are actually clean"""
1689 """update dirstate for files that are actually clean"""
1688 poststatus = self._repo.postdsstatus()
1690 poststatus = self._repo.postdsstatus()
1689 if fixup or poststatus:
1691 if fixup or poststatus:
1690 try:
1692 try:
1691 oldid = self._repo.dirstate.identity()
1693 oldid = self._repo.dirstate.identity()
1692
1694
1693 # updating the dirstate is optional
1695 # updating the dirstate is optional
1694 # so we don't wait on the lock
1696 # so we don't wait on the lock
1695 # wlock can invalidate the dirstate, so cache normal _after_
1697 # wlock can invalidate the dirstate, so cache normal _after_
1696 # taking the lock
1698 # taking the lock
1697 with self._repo.wlock(False):
1699 with self._repo.wlock(False):
1698 if self._repo.dirstate.identity() == oldid:
1700 if self._repo.dirstate.identity() == oldid:
1699 if fixup:
1701 if fixup:
1700 normal = self._repo.dirstate.normal
1702 normal = self._repo.dirstate.normal
1701 for f in fixup:
1703 for f in fixup:
1702 normal(f)
1704 normal(f)
1703 # write changes out explicitly, because nesting
1705 # write changes out explicitly, because nesting
1704 # wlock at runtime may prevent 'wlock.release()'
1706 # wlock at runtime may prevent 'wlock.release()'
1705 # after this block from doing so for subsequent
1707 # after this block from doing so for subsequent
1706 # changing files
1708 # changing files
1707 tr = self._repo.currenttransaction()
1709 tr = self._repo.currenttransaction()
1708 self._repo.dirstate.write(tr)
1710 self._repo.dirstate.write(tr)
1709
1711
1710 if poststatus:
1712 if poststatus:
1711 for ps in poststatus:
1713 for ps in poststatus:
1712 ps(self, status)
1714 ps(self, status)
1713 else:
1715 else:
1714 # in this case, writing changes out breaks
1716 # in this case, writing changes out breaks
1715 # consistency, because .hg/dirstate was
1717 # consistency, because .hg/dirstate was
1716 # already changed simultaneously after last
1718 # already changed simultaneously after last
1717 # caching (see also issue5584 for detail)
1719 # caching (see also issue5584 for detail)
1718 self._repo.ui.debug('skip updating dirstate: '
1720 self._repo.ui.debug('skip updating dirstate: '
1719 'identity mismatch\n')
1721 'identity mismatch\n')
1720 except error.LockError:
1722 except error.LockError:
1721 pass
1723 pass
1722 finally:
1724 finally:
1723 # Even if the wlock couldn't be grabbed, clear out the list.
1725 # Even if the wlock couldn't be grabbed, clear out the list.
1724 self._repo.clearpostdsstatus()
1726 self._repo.clearpostdsstatus()
1725
1727
1726 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1728 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1727 '''Gets the status from the dirstate -- internal use only.'''
1729 '''Gets the status from the dirstate -- internal use only.'''
1728 subrepos = []
1730 subrepos = []
1729 if '.hgsub' in self:
1731 if '.hgsub' in self:
1730 subrepos = sorted(self.substate)
1732 subrepos = sorted(self.substate)
1731 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1733 cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored,
1732 clean=clean, unknown=unknown)
1734 clean=clean, unknown=unknown)
1733
1735
1734 # check for any possibly clean files
1736 # check for any possibly clean files
1735 fixup = []
1737 fixup = []
1736 if cmp:
1738 if cmp:
1737 modified2, deleted2, fixup = self._checklookup(cmp)
1739 modified2, deleted2, fixup = self._checklookup(cmp)
1738 s.modified.extend(modified2)
1740 s.modified.extend(modified2)
1739 s.deleted.extend(deleted2)
1741 s.deleted.extend(deleted2)
1740
1742
1741 if fixup and clean:
1743 if fixup and clean:
1742 s.clean.extend(fixup)
1744 s.clean.extend(fixup)
1743
1745
1744 self._poststatusfixup(s, fixup)
1746 self._poststatusfixup(s, fixup)
1745
1747
1746 if match.always():
1748 if match.always():
1747 # cache for performance
1749 # cache for performance
1748 if s.unknown or s.ignored or s.clean:
1750 if s.unknown or s.ignored or s.clean:
1749 # "_status" is cached with list*=False in the normal route
1751 # "_status" is cached with list*=False in the normal route
1750 self._status = scmutil.status(s.modified, s.added, s.removed,
1752 self._status = scmutil.status(s.modified, s.added, s.removed,
1751 s.deleted, [], [], [])
1753 s.deleted, [], [], [])
1752 else:
1754 else:
1753 self._status = s
1755 self._status = s
1754
1756
1755 return s
1757 return s
1756
1758
1757 @propertycache
1759 @propertycache
1758 def _manifest(self):
1760 def _manifest(self):
1759 """generate a manifest corresponding to the values in self._status
1761 """generate a manifest corresponding to the values in self._status
1760
1762
1761 This reuse the file nodeid from parent, but we use special node
1763 This reuse the file nodeid from parent, but we use special node
1762 identifiers for added and modified files. This is used by manifests
1764 identifiers for added and modified files. This is used by manifests
1763 merge to see that files are different and by update logic to avoid
1765 merge to see that files are different and by update logic to avoid
1764 deleting newly added files.
1766 deleting newly added files.
1765 """
1767 """
1766 return self._buildstatusmanifest(self._status)
1768 return self._buildstatusmanifest(self._status)
1767
1769
1768 def _buildstatusmanifest(self, status):
1770 def _buildstatusmanifest(self, status):
1769 """Builds a manifest that includes the given status results."""
1771 """Builds a manifest that includes the given status results."""
1770 parents = self.parents()
1772 parents = self.parents()
1771
1773
1772 man = parents[0].manifest().copy()
1774 man = parents[0].manifest().copy()
1773
1775
1774 ff = self._flagfunc
1776 ff = self._flagfunc
1775 for i, l in ((addednodeid, status.added),
1777 for i, l in ((addednodeid, status.added),
1776 (modifiednodeid, status.modified)):
1778 (modifiednodeid, status.modified)):
1777 for f in l:
1779 for f in l:
1778 man[f] = i
1780 man[f] = i
1779 try:
1781 try:
1780 man.setflag(f, ff(f))
1782 man.setflag(f, ff(f))
1781 except OSError:
1783 except OSError:
1782 pass
1784 pass
1783
1785
1784 for f in status.deleted + status.removed:
1786 for f in status.deleted + status.removed:
1785 if f in man:
1787 if f in man:
1786 del man[f]
1788 del man[f]
1787
1789
1788 return man
1790 return man
1789
1791
1790 def _buildstatus(self, other, s, match, listignored, listclean,
1792 def _buildstatus(self, other, s, match, listignored, listclean,
1791 listunknown):
1793 listunknown):
1792 """build a status with respect to another context
1794 """build a status with respect to another context
1793
1795
1794 This includes logic for maintaining the fast path of status when
1796 This includes logic for maintaining the fast path of status when
1795 comparing the working directory against its parent, which is to skip
1797 comparing the working directory against its parent, which is to skip
1796 building a new manifest if self (working directory) is not comparing
1798 building a new manifest if self (working directory) is not comparing
1797 against its parent (repo['.']).
1799 against its parent (repo['.']).
1798 """
1800 """
1799 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1801 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1800 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1802 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1801 # might have accidentally ended up with the entire contents of the file
1803 # might have accidentally ended up with the entire contents of the file
1802 # they are supposed to be linking to.
1804 # they are supposed to be linking to.
1803 s.modified[:] = self._filtersuspectsymlink(s.modified)
1805 s.modified[:] = self._filtersuspectsymlink(s.modified)
1804 if other != self._repo['.']:
1806 if other != self._repo['.']:
1805 s = super(workingctx, self)._buildstatus(other, s, match,
1807 s = super(workingctx, self)._buildstatus(other, s, match,
1806 listignored, listclean,
1808 listignored, listclean,
1807 listunknown)
1809 listunknown)
1808 return s
1810 return s
1809
1811
1810 def _matchstatus(self, other, match):
1812 def _matchstatus(self, other, match):
1811 """override the match method with a filter for directory patterns
1813 """override the match method with a filter for directory patterns
1812
1814
1813 We use inheritance to customize the match.bad method only in cases of
1815 We use inheritance to customize the match.bad method only in cases of
1814 workingctx since it belongs only to the working directory when
1816 workingctx since it belongs only to the working directory when
1815 comparing against the parent changeset.
1817 comparing against the parent changeset.
1816
1818
1817 If we aren't comparing against the working directory's parent, then we
1819 If we aren't comparing against the working directory's parent, then we
1818 just use the default match object sent to us.
1820 just use the default match object sent to us.
1819 """
1821 """
1820 if other != self._repo['.']:
1822 if other != self._repo['.']:
1821 def bad(f, msg):
1823 def bad(f, msg):
1822 # 'f' may be a directory pattern from 'match.files()',
1824 # 'f' may be a directory pattern from 'match.files()',
1823 # so 'f not in ctx1' is not enough
1825 # so 'f not in ctx1' is not enough
1824 if f not in other and not other.hasdir(f):
1826 if f not in other and not other.hasdir(f):
1825 self._repo.ui.warn('%s: %s\n' %
1827 self._repo.ui.warn('%s: %s\n' %
1826 (self._repo.dirstate.pathto(f), msg))
1828 (self._repo.dirstate.pathto(f), msg))
1827 match.bad = bad
1829 match.bad = bad
1828 return match
1830 return match
1829
1831
1830 def markcommitted(self, node):
1832 def markcommitted(self, node):
1831 super(workingctx, self).markcommitted(node)
1833 super(workingctx, self).markcommitted(node)
1832
1834
1833 sparse.aftercommit(self._repo, node)
1835 sparse.aftercommit(self._repo, node)
1834
1836
1835 class committablefilectx(basefilectx):
1837 class committablefilectx(basefilectx):
1836 """A committablefilectx provides common functionality for a file context
1838 """A committablefilectx provides common functionality for a file context
1837 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1839 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1838 def __init__(self, repo, path, filelog=None, ctx=None):
1840 def __init__(self, repo, path, filelog=None, ctx=None):
1839 self._repo = repo
1841 self._repo = repo
1840 self._path = path
1842 self._path = path
1841 self._changeid = None
1843 self._changeid = None
1842 self._filerev = self._filenode = None
1844 self._filerev = self._filenode = None
1843
1845
1844 if filelog is not None:
1846 if filelog is not None:
1845 self._filelog = filelog
1847 self._filelog = filelog
1846 if ctx:
1848 if ctx:
1847 self._changectx = ctx
1849 self._changectx = ctx
1848
1850
1849 def __nonzero__(self):
1851 def __nonzero__(self):
1850 return True
1852 return True
1851
1853
1852 __bool__ = __nonzero__
1854 __bool__ = __nonzero__
1853
1855
1854 def linkrev(self):
1856 def linkrev(self):
1855 # linked to self._changectx no matter if file is modified or not
1857 # linked to self._changectx no matter if file is modified or not
1856 return self.rev()
1858 return self.rev()
1857
1859
1858 def parents(self):
1860 def parents(self):
1859 '''return parent filectxs, following copies if necessary'''
1861 '''return parent filectxs, following copies if necessary'''
1860 def filenode(ctx, path):
1862 def filenode(ctx, path):
1861 return ctx._manifest.get(path, nullid)
1863 return ctx._manifest.get(path, nullid)
1862
1864
1863 path = self._path
1865 path = self._path
1864 fl = self._filelog
1866 fl = self._filelog
1865 pcl = self._changectx._parents
1867 pcl = self._changectx._parents
1866 renamed = self.renamed()
1868 renamed = self.renamed()
1867
1869
1868 if renamed:
1870 if renamed:
1869 pl = [renamed + (None,)]
1871 pl = [renamed + (None,)]
1870 else:
1872 else:
1871 pl = [(path, filenode(pcl[0], path), fl)]
1873 pl = [(path, filenode(pcl[0], path), fl)]
1872
1874
1873 for pc in pcl[1:]:
1875 for pc in pcl[1:]:
1874 pl.append((path, filenode(pc, path), fl))
1876 pl.append((path, filenode(pc, path), fl))
1875
1877
1876 return [self._parentfilectx(p, fileid=n, filelog=l)
1878 return [self._parentfilectx(p, fileid=n, filelog=l)
1877 for p, n, l in pl if n != nullid]
1879 for p, n, l in pl if n != nullid]
1878
1880
1879 def children(self):
1881 def children(self):
1880 return []
1882 return []
1881
1883
1882 class workingfilectx(committablefilectx):
1884 class workingfilectx(committablefilectx):
1883 """A workingfilectx object makes access to data related to a particular
1885 """A workingfilectx object makes access to data related to a particular
1884 file in the working directory convenient."""
1886 file in the working directory convenient."""
1885 def __init__(self, repo, path, filelog=None, workingctx=None):
1887 def __init__(self, repo, path, filelog=None, workingctx=None):
1886 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1888 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1887
1889
1888 @propertycache
1890 @propertycache
1889 def _changectx(self):
1891 def _changectx(self):
1890 return workingctx(self._repo)
1892 return workingctx(self._repo)
1891
1893
1892 def data(self):
1894 def data(self):
1893 return self._repo.wread(self._path)
1895 return self._repo.wread(self._path)
1894 def renamed(self):
1896 def renamed(self):
1895 rp = self._repo.dirstate.copied(self._path)
1897 rp = self._repo.dirstate.copied(self._path)
1896 if not rp:
1898 if not rp:
1897 return None
1899 return None
1898 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1900 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1899
1901
1900 def size(self):
1902 def size(self):
1901 return self._repo.wvfs.lstat(self._path).st_size
1903 return self._repo.wvfs.lstat(self._path).st_size
1902 def date(self):
1904 def date(self):
1903 t, tz = self._changectx.date()
1905 t, tz = self._changectx.date()
1904 try:
1906 try:
1905 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1907 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1906 except OSError as err:
1908 except OSError as err:
1907 if err.errno != errno.ENOENT:
1909 if err.errno != errno.ENOENT:
1908 raise
1910 raise
1909 return (t, tz)
1911 return (t, tz)
1910
1912
1911 def exists(self):
1913 def exists(self):
1912 return self._repo.wvfs.exists(self._path)
1914 return self._repo.wvfs.exists(self._path)
1913
1915
1914 def lexists(self):
1916 def lexists(self):
1915 return self._repo.wvfs.lexists(self._path)
1917 return self._repo.wvfs.lexists(self._path)
1916
1918
1917 def audit(self):
1919 def audit(self):
1918 return self._repo.wvfs.audit(self._path)
1920 return self._repo.wvfs.audit(self._path)
1919
1921
1920 def cmp(self, fctx):
1922 def cmp(self, fctx):
1921 """compare with other file context
1923 """compare with other file context
1922
1924
1923 returns True if different than fctx.
1925 returns True if different than fctx.
1924 """
1926 """
1925 # fctx should be a filectx (not a workingfilectx)
1927 # fctx should be a filectx (not a workingfilectx)
1926 # invert comparison to reuse the same code path
1928 # invert comparison to reuse the same code path
1927 return fctx.cmp(self)
1929 return fctx.cmp(self)
1928
1930
1929 def remove(self, ignoremissing=False):
1931 def remove(self, ignoremissing=False):
1930 """wraps unlink for a repo's working directory"""
1932 """wraps unlink for a repo's working directory"""
1931 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1933 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1932
1934
1933 def write(self, data, flags, backgroundclose=False):
1935 def write(self, data, flags, backgroundclose=False):
1934 """wraps repo.wwrite"""
1936 """wraps repo.wwrite"""
1935 self._repo.wwrite(self._path, data, flags,
1937 self._repo.wwrite(self._path, data, flags,
1936 backgroundclose=backgroundclose)
1938 backgroundclose=backgroundclose)
1937
1939
1938 def markcopied(self, src):
1940 def markcopied(self, src):
1939 """marks this file a copy of `src`"""
1941 """marks this file a copy of `src`"""
1940 if self._repo.dirstate[self._path] in "nma":
1942 if self._repo.dirstate[self._path] in "nma":
1941 self._repo.dirstate.copy(src, self._path)
1943 self._repo.dirstate.copy(src, self._path)
1942
1944
1943 def clearunknown(self):
1945 def clearunknown(self):
1944 """Removes conflicting items in the working directory so that
1946 """Removes conflicting items in the working directory so that
1945 ``write()`` can be called successfully.
1947 ``write()`` can be called successfully.
1946 """
1948 """
1947 wvfs = self._repo.wvfs
1949 wvfs = self._repo.wvfs
1948 f = self._path
1950 f = self._path
1949 wvfs.audit(f)
1951 wvfs.audit(f)
1950 if wvfs.isdir(f) and not wvfs.islink(f):
1952 if wvfs.isdir(f) and not wvfs.islink(f):
1951 wvfs.rmtree(f, forcibly=True)
1953 wvfs.rmtree(f, forcibly=True)
1952 for p in reversed(list(util.finddirs(f))):
1954 for p in reversed(list(util.finddirs(f))):
1953 if wvfs.isfileorlink(p):
1955 if wvfs.isfileorlink(p):
1954 wvfs.unlink(p)
1956 wvfs.unlink(p)
1955 break
1957 break
1956
1958
1957 def setflags(self, l, x):
1959 def setflags(self, l, x):
1958 self._repo.wvfs.setflags(self._path, l, x)
1960 self._repo.wvfs.setflags(self._path, l, x)
1959
1961
1960 class overlayworkingctx(workingctx):
1962 class overlayworkingctx(workingctx):
1961 """Wraps another mutable context with a write-back cache that can be flushed
1963 """Wraps another mutable context with a write-back cache that can be flushed
1962 at a later time.
1964 at a later time.
1963
1965
1964 self._cache[path] maps to a dict with keys: {
1966 self._cache[path] maps to a dict with keys: {
1965 'exists': bool?
1967 'exists': bool?
1966 'date': date?
1968 'date': date?
1967 'data': str?
1969 'data': str?
1968 'flags': str?
1970 'flags': str?
1969 }
1971 }
1970 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1972 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
1971 is `False`, the file was deleted.
1973 is `False`, the file was deleted.
1972 """
1974 """
1973
1975
1974 def __init__(self, repo, wrappedctx):
1976 def __init__(self, repo, wrappedctx):
1975 super(overlayworkingctx, self).__init__(repo)
1977 super(overlayworkingctx, self).__init__(repo)
1976 self._repo = repo
1978 self._repo = repo
1977 self._wrappedctx = wrappedctx
1979 self._wrappedctx = wrappedctx
1978 self._clean()
1980 self._clean()
1979
1981
1980 def data(self, path):
1982 def data(self, path):
1981 if self.isdirty(path):
1983 if self.isdirty(path):
1982 if self._cache[path]['exists']:
1984 if self._cache[path]['exists']:
1983 if self._cache[path]['data']:
1985 if self._cache[path]['data']:
1984 return self._cache[path]['data']
1986 return self._cache[path]['data']
1985 else:
1987 else:
1986 # Must fallback here, too, because we only set flags.
1988 # Must fallback here, too, because we only set flags.
1987 return self._wrappedctx[path].data()
1989 return self._wrappedctx[path].data()
1988 else:
1990 else:
1989 raise error.ProgrammingError("No such file or directory: %s" %
1991 raise error.ProgrammingError("No such file or directory: %s" %
1990 self._path)
1992 self._path)
1991 else:
1993 else:
1992 return self._wrappedctx[path].data()
1994 return self._wrappedctx[path].data()
1993
1995
1994 def isinmemory(self):
1996 def isinmemory(self):
1995 return True
1997 return True
1996
1998
1997 def filedate(self, path):
1999 def filedate(self, path):
1998 if self.isdirty(path):
2000 if self.isdirty(path):
1999 return self._cache[path]['date']
2001 return self._cache[path]['date']
2000 else:
2002 else:
2001 return self._wrappedctx[path].date()
2003 return self._wrappedctx[path].date()
2002
2004
2003 def flags(self, path):
2005 def flags(self, path):
2004 if self.isdirty(path):
2006 if self.isdirty(path):
2005 if self._cache[path]['exists']:
2007 if self._cache[path]['exists']:
2006 return self._cache[path]['flags']
2008 return self._cache[path]['flags']
2007 else:
2009 else:
2008 raise error.ProgrammingError("No such file or directory: %s" %
2010 raise error.ProgrammingError("No such file or directory: %s" %
2009 self._path)
2011 self._path)
2010 else:
2012 else:
2011 return self._wrappedctx[path].flags()
2013 return self._wrappedctx[path].flags()
2012
2014
2013 def write(self, path, data, flags=''):
2015 def write(self, path, data, flags=''):
2014 if data is None:
2016 if data is None:
2015 raise error.ProgrammingError("data must be non-None")
2017 raise error.ProgrammingError("data must be non-None")
2016 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2018 self._markdirty(path, exists=True, data=data, date=util.makedate(),
2017 flags=flags)
2019 flags=flags)
2018
2020
2019 def setflags(self, path, l, x):
2021 def setflags(self, path, l, x):
2020 self._markdirty(path, exists=True, date=util.makedate(),
2022 self._markdirty(path, exists=True, date=util.makedate(),
2021 flags=(l and 'l' or '') + (x and 'x' or ''))
2023 flags=(l and 'l' or '') + (x and 'x' or ''))
2022
2024
2023 def remove(self, path):
2025 def remove(self, path):
2024 self._markdirty(path, exists=False)
2026 self._markdirty(path, exists=False)
2025
2027
2026 def exists(self, path):
2028 def exists(self, path):
2027 """exists behaves like `lexists`, but needs to follow symlinks and
2029 """exists behaves like `lexists`, but needs to follow symlinks and
2028 return False if they are broken.
2030 return False if they are broken.
2029 """
2031 """
2030 if self.isdirty(path):
2032 if self.isdirty(path):
2031 # If this path exists and is a symlink, "follow" it by calling
2033 # If this path exists and is a symlink, "follow" it by calling
2032 # exists on the destination path.
2034 # exists on the destination path.
2033 if (self._cache[path]['exists'] and
2035 if (self._cache[path]['exists'] and
2034 'l' in self._cache[path]['flags']):
2036 'l' in self._cache[path]['flags']):
2035 return self.exists(self._cache[path]['data'].strip())
2037 return self.exists(self._cache[path]['data'].strip())
2036 else:
2038 else:
2037 return self._cache[path]['exists']
2039 return self._cache[path]['exists']
2038 return self._wrappedctx[path].exists()
2040 return self._wrappedctx[path].exists()
2039
2041
def lexists(self, path):
    """lexists returns True if the path exists (symlinks not followed)."""
    return (self._cache[path]['exists'] if self.isdirty(path)
            else self._wrappedctx[path].lexists())
2045
2047
def size(self, path):
    """Return the size of ``path`` in bytes.

    Dirty files are measured from the cached data; clean files are
    delegated to the wrapped context.

    Raises ``error.ProgrammingError`` if the file was removed in the
    overlay.
    """
    if self.isdirty(path):
        if self._cache[path]['exists']:
            return len(self._cache[path]['data'])
        # BUG FIX: the message previously interpolated ``self._path``,
        # an attribute this context class does not have (it belongs to
        # file contexts), so the intended ProgrammingError was masked
        # by an AttributeError. Use the method's ``path`` argument.
        raise error.ProgrammingError("No such file or directory: %s" %
                                     path)
    return self._wrappedctx[path].size()
2054
2056
def flushall(self):
    """Flush every cached change through to the wrapped context.

    Entries are replayed in the order they were first dirtied
    (``self._writeorder``) so operations land in the same sequence the
    caller performed them. The cache is reset afterwards.
    """
    for path in self._writeorder:
        entry = self._cache[path]
        if entry['exists']:
            # Clear any "unknown file" state before (re)creating the file.
            self._wrappedctx[path].clearunknown()
            if entry['data'] is not None:
                if entry['flags'] is None:
                    raise error.ProgrammingError('data set but not flags')
                self._wrappedctx[path].write(
                    entry['data'],
                    entry['flags'])
            else:
                # Flags-only change (no content recorded): just chmod/link.
                self._wrappedctx[path].setflags(
                    'l' in entry['flags'],
                    'x' in entry['flags'])
        else:
            # NOTE(review): filectx.remove() takes ``ignoremissing``, not a
            # path; passing ``path`` here makes that flag truthy -- confirm
            # this is intended rather than ``remove()``.
            self._wrappedctx[path].remove(path)
    self._clean()
2073
2075
def isdirty(self, path):
    # A path is "dirty" exactly when _markdirty() has recorded a cache
    # entry for it.
    return path in self._cache
2076
2078
def _clean(self):
    # Forget all pending changes; called after flushall() replays them.
    self._cache = {}
    self._writeorder = []
2080
2082
2081 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2083 def _markdirty(self, path, exists, data=None, date=None, flags=''):
2082 if path not in self._cache:
2084 if path not in self._cache:
2083 self._writeorder.append(path)
2085 self._writeorder.append(path)
2084
2086
2085 self._cache[path] = {
2087 self._cache[path] = {
2086 'exists': exists,
2088 'exists': exists,
2087 'data': data,
2089 'data': data,
2088 'date': date,
2090 'date': date,
2089 'flags': flags,
2091 'flags': flags,
2090 }
2092 }
2091
2093
def filectx(self, path, filelog=None):
    """Return an overlay-aware file context for ``path``."""
    return overlayworkingfilectx(
        self._repo, path, parent=self, filelog=filelog)
2095
2097
class overlayworkingfilectx(workingfilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog,
                                                    parent)
        self._repo = repo
        self._parent = parent  # the owning overlay change context
        self._path = path

    def cmp(self, fctx):
        # Returns True when content differs from fctx (filectx.cmp contract).
        return self.data() != fctx.data()

    def ctx(self):
        """Return the change context this file belongs to."""
        return self._parent

    def data(self):
        # Content is served by the parent overlay ctx (cache or wrapped ctx).
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # NOTE(review): exists() delegates to lexists() while lexists()
        # calls the parent's exists() (which follows symlinks) -- the
        # naming looks crossed; confirm against the overlay ctx methods.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def renamed(self):
        # Copies are currently tracked in the dirstate as before. Straight copy
        # from workingfilectx.
        rp = self._repo.dirstate.copied(self._path)
        if not rp:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def size(self):
        return self._parent.size(self._path)

    def audit(self):
        # No filesystem path auditing is needed for in-memory files.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False):
        # backgroundclose is accepted for interface parity but unused here.
        return self._parent.write(self._path, data, flags)

    def remove(self, ignoremissing=False):
        # ignoremissing is accepted for interface parity but unused here.
        return self._parent.remove(self._path)
2150
2152
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """
    def __init__(self, repo, changes,
                 text="", user=None, date=None, extra=None):
        # Deliberately starts the super() chain *after* workingctx (note
        # the first argument): status is supplied via ``changes`` rather
        # than computed from the working directory.
        super(workingctx, self).__init__(repo, text, user, date, extra,
                                         changes)

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything managed but untouched by this commit is "clean".
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status([f for f in self._status.modified if match(f)],
                              [f for f in self._status.added if match(f)],
                              [f for f in self._status.removed if match(f)],
                              [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2186
2188
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: hit the cache first, fall back to computing and storing.
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2202
2204
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """
    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # this is weird but apparently we only keep track of one parent
        # (why not only store that instead of a tuple?)
        renamed = fctx.renamed()
        copied = renamed[0] if renamed else renamed
        return memfilectx(repo, path, fctx.data(),
                          islink=fctx.islink(), isexec=fctx.isexec(),
                          copied=copied, memctx=memctx)

    return getfilectx
2221
2223
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """
    def getfilectx(repo, memctx, path):
        filedata, mode, copysource = patchstore.getfile(path)
        if filedata is None:
            # The file was removed by the patch.
            return None
        islink, isexec = mode
        return memfilectx(repo, path, filedata, islink=islink,
                          isexec=isexec, copied=copysource,
                          memctx=memctx)

    return getfilectx
2237
2239
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by util.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None, branch=None, editor=False):
        super(memctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        # Missing parents are normalized to the null revision.
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        if branch is not None:
            self._extra['branch'] = encoding.fromlocal(branch)
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            p1node = nullid
            p2node = nullid
            p = pctx[f].parents() # if file isn't in pctx, check p2?
            if len(p) > 0:
                p1node = p[0].filenode()
                if len(p) > 1:
                    p2node = p[1].filenode()
            man[f] = revlog.hash(self[f].data(), p1node, p2node)

        for f in self._status.added:
            # Added files have no filelog parents.
            man[f] = revlog.hash(self[f].data(), nullid, nullid)

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                # filectxfn returned a file context: the file has content,
                # so it counts as modified.
                modified.append(f)
            else:
                # filectxfn returned None: the file was removed.
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2360
2362
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """
    def __init__(self, repo, path, data, islink=False,
                 isexec=False, copied=None, memctx=None):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, memctx)
        self._data = data
        flagstr = 'l' if islink else ''
        if isexec:
            flagstr += 'x'
        self._flags = flagstr
        self._copied = (copied, nullid) if copied else None

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags):
        """wraps repo.wwrite"""
        # Only the content is replaced; flags are left untouched here.
        self._data = data
2393
2395
class overlayfilectx(committablefilectx):
    """Like memfilectx but take an original filectx and optional parameters to
    override parts of it. This is useful when fctx.data() is expensive (i.e.
    flag processor is expensive) and raw data, flags, and filenode could be
    reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
    """

    def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
                 copied=None, ctx=None):
        """originalfctx: filecontext to duplicate

        datafunc: None or a function to override data (file content). It is a
        function to be lazy. path, flags, copied, ctx: None or overridden value

        copied could be (path, rev), or False. copied could also be just path,
        and will be converted to (path, nullid). This simplifies some callers.
        """

        if path is None:
            path = originalfctx.path()
        if ctx is None:
            ctx = originalfctx.changectx()
            # ctx not overridden -> trivially "matches" for the reuse test.
            ctxmatch = lambda: True
        else:
            ctxmatch = lambda: ctx == originalfctx.changectx()

        repo = originalfctx.repo()
        flog = originalfctx.filelog()
        super(overlayfilectx, self).__init__(repo, path, flog, ctx)

        if copied is None:
            copied = originalfctx.renamed()
            copiedmatch = lambda: True
        else:
            if copied and not isinstance(copied, tuple):
                # repo._filecommit will recalculate copyrev so nullid is okay
                copied = (copied, nullid)
            copiedmatch = lambda: copied == originalfctx.renamed()

        # When data, copied (could affect data), ctx (could affect filelog
        # parents) are not overridden, rawdata, rawflags, and filenode may be
        # reused (repo._filecommit should double check filelog parents).
        #
        # path, flags are not hashed in filelog (but in manifestlog) so they do
        # not affect reusable here.
        #
        # If ctx or copied is overridden to a same value with originalfctx,
        # still consider it's reusable. originalfctx.renamed() may be a bit
        # expensive so it's not called unless necessary. Assuming datafunc is
        # always expensive, do not call it for this "reusable" test.
        reusable = datafunc is None and ctxmatch() and copiedmatch()

        if datafunc is None:
            datafunc = originalfctx.data
        if flags is None:
            flags = originalfctx.flags()

        self._datafunc = datafunc
        self._flags = flags
        self._copied = copied

        if reusable:
            # copy extra fields from originalfctx
            attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
            for attr_ in attrs:
                if util.safehasattr(originalfctx, attr_):
                    setattr(self, attr_, getattr(originalfctx, attr_))

    def data(self):
        """Evaluate and return the (possibly overridden) file content."""
        return self._datafunc()
2464
2466
2465 class metadataonlyctx(committablectx):
2467 class metadataonlyctx(committablectx):
2466 """Like memctx but it's reusing the manifest of different commit.
2468 """Like memctx but it's reusing the manifest of different commit.
2467 Intended to be used by lightweight operations that are creating
2469 Intended to be used by lightweight operations that are creating
2468 metadata-only changes.
2470 metadata-only changes.
2469
2471
2470 Revision information is supplied at initialization time. 'repo' is the
2472 Revision information is supplied at initialization time. 'repo' is the
2471 current localrepo, 'ctx' is original revision which manifest we're reuisng
2473 current localrepo, 'ctx' is original revision which manifest we're reuisng
2472 'parents' is a sequence of two parent revisions identifiers (pass None for
2474 'parents' is a sequence of two parent revisions identifiers (pass None for
2473 every missing parent), 'text' is the commit.
2475 every missing parent), 'text' is the commit.
2474
2476
2475 user receives the committer name and defaults to current repository
2477 user receives the committer name and defaults to current repository
2476 username, date is the commit date in any format supported by
2478 username, date is the commit date in any format supported by
2477 util.parsedate() and defaults to current date, extra is a dictionary of
2479 util.parsedate() and defaults to current date, extra is a dictionary of
2478 metadata or is left empty.
2480 metadata or is left empty.
2479 """
2481 """
2480 def __new__(cls, repo, originalctx, *args, **kwargs):
2482 def __new__(cls, repo, originalctx, *args, **kwargs):
2481 return super(metadataonlyctx, cls).__new__(cls, repo)
2483 return super(metadataonlyctx, cls).__new__(cls, repo)
2482
2484
2483 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2485 def __init__(self, repo, originalctx, parents=None, text=None, user=None,
2484 date=None, extra=None, editor=False):
2486 date=None, extra=None, editor=False):
2485 if text is None:
2487 if text is None:
2486 text = originalctx.description()
2488 text = originalctx.description()
2487 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2489 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2488 self._rev = None
2490 self._rev = None
2489 self._node = None
2491 self._node = None
2490 self._originalctx = originalctx
2492 self._originalctx = originalctx
2491 self._manifestnode = originalctx.manifestnode()
2493 self._manifestnode = originalctx.manifestnode()
2492 if parents is None:
2494 if parents is None:
2493 parents = originalctx.parents()
2495 parents = originalctx.parents()
2494 else:
2496 else:
2495 parents = [repo[p] for p in parents if p is not None]
2497 parents = [repo[p] for p in parents if p is not None]
2496 parents = parents[:]
2498 parents = parents[:]
2497 while len(parents) < 2:
2499 while len(parents) < 2:
2498 parents.append(repo[nullid])
2500 parents.append(repo[nullid])
2499 p1, p2 = self._parents = parents
2501 p1, p2 = self._parents = parents
2500
2502
2501 # sanity check to ensure that the reused manifest parents are
2503 # sanity check to ensure that the reused manifest parents are
2502 # manifests of our commit parents
2504 # manifests of our commit parents
2503 mp1, mp2 = self.manifestctx().parents
2505 mp1, mp2 = self.manifestctx().parents
2504 if p1 != nullid and p1.manifestnode() != mp1:
2506 if p1 != nullid and p1.manifestnode() != mp1:
2505 raise RuntimeError('can\'t reuse the manifest: '
2507 raise RuntimeError('can\'t reuse the manifest: '
2506 'its p1 doesn\'t match the new ctx p1')
2508 'its p1 doesn\'t match the new ctx p1')
2507 if p2 != nullid and p2.manifestnode() != mp2:
2509 if p2 != nullid and p2.manifestnode() != mp2:
2508 raise RuntimeError('can\'t reuse the manifest: '
2510 raise RuntimeError('can\'t reuse the manifest: '
2509 'its p2 doesn\'t match the new ctx p2')
2511 'its p2 doesn\'t match the new ctx p2')
2510
2512
2511 self._files = originalctx.files()
2513 self._files = originalctx.files()
2512 self.substate = {}
2514 self.substate = {}
2513
2515
2514 if editor:
2516 if editor:
2515 self._text = editor(self._repo, self, [])
2517 self._text = editor(self._repo, self, [])
2516 self._repo.savecommitmessage(self._text)
2518 self._repo.savecommitmessage(self._text)
2517
2519
2518 def manifestnode(self):
2520 def manifestnode(self):
2519 return self._manifestnode
2521 return self._manifestnode
2520
2522
2521 @property
2523 @property
2522 def _manifestctx(self):
2524 def _manifestctx(self):
2523 return self._repo.manifestlog[self._manifestnode]
2525 return self._repo.manifestlog[self._manifestnode]
2524
2526
2525 def filectx(self, path, filelog=None):
2527 def filectx(self, path, filelog=None):
2526 return self._originalctx.filectx(path, filelog=filelog)
2528 return self._originalctx.filectx(path, filelog=filelog)
2527
2529
2528 def commit(self):
2530 def commit(self):
2529 """commit context to the repo"""
2531 """commit context to the repo"""
2530 return self._repo.commitctx(self)
2532 return self._repo.commitctx(self)
2531
2533
2532 @property
2534 @property
2533 def _manifest(self):
2535 def _manifest(self):
2534 return self._originalctx.manifest()
2536 return self._originalctx.manifest()
2535
2537
2536 @propertycache
2538 @propertycache
2537 def _status(self):
2539 def _status(self):
2538 """Calculate exact status from ``files`` specified in the ``origctx``
2540 """Calculate exact status from ``files`` specified in the ``origctx``
2539 and parents manifests.
2541 and parents manifests.
2540 """
2542 """
2541 man1 = self.p1().manifest()
2543 man1 = self.p1().manifest()
2542 p2 = self._parents[1]
2544 p2 = self._parents[1]
2543 # "1 < len(self._parents)" can't be used for checking
2545 # "1 < len(self._parents)" can't be used for checking
2544 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2546 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2545 # explicitly initialized by the list, of which length is 2.
2547 # explicitly initialized by the list, of which length is 2.
2546 if p2.node() != nullid:
2548 if p2.node() != nullid:
2547 man2 = p2.manifest()
2549 man2 = p2.manifest()
2548 managing = lambda f: f in man1 or f in man2
2550 managing = lambda f: f in man1 or f in man2
2549 else:
2551 else:
2550 managing = lambda f: f in man1
2552 managing = lambda f: f in man1
2551
2553
2552 modified, added, removed = [], [], []
2554 modified, added, removed = [], [], []
2553 for f in self._files:
2555 for f in self._files:
2554 if not managing(f):
2556 if not managing(f):
2555 added.append(f)
2557 added.append(f)
2556 elif f in self:
2558 elif f in self:
2557 modified.append(f)
2559 modified.append(f)
2558 else:
2560 else:
2559 removed.append(f)
2561 removed.append(f)
2560
2562
2561 return scmutil.status(modified, added, removed, [], [], [], [])
2563 return scmutil.status(modified, added, removed, [], [], [], [])
2562
2564
2563 class arbitraryfilectx(object):
2565 class arbitraryfilectx(object):
2564 """Allows you to use filectx-like functions on a file in an arbitrary
2566 """Allows you to use filectx-like functions on a file in an arbitrary
2565 location on disk, possibly not in the working directory.
2567 location on disk, possibly not in the working directory.
2566 """
2568 """
2567 def __init__(self, path, repo=None):
2569 def __init__(self, path, repo=None):
2568 # Repo is optional because contrib/simplemerge uses this class.
2570 # Repo is optional because contrib/simplemerge uses this class.
2569 self._repo = repo
2571 self._repo = repo
2570 self._path = path
2572 self._path = path
2571
2573
2572 def cmp(self, fctx):
2574 def cmp(self, fctx):
2573 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2575 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2574 # path if either side is a symlink.
2576 # path if either side is a symlink.
2575 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2577 symlinks = ('l' in self.flags() or 'l' in fctx.flags())
2576 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2578 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2577 # Add a fast-path for merge if both sides are disk-backed.
2579 # Add a fast-path for merge if both sides are disk-backed.
2578 # Note that filecmp uses the opposite return values (True if same)
2580 # Note that filecmp uses the opposite return values (True if same)
2579 # from our cmp functions (True if different).
2581 # from our cmp functions (True if different).
2580 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2582 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2581 return self.data() != fctx.data()
2583 return self.data() != fctx.data()
2582
2584
2583 def path(self):
2585 def path(self):
2584 return self._path
2586 return self._path
2585
2587
2586 def flags(self):
2588 def flags(self):
2587 return ''
2589 return ''
2588
2590
2589 def data(self):
2591 def data(self):
2590 return util.readfile(self._path)
2592 return util.readfile(self._path)
2591
2593
2592 def decodeddata(self):
2594 def decodeddata(self):
2593 with open(self._path, "rb") as f:
2595 with open(self._path, "rb") as f:
2594 return f.read()
2596 return f.read()
2595
2597
2596 def remove(self):
2598 def remove(self):
2597 util.unlink(self._path)
2599 util.unlink(self._path)
2598
2600
2599 def write(self, data, flags):
2601 def write(self, data, flags):
2600 assert not flags
2602 assert not flags
2601 with open(self._path, "w") as f:
2603 with open(self._path, "w") as f:
2602 f.write(data)
2604 f.write(data)
@@ -1,276 +1,272 b''
1 #require killdaemons
1 #require killdaemons
2
2
3 $ hg clone http://localhost:$HGPORT/ copy
3 $ hg clone http://localhost:$HGPORT/ copy
4 abort: * (glob)
4 abort: * (glob)
5 [255]
5 [255]
6 $ test -d copy
6 $ test -d copy
7 [1]
7 [1]
8
8
9 This server doesn't do range requests so it's basically only good for
9 This server doesn't do range requests so it's basically only good for
10 one pull
10 one pull
11
11
12 $ $PYTHON "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
12 $ $PYTHON "$TESTDIR/dumbhttp.py" -p $HGPORT --pid dumb.pid \
13 > --logfile server.log
13 > --logfile server.log
14 $ cat dumb.pid >> $DAEMON_PIDS
14 $ cat dumb.pid >> $DAEMON_PIDS
15 $ hg init remote
15 $ hg init remote
16 $ cd remote
16 $ cd remote
17 $ echo foo > bar
17 $ echo foo > bar
18 $ echo c2 > '.dotfile with spaces'
18 $ echo c2 > '.dotfile with spaces'
19 $ hg add
19 $ hg add
20 adding .dotfile with spaces
20 adding .dotfile with spaces
21 adding bar
21 adding bar
22 $ hg commit -m"test"
22 $ hg commit -m"test"
23 $ hg tip
23 $ hg tip
24 changeset: 0:02770d679fb8
24 changeset: 0:02770d679fb8
25 tag: tip
25 tag: tip
26 user: test
26 user: test
27 date: Thu Jan 01 00:00:00 1970 +0000
27 date: Thu Jan 01 00:00:00 1970 +0000
28 summary: test
28 summary: test
29
29
30 $ cd ..
30 $ cd ..
31 $ hg clone static-http://localhost:$HGPORT/remote local
31 $ hg clone static-http://localhost:$HGPORT/remote local
32 requesting all changes
32 requesting all changes
33 adding changesets
33 adding changesets
34 adding manifests
34 adding manifests
35 adding file changes
35 adding file changes
36 added 1 changesets with 2 changes to 2 files
36 added 1 changesets with 2 changes to 2 files
37 new changesets 02770d679fb8
37 new changesets 02770d679fb8
38 updating to branch default
38 updating to branch default
39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
39 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 $ cd local
40 $ cd local
41 $ hg verify
41 $ hg verify
42 checking changesets
42 checking changesets
43 checking manifests
43 checking manifests
44 crosschecking files in changesets and manifests
44 crosschecking files in changesets and manifests
45 checking files
45 checking files
46 2 files, 1 changesets, 2 total revisions
46 2 files, 1 changesets, 2 total revisions
47 $ cat bar
47 $ cat bar
48 foo
48 foo
49 $ cd ../remote
49 $ cd ../remote
50 $ echo baz > quux
50 $ echo baz > quux
51 $ hg commit -A -mtest2
51 $ hg commit -A -mtest2
52 adding quux
52 adding quux
53
53
54 check for HTTP opener failures when cachefile does not exist
54 check for HTTP opener failures when cachefile does not exist
55
55
56 $ rm .hg/cache/*
56 $ rm .hg/cache/*
57 $ cd ../local
57 $ cd ../local
58 $ cat >> .hg/hgrc <<EOF
58 $ cat >> .hg/hgrc <<EOF
59 > [hooks]
59 > [hooks]
60 > changegroup = sh -c "printenv.py changegroup"
60 > changegroup = sh -c "printenv.py changegroup"
61 > EOF
61 > EOF
62 $ hg pull
62 $ hg pull
63 pulling from static-http://localhost:$HGPORT/remote
63 pulling from static-http://localhost:$HGPORT/remote
64 searching for changes
64 searching for changes
65 adding changesets
65 adding changesets
66 adding manifests
66 adding manifests
67 adding file changes
67 adding file changes
68 added 1 changesets with 1 changes to 1 files
68 added 1 changesets with 1 changes to 1 files
69 new changesets 4ac2e3648604
69 new changesets 4ac2e3648604
70 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT/remote
70 changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_NODE_LAST=4ac2e3648604439c580c69b09ec9d93a88d93432 HG_SOURCE=pull HG_TXNID=TXN:$ID$ HG_URL=http://localhost:$HGPORT/remote
71 (run 'hg update' to get a working copy)
71 (run 'hg update' to get a working copy)
72
72
73 trying to push
73 trying to push
74
74
75 $ hg update
75 $ hg update
76 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
76 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 $ echo more foo >> bar
77 $ echo more foo >> bar
78 $ hg commit -m"test"
78 $ hg commit -m"test"
79 $ hg push
79 $ hg push
80 pushing to static-http://localhost:$HGPORT/remote
80 pushing to static-http://localhost:$HGPORT/remote
81 abort: destination does not support push
81 abort: destination does not support push
82 [255]
82 [255]
83
83
84 trying clone -r
84 trying clone -r
85
85
86 $ cd ..
86 $ cd ..
87 $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
87 $ hg clone -r doesnotexist static-http://localhost:$HGPORT/remote local0
88 abort: unknown revision 'doesnotexist'!
88 abort: unknown revision 'doesnotexist'!
89 [255]
89 [255]
90 $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
90 $ hg clone -r 0 static-http://localhost:$HGPORT/remote local0
91 adding changesets
91 adding changesets
92 adding manifests
92 adding manifests
93 adding file changes
93 adding file changes
94 added 1 changesets with 2 changes to 2 files
94 added 1 changesets with 2 changes to 2 files
95 new changesets 02770d679fb8
95 new changesets 02770d679fb8
96 updating to branch default
96 updating to branch default
97 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
97 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
98
98
99 test with "/" URI (issue747) and subrepo
99 test with "/" URI (issue747) and subrepo
100
100
101 $ hg init
101 $ hg init
102 $ hg init sub
102 $ hg init sub
103 $ touch sub/test
103 $ touch sub/test
104 $ hg -R sub commit -A -m "test"
104 $ hg -R sub commit -A -m "test"
105 adding test
105 adding test
106 $ hg -R sub tag not-empty
106 $ hg -R sub tag not-empty
107 $ echo sub=sub > .hgsub
107 $ echo sub=sub > .hgsub
108 $ echo a > a
108 $ echo a > a
109 $ hg add a .hgsub
109 $ hg add a .hgsub
110 $ hg -q ci -ma
110 $ hg -q ci -ma
111 $ hg clone static-http://localhost:$HGPORT/ local2
111 $ hg clone static-http://localhost:$HGPORT/ local2
112 requesting all changes
112 requesting all changes
113 adding changesets
113 adding changesets
114 adding manifests
114 adding manifests
115 adding file changes
115 adding file changes
116 added 1 changesets with 3 changes to 3 files
116 added 1 changesets with 3 changes to 3 files
117 new changesets a9ebfbe8e587
117 new changesets a9ebfbe8e587
118 updating to branch default
118 updating to branch default
119 cloning subrepo sub from static-http://localhost:$HGPORT/sub
119 cloning subrepo sub from static-http://localhost:$HGPORT/sub
120 requesting all changes
120 requesting all changes
121 adding changesets
121 adding changesets
122 adding manifests
122 adding manifests
123 adding file changes
123 adding file changes
124 added 2 changesets with 2 changes to 2 files
124 added 2 changesets with 2 changes to 2 files
125 new changesets be090ea66256:322ea90975df
125 new changesets be090ea66256:322ea90975df
126 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
126 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
127 $ cd local2
127 $ cd local2
128 $ hg verify
128 $ hg verify
129 checking changesets
129 checking changesets
130 checking manifests
130 checking manifests
131 crosschecking files in changesets and manifests
131 crosschecking files in changesets and manifests
132 checking files
132 checking files
133 3 files, 1 changesets, 3 total revisions
133 3 files, 1 changesets, 3 total revisions
134 checking subrepo links
134 checking subrepo links
135 $ cat a
135 $ cat a
136 a
136 a
137 $ hg paths
137 $ hg paths
138 default = static-http://localhost:$HGPORT/
138 default = static-http://localhost:$HGPORT/
139
139
140 test with empty repo (issue965)
140 test with empty repo (issue965)
141
141
142 $ cd ..
142 $ cd ..
143 $ hg init remotempty
143 $ hg init remotempty
144 $ hg clone static-http://localhost:$HGPORT/remotempty local3
144 $ hg clone static-http://localhost:$HGPORT/remotempty local3
145 no changes found
145 no changes found
146 updating to branch default
146 updating to branch default
147 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
147 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
148 $ cd local3
148 $ cd local3
149 $ hg verify
149 $ hg verify
150 checking changesets
150 checking changesets
151 checking manifests
151 checking manifests
152 crosschecking files in changesets and manifests
152 crosschecking files in changesets and manifests
153 checking files
153 checking files
154 0 files, 0 changesets, 0 total revisions
154 0 files, 0 changesets, 0 total revisions
155 $ hg paths
155 $ hg paths
156 default = static-http://localhost:$HGPORT/remotempty
156 default = static-http://localhost:$HGPORT/remotempty
157
157
158 test with non-repo
158 test with non-repo
159
159
160 $ cd ..
160 $ cd ..
161 $ mkdir notarepo
161 $ mkdir notarepo
162 $ hg clone static-http://localhost:$HGPORT/notarepo local3
162 $ hg clone static-http://localhost:$HGPORT/notarepo local3
163 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
163 abort: 'http://localhost:$HGPORT/notarepo' does not appear to be an hg repository!
164 [255]
164 [255]
165
165
166 Clone with tags and branches works
166 Clone with tags and branches works
167
167
168 $ hg init remote-with-names
168 $ hg init remote-with-names
169 $ cd remote-with-names
169 $ cd remote-with-names
170 $ echo 0 > foo
170 $ echo 0 > foo
171 $ hg -q commit -A -m initial
171 $ hg -q commit -A -m initial
172 $ echo 1 > foo
172 $ echo 1 > foo
173 $ hg commit -m 'commit 1'
173 $ hg commit -m 'commit 1'
174 $ hg -q up 0
174 $ hg -q up 0
175 $ hg branch mybranch
175 $ hg branch mybranch
176 marked working directory as branch mybranch
176 marked working directory as branch mybranch
177 (branches are permanent and global, did you want a bookmark?)
177 (branches are permanent and global, did you want a bookmark?)
178 $ echo 2 > foo
178 $ echo 2 > foo
179 $ hg commit -m 'commit 2 (mybranch)'
179 $ hg commit -m 'commit 2 (mybranch)'
180 $ hg tag -r 1 'default-tag'
180 $ hg tag -r 1 'default-tag'
181 $ hg tag -r 2 'branch-tag'
181 $ hg tag -r 2 'branch-tag'
182
182
183 $ cd ..
183 $ cd ..
184
184
185 $ hg clone static-http://localhost:$HGPORT/remote-with-names local-with-names
185 $ hg clone static-http://localhost:$HGPORT/remote-with-names local-with-names
186 requesting all changes
186 requesting all changes
187 adding changesets
187 adding changesets
188 adding manifests
188 adding manifests
189 adding file changes
189 adding file changes
190 added 5 changesets with 5 changes to 2 files (+1 heads)
190 added 5 changesets with 5 changes to 2 files (+1 heads)
191 new changesets 68986213bd44:0c325bd2b5a7
191 new changesets 68986213bd44:0c325bd2b5a7
192 updating to branch default
192 updating to branch default
193 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
193 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
194
194
195 Clone a specific branch works
195 Clone a specific branch works
196
196
197 $ hg clone -r mybranch static-http://localhost:$HGPORT/remote-with-names local-with-names-branch
197 $ hg clone -r mybranch static-http://localhost:$HGPORT/remote-with-names local-with-names-branch
198 adding changesets
198 adding changesets
199 adding manifests
199 adding manifests
200 adding file changes
200 adding file changes
201 added 4 changesets with 4 changes to 2 files
201 added 4 changesets with 4 changes to 2 files
202 new changesets 68986213bd44:0c325bd2b5a7
202 new changesets 68986213bd44:0c325bd2b5a7
203 updating to branch mybranch
203 updating to branch mybranch
204 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
204 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
205
205
206 Clone a specific tag works
206 Clone a specific tag works
207
207
208 $ hg clone -r default-tag static-http://localhost:$HGPORT/remote-with-names local-with-names-tag
208 $ hg clone -r default-tag static-http://localhost:$HGPORT/remote-with-names local-with-names-tag
209 adding changesets
209 adding changesets
210 adding manifests
210 adding manifests
211 adding file changes
211 adding file changes
212 added 2 changesets with 2 changes to 1 files
212 added 2 changesets with 2 changes to 1 files
213 new changesets 68986213bd44:4ee3fcef1c80
213 new changesets 68986213bd44:4ee3fcef1c80
214 updating to branch default
214 updating to branch default
215 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
215 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
216
216
217 $ killdaemons.py
217 $ killdaemons.py
218
218
219 List of files accessed over HTTP:
219 List of files accessed over HTTP:
220
220
221 $ cat server.log | sed -n -e 's|.*GET \(/[^ ]*\).*|\1|p' | sort -u
221 $ cat server.log | sed -n -e 's|.*GET \(/[^ ]*\).*|\1|p' | sort -u
222 /.hg/bookmarks
222 /.hg/bookmarks
223 /.hg/bookmarks.current
223 /.hg/bookmarks.current
224 /.hg/cache/hgtagsfnodes1
224 /.hg/cache/hgtagsfnodes1
225 /.hg/dirstate
226 /.hg/requires
225 /.hg/requires
227 /.hg/store/00changelog.i
226 /.hg/store/00changelog.i
228 /.hg/store/00manifest.i
227 /.hg/store/00manifest.i
229 /.hg/store/data/%7E2ehgsub.i
228 /.hg/store/data/%7E2ehgsub.i
230 /.hg/store/data/%7E2ehgsubstate.i
229 /.hg/store/data/%7E2ehgsubstate.i
231 /.hg/store/data/a.i
230 /.hg/store/data/a.i
232 /notarepo/.hg/00changelog.i
231 /notarepo/.hg/00changelog.i
233 /notarepo/.hg/requires
232 /notarepo/.hg/requires
234 /remote-with-names/.hg/bookmarks
233 /remote-with-names/.hg/bookmarks
235 /remote-with-names/.hg/bookmarks.current
234 /remote-with-names/.hg/bookmarks.current
236 /remote-with-names/.hg/cache/branch2-served
235 /remote-with-names/.hg/cache/branch2-served
237 /remote-with-names/.hg/cache/hgtagsfnodes1
236 /remote-with-names/.hg/cache/hgtagsfnodes1
238 /remote-with-names/.hg/cache/tags2-served
237 /remote-with-names/.hg/cache/tags2-served
239 /remote-with-names/.hg/dirstate
240 /remote-with-names/.hg/localtags
238 /remote-with-names/.hg/localtags
241 /remote-with-names/.hg/requires
239 /remote-with-names/.hg/requires
242 /remote-with-names/.hg/store/00changelog.i
240 /remote-with-names/.hg/store/00changelog.i
243 /remote-with-names/.hg/store/00manifest.i
241 /remote-with-names/.hg/store/00manifest.i
244 /remote-with-names/.hg/store/data/%7E2ehgtags.i
242 /remote-with-names/.hg/store/data/%7E2ehgtags.i
245 /remote-with-names/.hg/store/data/foo.i
243 /remote-with-names/.hg/store/data/foo.i
246 /remote/.hg/bookmarks
244 /remote/.hg/bookmarks
247 /remote/.hg/bookmarks.current
245 /remote/.hg/bookmarks.current
248 /remote/.hg/cache/branch2-base
246 /remote/.hg/cache/branch2-base
249 /remote/.hg/cache/branch2-immutable
247 /remote/.hg/cache/branch2-immutable
250 /remote/.hg/cache/branch2-served
248 /remote/.hg/cache/branch2-served
251 /remote/.hg/cache/hgtagsfnodes1
249 /remote/.hg/cache/hgtagsfnodes1
252 /remote/.hg/cache/rbc-names-v1
250 /remote/.hg/cache/rbc-names-v1
253 /remote/.hg/cache/tags2-served
251 /remote/.hg/cache/tags2-served
254 /remote/.hg/dirstate
255 /remote/.hg/localtags
252 /remote/.hg/localtags
256 /remote/.hg/requires
253 /remote/.hg/requires
257 /remote/.hg/store/00changelog.i
254 /remote/.hg/store/00changelog.i
258 /remote/.hg/store/00manifest.i
255 /remote/.hg/store/00manifest.i
259 /remote/.hg/store/data/%7E2edotfile%20with%20spaces.i
256 /remote/.hg/store/data/%7E2edotfile%20with%20spaces.i
260 /remote/.hg/store/data/%7E2ehgtags.i
257 /remote/.hg/store/data/%7E2ehgtags.i
261 /remote/.hg/store/data/bar.i
258 /remote/.hg/store/data/bar.i
262 /remote/.hg/store/data/quux.i
259 /remote/.hg/store/data/quux.i
263 /remotempty/.hg/bookmarks
260 /remotempty/.hg/bookmarks
264 /remotempty/.hg/bookmarks.current
261 /remotempty/.hg/bookmarks.current
265 /remotempty/.hg/requires
262 /remotempty/.hg/requires
266 /remotempty/.hg/store/00changelog.i
263 /remotempty/.hg/store/00changelog.i
267 /remotempty/.hg/store/00manifest.i
264 /remotempty/.hg/store/00manifest.i
268 /sub/.hg/bookmarks
265 /sub/.hg/bookmarks
269 /sub/.hg/bookmarks.current
266 /sub/.hg/bookmarks.current
270 /sub/.hg/cache/hgtagsfnodes1
267 /sub/.hg/cache/hgtagsfnodes1
271 /sub/.hg/dirstate
272 /sub/.hg/requires
268 /sub/.hg/requires
273 /sub/.hg/store/00changelog.i
269 /sub/.hg/store/00changelog.i
274 /sub/.hg/store/00manifest.i
270 /sub/.hg/store/00manifest.i
275 /sub/.hg/store/data/%7E2ehgtags.i
271 /sub/.hg/store/data/%7E2ehgtags.i
276 /sub/.hg/store/data/test.i
272 /sub/.hg/store/data/test.i
General Comments 0
You need to be logged in to leave comments. Login now