##// END OF EJS Templates
changectx: use unfiltered changelog to access parents of unfiltered revs...
marmoute -
r44203:f9068413 default
parent child Browse files
Show More
@@ -1,3011 +1,3016 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56
56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different classes never compare equal, even for the
        # same revision; missing _rev (e.g. on a bare instance) means unequal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)
93
93
94 def _buildstatusmanifest(self, status):
94 def _buildstatusmanifest(self, status):
95 """Builds a manifest that includes the given status results, if this is
95 """Builds a manifest that includes the given status results, if this is
96 a working copy context. For non-working copy contexts, it just returns
96 a working copy context. For non-working copy contexts, it just returns
97 the normal manifest."""
97 the normal manifest."""
98 return self.manifest()
98 return self.manifest()
99
99
100 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
101 """This internal method provides a way for child objects to override the
101 """This internal method provides a way for child objects to override the
102 match operator.
102 match operator.
103 """
103 """
104 return match
104 return match
105
105
106 def _buildstatus(
106 def _buildstatus(
107 self, other, s, match, listignored, listclean, listunknown
107 self, other, s, match, listignored, listclean, listunknown
108 ):
108 ):
109 """build a status with respect to another context"""
109 """build a status with respect to another context"""
110 # Load earliest manifest first for caching reasons. More specifically,
110 # Load earliest manifest first for caching reasons. More specifically,
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # 1000 and cache it so that when you read 1001, we just need to apply a
113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # delta to what's in the cache. So that's one full reconstruction + one
114 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta application.
115 # delta application.
116 mf2 = None
116 mf2 = None
117 if self.rev() is not None and self.rev() < other.rev():
117 if self.rev() is not None and self.rev() < other.rev():
118 mf2 = self._buildstatusmanifest(s)
118 mf2 = self._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
120 if mf2 is None:
120 if mf2 is None:
121 mf2 = self._buildstatusmanifest(s)
121 mf2 = self._buildstatusmanifest(s)
122
122
123 modified, added = [], []
123 modified, added = [], []
124 removed = []
124 removed = []
125 clean = []
125 clean = []
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deletedset = set(deleted)
127 deletedset = set(deleted)
128 d = mf1.diff(mf2, match=match, clean=listclean)
128 d = mf1.diff(mf2, match=match, clean=listclean)
129 for fn, value in pycompat.iteritems(d):
129 for fn, value in pycompat.iteritems(d):
130 if fn in deletedset:
130 if fn in deletedset:
131 continue
131 continue
132 if value is None:
132 if value is None:
133 clean.append(fn)
133 clean.append(fn)
134 continue
134 continue
135 (node1, flag1), (node2, flag2) = value
135 (node1, flag1), (node2, flag2) = value
136 if node1 is None:
136 if node1 is None:
137 added.append(fn)
137 added.append(fn)
138 elif node2 is None:
138 elif node2 is None:
139 removed.append(fn)
139 removed.append(fn)
140 elif flag1 != flag2:
140 elif flag1 != flag2:
141 modified.append(fn)
141 modified.append(fn)
142 elif node2 not in wdirfilenodeids:
142 elif node2 not in wdirfilenodeids:
143 # When comparing files between two commits, we save time by
143 # When comparing files between two commits, we save time by
144 # not comparing the file contents when the nodeids differ.
144 # not comparing the file contents when the nodeids differ.
145 # Note that this means we incorrectly report a reverted change
145 # Note that this means we incorrectly report a reverted change
146 # to a file as a modification.
146 # to a file as a modification.
147 modified.append(fn)
147 modified.append(fn)
148 elif self[fn].cmp(other[fn]):
148 elif self[fn].cmp(other[fn]):
149 modified.append(fn)
149 modified.append(fn)
150 else:
150 else:
151 clean.append(fn)
151 clean.append(fn)
152
152
153 if removed:
153 if removed:
154 # need to filter files if they are already reported as removed
154 # need to filter files if they are already reported as removed
155 unknown = [
155 unknown = [
156 fn
156 fn
157 for fn in unknown
157 for fn in unknown
158 if fn not in mf1 and (not match or match(fn))
158 if fn not in mf1 and (not match or match(fn))
159 ]
159 ]
160 ignored = [
160 ignored = [
161 fn
161 fn
162 for fn in ignored
162 for fn in ignored
163 if fn not in mf1 and (not match or match(fn))
163 if fn not in mf1 and (not match or match(fn))
164 ]
164 ]
165 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
166 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
167
167
168 return scmutil.status(
168 return scmutil.status(
169 modified, added, removed, deleted, unknown, ignored, clean
169 modified, added, removed, deleted, unknown, ignored, clean
170 )
170 )
171
171
172 @propertycache
172 @propertycache
173 def substate(self):
173 def substate(self):
174 return subrepoutil.state(self, self._repo.ui)
174 return subrepoutil.state(self, self._repo.ui)
175
175
176 def subrev(self, subpath):
176 def subrev(self, subpath):
177 return self.substate[subpath][1]
177 return self.substate[subpath][1]
178
178
179 def rev(self):
179 def rev(self):
180 return self._rev
180 return self._rev
181
181
182 def node(self):
182 def node(self):
183 return self._node
183 return self._node
184
184
185 def hex(self):
185 def hex(self):
186 return hex(self.node())
186 return hex(self.node())
187
187
188 def manifest(self):
188 def manifest(self):
189 return self._manifest
189 return self._manifest
190
190
191 def manifestctx(self):
191 def manifestctx(self):
192 return self._manifestctx
192 return self._manifestctx
193
193
194 def repo(self):
194 def repo(self):
195 return self._repo
195 return self._repo
196
196
197 def phasestr(self):
197 def phasestr(self):
198 return phases.phasenames[self.phase()]
198 return phases.phasenames[self.phase()]
199
199
200 def mutable(self):
200 def mutable(self):
201 return self.phase() > phases.public
201 return self.phase() > phases.public
202
202
203 def matchfileset(self, expr, badfn=None):
203 def matchfileset(self, expr, badfn=None):
204 return fileset.match(self, expr, badfn=badfn)
204 return fileset.match(self, expr, badfn=badfn)
205
205
206 def obsolete(self):
206 def obsolete(self):
207 """True if the changeset is obsolete"""
207 """True if the changeset is obsolete"""
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209
209
210 def extinct(self):
210 def extinct(self):
211 """True if the changeset is extinct"""
211 """True if the changeset is extinct"""
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213
213
214 def orphan(self):
214 def orphan(self):
215 """True if the changeset is not obsolete, but its ancestor is"""
215 """True if the changeset is not obsolete, but its ancestor is"""
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217
217
218 def phasedivergent(self):
218 def phasedivergent(self):
219 """True if the changeset tries to be a successor of a public changeset
219 """True if the changeset tries to be a successor of a public changeset
220
220
221 Only non-public and non-obsolete changesets may be phase-divergent.
221 Only non-public and non-obsolete changesets may be phase-divergent.
222 """
222 """
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224
224
225 def contentdivergent(self):
225 def contentdivergent(self):
226 """Is a successor of a changeset with multiple possible successor sets
226 """Is a successor of a changeset with multiple possible successor sets
227
227
228 Only non-public and non-obsolete changesets may be content-divergent.
228 Only non-public and non-obsolete changesets may be content-divergent.
229 """
229 """
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231
231
232 def isunstable(self):
232 def isunstable(self):
233 """True if the changeset is either orphan, phase-divergent or
233 """True if the changeset is either orphan, phase-divergent or
234 content-divergent"""
234 content-divergent"""
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236
236
237 def instabilities(self):
237 def instabilities(self):
238 """return the list of instabilities affecting this changeset.
238 """return the list of instabilities affecting this changeset.
239
239
240 Instabilities are returned as strings. possible values are:
240 Instabilities are returned as strings. possible values are:
241 - orphan,
241 - orphan,
242 - phase-divergent,
242 - phase-divergent,
243 - content-divergent.
243 - content-divergent.
244 """
244 """
245 instabilities = []
245 instabilities = []
246 if self.orphan():
246 if self.orphan():
247 instabilities.append(b'orphan')
247 instabilities.append(b'orphan')
248 if self.phasedivergent():
248 if self.phasedivergent():
249 instabilities.append(b'phase-divergent')
249 instabilities.append(b'phase-divergent')
250 if self.contentdivergent():
250 if self.contentdivergent():
251 instabilities.append(b'content-divergent')
251 instabilities.append(b'content-divergent')
252 return instabilities
252 return instabilities
253
253
254 def parents(self):
254 def parents(self):
255 """return contexts for each parent changeset"""
255 """return contexts for each parent changeset"""
256 return self._parents
256 return self._parents
257
257
258 def p1(self):
258 def p1(self):
259 return self._parents[0]
259 return self._parents[0]
260
260
261 def p2(self):
261 def p2(self):
262 parents = self._parents
262 parents = self._parents
263 if len(parents) == 2:
263 if len(parents) == 2:
264 return parents[1]
264 return parents[1]
265 return self._repo[nullrev]
265 return self._repo[nullrev]
266
266
267 def _fileinfo(self, path):
267 def _fileinfo(self, path):
268 if '_manifest' in self.__dict__:
268 if '_manifest' in self.__dict__:
269 try:
269 try:
270 return self._manifest[path], self._manifest.flags(path)
270 return self._manifest[path], self._manifest.flags(path)
271 except KeyError:
271 except KeyError:
272 raise error.ManifestLookupError(
272 raise error.ManifestLookupError(
273 self._node, path, _(b'not found in manifest')
273 self._node, path, _(b'not found in manifest')
274 )
274 )
275 if '_manifestdelta' in self.__dict__ or path in self.files():
275 if '_manifestdelta' in self.__dict__ or path in self.files():
276 if path in self._manifestdelta:
276 if path in self._manifestdelta:
277 return (
277 return (
278 self._manifestdelta[path],
278 self._manifestdelta[path],
279 self._manifestdelta.flags(path),
279 self._manifestdelta.flags(path),
280 )
280 )
281 mfl = self._repo.manifestlog
281 mfl = self._repo.manifestlog
282 try:
282 try:
283 node, flag = mfl[self._changeset.manifest].find(path)
283 node, flag = mfl[self._changeset.manifest].find(path)
284 except KeyError:
284 except KeyError:
285 raise error.ManifestLookupError(
285 raise error.ManifestLookupError(
286 self._node, path, _(b'not found in manifest')
286 self._node, path, _(b'not found in manifest')
287 )
287 )
288
288
289 return node, flag
289 return node, flag
290
290
291 def filenode(self, path):
291 def filenode(self, path):
292 return self._fileinfo(path)[0]
292 return self._fileinfo(path)[0]
293
293
294 def flags(self, path):
294 def flags(self, path):
295 try:
295 try:
296 return self._fileinfo(path)[1]
296 return self._fileinfo(path)[1]
297 except error.LookupError:
297 except error.LookupError:
298 return b''
298 return b''
299
299
300 @propertycache
300 @propertycache
301 def _copies(self):
301 def _copies(self):
302 return copies.computechangesetcopies(self)
302 return copies.computechangesetcopies(self)
303
303
304 def p1copies(self):
304 def p1copies(self):
305 return self._copies[0]
305 return self._copies[0]
306
306
307 def p2copies(self):
307 def p2copies(self):
308 return self._copies[1]
308 return self._copies[1]
309
309
310 def sub(self, path, allowcreate=True):
310 def sub(self, path, allowcreate=True):
311 '''return a subrepo for the stored revision of path, never wdir()'''
311 '''return a subrepo for the stored revision of path, never wdir()'''
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313
313
314 def nullsub(self, path, pctx):
314 def nullsub(self, path, pctx):
315 return subrepo.nullsubrepo(self, path, pctx)
315 return subrepo.nullsubrepo(self, path, pctx)
316
316
317 def workingsub(self, path):
317 def workingsub(self, path):
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 context.
319 context.
320 '''
320 '''
321 return subrepo.subrepo(self, path, allowwdir=True)
321 return subrepo.subrepo(self, path, allowwdir=True)
322
322
323 def match(
323 def match(
324 self,
324 self,
325 pats=None,
325 pats=None,
326 include=None,
326 include=None,
327 exclude=None,
327 exclude=None,
328 default=b'glob',
328 default=b'glob',
329 listsubrepos=False,
329 listsubrepos=False,
330 badfn=None,
330 badfn=None,
331 ):
331 ):
332 r = self._repo
332 r = self._repo
333 return matchmod.match(
333 return matchmod.match(
334 r.root,
334 r.root,
335 r.getcwd(),
335 r.getcwd(),
336 pats,
336 pats,
337 include,
337 include,
338 exclude,
338 exclude,
339 default,
339 default,
340 auditor=r.nofsauditor,
340 auditor=r.nofsauditor,
341 ctx=self,
341 ctx=self,
342 listsubrepos=listsubrepos,
342 listsubrepos=listsubrepos,
343 badfn=badfn,
343 badfn=badfn,
344 )
344 )
345
345
346 def diff(
346 def diff(
347 self,
347 self,
348 ctx2=None,
348 ctx2=None,
349 match=None,
349 match=None,
350 changes=None,
350 changes=None,
351 opts=None,
351 opts=None,
352 losedatafn=None,
352 losedatafn=None,
353 pathfn=None,
353 pathfn=None,
354 copy=None,
354 copy=None,
355 copysourcematch=None,
355 copysourcematch=None,
356 hunksfilterfn=None,
356 hunksfilterfn=None,
357 ):
357 ):
358 """Returns a diff generator for the given contexts and matcher"""
358 """Returns a diff generator for the given contexts and matcher"""
359 if ctx2 is None:
359 if ctx2 is None:
360 ctx2 = self.p1()
360 ctx2 = self.p1()
361 if ctx2 is not None:
361 if ctx2 is not None:
362 ctx2 = self._repo[ctx2]
362 ctx2 = self._repo[ctx2]
363 return patch.diff(
363 return patch.diff(
364 self._repo,
364 self._repo,
365 ctx2,
365 ctx2,
366 self,
366 self,
367 match=match,
367 match=match,
368 changes=changes,
368 changes=changes,
369 opts=opts,
369 opts=opts,
370 losedatafn=losedatafn,
370 losedatafn=losedatafn,
371 pathfn=pathfn,
371 pathfn=pathfn,
372 copy=copy,
372 copy=copy,
373 copysourcematch=copysourcematch,
373 copysourcematch=copysourcematch,
374 hunksfilterfn=hunksfilterfn,
374 hunksfilterfn=hunksfilterfn,
375 )
375 )
376
376
377 def dirs(self):
377 def dirs(self):
378 return self._manifest.dirs()
378 return self._manifest.dirs()
379
379
380 def hasdir(self, dir):
380 def hasdir(self, dir):
381 return self._manifest.hasdir(dir)
381 return self._manifest.hasdir(dir)
382
382
383 def status(
383 def status(
384 self,
384 self,
385 other=None,
385 other=None,
386 match=None,
386 match=None,
387 listignored=False,
387 listignored=False,
388 listclean=False,
388 listclean=False,
389 listunknown=False,
389 listunknown=False,
390 listsubrepos=False,
390 listsubrepos=False,
391 ):
391 ):
392 """return status of files between two nodes or node and working
392 """return status of files between two nodes or node and working
393 directory.
393 directory.
394
394
395 If other is None, compare this node with working directory.
395 If other is None, compare this node with working directory.
396
396
397 returns (modified, added, removed, deleted, unknown, ignored, clean)
397 returns (modified, added, removed, deleted, unknown, ignored, clean)
398 """
398 """
399
399
400 ctx1 = self
400 ctx1 = self
401 ctx2 = self._repo[other]
401 ctx2 = self._repo[other]
402
402
403 # This next code block is, admittedly, fragile logic that tests for
403 # This next code block is, admittedly, fragile logic that tests for
404 # reversing the contexts and wouldn't need to exist if it weren't for
404 # reversing the contexts and wouldn't need to exist if it weren't for
405 # the fast (and common) code path of comparing the working directory
405 # the fast (and common) code path of comparing the working directory
406 # with its first parent.
406 # with its first parent.
407 #
407 #
408 # What we're aiming for here is the ability to call:
408 # What we're aiming for here is the ability to call:
409 #
409 #
410 # workingctx.status(parentctx)
410 # workingctx.status(parentctx)
411 #
411 #
412 # If we always built the manifest for each context and compared those,
412 # If we always built the manifest for each context and compared those,
413 # then we'd be done. But the special case of the above call means we
413 # then we'd be done. But the special case of the above call means we
414 # just copy the manifest of the parent.
414 # just copy the manifest of the parent.
415 reversed = False
415 reversed = False
416 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
416 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
417 reversed = True
417 reversed = True
418 ctx1, ctx2 = ctx2, ctx1
418 ctx1, ctx2 = ctx2, ctx1
419
419
420 match = self._repo.narrowmatch(match)
420 match = self._repo.narrowmatch(match)
421 match = ctx2._matchstatus(ctx1, match)
421 match = ctx2._matchstatus(ctx1, match)
422 r = scmutil.status([], [], [], [], [], [], [])
422 r = scmutil.status([], [], [], [], [], [], [])
423 r = ctx2._buildstatus(
423 r = ctx2._buildstatus(
424 ctx1, r, match, listignored, listclean, listunknown
424 ctx1, r, match, listignored, listclean, listunknown
425 )
425 )
426
426
427 if reversed:
427 if reversed:
428 # Reverse added and removed. Clear deleted, unknown and ignored as
428 # Reverse added and removed. Clear deleted, unknown and ignored as
429 # these make no sense to reverse.
429 # these make no sense to reverse.
430 r = scmutil.status(
430 r = scmutil.status(
431 r.modified, r.removed, r.added, [], [], [], r.clean
431 r.modified, r.removed, r.added, [], [], [], r.clean
432 )
432 )
433
433
434 if listsubrepos:
434 if listsubrepos:
435 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
435 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
436 try:
436 try:
437 rev2 = ctx2.subrev(subpath)
437 rev2 = ctx2.subrev(subpath)
438 except KeyError:
438 except KeyError:
439 # A subrepo that existed in node1 was deleted between
439 # A subrepo that existed in node1 was deleted between
440 # node1 and node2 (inclusive). Thus, ctx2's substate
440 # node1 and node2 (inclusive). Thus, ctx2's substate
441 # won't contain that subpath. The best we can do ignore it.
441 # won't contain that subpath. The best we can do ignore it.
442 rev2 = None
442 rev2 = None
443 submatch = matchmod.subdirmatcher(subpath, match)
443 submatch = matchmod.subdirmatcher(subpath, match)
444 s = sub.status(
444 s = sub.status(
445 rev2,
445 rev2,
446 match=submatch,
446 match=submatch,
447 ignored=listignored,
447 ignored=listignored,
448 clean=listclean,
448 clean=listclean,
449 unknown=listunknown,
449 unknown=listunknown,
450 listsubrepos=True,
450 listsubrepos=True,
451 )
451 )
452 for k in (
452 for k in (
453 'modified',
453 'modified',
454 'added',
454 'added',
455 'removed',
455 'removed',
456 'deleted',
456 'deleted',
457 'unknown',
457 'unknown',
458 'ignored',
458 'ignored',
459 'clean',
459 'clean',
460 ):
460 ):
461 rfiles, sfiles = getattr(r, k), getattr(s, k)
461 rfiles, sfiles = getattr(r, k), getattr(s, k)
462 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
462 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
463
463
464 r.modified.sort()
464 r.modified.sort()
465 r.added.sort()
465 r.added.sort()
466 r.removed.sort()
466 r.removed.sort()
467 r.deleted.sort()
467 r.deleted.sort()
468 r.unknown.sort()
468 r.unknown.sort()
469 r.ignored.sort()
469 r.ignored.sort()
470 r.clean.sort()
470 r.clean.sort()
471
471
472 return r
472 return r
473
473
474
474
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog
        # must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # A bare instance may lack _rev; fall back to identity hashing.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__
502
502
503 @propertycache
503 @propertycache
504 def _changeset(self):
504 def _changeset(self):
505 if self._maybe_filtered:
505 if self._maybe_filtered:
506 repo = self._repo
506 repo = self._repo
507 else:
507 else:
508 repo = self._repo.unfiltered()
508 repo = self._repo.unfiltered()
509 return repo.changelog.changelogrevision(self.rev())
509 return repo.changelog.changelogrevision(self.rev())
510
510
511 @propertycache
511 @propertycache
512 def _manifest(self):
512 def _manifest(self):
513 return self._manifestctx.read()
513 return self._manifestctx.read()
514
514
515 @property
515 @property
516 def _manifestctx(self):
516 def _manifestctx(self):
517 return self._repo.manifestlog[self._changeset.manifest]
517 return self._repo.manifestlog[self._changeset.manifest]
518
518
519 @propertycache
519 @propertycache
520 def _manifestdelta(self):
520 def _manifestdelta(self):
521 return self._manifestctx.readdelta()
521 return self._manifestctx.readdelta()
522
522
523 @propertycache
523 @propertycache
524 def _parents(self):
524 def _parents(self):
525 repo = self._repo
525 repo = self._repo
526 p1, p2 = repo.changelog.parentrevs(self._rev)
526 if self._maybe_filtered:
527 cl = repo.changelog
528 else:
529 cl = repo.unfiltered().changelog
530
531 p1, p2 = cl.parentrevs(self._rev)
527 if p2 == nullrev:
532 if p2 == nullrev:
528 return [repo[p1]]
533 return [repo[p1]]
529 return [repo[p1], repo[p2]]
534 return [repo[p1], repo[p2]]
530
535
531 def changeset(self):
536 def changeset(self):
532 c = self._changeset
537 c = self._changeset
533 return (
538 return (
534 c.manifest,
539 c.manifest,
535 c.user,
540 c.user,
536 c.date,
541 c.date,
537 c.files,
542 c.files,
538 c.description,
543 c.description,
539 c.extra,
544 c.extra,
540 )
545 )
541
546
542 def manifestnode(self):
547 def manifestnode(self):
543 return self._changeset.manifest
548 return self._changeset.manifest
544
549
545 def user(self):
550 def user(self):
546 return self._changeset.user
551 return self._changeset.user
547
552
548 def date(self):
553 def date(self):
549 return self._changeset.date
554 return self._changeset.date
550
555
551 def files(self):
556 def files(self):
552 return self._changeset.files
557 return self._changeset.files
553
558
554 def filesmodified(self):
559 def filesmodified(self):
555 modified = set(self.files())
560 modified = set(self.files())
556 modified.difference_update(self.filesadded())
561 modified.difference_update(self.filesadded())
557 modified.difference_update(self.filesremoved())
562 modified.difference_update(self.filesremoved())
558 return sorted(modified)
563 return sorted(modified)
559
564
560 def filesadded(self):
565 def filesadded(self):
561 filesadded = self._changeset.filesadded
566 filesadded = self._changeset.filesadded
562 compute_on_none = True
567 compute_on_none = True
563 if self._repo.filecopiesmode == b'changeset-sidedata':
568 if self._repo.filecopiesmode == b'changeset-sidedata':
564 compute_on_none = False
569 compute_on_none = False
565 else:
570 else:
566 source = self._repo.ui.config(b'experimental', b'copies.read-from')
571 source = self._repo.ui.config(b'experimental', b'copies.read-from')
567 if source == b'changeset-only':
572 if source == b'changeset-only':
568 compute_on_none = False
573 compute_on_none = False
569 elif source != b'compatibility':
574 elif source != b'compatibility':
570 # filelog mode, ignore any changelog content
575 # filelog mode, ignore any changelog content
571 filesadded = None
576 filesadded = None
572 if filesadded is None:
577 if filesadded is None:
573 if compute_on_none:
578 if compute_on_none:
574 filesadded = copies.computechangesetfilesadded(self)
579 filesadded = copies.computechangesetfilesadded(self)
575 else:
580 else:
576 filesadded = []
581 filesadded = []
577 return filesadded
582 return filesadded
578
583
579 def filesremoved(self):
584 def filesremoved(self):
580 filesremoved = self._changeset.filesremoved
585 filesremoved = self._changeset.filesremoved
581 compute_on_none = True
586 compute_on_none = True
582 if self._repo.filecopiesmode == b'changeset-sidedata':
587 if self._repo.filecopiesmode == b'changeset-sidedata':
583 compute_on_none = False
588 compute_on_none = False
584 else:
589 else:
585 source = self._repo.ui.config(b'experimental', b'copies.read-from')
590 source = self._repo.ui.config(b'experimental', b'copies.read-from')
586 if source == b'changeset-only':
591 if source == b'changeset-only':
587 compute_on_none = False
592 compute_on_none = False
588 elif source != b'compatibility':
593 elif source != b'compatibility':
589 # filelog mode, ignore any changelog content
594 # filelog mode, ignore any changelog content
590 filesremoved = None
595 filesremoved = None
591 if filesremoved is None:
596 if filesremoved is None:
592 if compute_on_none:
597 if compute_on_none:
593 filesremoved = copies.computechangesetfilesremoved(self)
598 filesremoved = copies.computechangesetfilesremoved(self)
594 else:
599 else:
595 filesremoved = []
600 filesremoved = []
596 return filesremoved
601 return filesremoved
597
602
    @propertycache
    def _copies(self):
        """Tuple ``(p1copies, p2copies)`` of copy dicts for this changeset.

        Each dict maps destination file name to source file name.  The
        data source follows the same selection rules as filesadded():
        sidedata and ``changeset-only`` modes never fall back to the
        filelog computation, ``compatibility`` falls back only when the
        changeset recorded nothing, and filelog mode always recomputes.
        """
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # delegate to the filelog-based computation in the parent class
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
632
637
    def description(self):
        """Return the commit message of this changeset."""
        return self._changeset.description
635
640
    def branch(self):
        """Return the branch name, converted to the local encoding."""
        return encoding.tolocal(self._changeset.extra.get(b"branch"))
638
643
    def closesbranch(self):
        """True if this changeset closes its branch."""
        return b'close' in self._changeset.extra
641
646
    def extra(self):
        """Return the dict of extra metadata stored with the changeset."""
        return self._changeset.extra
645
650
    def tags(self):
        """Return a list of byte tag names pointing at this changeset."""
        return self._repo.nodetags(self._node)
649
654
    def bookmarks(self):
        """Return a list of byte bookmark names pointing at this changeset."""
        return self._repo.nodebookmarks(self._node)
653
658
    def phase(self):
        """Return the phase of this changeset."""
        return self._repo._phasecache.phase(self._repo, self._rev)
656
661
    def hidden(self):
        """True if this changeset is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')
659
664
    def isinmemory(self):
        """True for in-memory contexts; always False for stored changesets."""
        return False
662
667
663 def children(self):
668 def children(self):
664 """return list of changectx contexts for each child changeset.
669 """return list of changectx contexts for each child changeset.
665
670
666 This returns only the immediate child changesets. Use descendants() to
671 This returns only the immediate child changesets. Use descendants() to
667 recursively walk children.
672 recursively walk children.
668 """
673 """
669 c = self._repo.changelog.children(self._node)
674 c = self._repo.changelog.children(self._node)
670 return [self._repo[x] for x in c]
675 return [self._repo[x] for x in c]
671
676
672 def ancestors(self):
677 def ancestors(self):
673 for a in self._repo.changelog.ancestors([self._rev]):
678 for a in self._repo.changelog.ancestors([self._rev]):
674 yield self._repo[a]
679 yield self._repo[a]
675
680
676 def descendants(self):
681 def descendants(self):
677 """Recursively yield all children of the changeset.
682 """Recursively yield all children of the changeset.
678
683
679 For just the immediate children, use children()
684 For just the immediate children, use children()
680 """
685 """
681 for d in self._repo.changelog.descendants([self._rev]):
686 for d in self._repo.changelog.descendants([self._rev]):
682 yield self._repo[d]
687 yield self._repo[d]
683
688
    def filectx(self, path, fileid=None, filelog=None):
        """Return a file context for *path* within this changeset.

        When *fileid* is not given, it is looked up in this changeset's
        manifest.  A pre-opened *filelog* may be passed through to the
        created context.
        """
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
691
696
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs: a workingctx has no node, use its parent's
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: let the revlog decide
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
736
741
    def isancestorof(self, other):
        """True if this changeset is an ancestor of *other*."""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
740
745
741 def walk(self, match):
746 def walk(self, match):
742 '''Generates matching file names.'''
747 '''Generates matching file names.'''
743
748
744 # Wrap match.bad method to have message with nodeid
749 # Wrap match.bad method to have message with nodeid
745 def bad(fn, msg):
750 def bad(fn, msg):
746 # The manifest doesn't know about subrepos, so don't complain about
751 # The manifest doesn't know about subrepos, so don't complain about
747 # paths into valid subrepos.
752 # paths into valid subrepos.
748 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
753 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
749 return
754 return
750 match.bad(fn, _(b'no such file in rev %s') % self)
755 match.bad(fn, _(b'no such file in rev %s') % self)
751
756
752 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
757 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
753 return self._manifest.walk(m)
758 return self._manifest.walk(m)
754
759
    def matches(self, match):
        """Alias of walk(): generate matching file names."""
        return self.walk(match)
757
762
758
763
759 class basefilectx(object):
764 class basefilectx(object):
760 """A filecontext object represents the common logic for its children:
765 """A filecontext object represents the common logic for its children:
761 filectx: read-only access to a filerevision that is already present
766 filectx: read-only access to a filerevision that is already present
762 in the repo,
767 in the repo,
763 workingfilectx: a filecontext that represents files from the working
768 workingfilectx: a filecontext that represents files from the working
764 directory,
769 directory,
765 memfilectx: a filecontext that represents files in-memory,
770 memfilectx: a filecontext that represents files in-memory,
766 """
771 """
767
772
    @propertycache
    def _filelog(self):
        # lazily open the filelog for this file's path
        return self._repo.file(self._path)
771
776
    @propertycache
    def _changeid(self):
        """Changelog revision this file context is associated with."""
        if '_changectx' in self.__dict__:
            # an explicit changeset association takes precedence
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the (possibly aliased) linkrev
            return self._filelog.linkrev(self._filerev)
782
787
    @propertycache
    def _filenode(self):
        """Filelog node id; raises LookupError when the file is missing."""
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
789
794
    @propertycache
    def _filerev(self):
        # filelog revision number corresponding to _filenode
        return self._filelog.rev(self._filenode)
793
798
    @propertycache
    def _repopath(self):
        # path of the file relative to the repository root
        return self._path
797
802
    def __nonzero__(self):
        """True if the file exists in the associated changeset."""
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__
807
812
    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset lookup failed: still show the path
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
815
820
    def __repr__(self):
        # e.g. "<filectx foo.txt@abcdef123456>"
        return "<%s %s>" % (type(self).__name__, str(self))
818
823
    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not fully initialized: fall back to object identity
            return id(self)
824
829
825 def __eq__(self, other):
830 def __eq__(self, other):
826 try:
831 try:
827 return (
832 return (
828 type(self) == type(other)
833 type(self) == type(other)
829 and self._path == other._path
834 and self._path == other._path
830 and self._filenode == other._filenode
835 and self._filenode == other._filenode
831 )
836 )
832 except AttributeError:
837 except AttributeError:
833 return False
838 return False
834
839
    def __ne__(self, other):
        # defined in terms of __eq__
        return not (self == other)
837
842
    def filerev(self):
        """Return the filelog revision number of this file revision."""
        return self._filerev
840
845
    def filenode(self):
        """Return the filelog node id of this file revision."""
        return self._filenode
843
848
    @propertycache
    def _flags(self):
        # manifest flags for this path (checked by isexec()/islink())
        return self._changectx.flags(self._path)
847
852
    def flags(self):
        """Return the manifest flags recorded for this file."""
        return self._flags
850
855
    def filelog(self):
        """Return the filelog holding this file's history."""
        return self._filelog
853
858
    def rev(self):
        """Return the changelog revision associated with this context."""
        return self._changeid
856
861
    def linkrev(self):
        """Return the raw linkrev stored in the filelog.

        May differ from introrev() when the same file revision is reused
        by several changesets ('linkrev-shadowing').
        """
        return self._filelog.linkrev(self._filerev)
859
864
    def node(self):
        """Return the node id of the associated changeset."""
        return self._changectx.node()
862
867
    def hex(self):
        """Return the hex node id of the associated changeset."""
        return self._changectx.hex()
865
870
    def user(self):
        """Return the user of the associated changeset."""
        return self._changectx.user()
868
873
    def date(self):
        """Return the date of the associated changeset."""
        return self._changectx.date()
871
876
    def files(self):
        """Return the files touched by the associated changeset."""
        return self._changectx.files()
874
879
    def description(self):
        """Return the commit message of the associated changeset."""
        return self._changectx.description()
877
882
    def branch(self):
        """Return the branch of the associated changeset."""
        return self._changectx.branch()
880
885
    def extra(self):
        """Return the extra metadata of the associated changeset."""
        return self._changectx.extra()
883
888
    def phase(self):
        """Return the phase of the associated changeset."""
        return self._changectx.phase()
886
891
    def phasestr(self):
        """Return the phase name of the associated changeset."""
        return self._changectx.phasestr()
889
894
    def obsolete(self):
        """Return whether the associated changeset is obsolete."""
        return self._changectx.obsolete()
892
897
    def instabilities(self):
        """Return the instabilities of the associated changeset."""
        return self._changectx.instabilities()
895
900
    def manifest(self):
        """Return the manifest of the associated changeset."""
        return self._changectx.manifest()
898
903
    def changectx(self):
        """Return the changeset context this file context belongs to."""
        return self._changectx
901
906
    def renamed(self):
        """Return the copy/rename record for this file, or a false value."""
        return self._copied
904
909
    def copysource(self):
        """Return the path this file was copied from, or a false value."""
        return self._copied and self._copied[0]
907
912
    def repo(self):
        """Return the repository this context is attached to."""
        return self._repo
910
915
    def size(self):
        """Return the size of the file data in bytes."""
        return len(self.data())
913
918
    def path(self):
        """Return the repository-relative path of this file."""
        return self._path
916
921
    def isbinary(self):
        """True if the file content looks binary.

        Unreadable content (IOError) is reported as not binary.
        """
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
922
927
    def isexec(self):
        """True if the file carries the executable flag."""
        return b'x' in self.flags()
925
930
    def islink(self):
        """True if the file is a symbolic link."""
        return b'l' in self.flags()
928
933
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts.
        This is expected to be True for all subclasses of basectx.
        """
        return False
935
940
    # Subclasses with a comparison that must take precedence over a plain
    # filectx's set this to True (see the dispatch at the top of cmp()).
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            # let the other side drive the comparison
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() == fctx.size():
            # size() matches: need to compare content
            return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
966
971
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        # use the unfiltered changelog: a linkrev may point at a revision
        # hidden from the current view
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # the linkrev is the source revision itself: nothing to adjust
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # floor reached without finding the introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if a manifest uses a buggy file revision (not a child of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1019
1024
1020 def isintroducedafter(self, changelogrev):
1025 def isintroducedafter(self, changelogrev):
1021 """True if a filectx has been introduced after a given floor revision
1026 """True if a filectx has been introduced after a given floor revision
1022 """
1027 """
1023 if self.linkrev() >= changelogrev:
1028 if self.linkrev() >= changelogrev:
1024 return True
1029 return True
1025 introrev = self._introrev(stoprev=changelogrev)
1030 introrev = self._introrev(stoprev=changelogrev)
1026 if introrev is None:
1031 if introrev is None:
1027 return False
1032 return False
1028 return introrev >= changelogrev
1033 return introrev >= changelogrev
1029
1034
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account
        the changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1040
1045
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no changeset association at all: trust the raw linkrev
            return self.linkrev()
1069
1074
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already associated with the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)
1077
1082
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1093
1098
1094 def parents(self):
1099 def parents(self):
1095 _path = self._path
1100 _path = self._path
1096 fl = self._filelog
1101 fl = self._filelog
1097 parents = self._filelog.parents(self._filenode)
1102 parents = self._filelog.parents(self._filenode)
1098 pl = [(_path, node, fl) for node in parents if node != nullid]
1103 pl = [(_path, node, fl) for node in parents if node != nullid]
1099
1104
1100 r = fl.renamed(self._filenode)
1105 r = fl.renamed(self._filenode)
1101 if r:
1106 if r:
1102 # - In the simple rename case, both parent are nullid, pl is empty.
1107 # - In the simple rename case, both parent are nullid, pl is empty.
1103 # - In case of merge, only one of the parent is null id and should
1108 # - In case of merge, only one of the parent is null id and should
1104 # be replaced with the rename information. This parent is -always-
1109 # be replaced with the rename information. This parent is -always-
1105 # the first one.
1110 # the first one.
1106 #
1111 #
1107 # As null id have always been filtered out in the previous list
1112 # As null id have always been filtered out in the previous list
1108 # comprehension, inserting to 0 will always result in "replacing
1113 # comprehension, inserting to 0 will always result in "replacing
1109 # first nullid parent with rename information.
1114 # first nullid parent with rename information.
1110 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1115 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1111
1116
1112 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1117 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1113
1118
    def p1(self):
        """Return the first parent file context."""
        return self.parents()[0]
1116
1121
1117 def p2(self):
1122 def p2(self):
1118 p = self.parents()
1123 p = self.parents()
1119 if len(p) == 2:
1124 if len(p) == 2:
1120 return p[1]
1125 return p[1]
1121 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1126 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1122
1127
1123 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1128 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1124 """Returns a list of annotateline objects for each line in the file
1129 """Returns a list of annotateline objects for each line in the file
1125
1130
1126 - line.fctx is the filectx of the node where that line was last changed
1131 - line.fctx is the filectx of the node where that line was last changed
1127 - line.lineno is the line number at the first appearance in the managed
1132 - line.lineno is the line number at the first appearance in the managed
1128 file
1133 file
1129 - line.text is the data on that line (including newline character)
1134 - line.text is the data on that line (including newline character)
1130 """
1135 """
1131 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1136 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1132
1137
1133 def parents(f):
1138 def parents(f):
1134 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1139 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1135 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1140 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1136 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1141 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1137 # isn't an ancestor of the srcrev.
1142 # isn't an ancestor of the srcrev.
1138 f._changeid
1143 f._changeid
1139 pl = f.parents()
1144 pl = f.parents()
1140
1145
1141 # Don't return renamed parents if we aren't following.
1146 # Don't return renamed parents if we aren't following.
1142 if not follow:
1147 if not follow:
1143 pl = [p for p in pl if p.path() == f.path()]
1148 pl = [p for p in pl if p.path() == f.path()]
1144
1149
1145 # renamed filectx won't have a filelog yet, so set it
1150 # renamed filectx won't have a filelog yet, so set it
1146 # from the cache to save time
1151 # from the cache to save time
1147 for p in pl:
1152 for p in pl:
1148 if not '_filelog' in p.__dict__:
1153 if not '_filelog' in p.__dict__:
1149 p._filelog = getlog(p.path())
1154 p._filelog = getlog(p.path())
1150
1155
1151 return pl
1156 return pl
1152
1157
1153 # use linkrev to find the first changeset where self appeared
1158 # use linkrev to find the first changeset where self appeared
1154 base = self.introfilectx()
1159 base = self.introfilectx()
1155 if getattr(base, '_ancestrycontext', None) is None:
1160 if getattr(base, '_ancestrycontext', None) is None:
1156 cl = self._repo.changelog
1161 cl = self._repo.changelog
1157 if base.rev() is None:
1162 if base.rev() is None:
1158 # wctx is not inclusive, but works because _ancestrycontext
1163 # wctx is not inclusive, but works because _ancestrycontext
1159 # is used to test filelog revisions
1164 # is used to test filelog revisions
1160 ac = cl.ancestors(
1165 ac = cl.ancestors(
1161 [p.rev() for p in base.parents()], inclusive=True
1166 [p.rev() for p in base.parents()], inclusive=True
1162 )
1167 )
1163 else:
1168 else:
1164 ac = cl.ancestors([base.rev()], inclusive=True)
1169 ac = cl.ancestors([base.rev()], inclusive=True)
1165 base._ancestrycontext = ac
1170 base._ancestrycontext = ac
1166
1171
1167 return dagop.annotate(
1172 return dagop.annotate(
1168 base, parents, skiprevs=skiprevs, diffopts=diffopts
1173 base, parents, skiprevs=skiprevs, diffopts=diffopts
1169 )
1174 )
1170
1175
1171 def ancestors(self, followfirst=False):
1176 def ancestors(self, followfirst=False):
1172 visit = {}
1177 visit = {}
1173 c = self
1178 c = self
1174 if followfirst:
1179 if followfirst:
1175 cut = 1
1180 cut = 1
1176 else:
1181 else:
1177 cut = None
1182 cut = None
1178
1183
1179 while True:
1184 while True:
1180 for parent in c.parents()[:cut]:
1185 for parent in c.parents()[:cut]:
1181 visit[(parent.linkrev(), parent.filenode())] = parent
1186 visit[(parent.linkrev(), parent.filenode())] = parent
1182 if not visit:
1187 if not visit:
1183 break
1188 break
1184 c = visit.pop(max(visit))
1189 c = visit.pop(max(visit))
1185 yield c
1190 yield c
1186
1191
1187 def decodeddata(self):
1192 def decodeddata(self):
1188 """Returns `data()` after running repository decoding filters.
1193 """Returns `data()` after running repository decoding filters.
1189
1194
1190 This is often equivalent to how the data would be expressed on disk.
1195 This is often equivalent to how the data would be expressed on disk.
1191 """
1196 """
1192 return self._repo.wwritedata(self.path(), self.data())
1197 return self._repo.wwritedata(self.path(), self.data())
1193
1198
1194
1199
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of identifying the revision must be supplied
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        # Lazily resolve the changectx that introduced this file revision.
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw (unprocessed) revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, aborting on censored data unless the
        censor.policy config is set to ignore."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision as recorded by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # same file revision present in a parent: not a copy here
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1318
1323
1319
1324
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """Initialize an uncommitted context.

        text/user/date are the prospective commit message, author and date;
        changes is a pre-computed status (or None to compute lazily); extra
        is a dict of extra commit metadata; branch overrides the branch
        recorded in extra.
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # computed lazily when no explicit `changes` was supplied
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # an uncommitted context "inherits" its parents' bookmarks
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        phase = phases.draft  # default phase to draft
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # yield the (one or two) parents first, then all their ancestors
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1472
1477
1473
1478
1474 class workingctx(committablectx):
1479 class workingctx(committablectx):
1475 """A workingctx object makes access to data related to
1480 """A workingctx object makes access to data related to
1476 the current working directory convenient.
1481 the current working directory convenient.
1477 date - any valid date string or (unixtime, offset), or None.
1482 date - any valid date string or (unixtime, offset), or None.
1478 user - username string, or None.
1483 user - username string, or None.
1479 extra - a dictionary of extra values, or None.
1484 extra - a dictionary of extra values, or None.
1480 changes - a list of file lists as returned by localrepo.status()
1485 changes - a list of file lists as returned by localrepo.status()
1481 or None to use the repository status.
1486 or None to use the repository status.
1482 """
1487 """
1483
1488
    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        """Initialize a working-directory context.

        The branch defaults to the dirstate's branch unless the caller
        supplied one through `extra`.
        """
        branch = None
        if not extra or b'branch' not in extra:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                # dirstate branch names must be valid UTF-8
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1496
1501
1497 def __iter__(self):
1502 def __iter__(self):
1498 d = self._repo.dirstate
1503 d = self._repo.dirstate
1499 for f in d:
1504 for f in d:
1500 if d[f] != b'r':
1505 if d[f] != b'r':
1501 yield f
1506 yield f
1502
1507
1503 def __contains__(self, key):
1508 def __contains__(self, key):
1504 return self._repo.dirstate[key] not in b"?r"
1509 return self._repo.dirstate[key] not in b"?r"
1505
1510
1506 def hex(self):
1511 def hex(self):
1507 return wdirhex
1512 return wdirhex
1508
1513
    @propertycache
    def _parents(self):
        """The one or two parent changectxs of the working directory."""
        p = self._repo.dirstate.parents()
        if p[1] == nullid:
            # drop the null second parent
            p = p[:-1]
        # use unfiltered repo to delay/avoid loading obsmarkers
        unfi = self._repo.unfiltered()
        return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1517
1522
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta
        # (the bare attribute access below is intentional: it triggers the
        # propertycache before delegating to the base implementation)
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1522
1527
    def _buildflagfunc(self):
        # Create a fallback function for getting file flags when the
        # filesystem doesn't support them

        copiesget = self._repo.dirstate.copies().get
        parents = self.parents()
        if len(parents) < 2:
            # when we have one parent, it's easy: copy from parent
            man = parents[0].manifest()

            def func(f):
                # map a copied file back to its source before flag lookup
                f = copiesget(f, f)
                return man.flags(f)

        else:
            # merges are tricky: we try to reconstruct the unstored
            # result from the merge (issue1802)
            p1, p2 = parents
            pa = p1.ancestor(p2)
            m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

            def func(f):
                f = copiesget(f, f)  # may be wrong for merges with copies
                fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
                if fl1 == fl2:
                    return fl1
                if fl1 == fla:
                    return fl2
                if fl2 == fla:
                    return fl1
                return b''  # punt for conflicts

        return func
1556
1561
    @propertycache
    def _flagfunc(self):
        # delegate to the dirstate, supplying _buildflagfunc as the fallback
        # for filesystems without exec/symlink support
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1560
1565
    def flags(self, path):
        """Return the flags for `path` in the working directory (b'' if none
        or if the path cannot be inspected)."""
        if '_manifest' in self.__dict__:
            # a manifest has already been materialized: prefer it
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''
1572
1577
    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1578
1583
1579 def dirty(self, missing=False, merge=True, branch=True):
1584 def dirty(self, missing=False, merge=True, branch=True):
1580 b"check whether a working directory is modified"
1585 b"check whether a working directory is modified"
1581 # check subrepos first
1586 # check subrepos first
1582 for s in sorted(self.substate):
1587 for s in sorted(self.substate):
1583 if self.sub(s).dirty(missing=missing):
1588 if self.sub(s).dirty(missing=missing):
1584 return True
1589 return True
1585 # check current working dir
1590 # check current working dir
1586 return (
1591 return (
1587 (merge and self.p2())
1592 (merge and self.p2())
1588 or (branch and self.branch() != self.p1().branch())
1593 or (branch and self.branch() != self.p1().branch())
1589 or self.modified()
1594 or self.modified()
1590 or self.added()
1595 or self.added()
1591 or self.removed()
1596 or self.removed()
1592 or (missing and self.deleted())
1597 or (missing and self.deleted())
1593 )
1598 )
1594
1599
    def add(self, list, prefix=b""):
        """Schedule the given files for addition; returns the rejected ones."""
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn (but still add) when the file is unusually large
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # previously removed: re-adding restores tracking
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1639
1644
1640 def forget(self, files, prefix=b""):
1645 def forget(self, files, prefix=b""):
1641 with self._repo.wlock():
1646 with self._repo.wlock():
1642 ds = self._repo.dirstate
1647 ds = self._repo.dirstate
1643 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1648 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1644 rejected = []
1649 rejected = []
1645 for f in files:
1650 for f in files:
1646 if f not in ds:
1651 if f not in ds:
1647 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1652 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1648 rejected.append(f)
1653 rejected.append(f)
1649 elif ds[f] != b'a':
1654 elif ds[f] != b'a':
1650 ds.remove(f)
1655 ds.remove(f)
1651 else:
1656 else:
1652 ds.drop(f)
1657 ds.drop(f)
1653 return rejected
1658 return rejected
1654
1659
    def copy(self, source, dest):
        """Record that `dest` was copied from `source` in the dirstate.

        Warns and does nothing if `dest` is missing or is not a regular
        file or symlink.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    # untracked destination: start tracking it
                    ds.add(dest)
                elif ds[dest] in b'r':
                    # previously removed: restore tracking
                    ds.normallookup(dest)
                ds.copy(source, dest)
1678
1683
    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        """Build a matcher over the working directory for the given patterns."""
        r = self._repo

        # Only a case insensitive filesystem needs magic to translate user input
        # to actual case in the filesystem.
        icasefs = not util.fscasesensitive(r.root)
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.auditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
            icasefs=icasefs,
        )
1706
1711
1707 def _filtersuspectsymlink(self, files):
1712 def _filtersuspectsymlink(self, files):
1708 if not files or self._repo.dirstate._checklink:
1713 if not files or self._repo.dirstate._checklink:
1709 return files
1714 return files
1710
1715
1711 # Symlink placeholders may get non-symlink-like contents
1716 # Symlink placeholders may get non-symlink-like contents
1712 # via user error or dereferencing by NFS or Samba servers,
1717 # via user error or dereferencing by NFS or Samba servers,
1713 # so we filter out any placeholders that don't look like a
1718 # so we filter out any placeholders that don't look like a
1714 # symlink
1719 # symlink
1715 sane = []
1720 sane = []
1716 for f in files:
1721 for f in files:
1717 if self.flags(f) == b'l':
1722 if self.flags(f) == b'l':
1718 d = self[f].data()
1723 d = self[f].data()
1719 if (
1724 if (
1720 d == b''
1725 d == b''
1721 or len(d) >= 1024
1726 or len(d) >= 1024
1722 or b'\n' in d
1727 or b'\n' in d
1723 or stringutil.binary(d)
1728 or stringutil.binary(d)
1724 ):
1729 ):
1725 self._repo.ui.debug(
1730 self._repo.ui.debug(
1726 b'ignoring suspect symlink placeholder "%s"\n' % f
1731 b'ignoring suspect symlink placeholder "%s"\n' % f
1727 )
1732 )
1728 continue
1733 continue
1729 sane.append(f)
1734 sane.append(f)
1730 return sane
1735 return sane
1731
1736
    def _checklookup(self, files):
        """Re-check files the dirstate reported as possibly changed.

        Returns a 3-tuple of lists ``(modified, deleted, fixup)``:
        ``modified`` really differ from the first parent, ``deleted``
        became inaccessible while checking, and ``fixup`` turned out to
        be clean (their dirstate entries can be refreshed by the caller).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                # NOTE: cheap checks come first; pctx[f].cmp(self[f]) reads
                # file content and only runs if flags and presence match.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1764
1769
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is the list of files that _checklookup() found to be
        clean; their dirstate entries are refreshed so future status runs
        can skip the content comparison.  Registered post-status callbacks
        are run here as well.  All of this is best-effort: if the wlock
        cannot be taken or the dirstate changed underneath us, nothing is
        written.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1805
1810
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Files the dirstate cannot classify cheaply ("lookup" entries) are
        resolved with a full content comparison, and clean entries may be
        opportunistically written back to the dirstate.
        '''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        # refresh dirstate entries for the files found to be clean
        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1838
1843
1839 @propertycache
1844 @propertycache
1840 def _copies(self):
1845 def _copies(self):
1841 p1copies = {}
1846 p1copies = {}
1842 p2copies = {}
1847 p2copies = {}
1843 parents = self._repo.dirstate.parents()
1848 parents = self._repo.dirstate.parents()
1844 p1manifest = self._repo[parents[0]].manifest()
1849 p1manifest = self._repo[parents[0]].manifest()
1845 p2manifest = self._repo[parents[1]].manifest()
1850 p2manifest = self._repo[parents[1]].manifest()
1846 changedset = set(self.added()) | set(self.modified())
1851 changedset = set(self.added()) | set(self.modified())
1847 narrowmatch = self._repo.narrowmatch()
1852 narrowmatch = self._repo.narrowmatch()
1848 for dst, src in self._repo.dirstate.copies().items():
1853 for dst, src in self._repo.dirstate.copies().items():
1849 if dst not in changedset or not narrowmatch(dst):
1854 if dst not in changedset or not narrowmatch(dst):
1850 continue
1855 continue
1851 if src in p1manifest:
1856 if src in p1manifest:
1852 p1copies[dst] = src
1857 p1copies[dst] = src
1853 elif src in p2manifest:
1858 elif src in p2manifest:
1854 p2copies[dst] = src
1859 p2copies[dst] = src
1855 return p1copies, p2copies
1860 return p1copies, p2copies
1856
1861
    @propertycache
    def _manifest(self):
        """generate a manifest corresponding to the values in self._status

        This reuses the file nodeids from the parent, but uses special node
        identifiers for added and modified files. This is used by manifests
        merge to see that files are different and by update logic to avoid
        deleting newly added files.
        """
        return self._buildstatusmanifest(self._status)
1867
1872
1868 def _buildstatusmanifest(self, status):
1873 def _buildstatusmanifest(self, status):
1869 """Builds a manifest that includes the given status results."""
1874 """Builds a manifest that includes the given status results."""
1870 parents = self.parents()
1875 parents = self.parents()
1871
1876
1872 man = parents[0].manifest().copy()
1877 man = parents[0].manifest().copy()
1873
1878
1874 ff = self._flagfunc
1879 ff = self._flagfunc
1875 for i, l in (
1880 for i, l in (
1876 (addednodeid, status.added),
1881 (addednodeid, status.added),
1877 (modifiednodeid, status.modified),
1882 (modifiednodeid, status.modified),
1878 ):
1883 ):
1879 for f in l:
1884 for f in l:
1880 man[f] = i
1885 man[f] = i
1881 try:
1886 try:
1882 man.setflag(f, ff(f))
1887 man.setflag(f, ff(f))
1883 except OSError:
1888 except OSError:
1884 pass
1889 pass
1885
1890
1886 for f in status.deleted + status.removed:
1891 for f in status.deleted + status.removed:
1887 if f in man:
1892 if f in man:
1888 del man[f]
1893 del man[f]
1889
1894
1890 return man
1895 return man
1891
1896
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # not comparing against the working directory's parent: fall
            # back to the generic manifest-based status computation
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1912
1917
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE(review): mutates the caller-provided matcher in place
            match.bad = bad
        return match
1935
1940
1936 def walk(self, match):
1941 def walk(self, match):
1937 '''Generates matching file names.'''
1942 '''Generates matching file names.'''
1938 return sorted(
1943 return sorted(
1939 self._repo.dirstate.walk(
1944 self._repo.dirstate.walk(
1940 self._repo.narrowmatch(match),
1945 self._repo.narrowmatch(match),
1941 subrepos=sorted(self.substate),
1946 subrepos=sorted(self.substate),
1942 unknown=True,
1947 unknown=True,
1943 ignored=False,
1948 ignored=False,
1944 )
1949 )
1945 )
1950 )
1946
1951
1947 def matches(self, match):
1952 def matches(self, match):
1948 match = self._repo.narrowmatch(match)
1953 match = self._repo.narrowmatch(match)
1949 ds = self._repo.dirstate
1954 ds = self._repo.dirstate
1950 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1955 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1951
1956
    def markcommitted(self, node):
        """Update the dirstate after this context was committed as `node`:
        mark changed/added files clean, drop removed files, and move the
        dirstate parent to the new commit."""
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1966
1971
1967
1972
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return (source path, source filenode) if this file was copied,
        else None."""
        source = self.copysource()
        if not source:
            return None
        first_parent_manifest = self._changectx._parents[0]._manifest
        return source, first_parent_manifest.get(source, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        path = self._path
        filelog = self._filelog
        parent_ctxs = self._changectx._parents
        copy_info = self.renamed()

        def node_in(ctx):
            return ctx._manifest.get(path, nullid)

        if copy_info:
            # follow the copy: first entry is (src, srcnode, None)
            entries = [copy_info + (None,)]
        else:
            entries = [(path, node_in(parent_ctxs[0]), filelog)]
        entries.extend(
            (path, node_in(ctx), filelog) for ctx in parent_ctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in entries
            if n != nullid
        ]

    def children(self):
        return []
2025
2030
2026
2031
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        return workingctx(self._repo)

    def data(self):
        """Return the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, or None."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset) of the on-disk file; falls back to the
        changectx date if the file vanished from the working directory."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # like exists(), but does not follow a final symlink
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # remove any file occupying a parent directory's name
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2119
2124
2120
2125
2121 class overlayworkingctx(committablectx):
2126 class overlayworkingctx(committablectx):
2122 """Wraps another mutable context with a write-back cache that can be
2127 """Wraps another mutable context with a write-back cache that can be
2123 converted into a commit context.
2128 converted into a commit context.
2124
2129
2125 self._cache[path] maps to a dict with keys: {
2130 self._cache[path] maps to a dict with keys: {
2126 'exists': bool?
2131 'exists': bool?
2127 'date': date?
2132 'date': date?
2128 'data': str?
2133 'data': str?
2129 'flags': str?
2134 'flags': str?
2130 'copied': str? (path or None)
2135 'copied': str? (path or None)
2131 }
2136 }
2132 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2137 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2133 is `False`, the file was deleted.
2138 is `False`, the file was deleted.
2134 """
2139 """
2135
2140
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache
        self.clean()
2139
2144
    def setbase(self, wrappedctx):
        # the context all reads fall through to and writes are layered over
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')
2147
2152
2148 def data(self, path):
2153 def data(self, path):
2149 if self.isdirty(path):
2154 if self.isdirty(path):
2150 if self._cache[path][b'exists']:
2155 if self._cache[path][b'exists']:
2151 if self._cache[path][b'data'] is not None:
2156 if self._cache[path][b'data'] is not None:
2152 return self._cache[path][b'data']
2157 return self._cache[path][b'data']
2153 else:
2158 else:
2154 # Must fallback here, too, because we only set flags.
2159 # Must fallback here, too, because we only set flags.
2155 return self._wrappedctx[path].data()
2160 return self._wrappedctx[path].data()
2156 else:
2161 else:
2157 raise error.ProgrammingError(
2162 raise error.ProgrammingError(
2158 b"No such file or directory: %s" % path
2163 b"No such file or directory: %s" % path
2159 )
2164 )
2160 else:
2165 else:
2161 return self._wrappedctx[path].data()
2166 return self._wrappedctx[path].data()
2162
2167
    @propertycache
    def _manifest(self):
        """Manifest built from the parent's, overlaid with cached changes."""
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            # sentinel nodeid marks the file as added
            man[path] = addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            # sentinel nodeid marks the file as modified
            man[path] = modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man
2178
2183
2179 @propertycache
2184 @propertycache
2180 def _flagfunc(self):
2185 def _flagfunc(self):
2181 def f(path):
2186 def f(path):
2182 return self._cache[path][b'flags']
2187 return self._cache[path][b'flags']
2183
2188
2184 return f
2189 return f
2185
2190
2186 def files(self):
2191 def files(self):
2187 return sorted(self.added() + self.modified() + self.removed())
2192 return sorted(self.added() + self.modified() + self.removed())
2188
2193
2189 def modified(self):
2194 def modified(self):
2190 return [
2195 return [
2191 f
2196 f
2192 for f in self._cache.keys()
2197 for f in self._cache.keys()
2193 if self._cache[f][b'exists'] and self._existsinparent(f)
2198 if self._cache[f][b'exists'] and self._existsinparent(f)
2194 ]
2199 ]
2195
2200
2196 def added(self):
2201 def added(self):
2197 return [
2202 return [
2198 f
2203 f
2199 for f in self._cache.keys()
2204 for f in self._cache.keys()
2200 if self._cache[f][b'exists'] and not self._existsinparent(f)
2205 if self._cache[f][b'exists'] and not self._existsinparent(f)
2201 ]
2206 ]
2202
2207
2203 def removed(self):
2208 def removed(self):
2204 return [
2209 return [
2205 f
2210 f
2206 for f in self._cache.keys()
2211 for f in self._cache.keys()
2207 if not self._cache[f][b'exists'] and self._existsinparent(f)
2212 if not self._cache[f][b'exists'] and self._existsinparent(f)
2208 ]
2213 ]
2209
2214
2210 def p1copies(self):
2215 def p1copies(self):
2211 copies = self._repo._wrappedctx.p1copies().copy()
2216 copies = self._repo._wrappedctx.p1copies().copy()
2212 narrowmatch = self._repo.narrowmatch()
2217 narrowmatch = self._repo.narrowmatch()
2213 for f in self._cache.keys():
2218 for f in self._cache.keys():
2214 if not narrowmatch(f):
2219 if not narrowmatch(f):
2215 continue
2220 continue
2216 copies.pop(f, None) # delete if it exists
2221 copies.pop(f, None) # delete if it exists
2217 source = self._cache[f][b'copied']
2222 source = self._cache[f][b'copied']
2218 if source:
2223 if source:
2219 copies[f] = source
2224 copies[f] = source
2220 return copies
2225 return copies
2221
2226
2222 def p2copies(self):
2227 def p2copies(self):
2223 copies = self._repo._wrappedctx.p2copies().copy()
2228 copies = self._repo._wrappedctx.p2copies().copy()
2224 narrowmatch = self._repo.narrowmatch()
2229 narrowmatch = self._repo.narrowmatch()
2225 for f in self._cache.keys():
2230 for f in self._cache.keys():
2226 if not narrowmatch(f):
2231 if not narrowmatch(f):
2227 continue
2232 continue
2228 copies.pop(f, None) # delete if it exists
2233 copies.pop(f, None) # delete if it exists
2229 source = self._cache[f][b'copied']
2234 source = self._cache[f][b'copied']
2230 if source:
2235 if source:
2231 copies[f] = source
2236 copies[f] = source
2232 return copies
2237 return copies
2233
2238
2234 def isinmemory(self):
2239 def isinmemory(self):
2235 return True
2240 return True
2236
2241
2237 def filedate(self, path):
2242 def filedate(self, path):
2238 if self.isdirty(path):
2243 if self.isdirty(path):
2239 return self._cache[path][b'date']
2244 return self._cache[path][b'date']
2240 else:
2245 else:
2241 return self._wrappedctx[path].date()
2246 return self._wrappedctx[path].date()
2242
2247
2243 def markcopied(self, path, origin):
2248 def markcopied(self, path, origin):
2244 self._markdirty(
2249 self._markdirty(
2245 path,
2250 path,
2246 exists=True,
2251 exists=True,
2247 date=self.filedate(path),
2252 date=self.filedate(path),
2248 flags=self.flags(path),
2253 flags=self.flags(path),
2249 copied=origin,
2254 copied=origin,
2250 )
2255 )
2251
2256
2252 def copydata(self, path):
2257 def copydata(self, path):
2253 if self.isdirty(path):
2258 if self.isdirty(path):
2254 return self._cache[path][b'copied']
2259 return self._cache[path][b'copied']
2255 else:
2260 else:
2256 return None
2261 return None
2257
2262
2258 def flags(self, path):
2263 def flags(self, path):
2259 if self.isdirty(path):
2264 if self.isdirty(path):
2260 if self._cache[path][b'exists']:
2265 if self._cache[path][b'exists']:
2261 return self._cache[path][b'flags']
2266 return self._cache[path][b'flags']
2262 else:
2267 else:
2263 raise error.ProgrammingError(
2268 raise error.ProgrammingError(
2264 b"No such file or directory: %s" % self._path
2269 b"No such file or directory: %s" % self._path
2265 )
2270 )
2266 else:
2271 else:
2267 return self._wrappedctx[path].flags()
2272 return self._wrappedctx[path].flags()
2268
2273
2269 def __contains__(self, key):
2274 def __contains__(self, key):
2270 if key in self._cache:
2275 if key in self._cache:
2271 return self._cache[key][b'exists']
2276 return self._cache[key][b'exists']
2272 return key in self.p1()
2277 return key in self.p1()
2273
2278
2274 def _existsinparent(self, path):
2279 def _existsinparent(self, path):
2275 try:
2280 try:
2276 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2281 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2277 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2282 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2278 # with an ``exists()`` function.
2283 # with an ``exists()`` function.
2279 self._wrappedctx[path]
2284 self._wrappedctx[path]
2280 return True
2285 return True
2281 except error.ManifestLookupError:
2286 except error.ManifestLookupError:
2282 return False
2287 return False
2283
2288
2284 def _auditconflicts(self, path):
2289 def _auditconflicts(self, path):
2285 """Replicates conflict checks done by wvfs.write().
2290 """Replicates conflict checks done by wvfs.write().
2286
2291
2287 Since we never write to the filesystem and never call `applyupdates` in
2292 Since we never write to the filesystem and never call `applyupdates` in
2288 IMM, we'll never check that a path is actually writable -- e.g., because
2293 IMM, we'll never check that a path is actually writable -- e.g., because
2289 it adds `a/foo`, but `a` is actually a file in the other commit.
2294 it adds `a/foo`, but `a` is actually a file in the other commit.
2290 """
2295 """
2291
2296
2292 def fail(path, component):
2297 def fail(path, component):
2293 # p1() is the base and we're receiving "writes" for p2()'s
2298 # p1() is the base and we're receiving "writes" for p2()'s
2294 # files.
2299 # files.
2295 if b'l' in self.p1()[component].flags():
2300 if b'l' in self.p1()[component].flags():
2296 raise error.Abort(
2301 raise error.Abort(
2297 b"error: %s conflicts with symlink %s "
2302 b"error: %s conflicts with symlink %s "
2298 b"in %d." % (path, component, self.p1().rev())
2303 b"in %d." % (path, component, self.p1().rev())
2299 )
2304 )
2300 else:
2305 else:
2301 raise error.Abort(
2306 raise error.Abort(
2302 b"error: '%s' conflicts with file '%s' in "
2307 b"error: '%s' conflicts with file '%s' in "
2303 b"%d." % (path, component, self.p1().rev())
2308 b"%d." % (path, component, self.p1().rev())
2304 )
2309 )
2305
2310
2306 # Test that each new directory to be created to write this path from p2
2311 # Test that each new directory to be created to write this path from p2
2307 # is not a file in p1.
2312 # is not a file in p1.
2308 components = path.split(b'/')
2313 components = path.split(b'/')
2309 for i in pycompat.xrange(len(components)):
2314 for i in pycompat.xrange(len(components)):
2310 component = b"/".join(components[0:i])
2315 component = b"/".join(components[0:i])
2311 if component in self:
2316 if component in self:
2312 fail(path, component)
2317 fail(path, component)
2313
2318
2314 # Test the other direction -- that this path from p2 isn't a directory
2319 # Test the other direction -- that this path from p2 isn't a directory
2315 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2320 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2316 match = self.match([path], default=b'path')
2321 match = self.match([path], default=b'path')
2317 matches = self.p1().manifest().matches(match)
2322 matches = self.p1().manifest().matches(match)
2318 mfiles = matches.keys()
2323 mfiles = matches.keys()
2319 if len(mfiles) > 0:
2324 if len(mfiles) > 0:
2320 if len(mfiles) == 1 and mfiles[0] == path:
2325 if len(mfiles) == 1 and mfiles[0] == path:
2321 return
2326 return
2322 # omit the files which are deleted in current IMM wctx
2327 # omit the files which are deleted in current IMM wctx
2323 mfiles = [m for m in mfiles if m in self]
2328 mfiles = [m for m in mfiles if m in self]
2324 if not mfiles:
2329 if not mfiles:
2325 return
2330 return
2326 raise error.Abort(
2331 raise error.Abort(
2327 b"error: file '%s' cannot be written because "
2332 b"error: file '%s' cannot be written because "
2328 b" '%s/' is a directory in %s (containing %d "
2333 b" '%s/' is a directory in %s (containing %d "
2329 b"entries: %s)"
2334 b"entries: %s)"
2330 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2335 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2331 )
2336 )
2332
2337
2333 def write(self, path, data, flags=b'', **kwargs):
2338 def write(self, path, data, flags=b'', **kwargs):
2334 if data is None:
2339 if data is None:
2335 raise error.ProgrammingError(b"data must be non-None")
2340 raise error.ProgrammingError(b"data must be non-None")
2336 self._auditconflicts(path)
2341 self._auditconflicts(path)
2337 self._markdirty(
2342 self._markdirty(
2338 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2343 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2339 )
2344 )
2340
2345
2341 def setflags(self, path, l, x):
2346 def setflags(self, path, l, x):
2342 flag = b''
2347 flag = b''
2343 if l:
2348 if l:
2344 flag = b'l'
2349 flag = b'l'
2345 elif x:
2350 elif x:
2346 flag = b'x'
2351 flag = b'x'
2347 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2352 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2348
2353
2349 def remove(self, path):
2354 def remove(self, path):
2350 self._markdirty(path, exists=False)
2355 self._markdirty(path, exists=False)
2351
2356
2352 def exists(self, path):
2357 def exists(self, path):
2353 """exists behaves like `lexists`, but needs to follow symlinks and
2358 """exists behaves like `lexists`, but needs to follow symlinks and
2354 return False if they are broken.
2359 return False if they are broken.
2355 """
2360 """
2356 if self.isdirty(path):
2361 if self.isdirty(path):
2357 # If this path exists and is a symlink, "follow" it by calling
2362 # If this path exists and is a symlink, "follow" it by calling
2358 # exists on the destination path.
2363 # exists on the destination path.
2359 if (
2364 if (
2360 self._cache[path][b'exists']
2365 self._cache[path][b'exists']
2361 and b'l' in self._cache[path][b'flags']
2366 and b'l' in self._cache[path][b'flags']
2362 ):
2367 ):
2363 return self.exists(self._cache[path][b'data'].strip())
2368 return self.exists(self._cache[path][b'data'].strip())
2364 else:
2369 else:
2365 return self._cache[path][b'exists']
2370 return self._cache[path][b'exists']
2366
2371
2367 return self._existsinparent(path)
2372 return self._existsinparent(path)
2368
2373
2369 def lexists(self, path):
2374 def lexists(self, path):
2370 """lexists returns True if the path exists"""
2375 """lexists returns True if the path exists"""
2371 if self.isdirty(path):
2376 if self.isdirty(path):
2372 return self._cache[path][b'exists']
2377 return self._cache[path][b'exists']
2373
2378
2374 return self._existsinparent(path)
2379 return self._existsinparent(path)
2375
2380
2376 def size(self, path):
2381 def size(self, path):
2377 if self.isdirty(path):
2382 if self.isdirty(path):
2378 if self._cache[path][b'exists']:
2383 if self._cache[path][b'exists']:
2379 return len(self._cache[path][b'data'])
2384 return len(self._cache[path][b'data'])
2380 else:
2385 else:
2381 raise error.ProgrammingError(
2386 raise error.ProgrammingError(
2382 b"No such file or directory: %s" % self._path
2387 b"No such file or directory: %s" % self._path
2383 )
2388 )
2384 return self._wrappedctx[path].size()
2389 return self._wrappedctx[path].size()
2385
2390
2386 def tomemctx(
2391 def tomemctx(
2387 self,
2392 self,
2388 text,
2393 text,
2389 branch=None,
2394 branch=None,
2390 extra=None,
2395 extra=None,
2391 date=None,
2396 date=None,
2392 parents=None,
2397 parents=None,
2393 user=None,
2398 user=None,
2394 editor=None,
2399 editor=None,
2395 ):
2400 ):
2396 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2401 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2397 committed.
2402 committed.
2398
2403
2399 ``text`` is the commit message.
2404 ``text`` is the commit message.
2400 ``parents`` (optional) are rev numbers.
2405 ``parents`` (optional) are rev numbers.
2401 """
2406 """
2402 # Default parents to the wrapped contexts' if not passed.
2407 # Default parents to the wrapped contexts' if not passed.
2403 if parents is None:
2408 if parents is None:
2404 parents = self._wrappedctx.parents()
2409 parents = self._wrappedctx.parents()
2405 if len(parents) == 1:
2410 if len(parents) == 1:
2406 parents = (parents[0], None)
2411 parents = (parents[0], None)
2407
2412
2408 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2413 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2409 if parents[1] is None:
2414 if parents[1] is None:
2410 parents = (self._repo[parents[0]], None)
2415 parents = (self._repo[parents[0]], None)
2411 else:
2416 else:
2412 parents = (self._repo[parents[0]], self._repo[parents[1]])
2417 parents = (self._repo[parents[0]], self._repo[parents[1]])
2413
2418
2414 files = self.files()
2419 files = self.files()
2415
2420
2416 def getfile(repo, memctx, path):
2421 def getfile(repo, memctx, path):
2417 if self._cache[path][b'exists']:
2422 if self._cache[path][b'exists']:
2418 return memfilectx(
2423 return memfilectx(
2419 repo,
2424 repo,
2420 memctx,
2425 memctx,
2421 path,
2426 path,
2422 self._cache[path][b'data'],
2427 self._cache[path][b'data'],
2423 b'l' in self._cache[path][b'flags'],
2428 b'l' in self._cache[path][b'flags'],
2424 b'x' in self._cache[path][b'flags'],
2429 b'x' in self._cache[path][b'flags'],
2425 self._cache[path][b'copied'],
2430 self._cache[path][b'copied'],
2426 )
2431 )
2427 else:
2432 else:
2428 # Returning None, but including the path in `files`, is
2433 # Returning None, but including the path in `files`, is
2429 # necessary for memctx to register a deletion.
2434 # necessary for memctx to register a deletion.
2430 return None
2435 return None
2431
2436
2432 return memctx(
2437 return memctx(
2433 self._repo,
2438 self._repo,
2434 parents,
2439 parents,
2435 text,
2440 text,
2436 files,
2441 files,
2437 getfile,
2442 getfile,
2438 date=date,
2443 date=date,
2439 extra=extra,
2444 extra=extra,
2440 user=user,
2445 user=user,
2441 branch=branch,
2446 branch=branch,
2442 editor=editor,
2447 editor=editor,
2443 )
2448 )
2444
2449
2445 def isdirty(self, path):
2450 def isdirty(self, path):
2446 return path in self._cache
2451 return path in self._cache
2447
2452
2448 def isempty(self):
2453 def isempty(self):
2449 # We need to discard any keys that are actually clean before the empty
2454 # We need to discard any keys that are actually clean before the empty
2450 # commit check.
2455 # commit check.
2451 self._compact()
2456 self._compact()
2452 return len(self._cache) == 0
2457 return len(self._cache) == 0
2453
2458
2454 def clean(self):
2459 def clean(self):
2455 self._cache = {}
2460 self._cache = {}
2456
2461
2457 def _compact(self):
2462 def _compact(self):
2458 """Removes keys from the cache that are actually clean, by comparing
2463 """Removes keys from the cache that are actually clean, by comparing
2459 them with the underlying context.
2464 them with the underlying context.
2460
2465
2461 This can occur during the merge process, e.g. by passing --tool :local
2466 This can occur during the merge process, e.g. by passing --tool :local
2462 to resolve a conflict.
2467 to resolve a conflict.
2463 """
2468 """
2464 keys = []
2469 keys = []
2465 # This won't be perfect, but can help performance significantly when
2470 # This won't be perfect, but can help performance significantly when
2466 # using things like remotefilelog.
2471 # using things like remotefilelog.
2467 scmutil.prefetchfiles(
2472 scmutil.prefetchfiles(
2468 self.repo(),
2473 self.repo(),
2469 [self.p1().rev()],
2474 [self.p1().rev()],
2470 scmutil.matchfiles(self.repo(), self._cache.keys()),
2475 scmutil.matchfiles(self.repo(), self._cache.keys()),
2471 )
2476 )
2472
2477
2473 for path in self._cache.keys():
2478 for path in self._cache.keys():
2474 cache = self._cache[path]
2479 cache = self._cache[path]
2475 try:
2480 try:
2476 underlying = self._wrappedctx[path]
2481 underlying = self._wrappedctx[path]
2477 if (
2482 if (
2478 underlying.data() == cache[b'data']
2483 underlying.data() == cache[b'data']
2479 and underlying.flags() == cache[b'flags']
2484 and underlying.flags() == cache[b'flags']
2480 ):
2485 ):
2481 keys.append(path)
2486 keys.append(path)
2482 except error.ManifestLookupError:
2487 except error.ManifestLookupError:
2483 # Path not in the underlying manifest (created).
2488 # Path not in the underlying manifest (created).
2484 continue
2489 continue
2485
2490
2486 for path in keys:
2491 for path in keys:
2487 del self._cache[path]
2492 del self._cache[path]
2488 return keys
2493 return keys
2489
2494
2490 def _markdirty(
2495 def _markdirty(
2491 self, path, exists, data=None, date=None, flags=b'', copied=None
2496 self, path, exists, data=None, date=None, flags=b'', copied=None
2492 ):
2497 ):
2493 # data not provided, let's see if we already have some; if not, let's
2498 # data not provided, let's see if we already have some; if not, let's
2494 # grab it from our underlying context, so that we always have data if
2499 # grab it from our underlying context, so that we always have data if
2495 # the file is marked as existing.
2500 # the file is marked as existing.
2496 if exists and data is None:
2501 if exists and data is None:
2497 oldentry = self._cache.get(path) or {}
2502 oldentry = self._cache.get(path) or {}
2498 data = oldentry.get(b'data')
2503 data = oldentry.get(b'data')
2499 if data is None:
2504 if data is None:
2500 data = self._wrappedctx[path].data()
2505 data = self._wrappedctx[path].data()
2501
2506
2502 self._cache[path] = {
2507 self._cache[path] = {
2503 b'exists': exists,
2508 b'exists': exists,
2504 b'data': data,
2509 b'data': data,
2505 b'date': date,
2510 b'date': date,
2506 b'flags': flags,
2511 b'flags': flags,
2507 b'copied': copied,
2512 b'copied': copied,
2508 }
2513 }
2509
2514
2510 def filectx(self, path, filelog=None):
2515 def filectx(self, path, filelog=None):
2511 return overlayworkingfilectx(
2516 return overlayworkingfilectx(
2512 self._repo, path, parent=self, filelog=filelog
2517 self._repo, path, parent=self, filelog=filelog
2513 )
2518 )
2514
2519
2515
2520
2516 class overlayworkingfilectx(committablefilectx):
2521 class overlayworkingfilectx(committablefilectx):
2517 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2522 """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
2518 cache, which can be flushed through later by calling ``flush()``."""
2523 cache, which can be flushed through later by calling ``flush()``."""
2519
2524
2520 def __init__(self, repo, path, filelog=None, parent=None):
2525 def __init__(self, repo, path, filelog=None, parent=None):
2521 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2526 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2522 self._repo = repo
2527 self._repo = repo
2523 self._parent = parent
2528 self._parent = parent
2524 self._path = path
2529 self._path = path
2525
2530
2526 def cmp(self, fctx):
2531 def cmp(self, fctx):
2527 return self.data() != fctx.data()
2532 return self.data() != fctx.data()
2528
2533
2529 def changectx(self):
2534 def changectx(self):
2530 return self._parent
2535 return self._parent
2531
2536
2532 def data(self):
2537 def data(self):
2533 return self._parent.data(self._path)
2538 return self._parent.data(self._path)
2534
2539
2535 def date(self):
2540 def date(self):
2536 return self._parent.filedate(self._path)
2541 return self._parent.filedate(self._path)
2537
2542
2538 def exists(self):
2543 def exists(self):
2539 return self.lexists()
2544 return self.lexists()
2540
2545
2541 def lexists(self):
2546 def lexists(self):
2542 return self._parent.exists(self._path)
2547 return self._parent.exists(self._path)
2543
2548
2544 def copysource(self):
2549 def copysource(self):
2545 return self._parent.copydata(self._path)
2550 return self._parent.copydata(self._path)
2546
2551
2547 def size(self):
2552 def size(self):
2548 return self._parent.size(self._path)
2553 return self._parent.size(self._path)
2549
2554
2550 def markcopied(self, origin):
2555 def markcopied(self, origin):
2551 self._parent.markcopied(self._path, origin)
2556 self._parent.markcopied(self._path, origin)
2552
2557
2553 def audit(self):
2558 def audit(self):
2554 pass
2559 pass
2555
2560
2556 def flags(self):
2561 def flags(self):
2557 return self._parent.flags(self._path)
2562 return self._parent.flags(self._path)
2558
2563
2559 def setflags(self, islink, isexec):
2564 def setflags(self, islink, isexec):
2560 return self._parent.setflags(self._path, islink, isexec)
2565 return self._parent.setflags(self._path, islink, isexec)
2561
2566
2562 def write(self, data, flags, backgroundclose=False, **kwargs):
2567 def write(self, data, flags, backgroundclose=False, **kwargs):
2563 return self._parent.write(self._path, data, flags, **kwargs)
2568 return self._parent.write(self._path, data, flags, **kwargs)
2564
2569
2565 def remove(self, ignoremissing=False):
2570 def remove(self, ignoremissing=False):
2566 return self._parent.remove(self._path)
2571 return self._parent.remove(self._path)
2567
2572
2568 def clearunknown(self):
2573 def clearunknown(self):
2569 pass
2574 pass
2570
2575
2571
2576
2572 class workingcommitctx(workingctx):
2577 class workingcommitctx(workingctx):
2573 """A workingcommitctx object makes access to data related to
2578 """A workingcommitctx object makes access to data related to
2574 the revision being committed convenient.
2579 the revision being committed convenient.
2575
2580
2576 This hides changes in the working directory, if they aren't
2581 This hides changes in the working directory, if they aren't
2577 committed in this context.
2582 committed in this context.
2578 """
2583 """
2579
2584
2580 def __init__(
2585 def __init__(
2581 self, repo, changes, text=b"", user=None, date=None, extra=None
2586 self, repo, changes, text=b"", user=None, date=None, extra=None
2582 ):
2587 ):
2583 super(workingcommitctx, self).__init__(
2588 super(workingcommitctx, self).__init__(
2584 repo, text, user, date, extra, changes
2589 repo, text, user, date, extra, changes
2585 )
2590 )
2586
2591
2587 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2592 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2588 """Return matched files only in ``self._status``
2593 """Return matched files only in ``self._status``
2589
2594
2590 Uncommitted files appear "clean" via this context, even if
2595 Uncommitted files appear "clean" via this context, even if
2591 they aren't actually so in the working directory.
2596 they aren't actually so in the working directory.
2592 """
2597 """
2593 if clean:
2598 if clean:
2594 clean = [f for f in self._manifest if f not in self._changedset]
2599 clean = [f for f in self._manifest if f not in self._changedset]
2595 else:
2600 else:
2596 clean = []
2601 clean = []
2597 return scmutil.status(
2602 return scmutil.status(
2598 [f for f in self._status.modified if match(f)],
2603 [f for f in self._status.modified if match(f)],
2599 [f for f in self._status.added if match(f)],
2604 [f for f in self._status.added if match(f)],
2600 [f for f in self._status.removed if match(f)],
2605 [f for f in self._status.removed if match(f)],
2601 [],
2606 [],
2602 [],
2607 [],
2603 [],
2608 [],
2604 clean,
2609 clean,
2605 )
2610 )
2606
2611
2607 @propertycache
2612 @propertycache
2608 def _changedset(self):
2613 def _changedset(self):
2609 """Return the set of files changed in this context
2614 """Return the set of files changed in this context
2610 """
2615 """
2611 changed = set(self._status.modified)
2616 changed = set(self._status.modified)
2612 changed.update(self._status.added)
2617 changed.update(self._status.added)
2613 changed.update(self._status.removed)
2618 changed.update(self._status.removed)
2614 return changed
2619 return changed
2615
2620
2616
2621
2617 def makecachingfilectxfn(func):
2622 def makecachingfilectxfn(func):
2618 """Create a filectxfn that caches based on the path.
2623 """Create a filectxfn that caches based on the path.
2619
2624
2620 We can't use util.cachefunc because it uses all arguments as the cache
2625 We can't use util.cachefunc because it uses all arguments as the cache
2621 key and this creates a cycle since the arguments include the repo and
2626 key and this creates a cycle since the arguments include the repo and
2622 memctx.
2627 memctx.
2623 """
2628 """
2624 cache = {}
2629 cache = {}
2625
2630
2626 def getfilectx(repo, memctx, path):
2631 def getfilectx(repo, memctx, path):
2627 if path not in cache:
2632 if path not in cache:
2628 cache[path] = func(repo, memctx, path)
2633 cache[path] = func(repo, memctx, path)
2629 return cache[path]
2634 return cache[path]
2630
2635
2631 return getfilectx
2636 return getfilectx
2632
2637
2633
2638
2634 def memfilefromctx(ctx):
2639 def memfilefromctx(ctx):
2635 """Given a context return a memfilectx for ctx[path]
2640 """Given a context return a memfilectx for ctx[path]
2636
2641
2637 This is a convenience method for building a memctx based on another
2642 This is a convenience method for building a memctx based on another
2638 context.
2643 context.
2639 """
2644 """
2640
2645
2641 def getfilectx(repo, memctx, path):
2646 def getfilectx(repo, memctx, path):
2642 fctx = ctx[path]
2647 fctx = ctx[path]
2643 copysource = fctx.copysource()
2648 copysource = fctx.copysource()
2644 return memfilectx(
2649 return memfilectx(
2645 repo,
2650 repo,
2646 memctx,
2651 memctx,
2647 path,
2652 path,
2648 fctx.data(),
2653 fctx.data(),
2649 islink=fctx.islink(),
2654 islink=fctx.islink(),
2650 isexec=fctx.isexec(),
2655 isexec=fctx.isexec(),
2651 copysource=copysource,
2656 copysource=copysource,
2652 )
2657 )
2653
2658
2654 return getfilectx
2659 return getfilectx
2655
2660
2656
2661
2657 def memfilefrompatch(patchstore):
2662 def memfilefrompatch(patchstore):
2658 """Given a patch (e.g. patchstore object) return a memfilectx
2663 """Given a patch (e.g. patchstore object) return a memfilectx
2659
2664
2660 This is a convenience method for building a memctx based on a patchstore.
2665 This is a convenience method for building a memctx based on a patchstore.
2661 """
2666 """
2662
2667
2663 def getfilectx(repo, memctx, path):
2668 def getfilectx(repo, memctx, path):
2664 data, mode, copysource = patchstore.getfile(path)
2669 data, mode, copysource = patchstore.getfile(path)
2665 if data is None:
2670 if data is None:
2666 return None
2671 return None
2667 islink, isexec = mode
2672 islink, isexec = mode
2668 return memfilectx(
2673 return memfilectx(
2669 repo,
2674 repo,
2670 memctx,
2675 memctx,
2671 path,
2676 path,
2672 data,
2677 data,
2673 islink=islink,
2678 islink=islink,
2674 isexec=isexec,
2679 isexec=isexec,
2675 copysource=copysource,
2680 copysource=copysource,
2676 )
2681 )
2677
2682
2678 return getfilectx
2683 return getfilectx
2679
2684
2680
2685
2681 class memctx(committablectx):
2686 class memctx(committablectx):
2682 """Use memctx to perform in-memory commits via localrepo.commitctx().
2687 """Use memctx to perform in-memory commits via localrepo.commitctx().
2683
2688
2684 Revision information is supplied at initialization time while
2689 Revision information is supplied at initialization time while
2685 related files data and is made available through a callback
2690 related files data and is made available through a callback
2686 mechanism. 'repo' is the current localrepo, 'parents' is a
2691 mechanism. 'repo' is the current localrepo, 'parents' is a
2687 sequence of two parent revisions identifiers (pass None for every
2692 sequence of two parent revisions identifiers (pass None for every
2688 missing parent), 'text' is the commit message and 'files' lists
2693 missing parent), 'text' is the commit message and 'files' lists
2689 names of files touched by the revision (normalized and relative to
2694 names of files touched by the revision (normalized and relative to
2690 repository root).
2695 repository root).
2691
2696
2692 filectxfn(repo, memctx, path) is a callable receiving the
2697 filectxfn(repo, memctx, path) is a callable receiving the
2693 repository, the current memctx object and the normalized path of
2698 repository, the current memctx object and the normalized path of
2694 requested file, relative to repository root. It is fired by the
2699 requested file, relative to repository root. It is fired by the
2695 commit function for every file in 'files', but calls order is
2700 commit function for every file in 'files', but calls order is
2696 undefined. If the file is available in the revision being
2701 undefined. If the file is available in the revision being
2697 committed (updated or added), filectxfn returns a memfilectx
2702 committed (updated or added), filectxfn returns a memfilectx
2698 object. If the file was removed, filectxfn return None for recent
2703 object. If the file was removed, filectxfn return None for recent
2699 Mercurial. Moved files are represented by marking the source file
2704 Mercurial. Moved files are represented by marking the source file
2700 removed and the new file added with copy information (see
2705 removed and the new file added with copy information (see
2701 memfilectx).
2706 memfilectx).
2702
2707
2703 user receives the committer name and defaults to current
2708 user receives the committer name and defaults to current
2704 repository username, date is the commit date in any format
2709 repository username, date is the commit date in any format
2705 supported by dateutil.parsedate() and defaults to current date, extra
2710 supported by dateutil.parsedate() and defaults to current date, extra
2706 is a dictionary of metadata or is left empty.
2711 is a dictionary of metadata or is left empty.
2707 """
2712 """
2708
2713
2709 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2714 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2710 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2715 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2711 # this field to determine what to do in filectxfn.
2716 # this field to determine what to do in filectxfn.
2712 _returnnoneformissingfiles = True
2717 _returnnoneformissingfiles = True
2713
2718
2714 def __init__(
2719 def __init__(
2715 self,
2720 self,
2716 repo,
2721 repo,
2717 parents,
2722 parents,
2718 text,
2723 text,
2719 files,
2724 files,
2720 filectxfn,
2725 filectxfn,
2721 user=None,
2726 user=None,
2722 date=None,
2727 date=None,
2723 extra=None,
2728 extra=None,
2724 branch=None,
2729 branch=None,
2725 editor=False,
2730 editor=False,
2726 ):
2731 ):
2727 super(memctx, self).__init__(
2732 super(memctx, self).__init__(
2728 repo, text, user, date, extra, branch=branch
2733 repo, text, user, date, extra, branch=branch
2729 )
2734 )
2730 self._rev = None
2735 self._rev = None
2731 self._node = None
2736 self._node = None
2732 parents = [(p or nullid) for p in parents]
2737 parents = [(p or nullid) for p in parents]
2733 p1, p2 = parents
2738 p1, p2 = parents
2734 self._parents = [self._repo[p] for p in (p1, p2)]
2739 self._parents = [self._repo[p] for p in (p1, p2)]
2735 files = sorted(set(files))
2740 files = sorted(set(files))
2736 self._files = files
2741 self._files = files
2737 self.substate = {}
2742 self.substate = {}
2738
2743
2739 if isinstance(filectxfn, patch.filestore):
2744 if isinstance(filectxfn, patch.filestore):
2740 filectxfn = memfilefrompatch(filectxfn)
2745 filectxfn = memfilefrompatch(filectxfn)
2741 elif not callable(filectxfn):
2746 elif not callable(filectxfn):
2742 # if store is not callable, wrap it in a function
2747 # if store is not callable, wrap it in a function
2743 filectxfn = memfilefromctx(filectxfn)
2748 filectxfn = memfilefromctx(filectxfn)
2744
2749
2745 # memoizing increases performance for e.g. vcs convert scenarios.
2750 # memoizing increases performance for e.g. vcs convert scenarios.
2746 self._filectxfn = makecachingfilectxfn(filectxfn)
2751 self._filectxfn = makecachingfilectxfn(filectxfn)
2747
2752
2748 if editor:
2753 if editor:
2749 self._text = editor(self._repo, self, [])
2754 self._text = editor(self._repo, self, [])
2750 self._repo.savecommitmessage(self._text)
2755 self._repo.savecommitmessage(self._text)
2751
2756
2752 def filectx(self, path, filelog=None):
2757 def filectx(self, path, filelog=None):
2753 """get a file context from the working directory
2758 """get a file context from the working directory
2754
2759
2755 Returns None if file doesn't exist and should be removed."""
2760 Returns None if file doesn't exist and should be removed."""
2756 return self._filectxfn(self._repo, self, path)
2761 return self._filectxfn(self._repo, self, path)
2757
2762
2758 def commit(self):
2763 def commit(self):
2759 """commit context to the repo"""
2764 """commit context to the repo"""
2760 return self._repo.commitctx(self)
2765 return self._repo.commitctx(self)
2761
2766
2762 @propertycache
2767 @propertycache
2763 def _manifest(self):
2768 def _manifest(self):
2764 """generate a manifest based on the return values of filectxfn"""
2769 """generate a manifest based on the return values of filectxfn"""
2765
2770
2766 # keep this simple for now; just worry about p1
2771 # keep this simple for now; just worry about p1
2767 pctx = self._parents[0]
2772 pctx = self._parents[0]
2768 man = pctx.manifest().copy()
2773 man = pctx.manifest().copy()
2769
2774
2770 for f in self._status.modified:
2775 for f in self._status.modified:
2771 man[f] = modifiednodeid
2776 man[f] = modifiednodeid
2772
2777
2773 for f in self._status.added:
2778 for f in self._status.added:
2774 man[f] = addednodeid
2779 man[f] = addednodeid
2775
2780
2776 for f in self._status.removed:
2781 for f in self._status.removed:
2777 if f in man:
2782 if f in man:
2778 del man[f]
2783 del man[f]
2779
2784
2780 return man
2785 return man
2781
2786
2782 @propertycache
2787 @propertycache
2783 def _status(self):
2788 def _status(self):
2784 """Calculate exact status from ``files`` specified at construction
2789 """Calculate exact status from ``files`` specified at construction
2785 """
2790 """
2786 man1 = self.p1().manifest()
2791 man1 = self.p1().manifest()
2787 p2 = self._parents[1]
2792 p2 = self._parents[1]
2788 # "1 < len(self._parents)" can't be used for checking
2793 # "1 < len(self._parents)" can't be used for checking
2789 # existence of the 2nd parent, because "memctx._parents" is
2794 # existence of the 2nd parent, because "memctx._parents" is
2790 # explicitly initialized by the list, of which length is 2.
2795 # explicitly initialized by the list, of which length is 2.
2791 if p2.node() != nullid:
2796 if p2.node() != nullid:
2792 man2 = p2.manifest()
2797 man2 = p2.manifest()
2793 managing = lambda f: f in man1 or f in man2
2798 managing = lambda f: f in man1 or f in man2
2794 else:
2799 else:
2795 managing = lambda f: f in man1
2800 managing = lambda f: f in man1
2796
2801
2797 modified, added, removed = [], [], []
2802 modified, added, removed = [], [], []
2798 for f in self._files:
2803 for f in self._files:
2799 if not managing(f):
2804 if not managing(f):
2800 added.append(f)
2805 added.append(f)
2801 elif self[f]:
2806 elif self[f]:
2802 modified.append(f)
2807 modified.append(f)
2803 else:
2808 else:
2804 removed.append(f)
2809 removed.append(f)
2805
2810
2806 return scmutil.status(modified, added, removed, [], [], [], [])
2811 return scmutil.status(modified, added, removed, [], [], [], [])
2807
2812
2808
2813
2809 class memfilectx(committablefilectx):
2814 class memfilectx(committablefilectx):
2810 """memfilectx represents an in-memory file to commit.
2815 """memfilectx represents an in-memory file to commit.
2811
2816
2812 See memctx and committablefilectx for more details.
2817 See memctx and committablefilectx for more details.
2813 """
2818 """
2814
2819
2815 def __init__(
2820 def __init__(
2816 self,
2821 self,
2817 repo,
2822 repo,
2818 changectx,
2823 changectx,
2819 path,
2824 path,
2820 data,
2825 data,
2821 islink=False,
2826 islink=False,
2822 isexec=False,
2827 isexec=False,
2823 copysource=None,
2828 copysource=None,
2824 ):
2829 ):
2825 """
2830 """
2826 path is the normalized file path relative to repository root.
2831 path is the normalized file path relative to repository root.
2827 data is the file content as a string.
2832 data is the file content as a string.
2828 islink is True if the file is a symbolic link.
2833 islink is True if the file is a symbolic link.
2829 isexec is True if the file is executable.
2834 isexec is True if the file is executable.
2830 copied is the source file path if current file was copied in the
2835 copied is the source file path if current file was copied in the
2831 revision being committed, or None."""
2836 revision being committed, or None."""
2832 super(memfilectx, self).__init__(repo, path, None, changectx)
2837 super(memfilectx, self).__init__(repo, path, None, changectx)
2833 self._data = data
2838 self._data = data
2834 if islink:
2839 if islink:
2835 self._flags = b'l'
2840 self._flags = b'l'
2836 elif isexec:
2841 elif isexec:
2837 self._flags = b'x'
2842 self._flags = b'x'
2838 else:
2843 else:
2839 self._flags = b''
2844 self._flags = b''
2840 self._copysource = copysource
2845 self._copysource = copysource
2841
2846
2842 def copysource(self):
2847 def copysource(self):
2843 return self._copysource
2848 return self._copysource
2844
2849
2845 def cmp(self, fctx):
2850 def cmp(self, fctx):
2846 return self.data() != fctx.data()
2851 return self.data() != fctx.data()
2847
2852
2848 def data(self):
2853 def data(self):
2849 return self._data
2854 return self._data
2850
2855
2851 def remove(self, ignoremissing=False):
2856 def remove(self, ignoremissing=False):
2852 """wraps unlink for a repo's working directory"""
2857 """wraps unlink for a repo's working directory"""
2853 # need to figure out what to do here
2858 # need to figure out what to do here
2854 del self._changectx[self._path]
2859 del self._changectx[self._path]
2855
2860
2856 def write(self, data, flags, **kwargs):
2861 def write(self, data, flags, **kwargs):
2857 """wraps repo.wwrite"""
2862 """wraps repo.wwrite"""
2858 self._data = data
2863 self._data = data
2859
2864
2860
2865
2861 class metadataonlyctx(committablectx):
2866 class metadataonlyctx(committablectx):
2862 """Like memctx but it's reusing the manifest of different commit.
2867 """Like memctx but it's reusing the manifest of different commit.
2863 Intended to be used by lightweight operations that are creating
2868 Intended to be used by lightweight operations that are creating
2864 metadata-only changes.
2869 metadata-only changes.
2865
2870
2866 Revision information is supplied at initialization time. 'repo' is the
2871 Revision information is supplied at initialization time. 'repo' is the
2867 current localrepo, 'ctx' is original revision which manifest we're reuisng
2872 current localrepo, 'ctx' is original revision which manifest we're reuisng
2868 'parents' is a sequence of two parent revisions identifiers (pass None for
2873 'parents' is a sequence of two parent revisions identifiers (pass None for
2869 every missing parent), 'text' is the commit.
2874 every missing parent), 'text' is the commit.
2870
2875
2871 user receives the committer name and defaults to current repository
2876 user receives the committer name and defaults to current repository
2872 username, date is the commit date in any format supported by
2877 username, date is the commit date in any format supported by
2873 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2878 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2874 metadata or is left empty.
2879 metadata or is left empty.
2875 """
2880 """
2876
2881
2877 def __init__(
2882 def __init__(
2878 self,
2883 self,
2879 repo,
2884 repo,
2880 originalctx,
2885 originalctx,
2881 parents=None,
2886 parents=None,
2882 text=None,
2887 text=None,
2883 user=None,
2888 user=None,
2884 date=None,
2889 date=None,
2885 extra=None,
2890 extra=None,
2886 editor=False,
2891 editor=False,
2887 ):
2892 ):
2888 if text is None:
2893 if text is None:
2889 text = originalctx.description()
2894 text = originalctx.description()
2890 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2895 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2891 self._rev = None
2896 self._rev = None
2892 self._node = None
2897 self._node = None
2893 self._originalctx = originalctx
2898 self._originalctx = originalctx
2894 self._manifestnode = originalctx.manifestnode()
2899 self._manifestnode = originalctx.manifestnode()
2895 if parents is None:
2900 if parents is None:
2896 parents = originalctx.parents()
2901 parents = originalctx.parents()
2897 else:
2902 else:
2898 parents = [repo[p] for p in parents if p is not None]
2903 parents = [repo[p] for p in parents if p is not None]
2899 parents = parents[:]
2904 parents = parents[:]
2900 while len(parents) < 2:
2905 while len(parents) < 2:
2901 parents.append(repo[nullid])
2906 parents.append(repo[nullid])
2902 p1, p2 = self._parents = parents
2907 p1, p2 = self._parents = parents
2903
2908
2904 # sanity check to ensure that the reused manifest parents are
2909 # sanity check to ensure that the reused manifest parents are
2905 # manifests of our commit parents
2910 # manifests of our commit parents
2906 mp1, mp2 = self.manifestctx().parents
2911 mp1, mp2 = self.manifestctx().parents
2907 if p1 != nullid and p1.manifestnode() != mp1:
2912 if p1 != nullid and p1.manifestnode() != mp1:
2908 raise RuntimeError(
2913 raise RuntimeError(
2909 r"can't reuse the manifest: its p1 "
2914 r"can't reuse the manifest: its p1 "
2910 r"doesn't match the new ctx p1"
2915 r"doesn't match the new ctx p1"
2911 )
2916 )
2912 if p2 != nullid and p2.manifestnode() != mp2:
2917 if p2 != nullid and p2.manifestnode() != mp2:
2913 raise RuntimeError(
2918 raise RuntimeError(
2914 r"can't reuse the manifest: "
2919 r"can't reuse the manifest: "
2915 r"its p2 doesn't match the new ctx p2"
2920 r"its p2 doesn't match the new ctx p2"
2916 )
2921 )
2917
2922
2918 self._files = originalctx.files()
2923 self._files = originalctx.files()
2919 self.substate = {}
2924 self.substate = {}
2920
2925
2921 if editor:
2926 if editor:
2922 self._text = editor(self._repo, self, [])
2927 self._text = editor(self._repo, self, [])
2923 self._repo.savecommitmessage(self._text)
2928 self._repo.savecommitmessage(self._text)
2924
2929
2925 def manifestnode(self):
2930 def manifestnode(self):
2926 return self._manifestnode
2931 return self._manifestnode
2927
2932
2928 @property
2933 @property
2929 def _manifestctx(self):
2934 def _manifestctx(self):
2930 return self._repo.manifestlog[self._manifestnode]
2935 return self._repo.manifestlog[self._manifestnode]
2931
2936
2932 def filectx(self, path, filelog=None):
2937 def filectx(self, path, filelog=None):
2933 return self._originalctx.filectx(path, filelog=filelog)
2938 return self._originalctx.filectx(path, filelog=filelog)
2934
2939
2935 def commit(self):
2940 def commit(self):
2936 """commit context to the repo"""
2941 """commit context to the repo"""
2937 return self._repo.commitctx(self)
2942 return self._repo.commitctx(self)
2938
2943
2939 @property
2944 @property
2940 def _manifest(self):
2945 def _manifest(self):
2941 return self._originalctx.manifest()
2946 return self._originalctx.manifest()
2942
2947
2943 @propertycache
2948 @propertycache
2944 def _status(self):
2949 def _status(self):
2945 """Calculate exact status from ``files`` specified in the ``origctx``
2950 """Calculate exact status from ``files`` specified in the ``origctx``
2946 and parents manifests.
2951 and parents manifests.
2947 """
2952 """
2948 man1 = self.p1().manifest()
2953 man1 = self.p1().manifest()
2949 p2 = self._parents[1]
2954 p2 = self._parents[1]
2950 # "1 < len(self._parents)" can't be used for checking
2955 # "1 < len(self._parents)" can't be used for checking
2951 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2956 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2952 # explicitly initialized by the list, of which length is 2.
2957 # explicitly initialized by the list, of which length is 2.
2953 if p2.node() != nullid:
2958 if p2.node() != nullid:
2954 man2 = p2.manifest()
2959 man2 = p2.manifest()
2955 managing = lambda f: f in man1 or f in man2
2960 managing = lambda f: f in man1 or f in man2
2956 else:
2961 else:
2957 managing = lambda f: f in man1
2962 managing = lambda f: f in man1
2958
2963
2959 modified, added, removed = [], [], []
2964 modified, added, removed = [], [], []
2960 for f in self._files:
2965 for f in self._files:
2961 if not managing(f):
2966 if not managing(f):
2962 added.append(f)
2967 added.append(f)
2963 elif f in self:
2968 elif f in self:
2964 modified.append(f)
2969 modified.append(f)
2965 else:
2970 else:
2966 removed.append(f)
2971 removed.append(f)
2967
2972
2968 return scmutil.status(modified, added, removed, [], [], [], [])
2973 return scmutil.status(modified, added, removed, [], [], [], [])
2969
2974
2970
2975
2971 class arbitraryfilectx(object):
2976 class arbitraryfilectx(object):
2972 """Allows you to use filectx-like functions on a file in an arbitrary
2977 """Allows you to use filectx-like functions on a file in an arbitrary
2973 location on disk, possibly not in the working directory.
2978 location on disk, possibly not in the working directory.
2974 """
2979 """
2975
2980
2976 def __init__(self, path, repo=None):
2981 def __init__(self, path, repo=None):
2977 # Repo is optional because contrib/simplemerge uses this class.
2982 # Repo is optional because contrib/simplemerge uses this class.
2978 self._repo = repo
2983 self._repo = repo
2979 self._path = path
2984 self._path = path
2980
2985
2981 def cmp(self, fctx):
2986 def cmp(self, fctx):
2982 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2987 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2983 # path if either side is a symlink.
2988 # path if either side is a symlink.
2984 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2989 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2985 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2990 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2986 # Add a fast-path for merge if both sides are disk-backed.
2991 # Add a fast-path for merge if both sides are disk-backed.
2987 # Note that filecmp uses the opposite return values (True if same)
2992 # Note that filecmp uses the opposite return values (True if same)
2988 # from our cmp functions (True if different).
2993 # from our cmp functions (True if different).
2989 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2994 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2990 return self.data() != fctx.data()
2995 return self.data() != fctx.data()
2991
2996
2992 def path(self):
2997 def path(self):
2993 return self._path
2998 return self._path
2994
2999
2995 def flags(self):
3000 def flags(self):
2996 return b''
3001 return b''
2997
3002
2998 def data(self):
3003 def data(self):
2999 return util.readfile(self._path)
3004 return util.readfile(self._path)
3000
3005
3001 def decodeddata(self):
3006 def decodeddata(self):
3002 with open(self._path, b"rb") as f:
3007 with open(self._path, b"rb") as f:
3003 return f.read()
3008 return f.read()
3004
3009
3005 def remove(self):
3010 def remove(self):
3006 util.unlink(self._path)
3011 util.unlink(self._path)
3007
3012
3008 def write(self, data, flags, **kwargs):
3013 def write(self, data, flags, **kwargs):
3009 assert not flags
3014 assert not flags
3010 with open(self._path, b"wb") as f:
3015 with open(self._path, b"wb") as f:
3011 f.write(data)
3016 f.write(data)
@@ -1,154 +1,153 b''
1 ===================================
1 ===================================
2 Test repository filtering avoidance
2 Test repository filtering avoidance
3 ===================================
3 ===================================
4
4
5 This test file is a bit special as it does not check features, but performance-related internal code paths.
5 This test file is a bit special as it does not check features, but performance-related internal code paths.
6
6
7 Right now, filtering a repository comes with a cost that might be significant.
7 Right now, filtering a repository comes with a cost that might be significant.
8 Until this gets better, there are various operations that try hard not to trigger
8 Until this gets better, there are various operations that try hard not to trigger
9 a filtering computation. This test file makes sure we don't reintroduce code that triggers the filtering for these operations:
9 a filtering computation. This test file makes sure we don't reintroduce code that triggers the filtering for these operations:
10
10
11 Setup
11 Setup
12 -----
12 -----
13 $ hg init test-repo
13 $ hg init test-repo
14 $ cd test-repo
14 $ cd test-repo
15 $ echo "some line" > z
15 $ echo "some line" > z
16 $ echo a > a
16 $ echo a > a
17 $ hg commit -Am a
17 $ hg commit -Am a
18 adding a
18 adding a
19 adding z
19 adding z
20 $ echo "in a" >> z
20 $ echo "in a" >> z
21 $ echo b > b
21 $ echo b > b
22 $ hg commit -Am b
22 $ hg commit -Am b
23 adding b
23 adding b
24 $ echo "file" >> z
24 $ echo "file" >> z
25 $ echo c > c
25 $ echo c > c
26 $ hg commit -Am c
26 $ hg commit -Am c
27 adding c
27 adding c
28 $ hg rm a
28 $ hg rm a
29 $ echo c1 > c
29 $ echo c1 > c
30 $ hg add c
30 $ hg add c
31 c already tracked!
31 c already tracked!
32 $ echo d > d
32 $ echo d > d
33 $ hg add d
33 $ hg add d
34 $ rm b
34 $ rm b
35
35
36 $ cat << EOF >> $HGRCPATH
36 $ cat << EOF >> $HGRCPATH
37 > [devel]
37 > [devel]
38 > debug.repo-filters = yes
38 > debug.repo-filters = yes
39 > [ui]
39 > [ui]
40 > debug = yes
40 > debug = yes
41 > EOF
41 > EOF
42
42
43
43
44 tests
44 tests
45 -----
45 -----
46
46
47 Getting the node of `null`
47 Getting the node of `null`
48
48
49 $ hg log -r null -T "{node}\n"
49 $ hg log -r null -T "{node}\n"
50 0000000000000000000000000000000000000000
50 0000000000000000000000000000000000000000
51
51
52 Getting basic changeset information about `null`
52 Getting basic changeset information about `null`
53
53
54 $ hg log -r null -T "{node}\n{date}\n"
54 $ hg log -r null -T "{node}\n{date}\n"
55 0000000000000000000000000000000000000000
55 0000000000000000000000000000000000000000
56 0.00
56 0.00
57
57
58 Getting status of null
58 Getting status of null
59
59
60 $ hg status --change null
60 $ hg status --change null
61 debug.filters: computing revision filter for "visible"
62
61
63 Getting status of working copy
62 Getting status of working copy
64
63
65 $ hg status
64 $ hg status
66 debug.filters: computing revision filter for "visible"
65 debug.filters: computing revision filter for "visible"
67 M c
66 M c
68 A d
67 A d
69 R a
68 R a
70 ! b
69 ! b
71
70
72 Getting data about the working copy parent
71 Getting data about the working copy parent
73
72
74 $ hg log -r '.' -T "{node}\n{date}\n"
73 $ hg log -r '.' -T "{node}\n{date}\n"
75 debug.filters: computing revision filter for "visible"
74 debug.filters: computing revision filter for "visible"
76 c2932ca7786be30b67154d541a8764fae5532261
75 c2932ca7786be30b67154d541a8764fae5532261
77 0.00
76 0.00
78
77
79 Getting working copy diff
78 Getting working copy diff
80
79
81 $ hg diff
80 $ hg diff
82 debug.filters: computing revision filter for "visible"
81 debug.filters: computing revision filter for "visible"
83 diff -r c2932ca7786be30b67154d541a8764fae5532261 a
82 diff -r c2932ca7786be30b67154d541a8764fae5532261 a
84 --- a/a Thu Jan 01 00:00:00 1970 +0000
83 --- a/a Thu Jan 01 00:00:00 1970 +0000
85 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
84 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000
86 @@ -1,1 +0,0 @@
85 @@ -1,1 +0,0 @@
87 -a
86 -a
88 diff -r c2932ca7786be30b67154d541a8764fae5532261 c
87 diff -r c2932ca7786be30b67154d541a8764fae5532261 c
89 --- a/c Thu Jan 01 00:00:00 1970 +0000
88 --- a/c Thu Jan 01 00:00:00 1970 +0000
90 +++ b/c Thu Jan 01 00:00:00 1970 +0000
89 +++ b/c Thu Jan 01 00:00:00 1970 +0000
91 @@ -1,1 +1,1 @@
90 @@ -1,1 +1,1 @@
92 -c
91 -c
93 +c1
92 +c1
94 diff -r c2932ca7786be30b67154d541a8764fae5532261 d
93 diff -r c2932ca7786be30b67154d541a8764fae5532261 d
95 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
94 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
96 +++ b/d Thu Jan 01 00:00:00 1970 +0000
95 +++ b/d Thu Jan 01 00:00:00 1970 +0000
97 @@ -0,0 +1,1 @@
96 @@ -0,0 +1,1 @@
98 +d
97 +d
99 $ hg diff --change .
98 $ hg diff --change .
100 debug.filters: computing revision filter for "visible"
99 debug.filters: computing revision filter for "visible"
101 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
100 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
102 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
101 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
103 +++ b/c Thu Jan 01 00:00:00 1970 +0000
102 +++ b/c Thu Jan 01 00:00:00 1970 +0000
104 @@ -0,0 +1,1 @@
103 @@ -0,0 +1,1 @@
105 +c
104 +c
106 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
105 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
107 --- a/z Thu Jan 01 00:00:00 1970 +0000
106 --- a/z Thu Jan 01 00:00:00 1970 +0000
108 +++ b/z Thu Jan 01 00:00:00 1970 +0000
107 +++ b/z Thu Jan 01 00:00:00 1970 +0000
109 @@ -1,2 +1,3 @@
108 @@ -1,2 +1,3 @@
110 some line
109 some line
111 in a
110 in a
112 +file
111 +file
113
112
114 exporting the current changeset
113 exporting the current changeset
115
114
116 $ hg export
115 $ hg export
117 debug.filters: computing revision filter for "visible"
116 debug.filters: computing revision filter for "visible"
118 exporting patch:
117 exporting patch:
119 # HG changeset patch
118 # HG changeset patch
120 # User test
119 # User test
121 # Date 0 0
120 # Date 0 0
122 # Thu Jan 01 00:00:00 1970 +0000
121 # Thu Jan 01 00:00:00 1970 +0000
123 # Node ID c2932ca7786be30b67154d541a8764fae5532261
122 # Node ID c2932ca7786be30b67154d541a8764fae5532261
124 # Parent 05293e5dd8d1ae4f84a8520a11c6f97cad26deca
123 # Parent 05293e5dd8d1ae4f84a8520a11c6f97cad26deca
125 c
124 c
126
125
127 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
126 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 c
128 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
127 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
129 +++ b/c Thu Jan 01 00:00:00 1970 +0000
128 +++ b/c Thu Jan 01 00:00:00 1970 +0000
130 @@ -0,0 +1,1 @@
129 @@ -0,0 +1,1 @@
131 +c
130 +c
132 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
131 diff -r 05293e5dd8d1ae4f84a8520a11c6f97cad26deca -r c2932ca7786be30b67154d541a8764fae5532261 z
133 --- a/z Thu Jan 01 00:00:00 1970 +0000
132 --- a/z Thu Jan 01 00:00:00 1970 +0000
134 +++ b/z Thu Jan 01 00:00:00 1970 +0000
133 +++ b/z Thu Jan 01 00:00:00 1970 +0000
135 @@ -1,2 +1,3 @@
134 @@ -1,2 +1,3 @@
136 some line
135 some line
137 in a
136 in a
138 +file
137 +file
139
138
140 using annotate
139 using annotate
141
140
142 - file with a single change
141 - file with a single change
143
142
144 $ hg annotate a
143 $ hg annotate a
145 debug.filters: computing revision filter for "visible"
144 debug.filters: computing revision filter for "visible"
146 0: a
145 0: a
147
146
148 - file with multiple change
147 - file with multiple change
149
148
150 $ hg annotate z
149 $ hg annotate z
151 debug.filters: computing revision filter for "visible"
150 debug.filters: computing revision filter for "visible"
152 0: some line
151 0: some line
153 1: in a
152 1: in a
154 2: file
153 2: file
General Comments 0
You need to be logged in to leave comments. Login now