##// END OF EJS Templates
changectx: add a "maybe_filtered" attribute...
marmoute -
r44148:8a37203a default draft
parent child Browse files
Show More
@@ -1,3000 +1,3011 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
# Shorthand for the lazily-evaluated cached-property decorator used throughout
# this module (computed once per instance, then stored on the instance).
propertycache = util.propertycache
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short (abbreviated) hex form of the node id.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal when they are the same kind of context and
        # refer to the same revision; subclasses are expected to set _rev.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Subrepository state (.hgsub/.hgsubstate) for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        # Only one real parent: the second parent is the null revision.
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever manifest
        # data is already cached on this instance (full manifest, then the
        # manifest delta) before falling back to a manifestlog lookup.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        # Build a matcher rooted at the repo, bound to this context.
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
473
473
474
474
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog
        # must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        # A context is falsy only for the null revision.
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        # Skip the (potentially expensive) filtered-changelog indirection when
        # the revision is already known not to be filtered.
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        # Files touched by this changeset that were neither added nor removed.
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatiblity mode and there is not data in the
            # changeset), we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies

    def description(self):
        return self._changeset.description
624
635
625 def branch(self):
636 def branch(self):
626 return encoding.tolocal(self._changeset.extra.get(b"branch"))
637 return encoding.tolocal(self._changeset.extra.get(b"branch"))
627
638
628 def closesbranch(self):
639 def closesbranch(self):
629 return b'close' in self._changeset.extra
640 return b'close' in self._changeset.extra
630
641
631 def extra(self):
642 def extra(self):
632 """Return a dict of extra information."""
643 """Return a dict of extra information."""
633 return self._changeset.extra
644 return self._changeset.extra
634
645
635 def tags(self):
646 def tags(self):
636 """Return a list of byte tag names"""
647 """Return a list of byte tag names"""
637 return self._repo.nodetags(self._node)
648 return self._repo.nodetags(self._node)
638
649
639 def bookmarks(self):
650 def bookmarks(self):
640 """Return a list of byte bookmark names."""
651 """Return a list of byte bookmark names."""
641 return self._repo.nodebookmarks(self._node)
652 return self._repo.nodebookmarks(self._node)
642
653
643 def phase(self):
654 def phase(self):
644 return self._repo._phasecache.phase(self._repo, self._rev)
655 return self._repo._phasecache.phase(self._repo, self._rev)
645
656
646 def hidden(self):
657 def hidden(self):
647 return self._rev in repoview.filterrevs(self._repo, b'visible')
658 return self._rev in repoview.filterrevs(self._repo, b'visible')
648
659
649 def isinmemory(self):
660 def isinmemory(self):
650 return False
661 return False
651
662
652 def children(self):
663 def children(self):
653 """return list of changectx contexts for each child changeset.
664 """return list of changectx contexts for each child changeset.
654
665
655 This returns only the immediate child changesets. Use descendants() to
666 This returns only the immediate child changesets. Use descendants() to
656 recursively walk children.
667 recursively walk children.
657 """
668 """
658 c = self._repo.changelog.children(self._node)
669 c = self._repo.changelog.children(self._node)
659 return [self._repo[x] for x in c]
670 return [self._repo[x] for x in c]
660
671
661 def ancestors(self):
672 def ancestors(self):
662 for a in self._repo.changelog.ancestors([self._rev]):
673 for a in self._repo.changelog.ancestors([self._rev]):
663 yield self._repo[a]
674 yield self._repo[a]
664
675
665 def descendants(self):
676 def descendants(self):
666 """Recursively yield all children of the changeset.
677 """Recursively yield all children of the changeset.
667
678
668 For just the immediate children, use children()
679 For just the immediate children, use children()
669 """
680 """
670 for d in self._repo.changelog.descendants([self._rev]):
681 for d in self._repo.changelog.descendants([self._rev]):
671 yield self._repo[d]
682 yield self._repo[d]
672
683
673 def filectx(self, path, fileid=None, filelog=None):
684 def filectx(self, path, fileid=None, filelog=None):
674 """get a file context from this changeset"""
685 """get a file context from this changeset"""
675 if fileid is None:
686 if fileid is None:
676 fileid = self.filenode(path)
687 fileid = self.filenode(path)
677 return filectx(
688 return filectx(
678 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
689 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
679 )
690 )
680
691
681 def ancestor(self, c2, warn=False):
692 def ancestor(self, c2, warn=False):
682 """return the "best" ancestor context of self and c2
693 """return the "best" ancestor context of self and c2
683
694
684 If there are multiple candidates, it will show a message and check
695 If there are multiple candidates, it will show a message and check
685 merge.preferancestor configuration before falling back to the
696 merge.preferancestor configuration before falling back to the
686 revlog ancestor."""
697 revlog ancestor."""
687 # deal with workingctxs
698 # deal with workingctxs
688 n2 = c2._node
699 n2 = c2._node
689 if n2 is None:
700 if n2 is None:
690 n2 = c2._parents[0]._node
701 n2 = c2._parents[0]._node
691 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
702 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
692 if not cahs:
703 if not cahs:
693 anc = nullid
704 anc = nullid
694 elif len(cahs) == 1:
705 elif len(cahs) == 1:
695 anc = cahs[0]
706 anc = cahs[0]
696 else:
707 else:
697 # experimental config: merge.preferancestor
708 # experimental config: merge.preferancestor
698 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
709 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
699 try:
710 try:
700 ctx = scmutil.revsymbol(self._repo, r)
711 ctx = scmutil.revsymbol(self._repo, r)
701 except error.RepoLookupError:
712 except error.RepoLookupError:
702 continue
713 continue
703 anc = ctx.node()
714 anc = ctx.node()
704 if anc in cahs:
715 if anc in cahs:
705 break
716 break
706 else:
717 else:
707 anc = self._repo.changelog.ancestor(self._node, n2)
718 anc = self._repo.changelog.ancestor(self._node, n2)
708 if warn:
719 if warn:
709 self._repo.ui.status(
720 self._repo.ui.status(
710 (
721 (
711 _(b"note: using %s as ancestor of %s and %s\n")
722 _(b"note: using %s as ancestor of %s and %s\n")
712 % (short(anc), short(self._node), short(n2))
723 % (short(anc), short(self._node), short(n2))
713 )
724 )
714 + b''.join(
725 + b''.join(
715 _(
726 _(
716 b" alternatively, use --config "
727 b" alternatively, use --config "
717 b"merge.preferancestor=%s\n"
728 b"merge.preferancestor=%s\n"
718 )
729 )
719 % short(n)
730 % short(n)
720 for n in sorted(cahs)
731 for n in sorted(cahs)
721 if n != anc
732 if n != anc
722 )
733 )
723 )
734 )
724 return self._repo[anc]
735 return self._repo[anc]
725
736
726 def isancestorof(self, other):
737 def isancestorof(self, other):
727 """True if this changeset is an ancestor of other"""
738 """True if this changeset is an ancestor of other"""
728 return self._repo.changelog.isancestorrev(self._rev, other._rev)
739 return self._repo.changelog.isancestorrev(self._rev, other._rev)
729
740
730 def walk(self, match):
741 def walk(self, match):
731 '''Generates matching file names.'''
742 '''Generates matching file names.'''
732
743
733 # Wrap match.bad method to have message with nodeid
744 # Wrap match.bad method to have message with nodeid
734 def bad(fn, msg):
745 def bad(fn, msg):
735 # The manifest doesn't know about subrepos, so don't complain about
746 # The manifest doesn't know about subrepos, so don't complain about
736 # paths into valid subrepos.
747 # paths into valid subrepos.
737 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
748 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
738 return
749 return
739 match.bad(fn, _(b'no such file in rev %s') % self)
750 match.bad(fn, _(b'no such file in rev %s') % self)
740
751
741 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
752 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
742 return self._manifest.walk(m)
753 return self._manifest.walk(m)
743
754
744 def matches(self, match):
755 def matches(self, match):
745 return self.walk(match)
756 return self.walk(match)
746
757
747
758
748 class basefilectx(object):
759 class basefilectx(object):
749 """A filecontext object represents the common logic for its children:
760 """A filecontext object represents the common logic for its children:
750 filectx: read-only access to a filerevision that is already present
761 filectx: read-only access to a filerevision that is already present
751 in the repo,
762 in the repo,
752 workingfilectx: a filecontext that represents files from the working
763 workingfilectx: a filecontext that represents files from the working
753 directory,
764 directory,
754 memfilectx: a filecontext that represents files in-memory,
765 memfilectx: a filecontext that represents files in-memory,
755 """
766 """
756
767
757 @propertycache
768 @propertycache
758 def _filelog(self):
769 def _filelog(self):
759 return self._repo.file(self._path)
770 return self._repo.file(self._path)
760
771
761 @propertycache
772 @propertycache
762 def _changeid(self):
773 def _changeid(self):
763 if '_changectx' in self.__dict__:
774 if '_changectx' in self.__dict__:
764 return self._changectx.rev()
775 return self._changectx.rev()
765 elif '_descendantrev' in self.__dict__:
776 elif '_descendantrev' in self.__dict__:
766 # this file context was created from a revision with a known
777 # this file context was created from a revision with a known
767 # descendant, we can (lazily) correct for linkrev aliases
778 # descendant, we can (lazily) correct for linkrev aliases
768 return self._adjustlinkrev(self._descendantrev)
779 return self._adjustlinkrev(self._descendantrev)
769 else:
780 else:
770 return self._filelog.linkrev(self._filerev)
781 return self._filelog.linkrev(self._filerev)
771
782
772 @propertycache
783 @propertycache
773 def _filenode(self):
784 def _filenode(self):
774 if '_fileid' in self.__dict__:
785 if '_fileid' in self.__dict__:
775 return self._filelog.lookup(self._fileid)
786 return self._filelog.lookup(self._fileid)
776 else:
787 else:
777 return self._changectx.filenode(self._path)
788 return self._changectx.filenode(self._path)
778
789
779 @propertycache
790 @propertycache
780 def _filerev(self):
791 def _filerev(self):
781 return self._filelog.rev(self._filenode)
792 return self._filelog.rev(self._filenode)
782
793
783 @propertycache
794 @propertycache
784 def _repopath(self):
795 def _repopath(self):
785 return self._path
796 return self._path
786
797
787 def __nonzero__(self):
798 def __nonzero__(self):
788 try:
799 try:
789 self._filenode
800 self._filenode
790 return True
801 return True
791 except error.LookupError:
802 except error.LookupError:
792 # file is missing
803 # file is missing
793 return False
804 return False
794
805
795 __bool__ = __nonzero__
806 __bool__ = __nonzero__
796
807
797 def __bytes__(self):
808 def __bytes__(self):
798 try:
809 try:
799 return b"%s@%s" % (self.path(), self._changectx)
810 return b"%s@%s" % (self.path(), self._changectx)
800 except error.LookupError:
811 except error.LookupError:
801 return b"%s@???" % self.path()
812 return b"%s@???" % self.path()
802
813
803 __str__ = encoding.strmethod(__bytes__)
814 __str__ = encoding.strmethod(__bytes__)
804
815
805 def __repr__(self):
816 def __repr__(self):
806 return "<%s %s>" % (type(self).__name__, str(self))
817 return "<%s %s>" % (type(self).__name__, str(self))
807
818
808 def __hash__(self):
819 def __hash__(self):
809 try:
820 try:
810 return hash((self._path, self._filenode))
821 return hash((self._path, self._filenode))
811 except AttributeError:
822 except AttributeError:
812 return id(self)
823 return id(self)
813
824
814 def __eq__(self, other):
825 def __eq__(self, other):
815 try:
826 try:
816 return (
827 return (
817 type(self) == type(other)
828 type(self) == type(other)
818 and self._path == other._path
829 and self._path == other._path
819 and self._filenode == other._filenode
830 and self._filenode == other._filenode
820 )
831 )
821 except AttributeError:
832 except AttributeError:
822 return False
833 return False
823
834
824 def __ne__(self, other):
835 def __ne__(self, other):
825 return not (self == other)
836 return not (self == other)
826
837
827 def filerev(self):
838 def filerev(self):
828 return self._filerev
839 return self._filerev
829
840
830 def filenode(self):
841 def filenode(self):
831 return self._filenode
842 return self._filenode
832
843
833 @propertycache
844 @propertycache
834 def _flags(self):
845 def _flags(self):
835 return self._changectx.flags(self._path)
846 return self._changectx.flags(self._path)
836
847
837 def flags(self):
848 def flags(self):
838 return self._flags
849 return self._flags
839
850
840 def filelog(self):
851 def filelog(self):
841 return self._filelog
852 return self._filelog
842
853
843 def rev(self):
854 def rev(self):
844 return self._changeid
855 return self._changeid
845
856
846 def linkrev(self):
857 def linkrev(self):
847 return self._filelog.linkrev(self._filerev)
858 return self._filelog.linkrev(self._filerev)
848
859
849 def node(self):
860 def node(self):
850 return self._changectx.node()
861 return self._changectx.node()
851
862
852 def hex(self):
863 def hex(self):
853 return self._changectx.hex()
864 return self._changectx.hex()
854
865
855 def user(self):
866 def user(self):
856 return self._changectx.user()
867 return self._changectx.user()
857
868
858 def date(self):
869 def date(self):
859 return self._changectx.date()
870 return self._changectx.date()
860
871
861 def files(self):
872 def files(self):
862 return self._changectx.files()
873 return self._changectx.files()
863
874
864 def description(self):
875 def description(self):
865 return self._changectx.description()
876 return self._changectx.description()
866
877
867 def branch(self):
878 def branch(self):
868 return self._changectx.branch()
879 return self._changectx.branch()
869
880
870 def extra(self):
881 def extra(self):
871 return self._changectx.extra()
882 return self._changectx.extra()
872
883
873 def phase(self):
884 def phase(self):
874 return self._changectx.phase()
885 return self._changectx.phase()
875
886
876 def phasestr(self):
887 def phasestr(self):
877 return self._changectx.phasestr()
888 return self._changectx.phasestr()
878
889
879 def obsolete(self):
890 def obsolete(self):
880 return self._changectx.obsolete()
891 return self._changectx.obsolete()
881
892
882 def instabilities(self):
893 def instabilities(self):
883 return self._changectx.instabilities()
894 return self._changectx.instabilities()
884
895
885 def manifest(self):
896 def manifest(self):
886 return self._changectx.manifest()
897 return self._changectx.manifest()
887
898
888 def changectx(self):
899 def changectx(self):
889 return self._changectx
900 return self._changectx
890
901
891 def renamed(self):
902 def renamed(self):
892 return self._copied
903 return self._copied
893
904
894 def copysource(self):
905 def copysource(self):
895 return self._copied and self._copied[0]
906 return self._copied and self._copied[0]
896
907
897 def repo(self):
908 def repo(self):
898 return self._repo
909 return self._repo
899
910
900 def size(self):
911 def size(self):
901 return len(self.data())
912 return len(self.data())
902
913
903 def path(self):
914 def path(self):
904 return self._path
915 return self._path
905
916
906 def isbinary(self):
917 def isbinary(self):
907 try:
918 try:
908 return stringutil.binary(self.data())
919 return stringutil.binary(self.data())
909 except IOError:
920 except IOError:
910 return False
921 return False
911
922
912 def isexec(self):
923 def isexec(self):
913 return b'x' in self.flags()
924 return b'x' in self.flags()
914
925
915 def islink(self):
926 def islink(self):
916 return b'l' in self.flags()
927 return b'l' in self.flags()
917
928
918 def isabsent(self):
929 def isabsent(self):
919 """whether this filectx represents a file not in self._changectx
930 """whether this filectx represents a file not in self._changectx
920
931
921 This is mainly for merge code to detect change/delete conflicts. This is
932 This is mainly for merge code to detect change/delete conflicts. This is
922 expected to be True for all subclasses of basectx."""
933 expected to be True for all subclasses of basectx."""
923 return False
934 return False
924
935
925 _customcmp = False
936 _customcmp = False
926
937
927 def cmp(self, fctx):
938 def cmp(self, fctx):
928 """compare with other file context
939 """compare with other file context
929
940
930 returns True if different than fctx.
941 returns True if different than fctx.
931 """
942 """
932 if fctx._customcmp:
943 if fctx._customcmp:
933 return fctx.cmp(self)
944 return fctx.cmp(self)
934
945
935 if self._filenode is None:
946 if self._filenode is None:
936 raise error.ProgrammingError(
947 raise error.ProgrammingError(
937 b'filectx.cmp() must be reimplemented if not backed by revlog'
948 b'filectx.cmp() must be reimplemented if not backed by revlog'
938 )
949 )
939
950
940 if fctx._filenode is None:
951 if fctx._filenode is None:
941 if self._repo._encodefilterpats:
952 if self._repo._encodefilterpats:
942 # can't rely on size() because wdir content may be decoded
953 # can't rely on size() because wdir content may be decoded
943 return self._filelog.cmp(self._filenode, fctx.data())
954 return self._filelog.cmp(self._filenode, fctx.data())
944 if self.size() - 4 == fctx.size():
955 if self.size() - 4 == fctx.size():
945 # size() can match:
956 # size() can match:
946 # if file data starts with '\1\n', empty metadata block is
957 # if file data starts with '\1\n', empty metadata block is
947 # prepended, which adds 4 bytes to filelog.size().
958 # prepended, which adds 4 bytes to filelog.size().
948 return self._filelog.cmp(self._filenode, fctx.data())
959 return self._filelog.cmp(self._filenode, fctx.data())
949 if self.size() == fctx.size():
960 if self.size() == fctx.size():
950 # size() matches: need to compare content
961 # size() matches: need to compare content
951 return self._filelog.cmp(self._filenode, fctx.data())
962 return self._filelog.cmp(self._filenode, fctx.data())
952
963
953 # size() differs
964 # size() differs
954 return True
965 return True
955
966
956 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
967 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
957 """return the first ancestor of <srcrev> introducing <fnode>
968 """return the first ancestor of <srcrev> introducing <fnode>
958
969
959 If the linkrev of the file revision does not point to an ancestor of
970 If the linkrev of the file revision does not point to an ancestor of
960 srcrev, we'll walk down the ancestors until we find one introducing
971 srcrev, we'll walk down the ancestors until we find one introducing
961 this file revision.
972 this file revision.
962
973
963 :srcrev: the changeset revision we search ancestors from
974 :srcrev: the changeset revision we search ancestors from
964 :inclusive: if true, the src revision will also be checked
975 :inclusive: if true, the src revision will also be checked
965 :stoprev: an optional revision to stop the walk at. If no introduction
976 :stoprev: an optional revision to stop the walk at. If no introduction
966 of this file content could be found before this floor
977 of this file content could be found before this floor
967 revision, the function will returns "None" and stops its
978 revision, the function will returns "None" and stops its
968 iteration.
979 iteration.
969 """
980 """
970 repo = self._repo
981 repo = self._repo
971 cl = repo.unfiltered().changelog
982 cl = repo.unfiltered().changelog
972 mfl = repo.manifestlog
983 mfl = repo.manifestlog
973 # fetch the linkrev
984 # fetch the linkrev
974 lkr = self.linkrev()
985 lkr = self.linkrev()
975 if srcrev == lkr:
986 if srcrev == lkr:
976 return lkr
987 return lkr
977 # hack to reuse ancestor computation when searching for renames
988 # hack to reuse ancestor computation when searching for renames
978 memberanc = getattr(self, '_ancestrycontext', None)
989 memberanc = getattr(self, '_ancestrycontext', None)
979 iteranc = None
990 iteranc = None
980 if srcrev is None:
991 if srcrev is None:
981 # wctx case, used by workingfilectx during mergecopy
992 # wctx case, used by workingfilectx during mergecopy
982 revs = [p.rev() for p in self._repo[None].parents()]
993 revs = [p.rev() for p in self._repo[None].parents()]
983 inclusive = True # we skipped the real (revless) source
994 inclusive = True # we skipped the real (revless) source
984 else:
995 else:
985 revs = [srcrev]
996 revs = [srcrev]
986 if memberanc is None:
997 if memberanc is None:
987 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
998 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
988 # check if this linkrev is an ancestor of srcrev
999 # check if this linkrev is an ancestor of srcrev
989 if lkr not in memberanc:
1000 if lkr not in memberanc:
990 if iteranc is None:
1001 if iteranc is None:
991 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1002 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
992 fnode = self._filenode
1003 fnode = self._filenode
993 path = self._path
1004 path = self._path
994 for a in iteranc:
1005 for a in iteranc:
995 if stoprev is not None and a < stoprev:
1006 if stoprev is not None and a < stoprev:
996 return None
1007 return None
997 ac = cl.read(a) # get changeset data (we avoid object creation)
1008 ac = cl.read(a) # get changeset data (we avoid object creation)
998 if path in ac[3]: # checking the 'files' field.
1009 if path in ac[3]: # checking the 'files' field.
999 # The file has been touched, check if the content is
1010 # The file has been touched, check if the content is
1000 # similar to the one we search for.
1011 # similar to the one we search for.
1001 if fnode == mfl[ac[0]].readfast().get(path):
1012 if fnode == mfl[ac[0]].readfast().get(path):
1002 return a
1013 return a
1003 # In theory, we should never get out of that loop without a result.
1014 # In theory, we should never get out of that loop without a result.
1004 # But if manifest uses a buggy file revision (not children of the
1015 # But if manifest uses a buggy file revision (not children of the
1005 # one it replaces) we could. Such a buggy situation will likely
1016 # one it replaces) we could. Such a buggy situation will likely
1006 # result is crash somewhere else at to some point.
1017 # result is crash somewhere else at to some point.
1007 return lkr
1018 return lkr
1008
1019
1009 def isintroducedafter(self, changelogrev):
1020 def isintroducedafter(self, changelogrev):
1010 """True if a filectx has been introduced after a given floor revision
1021 """True if a filectx has been introduced after a given floor revision
1011 """
1022 """
1012 if self.linkrev() >= changelogrev:
1023 if self.linkrev() >= changelogrev:
1013 return True
1024 return True
1014 introrev = self._introrev(stoprev=changelogrev)
1025 introrev = self._introrev(stoprev=changelogrev)
1015 if introrev is None:
1026 if introrev is None:
1016 return False
1027 return False
1017 return introrev >= changelogrev
1028 return introrev >= changelogrev
1018
1029
1019 def introrev(self):
1030 def introrev(self):
1020 """return the rev of the changeset which introduced this file revision
1031 """return the rev of the changeset which introduced this file revision
1021
1032
1022 This method is different from linkrev because it take into account the
1033 This method is different from linkrev because it take into account the
1023 changeset the filectx was created from. It ensures the returned
1034 changeset the filectx was created from. It ensures the returned
1024 revision is one of its ancestors. This prevents bugs from
1035 revision is one of its ancestors. This prevents bugs from
1025 'linkrev-shadowing' when a file revision is used by multiple
1036 'linkrev-shadowing' when a file revision is used by multiple
1026 changesets.
1037 changesets.
1027 """
1038 """
1028 return self._introrev()
1039 return self._introrev()
1029
1040
1030 def _introrev(self, stoprev=None):
1041 def _introrev(self, stoprev=None):
1031 """
1042 """
1032 Same as `introrev` but, with an extra argument to limit changelog
1043 Same as `introrev` but, with an extra argument to limit changelog
1033 iteration range in some internal usecase.
1044 iteration range in some internal usecase.
1034
1045
1035 If `stoprev` is set, the `introrev` will not be searched past that
1046 If `stoprev` is set, the `introrev` will not be searched past that
1036 `stoprev` revision and "None" might be returned. This is useful to
1047 `stoprev` revision and "None" might be returned. This is useful to
1037 limit the iteration range.
1048 limit the iteration range.
1038 """
1049 """
1039 toprev = None
1050 toprev = None
1040 attrs = vars(self)
1051 attrs = vars(self)
1041 if '_changeid' in attrs:
1052 if '_changeid' in attrs:
1042 # We have a cached value already
1053 # We have a cached value already
1043 toprev = self._changeid
1054 toprev = self._changeid
1044 elif '_changectx' in attrs:
1055 elif '_changectx' in attrs:
1045 # We know which changelog entry we are coming from
1056 # We know which changelog entry we are coming from
1046 toprev = self._changectx.rev()
1057 toprev = self._changectx.rev()
1047
1058
1048 if toprev is not None:
1059 if toprev is not None:
1049 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1060 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1050 elif '_descendantrev' in attrs:
1061 elif '_descendantrev' in attrs:
1051 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1062 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1052 # be nice and cache the result of the computation
1063 # be nice and cache the result of the computation
1053 if introrev is not None:
1064 if introrev is not None:
1054 self._changeid = introrev
1065 self._changeid = introrev
1055 return introrev
1066 return introrev
1056 else:
1067 else:
1057 return self.linkrev()
1068 return self.linkrev()
1058
1069
1059 def introfilectx(self):
1070 def introfilectx(self):
1060 """Return filectx having identical contents, but pointing to the
1071 """Return filectx having identical contents, but pointing to the
1061 changeset revision where this filectx was introduced"""
1072 changeset revision where this filectx was introduced"""
1062 introrev = self.introrev()
1073 introrev = self.introrev()
1063 if self.rev() == introrev:
1074 if self.rev() == introrev:
1064 return self
1075 return self
1065 return self.filectx(self.filenode(), changeid=introrev)
1076 return self.filectx(self.filenode(), changeid=introrev)
1066
1077
1067 def _parentfilectx(self, path, fileid, filelog):
1078 def _parentfilectx(self, path, fileid, filelog):
1068 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1079 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1069 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1080 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1070 if '_changeid' in vars(self) or '_changectx' in vars(self):
1081 if '_changeid' in vars(self) or '_changectx' in vars(self):
1071 # If self is associated with a changeset (probably explicitly
1082 # If self is associated with a changeset (probably explicitly
1072 # fed), ensure the created filectx is associated with a
1083 # fed), ensure the created filectx is associated with a
1073 # changeset that is an ancestor of self.changectx.
1084 # changeset that is an ancestor of self.changectx.
1074 # This lets us later use _adjustlinkrev to get a correct link.
1085 # This lets us later use _adjustlinkrev to get a correct link.
1075 fctx._descendantrev = self.rev()
1086 fctx._descendantrev = self.rev()
1076 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1087 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1077 elif '_descendantrev' in vars(self):
1088 elif '_descendantrev' in vars(self):
1078 # Otherwise propagate _descendantrev if we have one associated.
1089 # Otherwise propagate _descendantrev if we have one associated.
1079 fctx._descendantrev = self._descendantrev
1090 fctx._descendantrev = self._descendantrev
1080 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1091 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1081 return fctx
1092 return fctx
1082
1093
1083 def parents(self):
1094 def parents(self):
1084 _path = self._path
1095 _path = self._path
1085 fl = self._filelog
1096 fl = self._filelog
1086 parents = self._filelog.parents(self._filenode)
1097 parents = self._filelog.parents(self._filenode)
1087 pl = [(_path, node, fl) for node in parents if node != nullid]
1098 pl = [(_path, node, fl) for node in parents if node != nullid]
1088
1099
1089 r = fl.renamed(self._filenode)
1100 r = fl.renamed(self._filenode)
1090 if r:
1101 if r:
1091 # - In the simple rename case, both parent are nullid, pl is empty.
1102 # - In the simple rename case, both parent are nullid, pl is empty.
1092 # - In case of merge, only one of the parent is null id and should
1103 # - In case of merge, only one of the parent is null id and should
1093 # be replaced with the rename information. This parent is -always-
1104 # be replaced with the rename information. This parent is -always-
1094 # the first one.
1105 # the first one.
1095 #
1106 #
1096 # As null id have always been filtered out in the previous list
1107 # As null id have always been filtered out in the previous list
1097 # comprehension, inserting to 0 will always result in "replacing
1108 # comprehension, inserting to 0 will always result in "replacing
1098 # first nullid parent with rename information.
1109 # first nullid parent with rename information.
1099 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1110 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1100
1111
1101 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1112 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1102
1113
1103 def p1(self):
1114 def p1(self):
1104 return self.parents()[0]
1115 return self.parents()[0]
1105
1116
1106 def p2(self):
1117 def p2(self):
1107 p = self.parents()
1118 p = self.parents()
1108 if len(p) == 2:
1119 if len(p) == 2:
1109 return p[1]
1120 return p[1]
1110 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1121 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1111
1122
1112 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1123 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1113 """Returns a list of annotateline objects for each line in the file
1124 """Returns a list of annotateline objects for each line in the file
1114
1125
1115 - line.fctx is the filectx of the node where that line was last changed
1126 - line.fctx is the filectx of the node where that line was last changed
1116 - line.lineno is the line number at the first appearance in the managed
1127 - line.lineno is the line number at the first appearance in the managed
1117 file
1128 file
1118 - line.text is the data on that line (including newline character)
1129 - line.text is the data on that line (including newline character)
1119 """
1130 """
1120 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1131 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1121
1132
1122 def parents(f):
1133 def parents(f):
1123 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1134 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1124 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1135 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1125 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1136 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1126 # isn't an ancestor of the srcrev.
1137 # isn't an ancestor of the srcrev.
1127 f._changeid
1138 f._changeid
1128 pl = f.parents()
1139 pl = f.parents()
1129
1140
1130 # Don't return renamed parents if we aren't following.
1141 # Don't return renamed parents if we aren't following.
1131 if not follow:
1142 if not follow:
1132 pl = [p for p in pl if p.path() == f.path()]
1143 pl = [p for p in pl if p.path() == f.path()]
1133
1144
1134 # renamed filectx won't have a filelog yet, so set it
1145 # renamed filectx won't have a filelog yet, so set it
1135 # from the cache to save time
1146 # from the cache to save time
1136 for p in pl:
1147 for p in pl:
1137 if not '_filelog' in p.__dict__:
1148 if not '_filelog' in p.__dict__:
1138 p._filelog = getlog(p.path())
1149 p._filelog = getlog(p.path())
1139
1150
1140 return pl
1151 return pl
1141
1152
1142 # use linkrev to find the first changeset where self appeared
1153 # use linkrev to find the first changeset where self appeared
1143 base = self.introfilectx()
1154 base = self.introfilectx()
1144 if getattr(base, '_ancestrycontext', None) is None:
1155 if getattr(base, '_ancestrycontext', None) is None:
1145 cl = self._repo.changelog
1156 cl = self._repo.changelog
1146 if base.rev() is None:
1157 if base.rev() is None:
1147 # wctx is not inclusive, but works because _ancestrycontext
1158 # wctx is not inclusive, but works because _ancestrycontext
1148 # is used to test filelog revisions
1159 # is used to test filelog revisions
1149 ac = cl.ancestors(
1160 ac = cl.ancestors(
1150 [p.rev() for p in base.parents()], inclusive=True
1161 [p.rev() for p in base.parents()], inclusive=True
1151 )
1162 )
1152 else:
1163 else:
1153 ac = cl.ancestors([base.rev()], inclusive=True)
1164 ac = cl.ancestors([base.rev()], inclusive=True)
1154 base._ancestrycontext = ac
1165 base._ancestrycontext = ac
1155
1166
1156 return dagop.annotate(
1167 return dagop.annotate(
1157 base, parents, skiprevs=skiprevs, diffopts=diffopts
1168 base, parents, skiprevs=skiprevs, diffopts=diffopts
1158 )
1169 )
1159
1170
1160 def ancestors(self, followfirst=False):
1171 def ancestors(self, followfirst=False):
1161 visit = {}
1172 visit = {}
1162 c = self
1173 c = self
1163 if followfirst:
1174 if followfirst:
1164 cut = 1
1175 cut = 1
1165 else:
1176 else:
1166 cut = None
1177 cut = None
1167
1178
1168 while True:
1179 while True:
1169 for parent in c.parents()[:cut]:
1180 for parent in c.parents()[:cut]:
1170 visit[(parent.linkrev(), parent.filenode())] = parent
1181 visit[(parent.linkrev(), parent.filenode())] = parent
1171 if not visit:
1182 if not visit:
1172 break
1183 break
1173 c = visit.pop(max(visit))
1184 c = visit.pop(max(visit))
1174 yield c
1185 yield c
1175
1186
1176 def decodeddata(self):
1187 def decodeddata(self):
1177 """Returns `data()` after running repository decoding filters.
1188 """Returns `data()` after running repository decoding filters.
1178
1189
1179 This is often equivalent to how the data would be expressed on disk.
1190 This is often equivalent to how the data would be expressed on disk.
1180 """
1191 """
1181 return self._repo.wwritedata(self.path(), self.data())
1192 return self._repo.wwritedata(self.path(), self.data())
1182
1193
1183
1194
1184 class filectx(basefilectx):
1195 class filectx(basefilectx):
1185 """A filecontext object makes access to data related to a particular
1196 """A filecontext object makes access to data related to a particular
1186 filerevision convenient."""
1197 filerevision convenient."""
1187
1198
1188 def __init__(
1199 def __init__(
1189 self,
1200 self,
1190 repo,
1201 repo,
1191 path,
1202 path,
1192 changeid=None,
1203 changeid=None,
1193 fileid=None,
1204 fileid=None,
1194 filelog=None,
1205 filelog=None,
1195 changectx=None,
1206 changectx=None,
1196 ):
1207 ):
1197 """changeid must be a revision number, if specified.
1208 """changeid must be a revision number, if specified.
1198 fileid can be a file revision or node."""
1209 fileid can be a file revision or node."""
1199 self._repo = repo
1210 self._repo = repo
1200 self._path = path
1211 self._path = path
1201
1212
1202 assert (
1213 assert (
1203 changeid is not None or fileid is not None or changectx is not None
1214 changeid is not None or fileid is not None or changectx is not None
1204 ), (
1215 ), (
1205 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1216 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1206 % (changeid, fileid, changectx,)
1217 % (changeid, fileid, changectx,)
1207 )
1218 )
1208
1219
1209 if filelog is not None:
1220 if filelog is not None:
1210 self._filelog = filelog
1221 self._filelog = filelog
1211
1222
1212 if changeid is not None:
1223 if changeid is not None:
1213 self._changeid = changeid
1224 self._changeid = changeid
1214 if changectx is not None:
1225 if changectx is not None:
1215 self._changectx = changectx
1226 self._changectx = changectx
1216 if fileid is not None:
1227 if fileid is not None:
1217 self._fileid = fileid
1228 self._fileid = fileid
1218
1229
1219 @propertycache
1230 @propertycache
1220 def _changectx(self):
1231 def _changectx(self):
1221 try:
1232 try:
1222 return self._repo[self._changeid]
1233 return self._repo[self._changeid]
1223 except error.FilteredRepoLookupError:
1234 except error.FilteredRepoLookupError:
1224 # Linkrev may point to any revision in the repository. When the
1235 # Linkrev may point to any revision in the repository. When the
1225 # repository is filtered this may lead to `filectx` trying to build
1236 # repository is filtered this may lead to `filectx` trying to build
1226 # `changectx` for filtered revision. In such case we fallback to
1237 # `changectx` for filtered revision. In such case we fallback to
1227 # creating `changectx` on the unfiltered version of the reposition.
1238 # creating `changectx` on the unfiltered version of the reposition.
1228 # This fallback should not be an issue because `changectx` from
1239 # This fallback should not be an issue because `changectx` from
1229 # `filectx` are not used in complex operations that care about
1240 # `filectx` are not used in complex operations that care about
1230 # filtering.
1241 # filtering.
1231 #
1242 #
1232 # This fallback is a cheap and dirty fix that prevent several
1243 # This fallback is a cheap and dirty fix that prevent several
1233 # crashes. It does not ensure the behavior is correct. However the
1244 # crashes. It does not ensure the behavior is correct. However the
1234 # behavior was not correct before filtering either and "incorrect
1245 # behavior was not correct before filtering either and "incorrect
1235 # behavior" is seen as better as "crash"
1246 # behavior" is seen as better as "crash"
1236 #
1247 #
1237 # Linkrevs have several serious troubles with filtering that are
1248 # Linkrevs have several serious troubles with filtering that are
1238 # complicated to solve. Proper handling of the issue here should be
1249 # complicated to solve. Proper handling of the issue here should be
1239 # considered when solving linkrev issue are on the table.
1250 # considered when solving linkrev issue are on the table.
1240 return self._repo.unfiltered()[self._changeid]
1251 return self._repo.unfiltered()[self._changeid]
1241
1252
1242 def filectx(self, fileid, changeid=None):
1253 def filectx(self, fileid, changeid=None):
1243 '''opens an arbitrary revision of the file without
1254 '''opens an arbitrary revision of the file without
1244 opening a new filelog'''
1255 opening a new filelog'''
1245 return filectx(
1256 return filectx(
1246 self._repo,
1257 self._repo,
1247 self._path,
1258 self._path,
1248 fileid=fileid,
1259 fileid=fileid,
1249 filelog=self._filelog,
1260 filelog=self._filelog,
1250 changeid=changeid,
1261 changeid=changeid,
1251 )
1262 )
1252
1263
1253 def rawdata(self):
1264 def rawdata(self):
1254 return self._filelog.rawdata(self._filenode)
1265 return self._filelog.rawdata(self._filenode)
1255
1266
1256 def rawflags(self):
1267 def rawflags(self):
1257 """low-level revlog flags"""
1268 """low-level revlog flags"""
1258 return self._filelog.flags(self._filerev)
1269 return self._filelog.flags(self._filerev)
1259
1270
1260 def data(self):
1271 def data(self):
1261 try:
1272 try:
1262 return self._filelog.read(self._filenode)
1273 return self._filelog.read(self._filenode)
1263 except error.CensoredNodeError:
1274 except error.CensoredNodeError:
1264 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1275 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1265 return b""
1276 return b""
1266 raise error.Abort(
1277 raise error.Abort(
1267 _(b"censored node: %s") % short(self._filenode),
1278 _(b"censored node: %s") % short(self._filenode),
1268 hint=_(b"set censor.policy to ignore errors"),
1279 hint=_(b"set censor.policy to ignore errors"),
1269 )
1280 )
1270
1281
1271 def size(self):
1282 def size(self):
1272 return self._filelog.size(self._filerev)
1283 return self._filelog.size(self._filerev)
1273
1284
1274 @propertycache
1285 @propertycache
1275 def _copied(self):
1286 def _copied(self):
1276 """check if file was actually renamed in this changeset revision
1287 """check if file was actually renamed in this changeset revision
1277
1288
1278 If rename logged in file revision, we report copy for changeset only
1289 If rename logged in file revision, we report copy for changeset only
1279 if file revisions linkrev points back to the changeset in question
1290 if file revisions linkrev points back to the changeset in question
1280 or both changeset parents contain different file revisions.
1291 or both changeset parents contain different file revisions.
1281 """
1292 """
1282
1293
1283 renamed = self._filelog.renamed(self._filenode)
1294 renamed = self._filelog.renamed(self._filenode)
1284 if not renamed:
1295 if not renamed:
1285 return None
1296 return None
1286
1297
1287 if self.rev() == self.linkrev():
1298 if self.rev() == self.linkrev():
1288 return renamed
1299 return renamed
1289
1300
1290 name = self.path()
1301 name = self.path()
1291 fnode = self._filenode
1302 fnode = self._filenode
1292 for p in self._changectx.parents():
1303 for p in self._changectx.parents():
1293 try:
1304 try:
1294 if fnode == p.filenode(name):
1305 if fnode == p.filenode(name):
1295 return None
1306 return None
1296 except error.LookupError:
1307 except error.LookupError:
1297 pass
1308 pass
1298 return renamed
1309 return renamed
1299
1310
1300 def children(self):
1311 def children(self):
1301 # hard for renames
1312 # hard for renames
1302 c = self._filelog.children(self._filenode)
1313 c = self._filelog.children(self._filenode)
1303 return [
1314 return [
1304 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1315 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1305 for x in c
1316 for x in c
1306 ]
1317 ]
1307
1318
1308
1319
1309 class committablectx(basectx):
1320 class committablectx(basectx):
1310 """A committablectx object provides common functionality for a context that
1321 """A committablectx object provides common functionality for a context that
1311 wants the ability to commit, e.g. workingctx or memctx."""
1322 wants the ability to commit, e.g. workingctx or memctx."""
1312
1323
1313 def __init__(
1324 def __init__(
1314 self,
1325 self,
1315 repo,
1326 repo,
1316 text=b"",
1327 text=b"",
1317 user=None,
1328 user=None,
1318 date=None,
1329 date=None,
1319 extra=None,
1330 extra=None,
1320 changes=None,
1331 changes=None,
1321 branch=None,
1332 branch=None,
1322 ):
1333 ):
1323 super(committablectx, self).__init__(repo)
1334 super(committablectx, self).__init__(repo)
1324 self._rev = None
1335 self._rev = None
1325 self._node = None
1336 self._node = None
1326 self._text = text
1337 self._text = text
1327 if date:
1338 if date:
1328 self._date = dateutil.parsedate(date)
1339 self._date = dateutil.parsedate(date)
1329 if user:
1340 if user:
1330 self._user = user
1341 self._user = user
1331 if changes:
1342 if changes:
1332 self._status = changes
1343 self._status = changes
1333
1344
1334 self._extra = {}
1345 self._extra = {}
1335 if extra:
1346 if extra:
1336 self._extra = extra.copy()
1347 self._extra = extra.copy()
1337 if branch is not None:
1348 if branch is not None:
1338 self._extra[b'branch'] = encoding.fromlocal(branch)
1349 self._extra[b'branch'] = encoding.fromlocal(branch)
1339 if not self._extra.get(b'branch'):
1350 if not self._extra.get(b'branch'):
1340 self._extra[b'branch'] = b'default'
1351 self._extra[b'branch'] = b'default'
1341
1352
1342 def __bytes__(self):
1353 def __bytes__(self):
1343 return bytes(self._parents[0]) + b"+"
1354 return bytes(self._parents[0]) + b"+"
1344
1355
1345 __str__ = encoding.strmethod(__bytes__)
1356 __str__ = encoding.strmethod(__bytes__)
1346
1357
1347 def __nonzero__(self):
1358 def __nonzero__(self):
1348 return True
1359 return True
1349
1360
1350 __bool__ = __nonzero__
1361 __bool__ = __nonzero__
1351
1362
1352 @propertycache
1363 @propertycache
1353 def _status(self):
1364 def _status(self):
1354 return self._repo.status()
1365 return self._repo.status()
1355
1366
1356 @propertycache
1367 @propertycache
1357 def _user(self):
1368 def _user(self):
1358 return self._repo.ui.username()
1369 return self._repo.ui.username()
1359
1370
1360 @propertycache
1371 @propertycache
1361 def _date(self):
1372 def _date(self):
1362 ui = self._repo.ui
1373 ui = self._repo.ui
1363 date = ui.configdate(b'devel', b'default-date')
1374 date = ui.configdate(b'devel', b'default-date')
1364 if date is None:
1375 if date is None:
1365 date = dateutil.makedate()
1376 date = dateutil.makedate()
1366 return date
1377 return date
1367
1378
1368 def subrev(self, subpath):
1379 def subrev(self, subpath):
1369 return None
1380 return None
1370
1381
1371 def manifestnode(self):
1382 def manifestnode(self):
1372 return None
1383 return None
1373
1384
1374 def user(self):
1385 def user(self):
1375 return self._user or self._repo.ui.username()
1386 return self._user or self._repo.ui.username()
1376
1387
1377 def date(self):
1388 def date(self):
1378 return self._date
1389 return self._date
1379
1390
1380 def description(self):
1391 def description(self):
1381 return self._text
1392 return self._text
1382
1393
1383 def files(self):
1394 def files(self):
1384 return sorted(
1395 return sorted(
1385 self._status.modified + self._status.added + self._status.removed
1396 self._status.modified + self._status.added + self._status.removed
1386 )
1397 )
1387
1398
1388 def modified(self):
1399 def modified(self):
1389 return self._status.modified
1400 return self._status.modified
1390
1401
1391 def added(self):
1402 def added(self):
1392 return self._status.added
1403 return self._status.added
1393
1404
1394 def removed(self):
1405 def removed(self):
1395 return self._status.removed
1406 return self._status.removed
1396
1407
1397 def deleted(self):
1408 def deleted(self):
1398 return self._status.deleted
1409 return self._status.deleted
1399
1410
1400 filesmodified = modified
1411 filesmodified = modified
1401 filesadded = added
1412 filesadded = added
1402 filesremoved = removed
1413 filesremoved = removed
1403
1414
1404 def branch(self):
1415 def branch(self):
1405 return encoding.tolocal(self._extra[b'branch'])
1416 return encoding.tolocal(self._extra[b'branch'])
1406
1417
1407 def closesbranch(self):
1418 def closesbranch(self):
1408 return b'close' in self._extra
1419 return b'close' in self._extra
1409
1420
1410 def extra(self):
1421 def extra(self):
1411 return self._extra
1422 return self._extra
1412
1423
1413 def isinmemory(self):
1424 def isinmemory(self):
1414 return False
1425 return False
1415
1426
1416 def tags(self):
1427 def tags(self):
1417 return []
1428 return []
1418
1429
1419 def bookmarks(self):
1430 def bookmarks(self):
1420 b = []
1431 b = []
1421 for p in self.parents():
1432 for p in self.parents():
1422 b.extend(p.bookmarks())
1433 b.extend(p.bookmarks())
1423 return b
1434 return b
1424
1435
1425 def phase(self):
1436 def phase(self):
1426 phase = phases.draft # default phase to draft
1437 phase = phases.draft # default phase to draft
1427 for p in self.parents():
1438 for p in self.parents():
1428 phase = max(phase, p.phase())
1439 phase = max(phase, p.phase())
1429 return phase
1440 return phase
1430
1441
1431 def hidden(self):
1442 def hidden(self):
1432 return False
1443 return False
1433
1444
1434 def children(self):
1445 def children(self):
1435 return []
1446 return []
1436
1447
1437 def ancestor(self, c2):
1448 def ancestor(self, c2):
1438 """return the "best" ancestor context of self and c2"""
1449 """return the "best" ancestor context of self and c2"""
1439 return self._parents[0].ancestor(c2) # punt on two parents for now
1450 return self._parents[0].ancestor(c2) # punt on two parents for now
1440
1451
1441 def ancestors(self):
1452 def ancestors(self):
1442 for p in self._parents:
1453 for p in self._parents:
1443 yield p
1454 yield p
1444 for a in self._repo.changelog.ancestors(
1455 for a in self._repo.changelog.ancestors(
1445 [p.rev() for p in self._parents]
1456 [p.rev() for p in self._parents]
1446 ):
1457 ):
1447 yield self._repo[a]
1458 yield self._repo[a]
1448
1459
1449 def markcommitted(self, node):
1460 def markcommitted(self, node):
1450 """Perform post-commit cleanup necessary after committing this ctx
1461 """Perform post-commit cleanup necessary after committing this ctx
1451
1462
1452 Specifically, this updates backing stores this working context
1463 Specifically, this updates backing stores this working context
1453 wraps to reflect the fact that the changes reflected by this
1464 wraps to reflect the fact that the changes reflected by this
1454 workingctx have been committed. For example, it marks
1465 workingctx have been committed. For example, it marks
1455 modified and added files as normal in the dirstate.
1466 modified and added files as normal in the dirstate.
1456
1467
1457 """
1468 """
1458
1469
1459 def dirty(self, missing=False, merge=True, branch=True):
1470 def dirty(self, missing=False, merge=True, branch=True):
1460 return False
1471 return False
1461
1472
1462
1473
1463 class workingctx(committablectx):
1474 class workingctx(committablectx):
1464 """A workingctx object makes access to data related to
1475 """A workingctx object makes access to data related to
1465 the current working directory convenient.
1476 the current working directory convenient.
1466 date - any valid date string or (unixtime, offset), or None.
1477 date - any valid date string or (unixtime, offset), or None.
1467 user - username string, or None.
1478 user - username string, or None.
1468 extra - a dictionary of extra values, or None.
1479 extra - a dictionary of extra values, or None.
1469 changes - a list of file lists as returned by localrepo.status()
1480 changes - a list of file lists as returned by localrepo.status()
1470 or None to use the repository status.
1481 or None to use the repository status.
1471 """
1482 """
1472
1483
1473 def __init__(
1484 def __init__(
1474 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1485 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1475 ):
1486 ):
1476 branch = None
1487 branch = None
1477 if not extra or b'branch' not in extra:
1488 if not extra or b'branch' not in extra:
1478 try:
1489 try:
1479 branch = repo.dirstate.branch()
1490 branch = repo.dirstate.branch()
1480 except UnicodeDecodeError:
1491 except UnicodeDecodeError:
1481 raise error.Abort(_(b'branch name not in UTF-8!'))
1492 raise error.Abort(_(b'branch name not in UTF-8!'))
1482 super(workingctx, self).__init__(
1493 super(workingctx, self).__init__(
1483 repo, text, user, date, extra, changes, branch=branch
1494 repo, text, user, date, extra, changes, branch=branch
1484 )
1495 )
1485
1496
1486 def __iter__(self):
1497 def __iter__(self):
1487 d = self._repo.dirstate
1498 d = self._repo.dirstate
1488 for f in d:
1499 for f in d:
1489 if d[f] != b'r':
1500 if d[f] != b'r':
1490 yield f
1501 yield f
1491
1502
1492 def __contains__(self, key):
1503 def __contains__(self, key):
1493 return self._repo.dirstate[key] not in b"?r"
1504 return self._repo.dirstate[key] not in b"?r"
1494
1505
1495 def hex(self):
1506 def hex(self):
1496 return wdirhex
1507 return wdirhex
1497
1508
1498 @propertycache
1509 @propertycache
1499 def _parents(self):
1510 def _parents(self):
1500 p = self._repo.dirstate.parents()
1511 p = self._repo.dirstate.parents()
1501 if p[1] == nullid:
1512 if p[1] == nullid:
1502 p = p[:-1]
1513 p = p[:-1]
1503 # use unfiltered repo to delay/avoid loading obsmarkers
1514 # use unfiltered repo to delay/avoid loading obsmarkers
1504 unfi = self._repo.unfiltered()
1515 unfi = self._repo.unfiltered()
1505 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1516 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1506
1517
1507 def _fileinfo(self, path):
1518 def _fileinfo(self, path):
1508 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1519 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1509 self._manifest
1520 self._manifest
1510 return super(workingctx, self)._fileinfo(path)
1521 return super(workingctx, self)._fileinfo(path)
1511
1522
1512 def _buildflagfunc(self):
1523 def _buildflagfunc(self):
1513 # Create a fallback function for getting file flags when the
1524 # Create a fallback function for getting file flags when the
1514 # filesystem doesn't support them
1525 # filesystem doesn't support them
1515
1526
1516 copiesget = self._repo.dirstate.copies().get
1527 copiesget = self._repo.dirstate.copies().get
1517 parents = self.parents()
1528 parents = self.parents()
1518 if len(parents) < 2:
1529 if len(parents) < 2:
1519 # when we have one parent, it's easy: copy from parent
1530 # when we have one parent, it's easy: copy from parent
1520 man = parents[0].manifest()
1531 man = parents[0].manifest()
1521
1532
1522 def func(f):
1533 def func(f):
1523 f = copiesget(f, f)
1534 f = copiesget(f, f)
1524 return man.flags(f)
1535 return man.flags(f)
1525
1536
1526 else:
1537 else:
1527 # merges are tricky: we try to reconstruct the unstored
1538 # merges are tricky: we try to reconstruct the unstored
1528 # result from the merge (issue1802)
1539 # result from the merge (issue1802)
1529 p1, p2 = parents
1540 p1, p2 = parents
1530 pa = p1.ancestor(p2)
1541 pa = p1.ancestor(p2)
1531 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1542 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1532
1543
1533 def func(f):
1544 def func(f):
1534 f = copiesget(f, f) # may be wrong for merges with copies
1545 f = copiesget(f, f) # may be wrong for merges with copies
1535 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1546 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1536 if fl1 == fl2:
1547 if fl1 == fl2:
1537 return fl1
1548 return fl1
1538 if fl1 == fla:
1549 if fl1 == fla:
1539 return fl2
1550 return fl2
1540 if fl2 == fla:
1551 if fl2 == fla:
1541 return fl1
1552 return fl1
1542 return b'' # punt for conflicts
1553 return b'' # punt for conflicts
1543
1554
1544 return func
1555 return func
1545
1556
1546 @propertycache
1557 @propertycache
1547 def _flagfunc(self):
1558 def _flagfunc(self):
1548 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1559 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1549
1560
1550 def flags(self, path):
1561 def flags(self, path):
1551 if '_manifest' in self.__dict__:
1562 if '_manifest' in self.__dict__:
1552 try:
1563 try:
1553 return self._manifest.flags(path)
1564 return self._manifest.flags(path)
1554 except KeyError:
1565 except KeyError:
1555 return b''
1566 return b''
1556
1567
1557 try:
1568 try:
1558 return self._flagfunc(path)
1569 return self._flagfunc(path)
1559 except OSError:
1570 except OSError:
1560 return b''
1571 return b''
1561
1572
1562 def filectx(self, path, filelog=None):
1573 def filectx(self, path, filelog=None):
1563 """get a file context from the working directory"""
1574 """get a file context from the working directory"""
1564 return workingfilectx(
1575 return workingfilectx(
1565 self._repo, path, workingctx=self, filelog=filelog
1576 self._repo, path, workingctx=self, filelog=filelog
1566 )
1577 )
1567
1578
1568 def dirty(self, missing=False, merge=True, branch=True):
1579 def dirty(self, missing=False, merge=True, branch=True):
1569 b"check whether a working directory is modified"
1580 b"check whether a working directory is modified"
1570 # check subrepos first
1581 # check subrepos first
1571 for s in sorted(self.substate):
1582 for s in sorted(self.substate):
1572 if self.sub(s).dirty(missing=missing):
1583 if self.sub(s).dirty(missing=missing):
1573 return True
1584 return True
1574 # check current working dir
1585 # check current working dir
1575 return (
1586 return (
1576 (merge and self.p2())
1587 (merge and self.p2())
1577 or (branch and self.branch() != self.p1().branch())
1588 or (branch and self.branch() != self.p1().branch())
1578 or self.modified()
1589 or self.modified()
1579 or self.added()
1590 or self.added()
1580 or self.removed()
1591 or self.removed()
1581 or (missing and self.deleted())
1592 or (missing and self.deleted())
1582 )
1593 )
1583
1594
1584 def add(self, list, prefix=b""):
1595 def add(self, list, prefix=b""):
1585 with self._repo.wlock():
1596 with self._repo.wlock():
1586 ui, ds = self._repo.ui, self._repo.dirstate
1597 ui, ds = self._repo.ui, self._repo.dirstate
1587 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1598 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1588 rejected = []
1599 rejected = []
1589 lstat = self._repo.wvfs.lstat
1600 lstat = self._repo.wvfs.lstat
1590 for f in list:
1601 for f in list:
1591 # ds.pathto() returns an absolute file when this is invoked from
1602 # ds.pathto() returns an absolute file when this is invoked from
1592 # the keyword extension. That gets flagged as non-portable on
1603 # the keyword extension. That gets flagged as non-portable on
1593 # Windows, since it contains the drive letter and colon.
1604 # Windows, since it contains the drive letter and colon.
1594 scmutil.checkportable(ui, os.path.join(prefix, f))
1605 scmutil.checkportable(ui, os.path.join(prefix, f))
1595 try:
1606 try:
1596 st = lstat(f)
1607 st = lstat(f)
1597 except OSError:
1608 except OSError:
1598 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1609 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1599 rejected.append(f)
1610 rejected.append(f)
1600 continue
1611 continue
1601 limit = ui.configbytes(b'ui', b'large-file-limit')
1612 limit = ui.configbytes(b'ui', b'large-file-limit')
1602 if limit != 0 and st.st_size > limit:
1613 if limit != 0 and st.st_size > limit:
1603 ui.warn(
1614 ui.warn(
1604 _(
1615 _(
1605 b"%s: up to %d MB of RAM may be required "
1616 b"%s: up to %d MB of RAM may be required "
1606 b"to manage this file\n"
1617 b"to manage this file\n"
1607 b"(use 'hg revert %s' to cancel the "
1618 b"(use 'hg revert %s' to cancel the "
1608 b"pending addition)\n"
1619 b"pending addition)\n"
1609 )
1620 )
1610 % (f, 3 * st.st_size // 1000000, uipath(f))
1621 % (f, 3 * st.st_size // 1000000, uipath(f))
1611 )
1622 )
1612 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1623 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1613 ui.warn(
1624 ui.warn(
1614 _(
1625 _(
1615 b"%s not added: only files and symlinks "
1626 b"%s not added: only files and symlinks "
1616 b"supported currently\n"
1627 b"supported currently\n"
1617 )
1628 )
1618 % uipath(f)
1629 % uipath(f)
1619 )
1630 )
1620 rejected.append(f)
1631 rejected.append(f)
1621 elif ds[f] in b'amn':
1632 elif ds[f] in b'amn':
1622 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1633 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1623 elif ds[f] == b'r':
1634 elif ds[f] == b'r':
1624 ds.normallookup(f)
1635 ds.normallookup(f)
1625 else:
1636 else:
1626 ds.add(f)
1637 ds.add(f)
1627 return rejected
1638 return rejected
1628
1639
1629 def forget(self, files, prefix=b""):
1640 def forget(self, files, prefix=b""):
1630 with self._repo.wlock():
1641 with self._repo.wlock():
1631 ds = self._repo.dirstate
1642 ds = self._repo.dirstate
1632 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1643 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1633 rejected = []
1644 rejected = []
1634 for f in files:
1645 for f in files:
1635 if f not in ds:
1646 if f not in ds:
1636 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1647 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1637 rejected.append(f)
1648 rejected.append(f)
1638 elif ds[f] != b'a':
1649 elif ds[f] != b'a':
1639 ds.remove(f)
1650 ds.remove(f)
1640 else:
1651 else:
1641 ds.drop(f)
1652 ds.drop(f)
1642 return rejected
1653 return rejected
1643
1654
1644 def copy(self, source, dest):
1655 def copy(self, source, dest):
1645 try:
1656 try:
1646 st = self._repo.wvfs.lstat(dest)
1657 st = self._repo.wvfs.lstat(dest)
1647 except OSError as err:
1658 except OSError as err:
1648 if err.errno != errno.ENOENT:
1659 if err.errno != errno.ENOENT:
1649 raise
1660 raise
1650 self._repo.ui.warn(
1661 self._repo.ui.warn(
1651 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1662 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1652 )
1663 )
1653 return
1664 return
1654 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1665 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1655 self._repo.ui.warn(
1666 self._repo.ui.warn(
1656 _(b"copy failed: %s is not a file or a symbolic link\n")
1667 _(b"copy failed: %s is not a file or a symbolic link\n")
1657 % self._repo.dirstate.pathto(dest)
1668 % self._repo.dirstate.pathto(dest)
1658 )
1669 )
1659 else:
1670 else:
1660 with self._repo.wlock():
1671 with self._repo.wlock():
1661 ds = self._repo.dirstate
1672 ds = self._repo.dirstate
1662 if ds[dest] in b'?':
1673 if ds[dest] in b'?':
1663 ds.add(dest)
1674 ds.add(dest)
1664 elif ds[dest] in b'r':
1675 elif ds[dest] in b'r':
1665 ds.normallookup(dest)
1676 ds.normallookup(dest)
1666 ds.copy(source, dest)
1677 ds.copy(source, dest)
1667
1678
1668 def match(
1679 def match(
1669 self,
1680 self,
1670 pats=None,
1681 pats=None,
1671 include=None,
1682 include=None,
1672 exclude=None,
1683 exclude=None,
1673 default=b'glob',
1684 default=b'glob',
1674 listsubrepos=False,
1685 listsubrepos=False,
1675 badfn=None,
1686 badfn=None,
1676 ):
1687 ):
1677 r = self._repo
1688 r = self._repo
1678
1689
1679 # Only a case insensitive filesystem needs magic to translate user input
1690 # Only a case insensitive filesystem needs magic to translate user input
1680 # to actual case in the filesystem.
1691 # to actual case in the filesystem.
1681 icasefs = not util.fscasesensitive(r.root)
1692 icasefs = not util.fscasesensitive(r.root)
1682 return matchmod.match(
1693 return matchmod.match(
1683 r.root,
1694 r.root,
1684 r.getcwd(),
1695 r.getcwd(),
1685 pats,
1696 pats,
1686 include,
1697 include,
1687 exclude,
1698 exclude,
1688 default,
1699 default,
1689 auditor=r.auditor,
1700 auditor=r.auditor,
1690 ctx=self,
1701 ctx=self,
1691 listsubrepos=listsubrepos,
1702 listsubrepos=listsubrepos,
1692 badfn=badfn,
1703 badfn=badfn,
1693 icasefs=icasefs,
1704 icasefs=icasefs,
1694 )
1705 )
1695
1706
1696 def _filtersuspectsymlink(self, files):
1707 def _filtersuspectsymlink(self, files):
1697 if not files or self._repo.dirstate._checklink:
1708 if not files or self._repo.dirstate._checklink:
1698 return files
1709 return files
1699
1710
1700 # Symlink placeholders may get non-symlink-like contents
1711 # Symlink placeholders may get non-symlink-like contents
1701 # via user error or dereferencing by NFS or Samba servers,
1712 # via user error or dereferencing by NFS or Samba servers,
1702 # so we filter out any placeholders that don't look like a
1713 # so we filter out any placeholders that don't look like a
1703 # symlink
1714 # symlink
1704 sane = []
1715 sane = []
1705 for f in files:
1716 for f in files:
1706 if self.flags(f) == b'l':
1717 if self.flags(f) == b'l':
1707 d = self[f].data()
1718 d = self[f].data()
1708 if (
1719 if (
1709 d == b''
1720 d == b''
1710 or len(d) >= 1024
1721 or len(d) >= 1024
1711 or b'\n' in d
1722 or b'\n' in d
1712 or stringutil.binary(d)
1723 or stringutil.binary(d)
1713 ):
1724 ):
1714 self._repo.ui.debug(
1725 self._repo.ui.debug(
1715 b'ignoring suspect symlink placeholder "%s"\n' % f
1726 b'ignoring suspect symlink placeholder "%s"\n' % f
1716 )
1727 )
1717 continue
1728 continue
1718 sane.append(f)
1729 sane.append(f)
1719 return sane
1730 return sane
1720
1731
    def _checklookup(self, files):
        """Recheck possibly-clean *files* against the first parent.

        Returns a ``(modified, deleted, fixup)`` triple: files whose content
        or flags really differ from p1, files that became inaccessible in
        the meantime, and files that turned out to be clean (the dirstate
        only needs its stat data refreshed for those).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1753
1764
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        Marks each path in *fixup* as normal in the dirstate and runs any
        registered post-dirstate-status hooks.  Updating the dirstate is
        best-effort: it is skipped when the wlock cannot be taken without
        waiting, or when the dirstate file changed underneath us.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # identity snapshot lets us detect a concurrent rewrite of
                # .hg/dirstate between status computation and the write below
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # somebody else holds the wlock; skip the optional update
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1794
1805
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp: files the dirstate could not decide about (need full compare)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

            # files proven clean only belong in s.clean when requested
            if fixup and clean:
                s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1827
1838
1828 @propertycache
1839 @propertycache
1829 def _copies(self):
1840 def _copies(self):
1830 p1copies = {}
1841 p1copies = {}
1831 p2copies = {}
1842 p2copies = {}
1832 parents = self._repo.dirstate.parents()
1843 parents = self._repo.dirstate.parents()
1833 p1manifest = self._repo[parents[0]].manifest()
1844 p1manifest = self._repo[parents[0]].manifest()
1834 p2manifest = self._repo[parents[1]].manifest()
1845 p2manifest = self._repo[parents[1]].manifest()
1835 changedset = set(self.added()) | set(self.modified())
1846 changedset = set(self.added()) | set(self.modified())
1836 narrowmatch = self._repo.narrowmatch()
1847 narrowmatch = self._repo.narrowmatch()
1837 for dst, src in self._repo.dirstate.copies().items():
1848 for dst, src in self._repo.dirstate.copies().items():
1838 if dst not in changedset or not narrowmatch(dst):
1849 if dst not in changedset or not narrowmatch(dst):
1839 continue
1850 continue
1840 if src in p1manifest:
1851 if src in p1manifest:
1841 p1copies[dst] = src
1852 p1copies[dst] = src
1842 elif src in p2manifest:
1853 elif src in p2manifest:
1843 p2copies[dst] = src
1854 p2copies[dst] = src
1844 return p1copies, p2copies
1855 return p1copies, p2copies
1845
1856
1846 @propertycache
1857 @propertycache
1847 def _manifest(self):
1858 def _manifest(self):
1848 """generate a manifest corresponding to the values in self._status
1859 """generate a manifest corresponding to the values in self._status
1849
1860
1850 This reuse the file nodeid from parent, but we use special node
1861 This reuse the file nodeid from parent, but we use special node
1851 identifiers for added and modified files. This is used by manifests
1862 identifiers for added and modified files. This is used by manifests
1852 merge to see that files are different and by update logic to avoid
1863 merge to see that files are different and by update logic to avoid
1853 deleting newly added files.
1864 deleting newly added files.
1854 """
1865 """
1855 return self._buildstatusmanifest(self._status)
1866 return self._buildstatusmanifest(self._status)
1856
1867
1857 def _buildstatusmanifest(self, status):
1868 def _buildstatusmanifest(self, status):
1858 """Builds a manifest that includes the given status results."""
1869 """Builds a manifest that includes the given status results."""
1859 parents = self.parents()
1870 parents = self.parents()
1860
1871
1861 man = parents[0].manifest().copy()
1872 man = parents[0].manifest().copy()
1862
1873
1863 ff = self._flagfunc
1874 ff = self._flagfunc
1864 for i, l in (
1875 for i, l in (
1865 (addednodeid, status.added),
1876 (addednodeid, status.added),
1866 (modifiednodeid, status.modified),
1877 (modifiednodeid, status.modified),
1867 ):
1878 ):
1868 for f in l:
1879 for f in l:
1869 man[f] = i
1880 man[f] = i
1870 try:
1881 try:
1871 man.setflag(f, ff(f))
1882 man.setflag(f, ff(f))
1872 except OSError:
1883 except OSError:
1873 pass
1884 pass
1874
1885
1875 for f in status.deleted + status.removed:
1886 for f in status.deleted + status.removed:
1876 if f in man:
1887 if f in man:
1877 del man[f]
1888 del man[f]
1878
1889
1879 return man
1890 return man
1880
1891
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        # always start from a fresh dirstate status; the incoming ``s`` is
        # ignored for the wdir-vs-parent fast path
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # slow path: delegate to the generic manifest-based comparison
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1901
1912
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE: mutates the caller-supplied matcher in place
            match.bad = bad
        return match
1924
1935
1925 def walk(self, match):
1936 def walk(self, match):
1926 '''Generates matching file names.'''
1937 '''Generates matching file names.'''
1927 return sorted(
1938 return sorted(
1928 self._repo.dirstate.walk(
1939 self._repo.dirstate.walk(
1929 self._repo.narrowmatch(match),
1940 self._repo.narrowmatch(match),
1930 subrepos=sorted(self.substate),
1941 subrepos=sorted(self.substate),
1931 unknown=True,
1942 unknown=True,
1932 ignored=False,
1943 ignored=False,
1933 )
1944 )
1934 )
1945 )
1935
1946
1936 def matches(self, match):
1947 def matches(self, match):
1937 match = self._repo.narrowmatch(match)
1948 match = self._repo.narrowmatch(match)
1938 ds = self._repo.dirstate
1949 ds = self._repo.dirstate
1939 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1950 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1940
1951
    def markcommitted(self, node):
        """Update the dirstate to reflect that *node* was committed.

        Modified/added files become clean ('normal'), removed files are
        dropped, and the dirstate parent is moved to *node*.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1955
1966
1956
1967
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        # filelog and ctx are optional pre-computed values; when supplied
        # they pre-populate the corresponding (property)caches.
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # an uncommitted file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` if this file was
        copied/renamed, else ``None``."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # first parent is the copy source (filelog unknown -> None)
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # nullid entries mean "no version in that parent"; skip them
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # nothing has been committed on top of this file yet
        return []
2014
2025
2015
2026
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working-directory context
        return workingctx(self._repo)

    def data(self):
        """Return the file's current on-disk content."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the dirstate-recorded copy source, or None."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        # lstat: do not follow symlinks
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        """Return the ``os.lstat`` result for the file."""
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return (mtime, tzoffset); falls back to the changectx date when
        the file is missing from disk."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        # follows symlinks
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        # does not follow symlinks
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        """Check the path for badness (symlink traversal etc.)."""
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        """Set the symlink (*l*) and executable (*x*) flags on disk."""
        self._repo.wvfs.setflags(self._path, l, x)
2108
2119
2109
2120
2110 class overlayworkingctx(committablectx):
2121 class overlayworkingctx(committablectx):
2111 """Wraps another mutable context with a write-back cache that can be
2122 """Wraps another mutable context with a write-back cache that can be
2112 converted into a commit context.
2123 converted into a commit context.
2113
2124
2114 self._cache[path] maps to a dict with keys: {
2125 self._cache[path] maps to a dict with keys: {
2115 'exists': bool?
2126 'exists': bool?
2116 'date': date?
2127 'date': date?
2117 'data': str?
2128 'data': str?
2118 'flags': str?
2129 'flags': str?
2119 'copied': str? (path or None)
2130 'copied': str? (path or None)
2120 }
2131 }
2121 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2132 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2122 is `False`, the file was deleted.
2133 is `False`, the file was deleted.
2123 """
2134 """
2124
2135
2125 def __init__(self, repo):
2136 def __init__(self, repo):
2126 super(overlayworkingctx, self).__init__(repo)
2137 super(overlayworkingctx, self).__init__(repo)
2127 self.clean()
2138 self.clean()
2128
2139
2129 def setbase(self, wrappedctx):
2140 def setbase(self, wrappedctx):
2130 self._wrappedctx = wrappedctx
2141 self._wrappedctx = wrappedctx
2131 self._parents = [wrappedctx]
2142 self._parents = [wrappedctx]
2132 # Drop old manifest cache as it is now out of date.
2143 # Drop old manifest cache as it is now out of date.
2133 # This is necessary when, e.g., rebasing several nodes with one
2144 # This is necessary when, e.g., rebasing several nodes with one
2134 # ``overlayworkingctx`` (e.g. with --collapse).
2145 # ``overlayworkingctx`` (e.g. with --collapse).
2135 util.clearcachedproperty(self, b'_manifest')
2146 util.clearcachedproperty(self, b'_manifest')
2136
2147
2137 def data(self, path):
2148 def data(self, path):
2138 if self.isdirty(path):
2149 if self.isdirty(path):
2139 if self._cache[path][b'exists']:
2150 if self._cache[path][b'exists']:
2140 if self._cache[path][b'data'] is not None:
2151 if self._cache[path][b'data'] is not None:
2141 return self._cache[path][b'data']
2152 return self._cache[path][b'data']
2142 else:
2153 else:
2143 # Must fallback here, too, because we only set flags.
2154 # Must fallback here, too, because we only set flags.
2144 return self._wrappedctx[path].data()
2155 return self._wrappedctx[path].data()
2145 else:
2156 else:
2146 raise error.ProgrammingError(
2157 raise error.ProgrammingError(
2147 b"No such file or directory: %s" % path
2158 b"No such file or directory: %s" % path
2148 )
2159 )
2149 else:
2160 else:
2150 return self._wrappedctx[path].data()
2161 return self._wrappedctx[path].data()
2151
2162
2152 @propertycache
2163 @propertycache
2153 def _manifest(self):
2164 def _manifest(self):
2154 parents = self.parents()
2165 parents = self.parents()
2155 man = parents[0].manifest().copy()
2166 man = parents[0].manifest().copy()
2156
2167
2157 flag = self._flagfunc
2168 flag = self._flagfunc
2158 for path in self.added():
2169 for path in self.added():
2159 man[path] = addednodeid
2170 man[path] = addednodeid
2160 man.setflag(path, flag(path))
2171 man.setflag(path, flag(path))
2161 for path in self.modified():
2172 for path in self.modified():
2162 man[path] = modifiednodeid
2173 man[path] = modifiednodeid
2163 man.setflag(path, flag(path))
2174 man.setflag(path, flag(path))
2164 for path in self.removed():
2175 for path in self.removed():
2165 del man[path]
2176 del man[path]
2166 return man
2177 return man
2167
2178
2168 @propertycache
2179 @propertycache
2169 def _flagfunc(self):
2180 def _flagfunc(self):
2170 def f(path):
2181 def f(path):
2171 return self._cache[path][b'flags']
2182 return self._cache[path][b'flags']
2172
2183
2173 return f
2184 return f
2174
2185
2175 def files(self):
2186 def files(self):
2176 return sorted(self.added() + self.modified() + self.removed())
2187 return sorted(self.added() + self.modified() + self.removed())
2177
2188
2178 def modified(self):
2189 def modified(self):
2179 return [
2190 return [
2180 f
2191 f
2181 for f in self._cache.keys()
2192 for f in self._cache.keys()
2182 if self._cache[f][b'exists'] and self._existsinparent(f)
2193 if self._cache[f][b'exists'] and self._existsinparent(f)
2183 ]
2194 ]
2184
2195
2185 def added(self):
2196 def added(self):
2186 return [
2197 return [
2187 f
2198 f
2188 for f in self._cache.keys()
2199 for f in self._cache.keys()
2189 if self._cache[f][b'exists'] and not self._existsinparent(f)
2200 if self._cache[f][b'exists'] and not self._existsinparent(f)
2190 ]
2201 ]
2191
2202
2192 def removed(self):
2203 def removed(self):
2193 return [
2204 return [
2194 f
2205 f
2195 for f in self._cache.keys()
2206 for f in self._cache.keys()
2196 if not self._cache[f][b'exists'] and self._existsinparent(f)
2207 if not self._cache[f][b'exists'] and self._existsinparent(f)
2197 ]
2208 ]
2198
2209
2199 def p1copies(self):
2210 def p1copies(self):
2200 copies = self._repo._wrappedctx.p1copies().copy()
2211 copies = self._repo._wrappedctx.p1copies().copy()
2201 narrowmatch = self._repo.narrowmatch()
2212 narrowmatch = self._repo.narrowmatch()
2202 for f in self._cache.keys():
2213 for f in self._cache.keys():
2203 if not narrowmatch(f):
2214 if not narrowmatch(f):
2204 continue
2215 continue
2205 copies.pop(f, None) # delete if it exists
2216 copies.pop(f, None) # delete if it exists
2206 source = self._cache[f][b'copied']
2217 source = self._cache[f][b'copied']
2207 if source:
2218 if source:
2208 copies[f] = source
2219 copies[f] = source
2209 return copies
2220 return copies
2210
2221
2211 def p2copies(self):
2222 def p2copies(self):
2212 copies = self._repo._wrappedctx.p2copies().copy()
2223 copies = self._repo._wrappedctx.p2copies().copy()
2213 narrowmatch = self._repo.narrowmatch()
2224 narrowmatch = self._repo.narrowmatch()
2214 for f in self._cache.keys():
2225 for f in self._cache.keys():
2215 if not narrowmatch(f):
2226 if not narrowmatch(f):
2216 continue
2227 continue
2217 copies.pop(f, None) # delete if it exists
2228 copies.pop(f, None) # delete if it exists
2218 source = self._cache[f][b'copied']
2229 source = self._cache[f][b'copied']
2219 if source:
2230 if source:
2220 copies[f] = source
2231 copies[f] = source
2221 return copies
2232 return copies
2222
2233
def isinmemory(self):
    """An overlay context never touches the on-disk working directory."""
    return True
2225
2236
def filedate(self, path):
    """Return the date of ``path``: the cached value when the file is
    dirty in this overlay, otherwise the wrapped context's date."""
    if not self.isdirty(path):
        return self._wrappedctx[path].date()
    return self._cache[path][b'date']
2231
2242
def markcopied(self, path, origin):
    """Record that ``path`` was copied from ``origin``.

    The file's current date and flags are carried over unchanged; only
    the copy source is (re)set.
    """
    current_date = self.filedate(path)
    current_flags = self.flags(path)
    self._markdirty(
        path,
        exists=True,
        date=current_date,
        flags=current_flags,
        copied=origin,
    )
2240
2251
def copydata(self, path):
    """Return the recorded copy source of ``path``, or None when the
    file is not dirty in this overlay."""
    if not self.isdirty(path):
        return None
    return self._cache[path][b'copied']
2246
2257
def flags(self, path):
    """Return the flags (b'l', b'x' or b'') of ``path``.

    Dirty files answer from the in-memory cache; clean files defer to
    the wrapped context.  Raises ``error.ProgrammingError`` when the
    file is marked as deleted in this overlay.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return self._cache[path][b'flags']
        else:
            # Bug fix: the original interpolated ``self._path``, but
            # overlayworkingctx has no ``_path`` attribute (only the
            # file context class below does); ``path`` is what was meant.
            raise error.ProgrammingError(
                b"No such file or directory: %s" % path
            )
    else:
        return self._wrappedctx[path].flags()
2257
2268
def __contains__(self, key):
    """True when ``key`` is a file present in this overlay context."""
    entry = self._cache.get(key)
    if entry is not None:
        # Cache entries are always dicts, so a non-None entry means the
        # file is tracked by the overlay; its 'exists' bit decides.
        return entry[b'exists']
    # Not touched in the overlay: fall back to the first parent.
    return key in self.p1()
2262
2273
def _existsinparent(self, path):
    """True when ``path`` is present in the wrapped (parent) context."""
    try:
        # Indexing a commit context raises ManifestLookupError for a
        # missing path -- unlike ``workingctx``, which would hand back a
        # ``workingfilectx`` exposing an ``exists()`` method.
        self._wrappedctx[path]
    except error.ManifestLookupError:
        return False
    return True
2272
2283
def _auditconflicts(self, path):
    """Replicates conflict checks done by wvfs.write().

    Since we never write to the filesystem and never call `applyupdates` in
    IMM, we'll never check that a path is actually writable -- e.g., because
    it adds `a/foo`, but `a` is actually a file in the other commit.

    Raises ``error.Abort`` on a file/directory conflict; returns None
    when ``path`` is safe to write.
    """

    def fail(path, component):
        # p1() is the base and we're receiving "writes" for p2()'s
        # files.
        if b'l' in self.p1()[component].flags():
            raise error.Abort(
                b"error: %s conflicts with symlink %s "
                b"in %d." % (path, component, self.p1().rev())
            )
        else:
            raise error.Abort(
                b"error: '%s' conflicts with file '%s' in "
                b"%d." % (path, component, self.p1().rev())
            )

    # Test that each new directory to be created to write this path from p2
    # is not a file in p1.
    components = path.split(b'/')
    for i in pycompat.xrange(len(components)):
        # Each proper prefix of the path is a directory that would have
        # to exist.  NOTE(review): i starts at 0, so the first iteration
        # tests the empty component b'' -- presumably harmless since an
        # empty path is never a tracked file; confirm.
        component = b"/".join(components[0:i])
        if component in self:
            fail(path, component)

    # Test the other direction -- that this path from p2 isn't a directory
    # in p1 (test that p1 doesn't have any paths matching `path/*`).
    match = self.match([path], default=b'path')
    matches = self.p1().manifest().matches(match)
    mfiles = matches.keys()
    if len(mfiles) > 0:
        # The sole match being the path itself means it is a plain file
        # in p1, not a directory: no conflict.
        if len(mfiles) == 1 and mfiles[0] == path:
            return
        # omit the files which are deleted in current IMM wctx
        mfiles = [m for m in mfiles if m in self]
        if not mfiles:
            return
        raise error.Abort(
            b"error: file '%s' cannot be written because "
            b" '%s/' is a directory in %s (containing %d "
            b"entries: %s)"
            % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
        )
2321
2332
def write(self, path, data, flags=b'', **kwargs):
    """Stage ``data`` as the new contents of ``path`` in the cache.

    Extra keyword arguments are accepted for interface compatibility
    and ignored.
    """
    if data is None:
        raise error.ProgrammingError(b"data must be non-None")
    # Refuse writes that would conflict with p1's files/directories.
    self._auditconflicts(path)
    now = dateutil.makedate()
    self._markdirty(path, exists=True, data=data, date=now, flags=flags)
2329
2340
def setflags(self, path, l, x):
    """Set the symlink (``l``) / executable (``x``) flag of ``path``.

    The symlink flag wins when both are requested, matching the
    one-flag-at-a-time manifest encoding.
    """
    if l:
        flag = b'l'
    elif x:
        flag = b'x'
    else:
        flag = b''
    self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2337
2348
def remove(self, path):
    """Mark ``path`` as deleted in this in-memory overlay."""
    self._markdirty(path, exists=False)
2340
2351
def exists(self, path):
    """exists behaves like `lexists`, but needs to follow symlinks and
    return False if they are broken.
    """
    if not self.isdirty(path):
        return self._existsinparent(path)
    entry = self._cache[path]
    if entry[b'exists'] and b'l' in entry[b'flags']:
        # In-memory symlink: dereference by testing whether its target
        # (stored as the file data) exists.
        return self.exists(entry[b'data'].strip())
    return entry[b'exists']
2357
2368
def lexists(self, path):
    """True when ``path`` exists, without following symlinks."""
    if not self.isdirty(path):
        return self._existsinparent(path)
    return self._cache[path][b'exists']
2364
2375
def size(self, path):
    """Return the size in bytes of ``path``.

    Dirty files are measured from the cached data; clean files defer to
    the wrapped context.  Raises ``error.ProgrammingError`` when the
    file is marked as deleted in this overlay.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return len(self._cache[path][b'data'])
        else:
            # Bug fix: the original formatted ``self._path``, an
            # attribute overlayworkingctx does not have; ``path`` is
            # the intended value (same defect as in flags()).
            raise error.ProgrammingError(
                b"No such file or directory: %s" % path
            )
    return self._wrappedctx[path].size()
2374
2385
def tomemctx(
    self,
    text,
    branch=None,
    extra=None,
    date=None,
    parents=None,
    user=None,
    editor=None,
):
    """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
    committed.

    ``text`` is the commit message.
    ``parents`` (optional) are rev numbers.
    """
    # Default parents to the wrapped contexts' if not passed.
    if parents is None:
        parents = self._wrappedctx.parents()
        if len(parents) == 1:
            # Normalize to a 2-tuple; the second slot is filled below.
            parents = (parents[0], None)

    # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
    if parents[1] is None:
        parents = (self._repo[parents[0]], None)
    else:
        parents = (self._repo[parents[0]], self._repo[parents[1]])

    files = self.files()

    def getfile(repo, memctx, path):
        # Serve each touched file from the in-memory cache; positional
        # arguments to memfilectx are data, islink, isexec, copysource.
        if self._cache[path][b'exists']:
            return memfilectx(
                repo,
                memctx,
                path,
                self._cache[path][b'data'],
                b'l' in self._cache[path][b'flags'],
                b'x' in self._cache[path][b'flags'],
                self._cache[path][b'copied'],
            )
        else:
            # Returning None, but including the path in `files`, is
            # necessary for memctx to register a deletion.
            return None

    return memctx(
        self._repo,
        parents,
        text,
        files,
        getfile,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
2433
2444
def isdirty(self, path):
    """True when ``path`` has been touched in this overlay (i.e. it has
    an entry in the in-memory cache)."""
    dirty_files = self._cache
    return path in dirty_files
2436
2447
def isempty(self):
    """True when committing this overlay would produce an empty commit."""
    # Clean entries can sneak into the cache (e.g. resolving a conflict
    # with ``--tool :local``); drop them before deciding.
    self._compact()
    return not self._cache
2442
2453
def clean(self):
    """Discard every in-memory modification in this overlay."""
    self._cache = dict()
2445
2456
def _compact(self):
    """Removes keys from the cache that are actually clean, by comparing
    them with the underlying context.

    This can occur during the merge process, e.g. by passing --tool :local
    to resolve a conflict.

    Returns the list of paths that were dropped from the cache.
    """
    keys = []
    # This won't be perfect, but can help performance significantly when
    # using things like remotefilelog.
    scmutil.prefetchfiles(
        self.repo(),
        [self.p1().rev()],
        scmutil.matchfiles(self.repo(), self._cache.keys()),
    )

    for path in self._cache.keys():
        cache = self._cache[path]
        try:
            underlying = self._wrappedctx[path]
            # A cache entry identical to the underlying file (same data
            # and flags) is clean and can be dropped.  Entries marked as
            # deleted carry data=None and therefore never compare equal,
            # so they are kept.
            if (
                underlying.data() == cache[b'data']
                and underlying.flags() == cache[b'flags']
            ):
                keys.append(path)
        except error.ManifestLookupError:
            # Path not in the underlying manifest (created).
            continue

    for path in keys:
        del self._cache[path]
    return keys
2478
2489
def _markdirty(
    self, path, exists, data=None, date=None, flags=b'', copied=None
):
    """Record ``path`` in the in-memory cache with the given state.

    When the file is marked as existing but no data was supplied, reuse
    any previously cached data and fall back to the wrapped context, so
    an existing entry always carries data.
    """
    if exists and data is None:
        previous = self._cache.get(path) or {}
        data = previous.get(b'data')
        if data is None:
            data = self._wrappedctx[path].data()

    self._cache[path] = {
        b'exists': exists,
        b'data': data,
        b'date': date,
        b'flags': flags,
        b'copied': copied,
    }
2498
2509
def filectx(self, path, filelog=None):
    """Return an overlay file context for ``path`` backed by this ctx."""
    fctx = overlayworkingfilectx(
        self._repo, path, parent=self, filelog=filelog
    )
    return fctx
2503
2514
2504
2515
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # ``parent`` is the overlayworkingctx this file belongs to; every
        # accessor below delegates to it with our path.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Content-only comparison; True means "different" per fctx.cmp
        # convention.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        # NOTE(review): this delegates to the parent context's exists(),
        # which follows symlinks -- not its lexists(); confirm whether
        # that asymmetry is intentional.
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem to audit for an in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but
        # meaningless in memory.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can shadow an in-memory file.
        pass
2559
2570
2560
2571
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything tracked that is not part of this commit is
            # reported as clean, regardless of on-disk state.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # deleted / unknown / ignored are intentionally empty: only the
        # files recorded in self._status matter here.
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2604
2615
2605
2616
def makecachingfilectxfn(func):
    """Wrap ``func`` in a memoizer keyed on path only.

    We can't use util.cachefunc because it would key on all arguments,
    creating a reference cycle through the repo and memctx arguments.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        try:
            return memo[path]
        except KeyError:
            result = func(repo, memctx, path)
            memo[path] = result
            return result

    return getfilectx
2621
2632
2622
2633
def memfilefromctx(ctx):
    """Build a filectxfn that serves ctx[path] as a memfilectx.

    Convenience helper for constructing a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        copysource = fctx.copysource()
        memfile = memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )
        return memfile

    return getfilectx
2644
2655
2645
2656
def memfilefrompatch(patchstore):
    """Build a filectxfn serving files out of a patchstore object.

    Convenience helper for constructing a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # Deleted file: memctx expects None for removals.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2668
2679
2669
2680
2670 class memctx(committablectx):
2681 class memctx(committablectx):
2671 """Use memctx to perform in-memory commits via localrepo.commitctx().
2682 """Use memctx to perform in-memory commits via localrepo.commitctx().
2672
2683
2673 Revision information is supplied at initialization time while
2684 Revision information is supplied at initialization time while
2674 related files data and is made available through a callback
2685 related files data and is made available through a callback
2675 mechanism. 'repo' is the current localrepo, 'parents' is a
2686 mechanism. 'repo' is the current localrepo, 'parents' is a
2676 sequence of two parent revisions identifiers (pass None for every
2687 sequence of two parent revisions identifiers (pass None for every
2677 missing parent), 'text' is the commit message and 'files' lists
2688 missing parent), 'text' is the commit message and 'files' lists
2678 names of files touched by the revision (normalized and relative to
2689 names of files touched by the revision (normalized and relative to
2679 repository root).
2690 repository root).
2680
2691
2681 filectxfn(repo, memctx, path) is a callable receiving the
2692 filectxfn(repo, memctx, path) is a callable receiving the
2682 repository, the current memctx object and the normalized path of
2693 repository, the current memctx object and the normalized path of
2683 requested file, relative to repository root. It is fired by the
2694 requested file, relative to repository root. It is fired by the
2684 commit function for every file in 'files', but calls order is
2695 commit function for every file in 'files', but calls order is
2685 undefined. If the file is available in the revision being
2696 undefined. If the file is available in the revision being
2686 committed (updated or added), filectxfn returns a memfilectx
2697 committed (updated or added), filectxfn returns a memfilectx
2687 object. If the file was removed, filectxfn return None for recent
2698 object. If the file was removed, filectxfn return None for recent
2688 Mercurial. Moved files are represented by marking the source file
2699 Mercurial. Moved files are represented by marking the source file
2689 removed and the new file added with copy information (see
2700 removed and the new file added with copy information (see
2690 memfilectx).
2701 memfilectx).
2691
2702
2692 user receives the committer name and defaults to current
2703 user receives the committer name and defaults to current
2693 repository username, date is the commit date in any format
2704 repository username, date is the commit date in any format
2694 supported by dateutil.parsedate() and defaults to current date, extra
2705 supported by dateutil.parsedate() and defaults to current date, extra
2695 is a dictionary of metadata or is left empty.
2706 is a dictionary of metadata or is left empty.
2696 """
2707 """
2697
2708
2698 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2709 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2699 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2710 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2700 # this field to determine what to do in filectxfn.
2711 # this field to determine what to do in filectxfn.
2701 _returnnoneformissingfiles = True
2712 _returnnoneformissingfiles = True
2702
2713
2703 def __init__(
2714 def __init__(
2704 self,
2715 self,
2705 repo,
2716 repo,
2706 parents,
2717 parents,
2707 text,
2718 text,
2708 files,
2719 files,
2709 filectxfn,
2720 filectxfn,
2710 user=None,
2721 user=None,
2711 date=None,
2722 date=None,
2712 extra=None,
2723 extra=None,
2713 branch=None,
2724 branch=None,
2714 editor=False,
2725 editor=False,
2715 ):
2726 ):
2716 super(memctx, self).__init__(
2727 super(memctx, self).__init__(
2717 repo, text, user, date, extra, branch=branch
2728 repo, text, user, date, extra, branch=branch
2718 )
2729 )
2719 self._rev = None
2730 self._rev = None
2720 self._node = None
2731 self._node = None
2721 parents = [(p or nullid) for p in parents]
2732 parents = [(p or nullid) for p in parents]
2722 p1, p2 = parents
2733 p1, p2 = parents
2723 self._parents = [self._repo[p] for p in (p1, p2)]
2734 self._parents = [self._repo[p] for p in (p1, p2)]
2724 files = sorted(set(files))
2735 files = sorted(set(files))
2725 self._files = files
2736 self._files = files
2726 self.substate = {}
2737 self.substate = {}
2727
2738
2728 if isinstance(filectxfn, patch.filestore):
2739 if isinstance(filectxfn, patch.filestore):
2729 filectxfn = memfilefrompatch(filectxfn)
2740 filectxfn = memfilefrompatch(filectxfn)
2730 elif not callable(filectxfn):
2741 elif not callable(filectxfn):
2731 # if store is not callable, wrap it in a function
2742 # if store is not callable, wrap it in a function
2732 filectxfn = memfilefromctx(filectxfn)
2743 filectxfn = memfilefromctx(filectxfn)
2733
2744
2734 # memoizing increases performance for e.g. vcs convert scenarios.
2745 # memoizing increases performance for e.g. vcs convert scenarios.
2735 self._filectxfn = makecachingfilectxfn(filectxfn)
2746 self._filectxfn = makecachingfilectxfn(filectxfn)
2736
2747
2737 if editor:
2748 if editor:
2738 self._text = editor(self._repo, self, [])
2749 self._text = editor(self._repo, self, [])
2739 self._repo.savecommitmessage(self._text)
2750 self._repo.savecommitmessage(self._text)
2740
2751
2741 def filectx(self, path, filelog=None):
2752 def filectx(self, path, filelog=None):
2742 """get a file context from the working directory
2753 """get a file context from the working directory
2743
2754
2744 Returns None if file doesn't exist and should be removed."""
2755 Returns None if file doesn't exist and should be removed."""
2745 return self._filectxfn(self._repo, self, path)
2756 return self._filectxfn(self._repo, self, path)
2746
2757
2747 def commit(self):
2758 def commit(self):
2748 """commit context to the repo"""
2759 """commit context to the repo"""
2749 return self._repo.commitctx(self)
2760 return self._repo.commitctx(self)
2750
2761
@propertycache
def _manifest(self):
    """Build a synthetic manifest from p1's manifest and our status.

    Modified and added files get placeholder node ids; removed files
    are dropped from the copy.  Only the first parent is consulted for
    now (kept deliberately simple).
    """
    # Start from a copy of the first parent's manifest.
    first_parent = self._parents[0]
    man = first_parent.manifest().copy()

    status = self._status
    for path in status.modified:
        man[path] = modifiednodeid
    for path in status.added:
        man[path] = addednodeid
    for path in status.removed:
        if path in man:
            del man[path]

    return man
2770
2781
@propertycache
def _status(self):
    """Calculate exact status from ``files`` specified at construction."""
    man1 = self.p1().manifest()
    p2 = self._parents[1]
    # "1 < len(self._parents)" can't be used for checking existence of
    # the 2nd parent: "memctx._parents" is always a list of length 2,
    # so we must inspect the node instead.
    if p2.node() == nullid:
        tracked = lambda f: f in man1
    else:
        man2 = p2.manifest()
        tracked = lambda f: f in man1 or f in man2

    modified, added, removed = [], [], []
    for f in self._files:
        if not tracked(f):
            added.append(f)
        elif self[f]:
            modified.append(f)
        else:
            removed.append(f)

    return scmutil.status(modified, added, removed, [], [], [], [])
2796
2807
2797
2808
class memfilectx(committablefilectx):
    """An in-memory file to be committed.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was
        copied in the revision being committed, or None.
        """
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the file type as manifest flags: 'l' = symlink,
        # 'x' = executable, '' = regular file.  Link wins over exec.
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        else:
            flags = b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True when contents differ.
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2848
2859
2849
2860
class metadataonlyctx(committablectx):
    """Like memctx but reusing the manifest of a different commit.

    Intended to be used by lightweight operations that create
    metadata-only changes.

    Revision information is supplied at initialization time.  'repo' is
    the current localrepo, 'originalctx' is the revision whose manifest
    we are reusing, 'parents' is a sequence of two parent revision
    identifiers (pass None for every missing parent), 'text' is the
    commit message.

    user receives the committer name and defaults to the current
    repository username, date is the commit date in any format supported
    by dateutil.parsedate() and defaults to the current date, extra is a
    dictionary of metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=False,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        # Build a fresh two-element parent list, padding with the null
        # revision when fewer than two parents were supplied.
        if parents is None:
            parents = list(originalctx.parents())
        else:
            parents = [repo[p] for p in parents if p is not None]
        while len(parents) < 2:
            parents.append(repo[nullid])
        self._parents = parents
        p1, p2 = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # Delegate to the context whose manifest we reuse.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the
        ``origctx`` and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking existence
        # of the 2nd parent: "metadataonlyctx._parents" is always a
        # list of length 2, so the node must be inspected instead.
        if p2.node() == nullid:
            tracked = lambda f: f in man1
        else:
            man2 = p2.manifest()
            tracked = lambda f: f in man1 or f in man2

        modified, added, removed = [], [], []
        for f in self._files:
            if not tracked(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2958
2969
2959
2970
class arbitraryfilectx(object):
    """Filectx-like access to a file at an arbitrary location on disk,
    possibly not inside the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the
        # fast path if either side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast path for merge when both sides are disk-backed.
            # filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # Arbitrary on-disk files never carry link/exec flags here.
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now