##// END OF EJS Templates
py3: don't risk passing a None value to error.ManifestLookupError()...
Martin von Zweigbergk -
r46061:a108f7ff default
parent child Browse files
Show More
@@ -1,3096 +1,3096 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 dagop,
31 dagop,
32 encoding,
32 encoding,
33 error,
33 error,
34 fileset,
34 fileset,
35 match as matchmod,
35 match as matchmod,
36 mergestate as mergestatemod,
36 mergestate as mergestatemod,
37 metadata,
37 metadata,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 scmutil,
44 scmutil,
45 sparse,
45 sparse,
46 subrepo,
46 subrepo,
47 subrepoutil,
47 subrepoutil,
48 util,
48 util,
49 )
49 )
50 from .utils import (
50 from .utils import (
51 dateutil,
51 dateutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 propertycache = util.propertycache
55 propertycache = util.propertycache
56
56
57
57
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""
65
65
66 def __init__(self, repo):
66 def __init__(self, repo):
67 self._repo = repo
67 self._repo = repo
68
68
69 def __bytes__(self):
69 def __bytes__(self):
70 return short(self.node())
70 return short(self.node())
71
71
72 __str__ = encoding.strmethod(__bytes__)
72 __str__ = encoding.strmethod(__bytes__)
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _buildstatusmanifest(self, status):
95 def _buildstatusmanifest(self, status):
96 """Builds a manifest that includes the given status results, if this is
96 """Builds a manifest that includes the given status results, if this is
97 a working copy context. For non-working copy contexts, it just returns
97 a working copy context. For non-working copy contexts, it just returns
98 the normal manifest."""
98 the normal manifest."""
99 return self.manifest()
99 return self.manifest()
100
100
101 def _matchstatus(self, other, match):
101 def _matchstatus(self, other, match):
102 """This internal method provides a way for child objects to override the
102 """This internal method provides a way for child objects to override the
103 match operator.
103 match operator.
104 """
104 """
105 return match
105 return match
106
106
107 def _buildstatus(
107 def _buildstatus(
108 self, other, s, match, listignored, listclean, listunknown
108 self, other, s, match, listignored, listclean, listunknown
109 ):
109 ):
110 """build a status with respect to another context"""
110 """build a status with respect to another context"""
111 # Load earliest manifest first for caching reasons. More specifically,
111 # Load earliest manifest first for caching reasons. More specifically,
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
114 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # 1000 and cache it so that when you read 1001, we just need to apply a
115 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta to what's in the cache. So that's one full reconstruction + one
116 # delta application.
116 # delta application.
117 mf2 = None
117 mf2 = None
118 if self.rev() is not None and self.rev() < other.rev():
118 if self.rev() is not None and self.rev() < other.rev():
119 mf2 = self._buildstatusmanifest(s)
119 mf2 = self._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
121 if mf2 is None:
121 if mf2 is None:
122 mf2 = self._buildstatusmanifest(s)
122 mf2 = self._buildstatusmanifest(s)
123
123
124 modified, added = [], []
124 modified, added = [], []
125 removed = []
125 removed = []
126 clean = []
126 clean = []
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 deletedset = set(deleted)
128 deletedset = set(deleted)
129 d = mf1.diff(mf2, match=match, clean=listclean)
129 d = mf1.diff(mf2, match=match, clean=listclean)
130 for fn, value in pycompat.iteritems(d):
130 for fn, value in pycompat.iteritems(d):
131 if fn in deletedset:
131 if fn in deletedset:
132 continue
132 continue
133 if value is None:
133 if value is None:
134 clean.append(fn)
134 clean.append(fn)
135 continue
135 continue
136 (node1, flag1), (node2, flag2) = value
136 (node1, flag1), (node2, flag2) = value
137 if node1 is None:
137 if node1 is None:
138 added.append(fn)
138 added.append(fn)
139 elif node2 is None:
139 elif node2 is None:
140 removed.append(fn)
140 removed.append(fn)
141 elif flag1 != flag2:
141 elif flag1 != flag2:
142 modified.append(fn)
142 modified.append(fn)
143 elif node2 not in wdirfilenodeids:
143 elif node2 not in wdirfilenodeids:
144 # When comparing files between two commits, we save time by
144 # When comparing files between two commits, we save time by
145 # not comparing the file contents when the nodeids differ.
145 # not comparing the file contents when the nodeids differ.
146 # Note that this means we incorrectly report a reverted change
146 # Note that this means we incorrectly report a reverted change
147 # to a file as a modification.
147 # to a file as a modification.
148 modified.append(fn)
148 modified.append(fn)
149 elif self[fn].cmp(other[fn]):
149 elif self[fn].cmp(other[fn]):
150 modified.append(fn)
150 modified.append(fn)
151 else:
151 else:
152 clean.append(fn)
152 clean.append(fn)
153
153
154 if removed:
154 if removed:
155 # need to filter files if they are already reported as removed
155 # need to filter files if they are already reported as removed
156 unknown = [
156 unknown = [
157 fn
157 fn
158 for fn in unknown
158 for fn in unknown
159 if fn not in mf1 and (not match or match(fn))
159 if fn not in mf1 and (not match or match(fn))
160 ]
160 ]
161 ignored = [
161 ignored = [
162 fn
162 fn
163 for fn in ignored
163 for fn in ignored
164 if fn not in mf1 and (not match or match(fn))
164 if fn not in mf1 and (not match or match(fn))
165 ]
165 ]
166 # if they're deleted, don't report them as removed
166 # if they're deleted, don't report them as removed
167 removed = [fn for fn in removed if fn not in deletedset]
167 removed = [fn for fn in removed if fn not in deletedset]
168
168
169 return scmutil.status(
169 return scmutil.status(
170 modified, added, removed, deleted, unknown, ignored, clean
170 modified, added, removed, deleted, unknown, ignored, clean
171 )
171 )
172
172
173 @propertycache
173 @propertycache
174 def substate(self):
174 def substate(self):
175 return subrepoutil.state(self, self._repo.ui)
175 return subrepoutil.state(self, self._repo.ui)
176
176
177 def subrev(self, subpath):
177 def subrev(self, subpath):
178 return self.substate[subpath][1]
178 return self.substate[subpath][1]
179
179
180 def rev(self):
180 def rev(self):
181 return self._rev
181 return self._rev
182
182
183 def node(self):
183 def node(self):
184 return self._node
184 return self._node
185
185
186 def hex(self):
186 def hex(self):
187 return hex(self.node())
187 return hex(self.node())
188
188
189 def manifest(self):
189 def manifest(self):
190 return self._manifest
190 return self._manifest
191
191
192 def manifestctx(self):
192 def manifestctx(self):
193 return self._manifestctx
193 return self._manifestctx
194
194
195 def repo(self):
195 def repo(self):
196 return self._repo
196 return self._repo
197
197
198 def phasestr(self):
198 def phasestr(self):
199 return phases.phasenames[self.phase()]
199 return phases.phasenames[self.phase()]
200
200
201 def mutable(self):
201 def mutable(self):
202 return self.phase() > phases.public
202 return self.phase() > phases.public
203
203
204 def matchfileset(self, cwd, expr, badfn=None):
204 def matchfileset(self, cwd, expr, badfn=None):
205 return fileset.match(self, cwd, expr, badfn=badfn)
205 return fileset.match(self, cwd, expr, badfn=badfn)
206
206
207 def obsolete(self):
207 def obsolete(self):
208 """True if the changeset is obsolete"""
208 """True if the changeset is obsolete"""
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
210
210
211 def extinct(self):
211 def extinct(self):
212 """True if the changeset is extinct"""
212 """True if the changeset is extinct"""
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
214
214
215 def orphan(self):
215 def orphan(self):
216 """True if the changeset is not obsolete, but its ancestor is"""
216 """True if the changeset is not obsolete, but its ancestor is"""
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
218
218
219 def phasedivergent(self):
219 def phasedivergent(self):
220 """True if the changeset tries to be a successor of a public changeset
220 """True if the changeset tries to be a successor of a public changeset
221
221
222 Only non-public and non-obsolete changesets may be phase-divergent.
222 Only non-public and non-obsolete changesets may be phase-divergent.
223 """
223 """
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
225
225
226 def contentdivergent(self):
226 def contentdivergent(self):
227 """Is a successor of a changeset with multiple possible successor sets
227 """Is a successor of a changeset with multiple possible successor sets
228
228
229 Only non-public and non-obsolete changesets may be content-divergent.
229 Only non-public and non-obsolete changesets may be content-divergent.
230 """
230 """
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
232
232
233 def isunstable(self):
233 def isunstable(self):
234 """True if the changeset is either orphan, phase-divergent or
234 """True if the changeset is either orphan, phase-divergent or
235 content-divergent"""
235 content-divergent"""
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
237
237
238 def instabilities(self):
238 def instabilities(self):
239 """return the list of instabilities affecting this changeset.
239 """return the list of instabilities affecting this changeset.
240
240
241 Instabilities are returned as strings. possible values are:
241 Instabilities are returned as strings. possible values are:
242 - orphan,
242 - orphan,
243 - phase-divergent,
243 - phase-divergent,
244 - content-divergent.
244 - content-divergent.
245 """
245 """
246 instabilities = []
246 instabilities = []
247 if self.orphan():
247 if self.orphan():
248 instabilities.append(b'orphan')
248 instabilities.append(b'orphan')
249 if self.phasedivergent():
249 if self.phasedivergent():
250 instabilities.append(b'phase-divergent')
250 instabilities.append(b'phase-divergent')
251 if self.contentdivergent():
251 if self.contentdivergent():
252 instabilities.append(b'content-divergent')
252 instabilities.append(b'content-divergent')
253 return instabilities
253 return instabilities
254
254
255 def parents(self):
255 def parents(self):
256 """return contexts for each parent changeset"""
256 """return contexts for each parent changeset"""
257 return self._parents
257 return self._parents
258
258
259 def p1(self):
259 def p1(self):
260 return self._parents[0]
260 return self._parents[0]
261
261
262 def p2(self):
262 def p2(self):
263 parents = self._parents
263 parents = self._parents
264 if len(parents) == 2:
264 if len(parents) == 2:
265 return parents[1]
265 return parents[1]
266 return self._repo[nullrev]
266 return self._repo[nullrev]
267
267
268 def _fileinfo(self, path):
268 def _fileinfo(self, path):
269 if '_manifest' in self.__dict__:
269 if '_manifest' in self.__dict__:
270 try:
270 try:
271 return self._manifest.find(path)
271 return self._manifest.find(path)
272 except KeyError:
272 except KeyError:
273 raise error.ManifestLookupError(
273 raise error.ManifestLookupError(
274 self._node, path, _(b'not found in manifest')
274 self._node or b'None', path, _(b'not found in manifest')
275 )
275 )
276 if '_manifestdelta' in self.__dict__ or path in self.files():
276 if '_manifestdelta' in self.__dict__ or path in self.files():
277 if path in self._manifestdelta:
277 if path in self._manifestdelta:
278 return (
278 return (
279 self._manifestdelta[path],
279 self._manifestdelta[path],
280 self._manifestdelta.flags(path),
280 self._manifestdelta.flags(path),
281 )
281 )
282 mfl = self._repo.manifestlog
282 mfl = self._repo.manifestlog
283 try:
283 try:
284 node, flag = mfl[self._changeset.manifest].find(path)
284 node, flag = mfl[self._changeset.manifest].find(path)
285 except KeyError:
285 except KeyError:
286 raise error.ManifestLookupError(
286 raise error.ManifestLookupError(
287 self._node, path, _(b'not found in manifest')
287 self._node or b'None', path, _(b'not found in manifest')
288 )
288 )
289
289
290 return node, flag
290 return node, flag
291
291
292 def filenode(self, path):
292 def filenode(self, path):
293 return self._fileinfo(path)[0]
293 return self._fileinfo(path)[0]
294
294
295 def flags(self, path):
295 def flags(self, path):
296 try:
296 try:
297 return self._fileinfo(path)[1]
297 return self._fileinfo(path)[1]
298 except error.LookupError:
298 except error.LookupError:
299 return b''
299 return b''
300
300
301 @propertycache
301 @propertycache
302 def _copies(self):
302 def _copies(self):
303 return metadata.computechangesetcopies(self)
303 return metadata.computechangesetcopies(self)
304
304
305 def p1copies(self):
305 def p1copies(self):
306 return self._copies[0]
306 return self._copies[0]
307
307
308 def p2copies(self):
308 def p2copies(self):
309 return self._copies[1]
309 return self._copies[1]
310
310
311 def sub(self, path, allowcreate=True):
311 def sub(self, path, allowcreate=True):
312 '''return a subrepo for the stored revision of path, never wdir()'''
312 '''return a subrepo for the stored revision of path, never wdir()'''
313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
314
314
315 def nullsub(self, path, pctx):
315 def nullsub(self, path, pctx):
316 return subrepo.nullsubrepo(self, path, pctx)
316 return subrepo.nullsubrepo(self, path, pctx)
317
317
318 def workingsub(self, path):
318 def workingsub(self, path):
319 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 '''return a subrepo for the stored revision, or wdir if this is a wdir
320 context.
320 context.
321 '''
321 '''
322 return subrepo.subrepo(self, path, allowwdir=True)
322 return subrepo.subrepo(self, path, allowwdir=True)
323
323
324 def match(
324 def match(
325 self,
325 self,
326 pats=None,
326 pats=None,
327 include=None,
327 include=None,
328 exclude=None,
328 exclude=None,
329 default=b'glob',
329 default=b'glob',
330 listsubrepos=False,
330 listsubrepos=False,
331 badfn=None,
331 badfn=None,
332 cwd=None,
332 cwd=None,
333 ):
333 ):
334 r = self._repo
334 r = self._repo
335 if not cwd:
335 if not cwd:
336 cwd = r.getcwd()
336 cwd = r.getcwd()
337 return matchmod.match(
337 return matchmod.match(
338 r.root,
338 r.root,
339 cwd,
339 cwd,
340 pats,
340 pats,
341 include,
341 include,
342 exclude,
342 exclude,
343 default,
343 default,
344 auditor=r.nofsauditor,
344 auditor=r.nofsauditor,
345 ctx=self,
345 ctx=self,
346 listsubrepos=listsubrepos,
346 listsubrepos=listsubrepos,
347 badfn=badfn,
347 badfn=badfn,
348 )
348 )
349
349
350 def diff(
350 def diff(
351 self,
351 self,
352 ctx2=None,
352 ctx2=None,
353 match=None,
353 match=None,
354 changes=None,
354 changes=None,
355 opts=None,
355 opts=None,
356 losedatafn=None,
356 losedatafn=None,
357 pathfn=None,
357 pathfn=None,
358 copy=None,
358 copy=None,
359 copysourcematch=None,
359 copysourcematch=None,
360 hunksfilterfn=None,
360 hunksfilterfn=None,
361 ):
361 ):
362 """Returns a diff generator for the given contexts and matcher"""
362 """Returns a diff generator for the given contexts and matcher"""
363 if ctx2 is None:
363 if ctx2 is None:
364 ctx2 = self.p1()
364 ctx2 = self.p1()
365 if ctx2 is not None:
365 if ctx2 is not None:
366 ctx2 = self._repo[ctx2]
366 ctx2 = self._repo[ctx2]
367 return patch.diff(
367 return patch.diff(
368 self._repo,
368 self._repo,
369 ctx2,
369 ctx2,
370 self,
370 self,
371 match=match,
371 match=match,
372 changes=changes,
372 changes=changes,
373 opts=opts,
373 opts=opts,
374 losedatafn=losedatafn,
374 losedatafn=losedatafn,
375 pathfn=pathfn,
375 pathfn=pathfn,
376 copy=copy,
376 copy=copy,
377 copysourcematch=copysourcematch,
377 copysourcematch=copysourcematch,
378 hunksfilterfn=hunksfilterfn,
378 hunksfilterfn=hunksfilterfn,
379 )
379 )
380
380
381 def dirs(self):
381 def dirs(self):
382 return self._manifest.dirs()
382 return self._manifest.dirs()
383
383
384 def hasdir(self, dir):
384 def hasdir(self, dir):
385 return self._manifest.hasdir(dir)
385 return self._manifest.hasdir(dir)
386
386
387 def status(
387 def status(
388 self,
388 self,
389 other=None,
389 other=None,
390 match=None,
390 match=None,
391 listignored=False,
391 listignored=False,
392 listclean=False,
392 listclean=False,
393 listunknown=False,
393 listunknown=False,
394 listsubrepos=False,
394 listsubrepos=False,
395 ):
395 ):
396 """return status of files between two nodes or node and working
396 """return status of files between two nodes or node and working
397 directory.
397 directory.
398
398
399 If other is None, compare this node with working directory.
399 If other is None, compare this node with working directory.
400
400
401 returns (modified, added, removed, deleted, unknown, ignored, clean)
401 returns (modified, added, removed, deleted, unknown, ignored, clean)
402 """
402 """
403
403
404 ctx1 = self
404 ctx1 = self
405 ctx2 = self._repo[other]
405 ctx2 = self._repo[other]
406
406
407 # This next code block is, admittedly, fragile logic that tests for
407 # This next code block is, admittedly, fragile logic that tests for
408 # reversing the contexts and wouldn't need to exist if it weren't for
408 # reversing the contexts and wouldn't need to exist if it weren't for
409 # the fast (and common) code path of comparing the working directory
409 # the fast (and common) code path of comparing the working directory
410 # with its first parent.
410 # with its first parent.
411 #
411 #
412 # What we're aiming for here is the ability to call:
412 # What we're aiming for here is the ability to call:
413 #
413 #
414 # workingctx.status(parentctx)
414 # workingctx.status(parentctx)
415 #
415 #
416 # If we always built the manifest for each context and compared those,
416 # If we always built the manifest for each context and compared those,
417 # then we'd be done. But the special case of the above call means we
417 # then we'd be done. But the special case of the above call means we
418 # just copy the manifest of the parent.
418 # just copy the manifest of the parent.
419 reversed = False
419 reversed = False
420 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
420 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
421 reversed = True
421 reversed = True
422 ctx1, ctx2 = ctx2, ctx1
422 ctx1, ctx2 = ctx2, ctx1
423
423
424 match = self._repo.narrowmatch(match)
424 match = self._repo.narrowmatch(match)
425 match = ctx2._matchstatus(ctx1, match)
425 match = ctx2._matchstatus(ctx1, match)
426 r = scmutil.status([], [], [], [], [], [], [])
426 r = scmutil.status([], [], [], [], [], [], [])
427 r = ctx2._buildstatus(
427 r = ctx2._buildstatus(
428 ctx1, r, match, listignored, listclean, listunknown
428 ctx1, r, match, listignored, listclean, listunknown
429 )
429 )
430
430
431 if reversed:
431 if reversed:
432 # Reverse added and removed. Clear deleted, unknown and ignored as
432 # Reverse added and removed. Clear deleted, unknown and ignored as
433 # these make no sense to reverse.
433 # these make no sense to reverse.
434 r = scmutil.status(
434 r = scmutil.status(
435 r.modified, r.removed, r.added, [], [], [], r.clean
435 r.modified, r.removed, r.added, [], [], [], r.clean
436 )
436 )
437
437
438 if listsubrepos:
438 if listsubrepos:
439 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
439 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
440 try:
440 try:
441 rev2 = ctx2.subrev(subpath)
441 rev2 = ctx2.subrev(subpath)
442 except KeyError:
442 except KeyError:
443 # A subrepo that existed in node1 was deleted between
443 # A subrepo that existed in node1 was deleted between
444 # node1 and node2 (inclusive). Thus, ctx2's substate
444 # node1 and node2 (inclusive). Thus, ctx2's substate
445 # won't contain that subpath. The best we can do ignore it.
445 # won't contain that subpath. The best we can do ignore it.
446 rev2 = None
446 rev2 = None
447 submatch = matchmod.subdirmatcher(subpath, match)
447 submatch = matchmod.subdirmatcher(subpath, match)
448 s = sub.status(
448 s = sub.status(
449 rev2,
449 rev2,
450 match=submatch,
450 match=submatch,
451 ignored=listignored,
451 ignored=listignored,
452 clean=listclean,
452 clean=listclean,
453 unknown=listunknown,
453 unknown=listunknown,
454 listsubrepos=True,
454 listsubrepos=True,
455 )
455 )
456 for k in (
456 for k in (
457 'modified',
457 'modified',
458 'added',
458 'added',
459 'removed',
459 'removed',
460 'deleted',
460 'deleted',
461 'unknown',
461 'unknown',
462 'ignored',
462 'ignored',
463 'clean',
463 'clean',
464 ):
464 ):
465 rfiles, sfiles = getattr(r, k), getattr(s, k)
465 rfiles, sfiles = getattr(r, k), getattr(s, k)
466 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
466 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
467
467
468 r.modified.sort()
468 r.modified.sort()
469 r.added.sort()
469 r.added.sort()
470 r.removed.sort()
470 r.removed.sort()
471 r.deleted.sort()
471 r.deleted.sort()
472 r.unknown.sort()
472 r.unknown.sort()
473 r.ignored.sort()
473 r.ignored.sort()
474 r.clean.sort()
474 r.clean.sort()
475
475
476 return r
476 return r
477
477
478 def mergestate(self, clean=False):
478 def mergestate(self, clean=False):
479 """Get a mergestate object for this context."""
479 """Get a mergestate object for this context."""
480 raise NotImplementedError(
480 raise NotImplementedError(
481 '%s does not implement mergestate()' % self.__class__
481 '%s does not implement mergestate()' % self.__class__
482 )
482 )
483
483
484 def isempty(self):
484 def isempty(self):
485 return not (
485 return not (
486 len(self.parents()) > 1
486 len(self.parents()) > 1
487 or self.branch() != self.p1().branch()
487 or self.branch() != self.p1().branch()
488 or self.closesbranch()
488 or self.closesbranch()
489 or self.files()
489 or self.files()
490 )
490 )
491
491
492
492
493 class changectx(basectx):
class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node, maybe_filtered=True):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog
        # must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered

    def __hash__(self):
        # Hash by revision number; fall back to identity when _rev is not
        # set yet.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__
520
520
521 @propertycache
521 @propertycache
522 def _changeset(self):
522 def _changeset(self):
523 if self._maybe_filtered:
523 if self._maybe_filtered:
524 repo = self._repo
524 repo = self._repo
525 else:
525 else:
526 repo = self._repo.unfiltered()
526 repo = self._repo.unfiltered()
527 return repo.changelog.changelogrevision(self.rev())
527 return repo.changelog.changelogrevision(self.rev())
528
528
529 @propertycache
529 @propertycache
530 def _manifest(self):
530 def _manifest(self):
531 return self._manifestctx.read()
531 return self._manifestctx.read()
532
532
533 @property
533 @property
534 def _manifestctx(self):
534 def _manifestctx(self):
535 return self._repo.manifestlog[self._changeset.manifest]
535 return self._repo.manifestlog[self._changeset.manifest]
536
536
537 @propertycache
537 @propertycache
538 def _manifestdelta(self):
538 def _manifestdelta(self):
539 return self._manifestctx.readdelta()
539 return self._manifestctx.readdelta()
540
540
541 @propertycache
541 @propertycache
542 def _parents(self):
542 def _parents(self):
543 repo = self._repo
543 repo = self._repo
544 if self._maybe_filtered:
544 if self._maybe_filtered:
545 cl = repo.changelog
545 cl = repo.changelog
546 else:
546 else:
547 cl = repo.unfiltered().changelog
547 cl = repo.unfiltered().changelog
548
548
549 p1, p2 = cl.parentrevs(self._rev)
549 p1, p2 = cl.parentrevs(self._rev)
550 if p2 == nullrev:
550 if p2 == nullrev:
551 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
551 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
552 return [
552 return [
553 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
553 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
554 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
554 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
555 ]
555 ]
556
556
557 def changeset(self):
557 def changeset(self):
558 c = self._changeset
558 c = self._changeset
559 return (
559 return (
560 c.manifest,
560 c.manifest,
561 c.user,
561 c.user,
562 c.date,
562 c.date,
563 c.files,
563 c.files,
564 c.description,
564 c.description,
565 c.extra,
565 c.extra,
566 )
566 )
567
567
568 def manifestnode(self):
568 def manifestnode(self):
569 return self._changeset.manifest
569 return self._changeset.manifest
570
570
571 def user(self):
571 def user(self):
572 return self._changeset.user
572 return self._changeset.user
573
573
574 def date(self):
574 def date(self):
575 return self._changeset.date
575 return self._changeset.date
576
576
577 def files(self):
577 def files(self):
578 return self._changeset.files
578 return self._changeset.files
579
579
580 def filesmodified(self):
580 def filesmodified(self):
581 modified = set(self.files())
581 modified = set(self.files())
582 modified.difference_update(self.filesadded())
582 modified.difference_update(self.filesadded())
583 modified.difference_update(self.filesremoved())
583 modified.difference_update(self.filesremoved())
584 return sorted(modified)
584 return sorted(modified)
585
585
586 def filesadded(self):
586 def filesadded(self):
587 filesadded = self._changeset.filesadded
587 filesadded = self._changeset.filesadded
588 compute_on_none = True
588 compute_on_none = True
589 if self._repo.filecopiesmode == b'changeset-sidedata':
589 if self._repo.filecopiesmode == b'changeset-sidedata':
590 compute_on_none = False
590 compute_on_none = False
591 else:
591 else:
592 source = self._repo.ui.config(b'experimental', b'copies.read-from')
592 source = self._repo.ui.config(b'experimental', b'copies.read-from')
593 if source == b'changeset-only':
593 if source == b'changeset-only':
594 compute_on_none = False
594 compute_on_none = False
595 elif source != b'compatibility':
595 elif source != b'compatibility':
596 # filelog mode, ignore any changelog content
596 # filelog mode, ignore any changelog content
597 filesadded = None
597 filesadded = None
598 if filesadded is None:
598 if filesadded is None:
599 if compute_on_none:
599 if compute_on_none:
600 filesadded = metadata.computechangesetfilesadded(self)
600 filesadded = metadata.computechangesetfilesadded(self)
601 else:
601 else:
602 filesadded = []
602 filesadded = []
603 return filesadded
603 return filesadded
604
604
605 def filesremoved(self):
605 def filesremoved(self):
606 filesremoved = self._changeset.filesremoved
606 filesremoved = self._changeset.filesremoved
607 compute_on_none = True
607 compute_on_none = True
608 if self._repo.filecopiesmode == b'changeset-sidedata':
608 if self._repo.filecopiesmode == b'changeset-sidedata':
609 compute_on_none = False
609 compute_on_none = False
610 else:
610 else:
611 source = self._repo.ui.config(b'experimental', b'copies.read-from')
611 source = self._repo.ui.config(b'experimental', b'copies.read-from')
612 if source == b'changeset-only':
612 if source == b'changeset-only':
613 compute_on_none = False
613 compute_on_none = False
614 elif source != b'compatibility':
614 elif source != b'compatibility':
615 # filelog mode, ignore any changelog content
615 # filelog mode, ignore any changelog content
616 filesremoved = None
616 filesremoved = None
617 if filesremoved is None:
617 if filesremoved is None:
618 if compute_on_none:
618 if compute_on_none:
619 filesremoved = metadata.computechangesetfilesremoved(self)
619 filesremoved = metadata.computechangesetfilesremoved(self)
620 else:
620 else:
621 filesremoved = []
621 filesremoved = []
622 return filesremoved
622 return filesremoved
623
623
    @propertycache
    def _copies(self):
        """Compute the (p1copies, p2copies) dicts for this changeset.

        Copy metadata may live in the changeset itself or in the filelogs;
        the `experimental.copies.read-from` config selects which to trust.
        """
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the filelog-based computation in the base class
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
658
658
    def description(self):
        """Return the changeset's commit message."""
        return self._changeset.description

    def branch(self):
        """Return the named branch, converted to the local encoding."""
        # the branch name is recorded in the changeset's extra dict
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        """True if this changeset closes its branch (b'close' marker in extra)."""
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        """Return this changeset's phase, looked up through the phase cache."""
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        """True if this revision is filtered out of the 'visible' repo view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        # stored changesets are never in-memory-only
        return False
688
688
    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        """Lazily yield a changectx for every ancestor of this changeset."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset

        When `fileid` is not given, it is resolved from this changeset's
        manifest via filenode().
        """
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
717
717
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            # workingctx has no node; use its first parent instead
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    # ignore unresolvable preferences
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched; use the revlog's pick
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
762
762
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        # walk() already yields only the matching names
        return self.walk(match)
783
783
784
784
785 class basefilectx(object):
785 class basefilectx(object):
786 """A filecontext object represents the common logic for its children:
786 """A filecontext object represents the common logic for its children:
787 filectx: read-only access to a filerevision that is already present
787 filectx: read-only access to a filerevision that is already present
788 in the repo,
788 in the repo,
789 workingfilectx: a filecontext that represents files from the working
789 workingfilectx: a filecontext that represents files from the working
790 directory,
790 directory,
791 memfilectx: a filecontext that represents files in-memory,
791 memfilectx: a filecontext that represents files in-memory,
792 """
792 """
793
793
    @propertycache
    def _filelog(self):
        """The filelog for this file's path, opened lazily."""
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """Changelog revision this file revision is attached to.

        Prefers an explicitly associated changectx; otherwise corrects for
        linkrev aliasing when a descendant revision is known; finally falls
        back to the raw linkrev stored in the filelog.
        """
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        """Binary node id of this file revision in the filelog."""
        if '_fileid' in self.__dict__:
            # an explicit file id was given; resolve it through the filelog
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        """Filelog revision number of this file revision."""
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        """Path of this file relative to the repository root."""
        return self._path
823
823
    def __nonzero__(self):
        # a filectx is truthy when the file exists in its changeset, i.e.
        # when its filenode can be resolved
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            # changeset could not be resolved; show a placeholder
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # not fully initialized; fall back to identity hashing
            return id(self)

    def __eq__(self, other):
        # equal iff same concrete type, same path and same file node
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)
863
863
    def filerev(self):
        """Filelog revision number (not a changelog rev)."""
        return self._filerev

    def filenode(self):
        """Binary node id of this file revision."""
        return self._filenode

    @propertycache
    def _flags(self):
        # flags are stored in the manifest, reachable via the changectx
        return self._changectx.flags(self._path)

    def flags(self):
        """Return this file's flags (e.g. b'x' for exec, b'l' for symlink)."""
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        """Changelog revision this context is attached to (see _changeid)."""
        return self._changeid

    def linkrev(self):
        """Raw linkrev from the filelog; may be an alias (see _adjustlinkrev)."""
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        """Return the copy record for this file revision, or a falsy value."""
        return self._copied

    def copysource(self):
        # first element of the copy record is the source path (see parents())
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            # unreadable content is treated as non-binary
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
961
961
    # subclasses with custom comparison logic set this to True so that cmp()
    # delegates to them (checked via fctx._customcmp below)
    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
992
992
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # the search starts at the linkrev itself: nothing to adjust
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # passed the floor without finding the introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1045
1045
1046 def isintroducedafter(self, changelogrev):
1046 def isintroducedafter(self, changelogrev):
1047 """True if a filectx has been introduced after a given floor revision
1047 """True if a filectx has been introduced after a given floor revision
1048 """
1048 """
1049 if self.linkrev() >= changelogrev:
1049 if self.linkrev() >= changelogrev:
1050 return True
1050 return True
1051 introrev = self._introrev(stoprev=changelogrev)
1051 introrev = self._introrev(stoprev=changelogrev)
1052 if introrev is None:
1052 if introrev is None:
1053 return False
1053 return False
1054 return introrev >= changelogrev
1054 return introrev >= changelogrev
1055
1055
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no changeset association at all: the raw linkrev is the only
            # answer available
            return self.linkrev()

    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            return self
        return self.filectx(self.filenode(), changeid=introrev)
1103
1103
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1119
1119
    def parents(self):
        """Return parent filectxs, substituting rename info where recorded."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information.
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]

    def p1(self):
        """First parent filectx."""
        return self.parents()[0]

    def p2(self):
        """Second parent filectx, or a null filectx when there is only one."""
        p = self.parents()
        if len(p) == 2:
            return p[1]
        # fileid=-1 denotes the null file revision
        return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1148
1148
1149 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1149 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1150 """Returns a list of annotateline objects for each line in the file
1150 """Returns a list of annotateline objects for each line in the file
1151
1151
1152 - line.fctx is the filectx of the node where that line was last changed
1152 - line.fctx is the filectx of the node where that line was last changed
1153 - line.lineno is the line number at the first appearance in the managed
1153 - line.lineno is the line number at the first appearance in the managed
1154 file
1154 file
1155 - line.text is the data on that line (including newline character)
1155 - line.text is the data on that line (including newline character)
1156 """
1156 """
1157 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1157 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1158
1158
1159 def parents(f):
1159 def parents(f):
1160 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1160 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1161 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1161 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1162 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1162 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1163 # isn't an ancestor of the srcrev.
1163 # isn't an ancestor of the srcrev.
1164 f._changeid
1164 f._changeid
1165 pl = f.parents()
1165 pl = f.parents()
1166
1166
1167 # Don't return renamed parents if we aren't following.
1167 # Don't return renamed parents if we aren't following.
1168 if not follow:
1168 if not follow:
1169 pl = [p for p in pl if p.path() == f.path()]
1169 pl = [p for p in pl if p.path() == f.path()]
1170
1170
1171 # renamed filectx won't have a filelog yet, so set it
1171 # renamed filectx won't have a filelog yet, so set it
1172 # from the cache to save time
1172 # from the cache to save time
1173 for p in pl:
1173 for p in pl:
1174 if not '_filelog' in p.__dict__:
1174 if not '_filelog' in p.__dict__:
1175 p._filelog = getlog(p.path())
1175 p._filelog = getlog(p.path())
1176
1176
1177 return pl
1177 return pl
1178
1178
1179 # use linkrev to find the first changeset where self appeared
1179 # use linkrev to find the first changeset where self appeared
1180 base = self.introfilectx()
1180 base = self.introfilectx()
1181 if getattr(base, '_ancestrycontext', None) is None:
1181 if getattr(base, '_ancestrycontext', None) is None:
1182 # it is safe to use an unfiltered repository here because we are
1182 # it is safe to use an unfiltered repository here because we are
1183 # walking ancestors only.
1183 # walking ancestors only.
1184 cl = self._repo.unfiltered().changelog
1184 cl = self._repo.unfiltered().changelog
1185 if base.rev() is None:
1185 if base.rev() is None:
1186 # wctx is not inclusive, but works because _ancestrycontext
1186 # wctx is not inclusive, but works because _ancestrycontext
1187 # is used to test filelog revisions
1187 # is used to test filelog revisions
1188 ac = cl.ancestors(
1188 ac = cl.ancestors(
1189 [p.rev() for p in base.parents()], inclusive=True
1189 [p.rev() for p in base.parents()], inclusive=True
1190 )
1190 )
1191 else:
1191 else:
1192 ac = cl.ancestors([base.rev()], inclusive=True)
1192 ac = cl.ancestors([base.rev()], inclusive=True)
1193 base._ancestrycontext = ac
1193 base._ancestrycontext = ac
1194
1194
1195 return dagop.annotate(
1195 return dagop.annotate(
1196 base, parents, skiprevs=skiprevs, diffopts=diffopts
1196 base, parents, skiprevs=skiprevs, diffopts=diffopts
1197 )
1197 )
1198
1198
1199 def ancestors(self, followfirst=False):
1199 def ancestors(self, followfirst=False):
1200 visit = {}
1200 visit = {}
1201 c = self
1201 c = self
1202 if followfirst:
1202 if followfirst:
1203 cut = 1
1203 cut = 1
1204 else:
1204 else:
1205 cut = None
1205 cut = None
1206
1206
1207 while True:
1207 while True:
1208 for parent in c.parents()[:cut]:
1208 for parent in c.parents()[:cut]:
1209 visit[(parent.linkrev(), parent.filenode())] = parent
1209 visit[(parent.linkrev(), parent.filenode())] = parent
1210 if not visit:
1210 if not visit:
1211 break
1211 break
1212 c = visit.pop(max(visit))
1212 c = visit.pop(max(visit))
1213 yield c
1213 yield c
1214
1214
1215 def decodeddata(self):
1215 def decodeddata(self):
1216 """Returns `data()` after running repository decoding filters.
1216 """Returns `data()` after running repository decoding filters.
1217
1217
1218 This is often equivalent to how the data would be expressed on disk.
1218 This is often equivalent to how the data would be expressed on disk.
1219 """
1219 """
1220 return self._repo.wwritedata(self.path(), self.data())
1220 return self._repo.wwritedata(self.path(), self.data())
1221
1221
1222
1222
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one of changeid/fileid/changectx is required to locate the
        # file revision
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository.  When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw (undecoded) revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file contents, honoring the censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1346
1346
1347
1347
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        # a committable context is not committed yet, so it has no rev/node
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # honor the devel.default-date override before falling back to "now"
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # the phase of an uncommitted context is at least the configured
        # new-commit phase, and never lower than any parent's phase
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1512
1512
1513
1513
1514 class workingctx(committablectx):
1514 class workingctx(committablectx):
1515 """A workingctx object makes access to data related to
1515 """A workingctx object makes access to data related to
1516 the current working directory convenient.
1516 the current working directory convenient.
1517 date - any valid date string or (unixtime, offset), or None.
1517 date - any valid date string or (unixtime, offset), or None.
1518 user - username string, or None.
1518 user - username string, or None.
1519 extra - a dictionary of extra values, or None.
1519 extra - a dictionary of extra values, or None.
1520 changes - a list of file lists as returned by localrepo.status()
1520 changes - a list of file lists as returned by localrepo.status()
1521 or None to use the repository status.
1521 or None to use the repository status.
1522 """
1522 """
1523
1523
1524 def __init__(
1524 def __init__(
1525 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1525 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1526 ):
1526 ):
1527 branch = None
1527 branch = None
1528 if not extra or b'branch' not in extra:
1528 if not extra or b'branch' not in extra:
1529 try:
1529 try:
1530 branch = repo.dirstate.branch()
1530 branch = repo.dirstate.branch()
1531 except UnicodeDecodeError:
1531 except UnicodeDecodeError:
1532 raise error.Abort(_(b'branch name not in UTF-8!'))
1532 raise error.Abort(_(b'branch name not in UTF-8!'))
1533 super(workingctx, self).__init__(
1533 super(workingctx, self).__init__(
1534 repo, text, user, date, extra, changes, branch=branch
1534 repo, text, user, date, extra, changes, branch=branch
1535 )
1535 )
1536
1536
1537 def __iter__(self):
1537 def __iter__(self):
1538 d = self._repo.dirstate
1538 d = self._repo.dirstate
1539 for f in d:
1539 for f in d:
1540 if d[f] != b'r':
1540 if d[f] != b'r':
1541 yield f
1541 yield f
1542
1542
1543 def __contains__(self, key):
1543 def __contains__(self, key):
1544 return self._repo.dirstate[key] not in b"?r"
1544 return self._repo.dirstate[key] not in b"?r"
1545
1545
1546 def hex(self):
1546 def hex(self):
1547 return wdirhex
1547 return wdirhex
1548
1548
1549 @propertycache
1549 @propertycache
1550 def _parents(self):
1550 def _parents(self):
1551 p = self._repo.dirstate.parents()
1551 p = self._repo.dirstate.parents()
1552 if p[1] == nullid:
1552 if p[1] == nullid:
1553 p = p[:-1]
1553 p = p[:-1]
1554 # use unfiltered repo to delay/avoid loading obsmarkers
1554 # use unfiltered repo to delay/avoid loading obsmarkers
1555 unfi = self._repo.unfiltered()
1555 unfi = self._repo.unfiltered()
1556 return [
1556 return [
1557 changectx(
1557 changectx(
1558 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1558 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1559 )
1559 )
1560 for n in p
1560 for n in p
1561 ]
1561 ]
1562
1562
1563 def setparents(self, p1node, p2node=nullid):
1563 def setparents(self, p1node, p2node=nullid):
1564 dirstate = self._repo.dirstate
1564 dirstate = self._repo.dirstate
1565 with dirstate.parentchange():
1565 with dirstate.parentchange():
1566 copies = dirstate.setparents(p1node, p2node)
1566 copies = dirstate.setparents(p1node, p2node)
1567 pctx = self._repo[p1node]
1567 pctx = self._repo[p1node]
1568 if copies:
1568 if copies:
1569 # Adjust copy records, the dirstate cannot do it, it
1569 # Adjust copy records, the dirstate cannot do it, it
1570 # requires access to parents manifests. Preserve them
1570 # requires access to parents manifests. Preserve them
1571 # only for entries added to first parent.
1571 # only for entries added to first parent.
1572 for f in copies:
1572 for f in copies:
1573 if f not in pctx and copies[f] in pctx:
1573 if f not in pctx and copies[f] in pctx:
1574 dirstate.copy(copies[f], f)
1574 dirstate.copy(copies[f], f)
1575 if p2node == nullid:
1575 if p2node == nullid:
1576 for f, s in sorted(dirstate.copies().items()):
1576 for f, s in sorted(dirstate.copies().items()):
1577 if f not in pctx and s not in pctx:
1577 if f not in pctx and s not in pctx:
1578 dirstate.copy(None, f)
1578 dirstate.copy(None, f)
1579
1579
1580 def _fileinfo(self, path):
1580 def _fileinfo(self, path):
1581 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1581 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1582 self._manifest
1582 self._manifest
1583 return super(workingctx, self)._fileinfo(path)
1583 return super(workingctx, self)._fileinfo(path)
1584
1584
1585 def _buildflagfunc(self):
1585 def _buildflagfunc(self):
1586 # Create a fallback function for getting file flags when the
1586 # Create a fallback function for getting file flags when the
1587 # filesystem doesn't support them
1587 # filesystem doesn't support them
1588
1588
1589 copiesget = self._repo.dirstate.copies().get
1589 copiesget = self._repo.dirstate.copies().get
1590 parents = self.parents()
1590 parents = self.parents()
1591 if len(parents) < 2:
1591 if len(parents) < 2:
1592 # when we have one parent, it's easy: copy from parent
1592 # when we have one parent, it's easy: copy from parent
1593 man = parents[0].manifest()
1593 man = parents[0].manifest()
1594
1594
1595 def func(f):
1595 def func(f):
1596 f = copiesget(f, f)
1596 f = copiesget(f, f)
1597 return man.flags(f)
1597 return man.flags(f)
1598
1598
1599 else:
1599 else:
1600 # merges are tricky: we try to reconstruct the unstored
1600 # merges are tricky: we try to reconstruct the unstored
1601 # result from the merge (issue1802)
1601 # result from the merge (issue1802)
1602 p1, p2 = parents
1602 p1, p2 = parents
1603 pa = p1.ancestor(p2)
1603 pa = p1.ancestor(p2)
1604 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1604 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1605
1605
1606 def func(f):
1606 def func(f):
1607 f = copiesget(f, f) # may be wrong for merges with copies
1607 f = copiesget(f, f) # may be wrong for merges with copies
1608 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1608 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1609 if fl1 == fl2:
1609 if fl1 == fl2:
1610 return fl1
1610 return fl1
1611 if fl1 == fla:
1611 if fl1 == fla:
1612 return fl2
1612 return fl2
1613 if fl2 == fla:
1613 if fl2 == fla:
1614 return fl1
1614 return fl1
1615 return b'' # punt for conflicts
1615 return b'' # punt for conflicts
1616
1616
1617 return func
1617 return func
1618
1618
1619 @propertycache
1619 @propertycache
1620 def _flagfunc(self):
1620 def _flagfunc(self):
1621 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1621 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1622
1622
1623 def flags(self, path):
1623 def flags(self, path):
1624 try:
1624 try:
1625 return self._flagfunc(path)
1625 return self._flagfunc(path)
1626 except OSError:
1626 except OSError:
1627 return b''
1627 return b''
1628
1628
1629 def filectx(self, path, filelog=None):
1629 def filectx(self, path, filelog=None):
1630 """get a file context from the working directory"""
1630 """get a file context from the working directory"""
1631 return workingfilectx(
1631 return workingfilectx(
1632 self._repo, path, workingctx=self, filelog=filelog
1632 self._repo, path, workingctx=self, filelog=filelog
1633 )
1633 )
1634
1634
1635 def dirty(self, missing=False, merge=True, branch=True):
1635 def dirty(self, missing=False, merge=True, branch=True):
1636 """check whether a working directory is modified"""
1636 """check whether a working directory is modified"""
1637 # check subrepos first
1637 # check subrepos first
1638 for s in sorted(self.substate):
1638 for s in sorted(self.substate):
1639 if self.sub(s).dirty(missing=missing):
1639 if self.sub(s).dirty(missing=missing):
1640 return True
1640 return True
1641 # check current working dir
1641 # check current working dir
1642 return (
1642 return (
1643 (merge and self.p2())
1643 (merge and self.p2())
1644 or (branch and self.branch() != self.p1().branch())
1644 or (branch and self.branch() != self.p1().branch())
1645 or self.modified()
1645 or self.modified()
1646 or self.added()
1646 or self.added()
1647 or self.removed()
1647 or self.removed()
1648 or (missing and self.deleted())
1648 or (missing and self.deleted())
1649 )
1649 )
1650
1650
    def add(self, list, prefix=b""):
        """Schedule the given files for addition to the dirstate.

        ``list`` is an iterable of working-directory-relative paths;
        ``prefix`` is joined in front of each path for display and
        portability checks.  Returns the list of rejected files (paths
        that are missing or are neither regular files nor symlinks).
        """
        with self._repo.wlock():
            ui, ds = self._repo.ui, self._repo.dirstate
            uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
            rejected = []
            lstat = self._repo.wvfs.lstat
            for f in list:
                # ds.pathto() returns an absolute file when this is invoked from
                # the keyword extension. That gets flagged as non-portable on
                # Windows, since it contains the drive letter and colon.
                scmutil.checkportable(ui, os.path.join(prefix, f))
                try:
                    st = lstat(f)
                except OSError:
                    # file vanished (or was never there): warn and reject
                    ui.warn(_(b"%s does not exist!\n") % uipath(f))
                    rejected.append(f)
                    continue
                limit = ui.configbytes(b'ui', b'large-file-limit')
                if limit != 0 and st.st_size > limit:
                    # warn, but still add the file; the user can revert
                    ui.warn(
                        _(
                            b"%s: up to %d MB of RAM may be required "
                            b"to manage this file\n"
                            b"(use 'hg revert %s' to cancel the "
                            b"pending addition)\n"
                        )
                        % (f, 3 * st.st_size // 1000000, uipath(f))
                    )
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    ui.warn(
                        _(
                            b"%s not added: only files and symlinks "
                            b"supported currently\n"
                        )
                        % uipath(f)
                    )
                    rejected.append(f)
                elif ds[f] in b'amn':
                    # dirstate already tracks this file (added/merged/normal)
                    ui.warn(_(b"%s already tracked!\n") % uipath(f))
                elif ds[f] == b'r':
                    # previously marked removed: resurrect it instead
                    ds.normallookup(f)
                else:
                    ds.add(f)
            return rejected
1695
1695
1696 def forget(self, files, prefix=b""):
1696 def forget(self, files, prefix=b""):
1697 with self._repo.wlock():
1697 with self._repo.wlock():
1698 ds = self._repo.dirstate
1698 ds = self._repo.dirstate
1699 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1699 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1700 rejected = []
1700 rejected = []
1701 for f in files:
1701 for f in files:
1702 if f not in ds:
1702 if f not in ds:
1703 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1703 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1704 rejected.append(f)
1704 rejected.append(f)
1705 elif ds[f] != b'a':
1705 elif ds[f] != b'a':
1706 ds.remove(f)
1706 ds.remove(f)
1707 else:
1707 else:
1708 ds.drop(f)
1708 ds.drop(f)
1709 return rejected
1709 return rejected
1710
1710
    def copy(self, source, dest):
        """Record in the dirstate that ``dest`` is a copy of ``source``.

        ``dest`` must already exist in the working directory as a regular
        file or a symlink; otherwise a warning is printed and nothing is
        recorded.
        """
        try:
            st = self._repo.wvfs.lstat(dest)
        except OSError as err:
            # only a missing destination is tolerated; re-raise anything else
            if err.errno != errno.ENOENT:
                raise
            self._repo.ui.warn(
                _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
            )
            return
        if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
            self._repo.ui.warn(
                _(b"copy failed: %s is not a file or a symbolic link\n")
                % self._repo.dirstate.pathto(dest)
            )
        else:
            with self._repo.wlock():
                ds = self._repo.dirstate
                if ds[dest] in b'?':
                    # untracked destination: start tracking it
                    ds.add(dest)
                elif ds[dest] in b'r':
                    # destination marked removed: resurrect it
                    ds.normallookup(dest)
                ds.copy(source, dest)
1734
1734
1735 def match(
1735 def match(
1736 self,
1736 self,
1737 pats=None,
1737 pats=None,
1738 include=None,
1738 include=None,
1739 exclude=None,
1739 exclude=None,
1740 default=b'glob',
1740 default=b'glob',
1741 listsubrepos=False,
1741 listsubrepos=False,
1742 badfn=None,
1742 badfn=None,
1743 cwd=None,
1743 cwd=None,
1744 ):
1744 ):
1745 r = self._repo
1745 r = self._repo
1746 if not cwd:
1746 if not cwd:
1747 cwd = r.getcwd()
1747 cwd = r.getcwd()
1748
1748
1749 # Only a case insensitive filesystem needs magic to translate user input
1749 # Only a case insensitive filesystem needs magic to translate user input
1750 # to actual case in the filesystem.
1750 # to actual case in the filesystem.
1751 icasefs = not util.fscasesensitive(r.root)
1751 icasefs = not util.fscasesensitive(r.root)
1752 return matchmod.match(
1752 return matchmod.match(
1753 r.root,
1753 r.root,
1754 cwd,
1754 cwd,
1755 pats,
1755 pats,
1756 include,
1756 include,
1757 exclude,
1757 exclude,
1758 default,
1758 default,
1759 auditor=r.auditor,
1759 auditor=r.auditor,
1760 ctx=self,
1760 ctx=self,
1761 listsubrepos=listsubrepos,
1761 listsubrepos=listsubrepos,
1762 badfn=badfn,
1762 badfn=badfn,
1763 icasefs=icasefs,
1763 icasefs=icasefs,
1764 )
1764 )
1765
1765
1766 def _filtersuspectsymlink(self, files):
1766 def _filtersuspectsymlink(self, files):
1767 if not files or self._repo.dirstate._checklink:
1767 if not files or self._repo.dirstate._checklink:
1768 return files
1768 return files
1769
1769
1770 # Symlink placeholders may get non-symlink-like contents
1770 # Symlink placeholders may get non-symlink-like contents
1771 # via user error or dereferencing by NFS or Samba servers,
1771 # via user error or dereferencing by NFS or Samba servers,
1772 # so we filter out any placeholders that don't look like a
1772 # so we filter out any placeholders that don't look like a
1773 # symlink
1773 # symlink
1774 sane = []
1774 sane = []
1775 for f in files:
1775 for f in files:
1776 if self.flags(f) == b'l':
1776 if self.flags(f) == b'l':
1777 d = self[f].data()
1777 d = self[f].data()
1778 if (
1778 if (
1779 d == b''
1779 d == b''
1780 or len(d) >= 1024
1780 or len(d) >= 1024
1781 or b'\n' in d
1781 or b'\n' in d
1782 or stringutil.binary(d)
1782 or stringutil.binary(d)
1783 ):
1783 ):
1784 self._repo.ui.debug(
1784 self._repo.ui.debug(
1785 b'ignoring suspect symlink placeholder "%s"\n' % f
1785 b'ignoring suspect symlink placeholder "%s"\n' % f
1786 )
1786 )
1787 continue
1787 continue
1788 sane.append(f)
1788 sane.append(f)
1789 return sane
1789 return sane
1790
1790
    def _checklookup(self, files):
        """Do a full content compare of files the dirstate could not classify.

        Returns a ``(modified, deleted, fixup)`` triple of lists: ``modified``
        holds files that really changed, ``deleted`` files that became
        inaccessible during the check, and ``fixup`` files that proved to be
        clean (their dirstate entries may be refreshed by the caller).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1823
1823
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is the list of files proven clean by ``_checklookup``; the
        registered post-status hooks (if any) are run afterwards.  The
        dirstate update is best-effort: it is skipped when the write lock
        cannot be taken or when the dirstate changed underneath us.
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                # remember what the dirstate looked like before we try to lock
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # somebody else holds the wlock; the update was optional anyway
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1864
1864
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.'''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        # cmp holds files the dirstate could not classify without comparing
        # file content; s is the preliminary status object
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        # refresh dirstate entries for proven-clean files and run hooks
        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1897
1897
1898 @propertycache
1898 @propertycache
1899 def _copies(self):
1899 def _copies(self):
1900 p1copies = {}
1900 p1copies = {}
1901 p2copies = {}
1901 p2copies = {}
1902 parents = self._repo.dirstate.parents()
1902 parents = self._repo.dirstate.parents()
1903 p1manifest = self._repo[parents[0]].manifest()
1903 p1manifest = self._repo[parents[0]].manifest()
1904 p2manifest = self._repo[parents[1]].manifest()
1904 p2manifest = self._repo[parents[1]].manifest()
1905 changedset = set(self.added()) | set(self.modified())
1905 changedset = set(self.added()) | set(self.modified())
1906 narrowmatch = self._repo.narrowmatch()
1906 narrowmatch = self._repo.narrowmatch()
1907 for dst, src in self._repo.dirstate.copies().items():
1907 for dst, src in self._repo.dirstate.copies().items():
1908 if dst not in changedset or not narrowmatch(dst):
1908 if dst not in changedset or not narrowmatch(dst):
1909 continue
1909 continue
1910 if src in p1manifest:
1910 if src in p1manifest:
1911 p1copies[dst] = src
1911 p1copies[dst] = src
1912 elif src in p2manifest:
1912 elif src in p2manifest:
1913 p2copies[dst] = src
1913 p2copies[dst] = src
1914 return p1copies, p2copies
1914 return p1copies, p2copies
1915
1915
1916 @propertycache
1916 @propertycache
1917 def _manifest(self):
1917 def _manifest(self):
1918 """generate a manifest corresponding to the values in self._status
1918 """generate a manifest corresponding to the values in self._status
1919
1919
1920 This reuse the file nodeid from parent, but we use special node
1920 This reuse the file nodeid from parent, but we use special node
1921 identifiers for added and modified files. This is used by manifests
1921 identifiers for added and modified files. This is used by manifests
1922 merge to see that files are different and by update logic to avoid
1922 merge to see that files are different and by update logic to avoid
1923 deleting newly added files.
1923 deleting newly added files.
1924 """
1924 """
1925 return self._buildstatusmanifest(self._status)
1925 return self._buildstatusmanifest(self._status)
1926
1926
1927 def _buildstatusmanifest(self, status):
1927 def _buildstatusmanifest(self, status):
1928 """Builds a manifest that includes the given status results."""
1928 """Builds a manifest that includes the given status results."""
1929 parents = self.parents()
1929 parents = self.parents()
1930
1930
1931 man = parents[0].manifest().copy()
1931 man = parents[0].manifest().copy()
1932
1932
1933 ff = self._flagfunc
1933 ff = self._flagfunc
1934 for i, l in (
1934 for i, l in (
1935 (addednodeid, status.added),
1935 (addednodeid, status.added),
1936 (modifiednodeid, status.modified),
1936 (modifiednodeid, status.modified),
1937 ):
1937 ):
1938 for f in l:
1938 for f in l:
1939 man[f] = i
1939 man[f] = i
1940 try:
1940 try:
1941 man.setflag(f, ff(f))
1941 man.setflag(f, ff(f))
1942 except OSError:
1942 except OSError:
1943 pass
1943 pass
1944
1944
1945 for f in status.deleted + status.removed:
1945 for f in status.deleted + status.removed:
1946 if f in man:
1946 if f in man:
1947 del man[f]
1947 del man[f]
1948
1948
1949 return man
1949 return man
1950
1950
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # not comparing against our parent: fall back to the generic
            # manifest-comparison implementation in the base class
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1971
1971
    def _matchstatus(self, other, match):
        """override the match method with a filter for directory patterns

        We use inheritance to customize the match.bad method only in cases of
        workingctx since it belongs only to the working directory when
        comparing against the parent changeset.

        If we aren't comparing against the working directory's parent, then we
        just use the default match object sent to us.
        """
        if other != self._repo[b'.']:

            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in other and not other.hasdir(f):
                    self._repo.ui.warn(
                        b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
                    )

            # NOTE: mutates the caller-supplied matcher in place
            match.bad = bad
        return match
1994
1994
1995 def walk(self, match):
1995 def walk(self, match):
1996 '''Generates matching file names.'''
1996 '''Generates matching file names.'''
1997 return sorted(
1997 return sorted(
1998 self._repo.dirstate.walk(
1998 self._repo.dirstate.walk(
1999 self._repo.narrowmatch(match),
1999 self._repo.narrowmatch(match),
2000 subrepos=sorted(self.substate),
2000 subrepos=sorted(self.substate),
2001 unknown=True,
2001 unknown=True,
2002 ignored=False,
2002 ignored=False,
2003 )
2003 )
2004 )
2004 )
2005
2005
2006 def matches(self, match):
2006 def matches(self, match):
2007 match = self._repo.narrowmatch(match)
2007 match = self._repo.narrowmatch(match)
2008 ds = self._repo.dirstate
2008 ds = self._repo.dirstate
2009 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2009 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2010
2010
    def markcommitted(self, node):
        """Update the dirstate to reflect that this context was committed
        as ``node``: modified/added files become normal, removed files are
        dropped, and the dirstate parent is moved to ``node``.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)
            self._repo._quick_access_changeid_invalidate()

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
2026
2026
2027 def mergestate(self, clean=False):
2027 def mergestate(self, clean=False):
2028 if clean:
2028 if clean:
2029 return mergestatemod.mergestate.clean(self._repo)
2029 return mergestatemod.mergestate.clean(self._repo)
2030 return mergestatemod.mergestate.read(self._repo)
2030 return mergestatemod.mergestate.read(self._repo)
2031
2031
2032
2032
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not yet committed, so no changeset id / file revision yet
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` if this file was copied,
        else None.  The filenode is taken from the first parent's manifest
        (``nullid`` when the source is not there)."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # nullid when the path is absent from the manifest
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # (srcpath, srcnode, None): copy source provides the first parent
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop entries whose node is nullid (file absent in that parent)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # an uncommitted file has no children
        return []
2090
2090
2091
2091
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working context when none was supplied
        return workingctx(self._repo)

    def data(self):
        """Return the file's current contents from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        """Return the on-disk size of the file (symlinks not followed)."""
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tzoffset)`` for the on-disk file; fall back to
        the change context's date when the file is missing."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # path traversal / security audit via the working vfs
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # remove the first ancestor path that is a file or symlink,
            # since it would shadow the file we are about to write
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        """Set symlink (``l``) and executable (``x``) flags on the file."""
        self._repo.wvfs.setflags(self._path, l, x)
2184
2184
2185
2185
2186 class overlayworkingctx(committablectx):
2186 class overlayworkingctx(committablectx):
2187 """Wraps another mutable context with a write-back cache that can be
2187 """Wraps another mutable context with a write-back cache that can be
2188 converted into a commit context.
2188 converted into a commit context.
2189
2189
2190 self._cache[path] maps to a dict with keys: {
2190 self._cache[path] maps to a dict with keys: {
2191 'exists': bool?
2191 'exists': bool?
2192 'date': date?
2192 'date': date?
2193 'data': str?
2193 'data': str?
2194 'flags': str?
2194 'flags': str?
2195 'copied': str? (path or None)
2195 'copied': str? (path or None)
2196 }
2196 }
2197 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2197 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2198 is `False`, the file was deleted.
2198 is `False`, the file was deleted.
2199 """
2199 """
2200
2200
    def __init__(self, repo):
        """Create an overlay context; call ``setbase()`` before using it."""
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty write-back cache
        self.clean()
2204
2204
    def setbase(self, wrappedctx):
        """Set the context this overlay wraps (becomes the first parent)."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')
2212
2212
    def setparents(self, p1node, p2node=nullid):
        """Set parents to ``p1node`` (must be the wrapped context's node)
        and, optionally, ``p2node``."""
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2216
2216
2217 def data(self, path):
2217 def data(self, path):
2218 if self.isdirty(path):
2218 if self.isdirty(path):
2219 if self._cache[path][b'exists']:
2219 if self._cache[path][b'exists']:
2220 if self._cache[path][b'data'] is not None:
2220 if self._cache[path][b'data'] is not None:
2221 return self._cache[path][b'data']
2221 return self._cache[path][b'data']
2222 else:
2222 else:
2223 # Must fallback here, too, because we only set flags.
2223 # Must fallback here, too, because we only set flags.
2224 return self._wrappedctx[path].data()
2224 return self._wrappedctx[path].data()
2225 else:
2225 else:
2226 raise error.ProgrammingError(
2226 raise error.ProgrammingError(
2227 b"No such file or directory: %s" % path
2227 b"No such file or directory: %s" % path
2228 )
2228 )
2229 else:
2229 else:
2230 return self._wrappedctx[path].data()
2230 return self._wrappedctx[path].data()
2231
2231
2232 @propertycache
2232 @propertycache
2233 def _manifest(self):
2233 def _manifest(self):
2234 parents = self.parents()
2234 parents = self.parents()
2235 man = parents[0].manifest().copy()
2235 man = parents[0].manifest().copy()
2236
2236
2237 flag = self._flagfunc
2237 flag = self._flagfunc
2238 for path in self.added():
2238 for path in self.added():
2239 man[path] = addednodeid
2239 man[path] = addednodeid
2240 man.setflag(path, flag(path))
2240 man.setflag(path, flag(path))
2241 for path in self.modified():
2241 for path in self.modified():
2242 man[path] = modifiednodeid
2242 man[path] = modifiednodeid
2243 man.setflag(path, flag(path))
2243 man.setflag(path, flag(path))
2244 for path in self.removed():
2244 for path in self.removed():
2245 del man[path]
2245 del man[path]
2246 return man
2246 return man
2247
2247
2248 @propertycache
2248 @propertycache
2249 def _flagfunc(self):
2249 def _flagfunc(self):
2250 def f(path):
2250 def f(path):
2251 return self._cache[path][b'flags']
2251 return self._cache[path][b'flags']
2252
2252
2253 return f
2253 return f
2254
2254
2255 def files(self):
2255 def files(self):
2256 return sorted(self.added() + self.modified() + self.removed())
2256 return sorted(self.added() + self.modified() + self.removed())
2257
2257
2258 def modified(self):
2258 def modified(self):
2259 return [
2259 return [
2260 f
2260 f
2261 for f in self._cache.keys()
2261 for f in self._cache.keys()
2262 if self._cache[f][b'exists'] and self._existsinparent(f)
2262 if self._cache[f][b'exists'] and self._existsinparent(f)
2263 ]
2263 ]
2264
2264
2265 def added(self):
2265 def added(self):
2266 return [
2266 return [
2267 f
2267 f
2268 for f in self._cache.keys()
2268 for f in self._cache.keys()
2269 if self._cache[f][b'exists'] and not self._existsinparent(f)
2269 if self._cache[f][b'exists'] and not self._existsinparent(f)
2270 ]
2270 ]
2271
2271
2272 def removed(self):
2272 def removed(self):
2273 return [
2273 return [
2274 f
2274 f
2275 for f in self._cache.keys()
2275 for f in self._cache.keys()
2276 if not self._cache[f][b'exists'] and self._existsinparent(f)
2276 if not self._cache[f][b'exists'] and self._existsinparent(f)
2277 ]
2277 ]
2278
2278
2279 def p1copies(self):
2279 def p1copies(self):
2280 copies = {}
2280 copies = {}
2281 narrowmatch = self._repo.narrowmatch()
2281 narrowmatch = self._repo.narrowmatch()
2282 for f in self._cache.keys():
2282 for f in self._cache.keys():
2283 if not narrowmatch(f):
2283 if not narrowmatch(f):
2284 continue
2284 continue
2285 copies.pop(f, None) # delete if it exists
2285 copies.pop(f, None) # delete if it exists
2286 source = self._cache[f][b'copied']
2286 source = self._cache[f][b'copied']
2287 if source:
2287 if source:
2288 copies[f] = source
2288 copies[f] = source
2289 return copies
2289 return copies
2290
2290
2291 def p2copies(self):
2291 def p2copies(self):
2292 copies = {}
2292 copies = {}
2293 narrowmatch = self._repo.narrowmatch()
2293 narrowmatch = self._repo.narrowmatch()
2294 for f in self._cache.keys():
2294 for f in self._cache.keys():
2295 if not narrowmatch(f):
2295 if not narrowmatch(f):
2296 continue
2296 continue
2297 copies.pop(f, None) # delete if it exists
2297 copies.pop(f, None) # delete if it exists
2298 source = self._cache[f][b'copied']
2298 source = self._cache[f][b'copied']
2299 if source:
2299 if source:
2300 copies[f] = source
2300 copies[f] = source
2301 return copies
2301 return copies
2302
2302
2303 def isinmemory(self):
2303 def isinmemory(self):
2304 return True
2304 return True
2305
2305
2306 def filedate(self, path):
2306 def filedate(self, path):
2307 if self.isdirty(path):
2307 if self.isdirty(path):
2308 return self._cache[path][b'date']
2308 return self._cache[path][b'date']
2309 else:
2309 else:
2310 return self._wrappedctx[path].date()
2310 return self._wrappedctx[path].date()
2311
2311
2312 def markcopied(self, path, origin):
2312 def markcopied(self, path, origin):
2313 self._markdirty(
2313 self._markdirty(
2314 path,
2314 path,
2315 exists=True,
2315 exists=True,
2316 date=self.filedate(path),
2316 date=self.filedate(path),
2317 flags=self.flags(path),
2317 flags=self.flags(path),
2318 copied=origin,
2318 copied=origin,
2319 )
2319 )
2320
2320
2321 def copydata(self, path):
2321 def copydata(self, path):
2322 if self.isdirty(path):
2322 if self.isdirty(path):
2323 return self._cache[path][b'copied']
2323 return self._cache[path][b'copied']
2324 else:
2324 else:
2325 return None
2325 return None
2326
2326
2327 def flags(self, path):
2327 def flags(self, path):
2328 if self.isdirty(path):
2328 if self.isdirty(path):
2329 if self._cache[path][b'exists']:
2329 if self._cache[path][b'exists']:
2330 return self._cache[path][b'flags']
2330 return self._cache[path][b'flags']
2331 else:
2331 else:
2332 raise error.ProgrammingError(
2332 raise error.ProgrammingError(
2333 b"No such file or directory: %s" % path
2333 b"No such file or directory: %s" % path
2334 )
2334 )
2335 else:
2335 else:
2336 return self._wrappedctx[path].flags()
2336 return self._wrappedctx[path].flags()
2337
2337
2338 def __contains__(self, key):
2338 def __contains__(self, key):
2339 if key in self._cache:
2339 if key in self._cache:
2340 return self._cache[key][b'exists']
2340 return self._cache[key][b'exists']
2341 return key in self.p1()
2341 return key in self.p1()
2342
2342
2343 def _existsinparent(self, path):
2343 def _existsinparent(self, path):
2344 try:
2344 try:
2345 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2345 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2346 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2346 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2347 # with an ``exists()`` function.
2347 # with an ``exists()`` function.
2348 self._wrappedctx[path]
2348 self._wrappedctx[path]
2349 return True
2349 return True
2350 except error.ManifestLookupError:
2350 except error.ManifestLookupError:
2351 return False
2351 return False
2352
2352
2353 def _auditconflicts(self, path):
2353 def _auditconflicts(self, path):
2354 """Replicates conflict checks done by wvfs.write().
2354 """Replicates conflict checks done by wvfs.write().
2355
2355
2356 Since we never write to the filesystem and never call `applyupdates` in
2356 Since we never write to the filesystem and never call `applyupdates` in
2357 IMM, we'll never check that a path is actually writable -- e.g., because
2357 IMM, we'll never check that a path is actually writable -- e.g., because
2358 it adds `a/foo`, but `a` is actually a file in the other commit.
2358 it adds `a/foo`, but `a` is actually a file in the other commit.
2359 """
2359 """
2360
2360
2361 def fail(path, component):
2361 def fail(path, component):
2362 # p1() is the base and we're receiving "writes" for p2()'s
2362 # p1() is the base and we're receiving "writes" for p2()'s
2363 # files.
2363 # files.
2364 if b'l' in self.p1()[component].flags():
2364 if b'l' in self.p1()[component].flags():
2365 raise error.Abort(
2365 raise error.Abort(
2366 b"error: %s conflicts with symlink %s "
2366 b"error: %s conflicts with symlink %s "
2367 b"in %d." % (path, component, self.p1().rev())
2367 b"in %d." % (path, component, self.p1().rev())
2368 )
2368 )
2369 else:
2369 else:
2370 raise error.Abort(
2370 raise error.Abort(
2371 b"error: '%s' conflicts with file '%s' in "
2371 b"error: '%s' conflicts with file '%s' in "
2372 b"%d." % (path, component, self.p1().rev())
2372 b"%d." % (path, component, self.p1().rev())
2373 )
2373 )
2374
2374
2375 # Test that each new directory to be created to write this path from p2
2375 # Test that each new directory to be created to write this path from p2
2376 # is not a file in p1.
2376 # is not a file in p1.
2377 components = path.split(b'/')
2377 components = path.split(b'/')
2378 for i in pycompat.xrange(len(components)):
2378 for i in pycompat.xrange(len(components)):
2379 component = b"/".join(components[0:i])
2379 component = b"/".join(components[0:i])
2380 if component in self:
2380 if component in self:
2381 fail(path, component)
2381 fail(path, component)
2382
2382
2383 # Test the other direction -- that this path from p2 isn't a directory
2383 # Test the other direction -- that this path from p2 isn't a directory
2384 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2384 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2385 match = self.match([path], default=b'path')
2385 match = self.match([path], default=b'path')
2386 mfiles = list(self.p1().manifest().walk(match))
2386 mfiles = list(self.p1().manifest().walk(match))
2387 if len(mfiles) > 0:
2387 if len(mfiles) > 0:
2388 if len(mfiles) == 1 and mfiles[0] == path:
2388 if len(mfiles) == 1 and mfiles[0] == path:
2389 return
2389 return
2390 # omit the files which are deleted in current IMM wctx
2390 # omit the files which are deleted in current IMM wctx
2391 mfiles = [m for m in mfiles if m in self]
2391 mfiles = [m for m in mfiles if m in self]
2392 if not mfiles:
2392 if not mfiles:
2393 return
2393 return
2394 raise error.Abort(
2394 raise error.Abort(
2395 b"error: file '%s' cannot be written because "
2395 b"error: file '%s' cannot be written because "
2396 b" '%s/' is a directory in %s (containing %d "
2396 b" '%s/' is a directory in %s (containing %d "
2397 b"entries: %s)"
2397 b"entries: %s)"
2398 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2398 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2399 )
2399 )
2400
2400
2401 def write(self, path, data, flags=b'', **kwargs):
2401 def write(self, path, data, flags=b'', **kwargs):
2402 if data is None:
2402 if data is None:
2403 raise error.ProgrammingError(b"data must be non-None")
2403 raise error.ProgrammingError(b"data must be non-None")
2404 self._auditconflicts(path)
2404 self._auditconflicts(path)
2405 self._markdirty(
2405 self._markdirty(
2406 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2406 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2407 )
2407 )
2408
2408
2409 def setflags(self, path, l, x):
2409 def setflags(self, path, l, x):
2410 flag = b''
2410 flag = b''
2411 if l:
2411 if l:
2412 flag = b'l'
2412 flag = b'l'
2413 elif x:
2413 elif x:
2414 flag = b'x'
2414 flag = b'x'
2415 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2415 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2416
2416
2417 def remove(self, path):
2417 def remove(self, path):
2418 self._markdirty(path, exists=False)
2418 self._markdirty(path, exists=False)
2419
2419
2420 def exists(self, path):
2420 def exists(self, path):
2421 """exists behaves like `lexists`, but needs to follow symlinks and
2421 """exists behaves like `lexists`, but needs to follow symlinks and
2422 return False if they are broken.
2422 return False if they are broken.
2423 """
2423 """
2424 if self.isdirty(path):
2424 if self.isdirty(path):
2425 # If this path exists and is a symlink, "follow" it by calling
2425 # If this path exists and is a symlink, "follow" it by calling
2426 # exists on the destination path.
2426 # exists on the destination path.
2427 if (
2427 if (
2428 self._cache[path][b'exists']
2428 self._cache[path][b'exists']
2429 and b'l' in self._cache[path][b'flags']
2429 and b'l' in self._cache[path][b'flags']
2430 ):
2430 ):
2431 return self.exists(self._cache[path][b'data'].strip())
2431 return self.exists(self._cache[path][b'data'].strip())
2432 else:
2432 else:
2433 return self._cache[path][b'exists']
2433 return self._cache[path][b'exists']
2434
2434
2435 return self._existsinparent(path)
2435 return self._existsinparent(path)
2436
2436
2437 def lexists(self, path):
2437 def lexists(self, path):
2438 """lexists returns True if the path exists"""
2438 """lexists returns True if the path exists"""
2439 if self.isdirty(path):
2439 if self.isdirty(path):
2440 return self._cache[path][b'exists']
2440 return self._cache[path][b'exists']
2441
2441
2442 return self._existsinparent(path)
2442 return self._existsinparent(path)
2443
2443
2444 def size(self, path):
2444 def size(self, path):
2445 if self.isdirty(path):
2445 if self.isdirty(path):
2446 if self._cache[path][b'exists']:
2446 if self._cache[path][b'exists']:
2447 return len(self._cache[path][b'data'])
2447 return len(self._cache[path][b'data'])
2448 else:
2448 else:
2449 raise error.ProgrammingError(
2449 raise error.ProgrammingError(
2450 b"No such file or directory: %s" % path
2450 b"No such file or directory: %s" % path
2451 )
2451 )
2452 return self._wrappedctx[path].size()
2452 return self._wrappedctx[path].size()
2453
2453
2454 def tomemctx(
2454 def tomemctx(
2455 self,
2455 self,
2456 text,
2456 text,
2457 branch=None,
2457 branch=None,
2458 extra=None,
2458 extra=None,
2459 date=None,
2459 date=None,
2460 parents=None,
2460 parents=None,
2461 user=None,
2461 user=None,
2462 editor=None,
2462 editor=None,
2463 ):
2463 ):
2464 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2464 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2465 committed.
2465 committed.
2466
2466
2467 ``text`` is the commit message.
2467 ``text`` is the commit message.
2468 ``parents`` (optional) are rev numbers.
2468 ``parents`` (optional) are rev numbers.
2469 """
2469 """
2470 # Default parents to the wrapped context if not passed.
2470 # Default parents to the wrapped context if not passed.
2471 if parents is None:
2471 if parents is None:
2472 parents = self.parents()
2472 parents = self.parents()
2473 if len(parents) == 1:
2473 if len(parents) == 1:
2474 parents = (parents[0], None)
2474 parents = (parents[0], None)
2475
2475
2476 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2476 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2477 if parents[1] is None:
2477 if parents[1] is None:
2478 parents = (self._repo[parents[0]], None)
2478 parents = (self._repo[parents[0]], None)
2479 else:
2479 else:
2480 parents = (self._repo[parents[0]], self._repo[parents[1]])
2480 parents = (self._repo[parents[0]], self._repo[parents[1]])
2481
2481
2482 files = self.files()
2482 files = self.files()
2483
2483
2484 def getfile(repo, memctx, path):
2484 def getfile(repo, memctx, path):
2485 if self._cache[path][b'exists']:
2485 if self._cache[path][b'exists']:
2486 return memfilectx(
2486 return memfilectx(
2487 repo,
2487 repo,
2488 memctx,
2488 memctx,
2489 path,
2489 path,
2490 self._cache[path][b'data'],
2490 self._cache[path][b'data'],
2491 b'l' in self._cache[path][b'flags'],
2491 b'l' in self._cache[path][b'flags'],
2492 b'x' in self._cache[path][b'flags'],
2492 b'x' in self._cache[path][b'flags'],
2493 self._cache[path][b'copied'],
2493 self._cache[path][b'copied'],
2494 )
2494 )
2495 else:
2495 else:
2496 # Returning None, but including the path in `files`, is
2496 # Returning None, but including the path in `files`, is
2497 # necessary for memctx to register a deletion.
2497 # necessary for memctx to register a deletion.
2498 return None
2498 return None
2499
2499
2500 if branch is None:
2500 if branch is None:
2501 branch = self._wrappedctx.branch()
2501 branch = self._wrappedctx.branch()
2502
2502
2503 return memctx(
2503 return memctx(
2504 self._repo,
2504 self._repo,
2505 parents,
2505 parents,
2506 text,
2506 text,
2507 files,
2507 files,
2508 getfile,
2508 getfile,
2509 date=date,
2509 date=date,
2510 extra=extra,
2510 extra=extra,
2511 user=user,
2511 user=user,
2512 branch=branch,
2512 branch=branch,
2513 editor=editor,
2513 editor=editor,
2514 )
2514 )
2515
2515
2516 def tomemctx_for_amend(self, precursor):
2516 def tomemctx_for_amend(self, precursor):
2517 extra = precursor.extra().copy()
2517 extra = precursor.extra().copy()
2518 extra[b'amend_source'] = precursor.hex()
2518 extra[b'amend_source'] = precursor.hex()
2519 return self.tomemctx(
2519 return self.tomemctx(
2520 text=precursor.description(),
2520 text=precursor.description(),
2521 branch=precursor.branch(),
2521 branch=precursor.branch(),
2522 extra=extra,
2522 extra=extra,
2523 date=precursor.date(),
2523 date=precursor.date(),
2524 user=precursor.user(),
2524 user=precursor.user(),
2525 )
2525 )
2526
2526
2527 def isdirty(self, path):
2527 def isdirty(self, path):
2528 return path in self._cache
2528 return path in self._cache
2529
2529
2530 def clean(self):
2530 def clean(self):
2531 self._cache = {}
2531 self._cache = {}
2532
2532
2533 def _compact(self):
2533 def _compact(self):
2534 """Removes keys from the cache that are actually clean, by comparing
2534 """Removes keys from the cache that are actually clean, by comparing
2535 them with the underlying context.
2535 them with the underlying context.
2536
2536
2537 This can occur during the merge process, e.g. by passing --tool :local
2537 This can occur during the merge process, e.g. by passing --tool :local
2538 to resolve a conflict.
2538 to resolve a conflict.
2539 """
2539 """
2540 keys = []
2540 keys = []
2541 # This won't be perfect, but can help performance significantly when
2541 # This won't be perfect, but can help performance significantly when
2542 # using things like remotefilelog.
2542 # using things like remotefilelog.
2543 scmutil.prefetchfiles(
2543 scmutil.prefetchfiles(
2544 self.repo(),
2544 self.repo(),
2545 [
2545 [
2546 (
2546 (
2547 self.p1().rev(),
2547 self.p1().rev(),
2548 scmutil.matchfiles(self.repo(), self._cache.keys()),
2548 scmutil.matchfiles(self.repo(), self._cache.keys()),
2549 )
2549 )
2550 ],
2550 ],
2551 )
2551 )
2552
2552
2553 for path in self._cache.keys():
2553 for path in self._cache.keys():
2554 cache = self._cache[path]
2554 cache = self._cache[path]
2555 try:
2555 try:
2556 underlying = self._wrappedctx[path]
2556 underlying = self._wrappedctx[path]
2557 if (
2557 if (
2558 underlying.data() == cache[b'data']
2558 underlying.data() == cache[b'data']
2559 and underlying.flags() == cache[b'flags']
2559 and underlying.flags() == cache[b'flags']
2560 ):
2560 ):
2561 keys.append(path)
2561 keys.append(path)
2562 except error.ManifestLookupError:
2562 except error.ManifestLookupError:
2563 # Path not in the underlying manifest (created).
2563 # Path not in the underlying manifest (created).
2564 continue
2564 continue
2565
2565
2566 for path in keys:
2566 for path in keys:
2567 del self._cache[path]
2567 del self._cache[path]
2568 return keys
2568 return keys
2569
2569
2570 def _markdirty(
2570 def _markdirty(
2571 self, path, exists, data=None, date=None, flags=b'', copied=None
2571 self, path, exists, data=None, date=None, flags=b'', copied=None
2572 ):
2572 ):
2573 # data not provided, let's see if we already have some; if not, let's
2573 # data not provided, let's see if we already have some; if not, let's
2574 # grab it from our underlying context, so that we always have data if
2574 # grab it from our underlying context, so that we always have data if
2575 # the file is marked as existing.
2575 # the file is marked as existing.
2576 if exists and data is None:
2576 if exists and data is None:
2577 oldentry = self._cache.get(path) or {}
2577 oldentry = self._cache.get(path) or {}
2578 data = oldentry.get(b'data')
2578 data = oldentry.get(b'data')
2579 if data is None:
2579 if data is None:
2580 data = self._wrappedctx[path].data()
2580 data = self._wrappedctx[path].data()
2581
2581
2582 self._cache[path] = {
2582 self._cache[path] = {
2583 b'exists': exists,
2583 b'exists': exists,
2584 b'data': data,
2584 b'data': data,
2585 b'date': date,
2585 b'date': date,
2586 b'flags': flags,
2586 b'flags': flags,
2587 b'copied': copied,
2587 b'copied': copied,
2588 }
2588 }
2589
2589
2590 def filectx(self, path, filelog=None):
2590 def filectx(self, path, filelog=None):
2591 return overlayworkingfilectx(
2591 return overlayworkingfilectx(
2592 self._repo, path, parent=self, filelog=filelog
2592 self._repo, path, parent=self, filelog=filelog
2593 )
2593 )
2594
2594
2595
2595
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; every method below
        # delegates to it, passing this file's path.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # Returns True when the contents differ.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        # Delegate to the overlay, which prefers cached writes.
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # ``exists`` follows symlinks in the overlay via ``lexists`` here;
        # symlink resolution happens in the parent's exists().
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem access happens in-memory, so there is nothing to
        # audit.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` only matters for on-disk writes; ignored here.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        # ``ignoremissing`` only matters for on-disk removal; ignored here.
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing to clear: no unknown on-disk files exist in-memory.
        pass
2650
2650
2651
2651
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        # Files not part of this commit are reported as clean on request.
        if clean:
            untouched = self._changedset
            cleanfiles = [f for f in self._manifest if f not in untouched]
        else:
            cleanfiles = []
        status = self._status
        modified = [f for f in status.modified if match(f)]
        added = [f for f in status.added if match(f)]
        removed = [f for f in status.removed if match(f)]
        # deleted/unknown/ignored are always empty: everything reported
        # here was explicitly selected for the commit.
        return scmutil.status(modified, added, removed, [], [], [], cleanfiles)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        status = self._status
        return set(status.modified) | set(status.added) | set(status.removed)
2694 return changed
2695
2695
2696
2696
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    memo = {}

    def getfilectx(repo, memctx, path):
        # Compute each path at most once; later calls reuse the result.
        try:
            return memo[path]
        except KeyError:
            result = func(repo, memctx, path)
            memo[path] = result
            return result

    return getfilectx
2712
2712
2713
2713
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # Pull content, flags and copy metadata straight from ``ctx``.
        fctx = ctx[path]
        source = fctx.copysource()
        data = fctx.data()
        link = fctx.islink()
        execf = fctx.isexec()
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=link,
            isexec=execf,
            copysource=source,
        )

    return getfilectx
2735
2735
2736
2736
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        # ``None`` data signals a file deleted by the patch; memctx
        # interprets a None filectx as a deletion.
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2759
2759
2760
2760
2761 class memctx(committablectx):
2761 class memctx(committablectx):
2762 """Use memctx to perform in-memory commits via localrepo.commitctx().
2762 """Use memctx to perform in-memory commits via localrepo.commitctx().
2763
2763
2764 Revision information is supplied at initialization time while
2764 Revision information is supplied at initialization time while
2765 related files data and is made available through a callback
2765 related files data and is made available through a callback
2766 mechanism. 'repo' is the current localrepo, 'parents' is a
2766 mechanism. 'repo' is the current localrepo, 'parents' is a
2767 sequence of two parent revisions identifiers (pass None for every
2767 sequence of two parent revisions identifiers (pass None for every
2768 missing parent), 'text' is the commit message and 'files' lists
2768 missing parent), 'text' is the commit message and 'files' lists
2769 names of files touched by the revision (normalized and relative to
2769 names of files touched by the revision (normalized and relative to
2770 repository root).
2770 repository root).
2771
2771
2772 filectxfn(repo, memctx, path) is a callable receiving the
2772 filectxfn(repo, memctx, path) is a callable receiving the
2773 repository, the current memctx object and the normalized path of
2773 repository, the current memctx object and the normalized path of
2774 requested file, relative to repository root. It is fired by the
2774 requested file, relative to repository root. It is fired by the
2775 commit function for every file in 'files', but calls order is
2775 commit function for every file in 'files', but calls order is
2776 undefined. If the file is available in the revision being
2776 undefined. If the file is available in the revision being
2777 committed (updated or added), filectxfn returns a memfilectx
2777 committed (updated or added), filectxfn returns a memfilectx
2778 object. If the file was removed, filectxfn return None for recent
2778 object. If the file was removed, filectxfn return None for recent
2779 Mercurial. Moved files are represented by marking the source file
2779 Mercurial. Moved files are represented by marking the source file
2780 removed and the new file added with copy information (see
2780 removed and the new file added with copy information (see
2781 memfilectx).
2781 memfilectx).
2782
2782
2783 user receives the committer name and defaults to current
2783 user receives the committer name and defaults to current
2784 repository username, date is the commit date in any format
2784 repository username, date is the commit date in any format
2785 supported by dateutil.parsedate() and defaults to current date, extra
2785 supported by dateutil.parsedate() and defaults to current date, extra
2786 is a dictionary of metadata or is left empty.
2786 is a dictionary of metadata or is left empty.
2787 """
2787 """
2788
2788
2789 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2789 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2790 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2790 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2791 # this field to determine what to do in filectxfn.
2791 # this field to determine what to do in filectxfn.
2792 _returnnoneformissingfiles = True
2792 _returnnoneformissingfiles = True
2793
2793
2794 def __init__(
2794 def __init__(
2795 self,
2795 self,
2796 repo,
2796 repo,
2797 parents,
2797 parents,
2798 text,
2798 text,
2799 files,
2799 files,
2800 filectxfn,
2800 filectxfn,
2801 user=None,
2801 user=None,
2802 date=None,
2802 date=None,
2803 extra=None,
2803 extra=None,
2804 branch=None,
2804 branch=None,
2805 editor=None,
2805 editor=None,
2806 ):
2806 ):
2807 super(memctx, self).__init__(
2807 super(memctx, self).__init__(
2808 repo, text, user, date, extra, branch=branch
2808 repo, text, user, date, extra, branch=branch
2809 )
2809 )
2810 self._rev = None
2810 self._rev = None
2811 self._node = None
2811 self._node = None
2812 parents = [(p or nullid) for p in parents]
2812 parents = [(p or nullid) for p in parents]
2813 p1, p2 = parents
2813 p1, p2 = parents
2814 self._parents = [self._repo[p] for p in (p1, p2)]
2814 self._parents = [self._repo[p] for p in (p1, p2)]
2815 files = sorted(set(files))
2815 files = sorted(set(files))
2816 self._files = files
2816 self._files = files
2817 self.substate = {}
2817 self.substate = {}
2818
2818
2819 if isinstance(filectxfn, patch.filestore):
2819 if isinstance(filectxfn, patch.filestore):
2820 filectxfn = memfilefrompatch(filectxfn)
2820 filectxfn = memfilefrompatch(filectxfn)
2821 elif not callable(filectxfn):
2821 elif not callable(filectxfn):
2822 # if store is not callable, wrap it in a function
2822 # if store is not callable, wrap it in a function
2823 filectxfn = memfilefromctx(filectxfn)
2823 filectxfn = memfilefromctx(filectxfn)
2824
2824
2825 # memoizing increases performance for e.g. vcs convert scenarios.
2825 # memoizing increases performance for e.g. vcs convert scenarios.
2826 self._filectxfn = makecachingfilectxfn(filectxfn)
2826 self._filectxfn = makecachingfilectxfn(filectxfn)
2827
2827
2828 if editor:
2828 if editor:
2829 self._text = editor(self._repo, self, [])
2829 self._text = editor(self._repo, self, [])
2830 self._repo.savecommitmessage(self._text)
2830 self._repo.savecommitmessage(self._text)
2831
2831
2832 def filectx(self, path, filelog=None):
2832 def filectx(self, path, filelog=None):
2833 """get a file context from the working directory
2833 """get a file context from the working directory
2834
2834
2835 Returns None if file doesn't exist and should be removed."""
2835 Returns None if file doesn't exist and should be removed."""
2836 return self._filectxfn(self._repo, self, path)
2836 return self._filectxfn(self._repo, self, path)
2837
2837
2838 def commit(self):
2838 def commit(self):
2839 """commit context to the repo"""
2839 """commit context to the repo"""
2840 return self._repo.commitctx(self)
2840 return self._repo.commitctx(self)
2841
2841
2842 @propertycache
2842 @propertycache
2843 def _manifest(self):
2843 def _manifest(self):
2844 """generate a manifest based on the return values of filectxfn"""
2844 """generate a manifest based on the return values of filectxfn"""
2845
2845
2846 # keep this simple for now; just worry about p1
2846 # keep this simple for now; just worry about p1
2847 pctx = self._parents[0]
2847 pctx = self._parents[0]
2848 man = pctx.manifest().copy()
2848 man = pctx.manifest().copy()
2849
2849
2850 for f in self._status.modified:
2850 for f in self._status.modified:
2851 man[f] = modifiednodeid
2851 man[f] = modifiednodeid
2852
2852
2853 for f in self._status.added:
2853 for f in self._status.added:
2854 man[f] = addednodeid
2854 man[f] = addednodeid
2855
2855
2856 for f in self._status.removed:
2856 for f in self._status.removed:
2857 if f in man:
2857 if f in man:
2858 del man[f]
2858 del man[f]
2859
2859
2860 return man
2860 return man
2861
2861
2862 @propertycache
2862 @propertycache
2863 def _status(self):
2863 def _status(self):
2864 """Calculate exact status from ``files`` specified at construction
2864 """Calculate exact status from ``files`` specified at construction
2865 """
2865 """
2866 man1 = self.p1().manifest()
2866 man1 = self.p1().manifest()
2867 p2 = self._parents[1]
2867 p2 = self._parents[1]
2868 # "1 < len(self._parents)" can't be used for checking
2868 # "1 < len(self._parents)" can't be used for checking
2869 # existence of the 2nd parent, because "memctx._parents" is
2869 # existence of the 2nd parent, because "memctx._parents" is
2870 # explicitly initialized by the list, of which length is 2.
2870 # explicitly initialized by the list, of which length is 2.
2871 if p2.node() != nullid:
2871 if p2.node() != nullid:
2872 man2 = p2.manifest()
2872 man2 = p2.manifest()
2873 managing = lambda f: f in man1 or f in man2
2873 managing = lambda f: f in man1 or f in man2
2874 else:
2874 else:
2875 managing = lambda f: f in man1
2875 managing = lambda f: f in man1
2876
2876
2877 modified, added, removed = [], [], []
2877 modified, added, removed = [], [], []
2878 for f in self._files:
2878 for f in self._files:
2879 if not managing(f):
2879 if not managing(f):
2880 added.append(f)
2880 added.append(f)
2881 elif self[f]:
2881 elif self[f]:
2882 modified.append(f)
2882 modified.append(f)
2883 else:
2883 else:
2884 removed.append(f)
2884 removed.append(f)
2885
2885
2886 return scmutil.status(modified, added, removed, [], [], [], [])
2886 return scmutil.status(modified, added, removed, [], [], [], [])
2887
2887
2888 def parents(self):
2888 def parents(self):
2889 if self._parents[1].node() == nullid:
2889 if self._parents[1].node() == nullid:
2890 return [self._parents[0]]
2890 return [self._parents[0]]
2891 return self._parents
2891 return self._parents
2892
2892
2893
2893
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None.
        """
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the file type as its manifest flag byte.
        if islink:
            flag = b'l'
        elif isexec:
            flag = b'x'
        else:
            flag = b''
        self._flags = flag
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path, or None if this is not a copy."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from fctx's."""
        mine = self.data()
        return mine != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2944
2944
2945
2945
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # Reuse the original revision's manifest node verbatim.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            # None entries stand for missing parents and are dropped here;
            # null-revision padding happens in the while loop below.
            parents = [repo[p] for p in parents if p is not None]
        # copy before padding so a caller-supplied list is never mutated
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are context objects being compared against the
        # nullid bytes — presumably relies on basectx equality semantics;
        # confirm this does what the null-parent guard intends.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node id of the manifest being reused
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data comes from the revision whose manifest we reuse
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3054
3054
3055
3055
class arbitraryfilectx(object):
    """Filectx-like access to a file at an arbitrary location on disk,
    which may live outside the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's contents differ from fctx's."""
        # filecmp follows symlinks whereas `cmp` should not, so the
        # disk-to-disk fast path is only safe when neither side is one.
        eitherlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not eitherlink and isinstance(fctx, workingfilectx) and self._repo:
            # Both sides are disk-backed.  filecmp returns True when the
            # files are the same — the opposite of cmp()'s convention
            # (True when different) — hence the negation.
            same = filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
            return not same
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        """Arbitrary files carry no flags (never a link or executable)."""
        return b''

    def data(self):
        """Return the raw file contents."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file contents without any filter/decoding applied."""
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        """Delete the underlying file."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Overwrite the underlying file with ``data``; flags unsupported."""
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now