##// END OF EJS Templates
context: fix creation of ProgrammingError to not use non-existent field...
Martin von Zweigbergk -
r45460:b2e5ec0c default
parent child Browse files
Show More
@@ -1,3085 +1,3085 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 mergestate as mergestatemod,
37 mergestate as mergestatemod,
38 obsolete as obsmod,
38 obsolete as obsmod,
39 patch,
39 patch,
40 pathutil,
40 pathutil,
41 phases,
41 phases,
42 pycompat,
42 pycompat,
43 repoview,
43 repoview,
44 scmutil,
44 scmutil,
45 sparse,
45 sparse,
46 subrepo,
46 subrepo,
47 subrepoutil,
47 subrepoutil,
48 util,
48 util,
49 )
49 )
50 from .utils import (
50 from .utils import (
51 dateutil,
51 dateutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 propertycache = util.propertycache
55 propertycache = util.propertycache
56
56
57
57
58 class basectx(object):
58 class basectx(object):
59 """A basectx object represents the common logic for its children:
59 """A basectx object represents the common logic for its children:
60 changectx: read-only context that is already present in the repo,
60 changectx: read-only context that is already present in the repo,
61 workingctx: a context that represents the working directory and can
61 workingctx: a context that represents the working directory and can
62 be committed,
62 be committed,
63 memctx: a context that represents changes in-memory and can also
63 memctx: a context that represents changes in-memory and can also
64 be committed."""
64 be committed."""
65
65
66 def __init__(self, repo):
66 def __init__(self, repo):
67 self._repo = repo
67 self._repo = repo
68
68
69 def __bytes__(self):
69 def __bytes__(self):
70 return short(self.node())
70 return short(self.node())
71
71
72 __str__ = encoding.strmethod(__bytes__)
72 __str__ = encoding.strmethod(__bytes__)
73
73
74 def __repr__(self):
74 def __repr__(self):
75 return "<%s %s>" % (type(self).__name__, str(self))
75 return "<%s %s>" % (type(self).__name__, str(self))
76
76
77 def __eq__(self, other):
77 def __eq__(self, other):
78 try:
78 try:
79 return type(self) == type(other) and self._rev == other._rev
79 return type(self) == type(other) and self._rev == other._rev
80 except AttributeError:
80 except AttributeError:
81 return False
81 return False
82
82
83 def __ne__(self, other):
83 def __ne__(self, other):
84 return not (self == other)
84 return not (self == other)
85
85
86 def __contains__(self, key):
86 def __contains__(self, key):
87 return key in self._manifest
87 return key in self._manifest
88
88
89 def __getitem__(self, key):
89 def __getitem__(self, key):
90 return self.filectx(key)
90 return self.filectx(key)
91
91
92 def __iter__(self):
92 def __iter__(self):
93 return iter(self._manifest)
93 return iter(self._manifest)
94
94
95 def _buildstatusmanifest(self, status):
95 def _buildstatusmanifest(self, status):
96 """Builds a manifest that includes the given status results, if this is
96 """Builds a manifest that includes the given status results, if this is
97 a working copy context. For non-working copy contexts, it just returns
97 a working copy context. For non-working copy contexts, it just returns
98 the normal manifest."""
98 the normal manifest."""
99 return self.manifest()
99 return self.manifest()
100
100
101 def _matchstatus(self, other, match):
101 def _matchstatus(self, other, match):
102 """This internal method provides a way for child objects to override the
102 """This internal method provides a way for child objects to override the
103 match operator.
103 match operator.
104 """
104 """
105 return match
105 return match
106
106
107 def _buildstatus(
107 def _buildstatus(
108 self, other, s, match, listignored, listclean, listunknown
108 self, other, s, match, listignored, listclean, listunknown
109 ):
109 ):
110 """build a status with respect to another context"""
110 """build a status with respect to another context"""
111 # Load earliest manifest first for caching reasons. More specifically,
111 # Load earliest manifest first for caching reasons. More specifically,
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # if you have revisions 1000 and 1001, 1001 is probably stored as a
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
114 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # 1000 and cache it so that when you read 1001, we just need to apply a
115 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta to what's in the cache. So that's one full reconstruction + one
116 # delta application.
116 # delta application.
117 mf2 = None
117 mf2 = None
118 if self.rev() is not None and self.rev() < other.rev():
118 if self.rev() is not None and self.rev() < other.rev():
119 mf2 = self._buildstatusmanifest(s)
119 mf2 = self._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
120 mf1 = other._buildstatusmanifest(s)
121 if mf2 is None:
121 if mf2 is None:
122 mf2 = self._buildstatusmanifest(s)
122 mf2 = self._buildstatusmanifest(s)
123
123
124 modified, added = [], []
124 modified, added = [], []
125 removed = []
125 removed = []
126 clean = []
126 clean = []
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
128 deletedset = set(deleted)
128 deletedset = set(deleted)
129 d = mf1.diff(mf2, match=match, clean=listclean)
129 d = mf1.diff(mf2, match=match, clean=listclean)
130 for fn, value in pycompat.iteritems(d):
130 for fn, value in pycompat.iteritems(d):
131 if fn in deletedset:
131 if fn in deletedset:
132 continue
132 continue
133 if value is None:
133 if value is None:
134 clean.append(fn)
134 clean.append(fn)
135 continue
135 continue
136 (node1, flag1), (node2, flag2) = value
136 (node1, flag1), (node2, flag2) = value
137 if node1 is None:
137 if node1 is None:
138 added.append(fn)
138 added.append(fn)
139 elif node2 is None:
139 elif node2 is None:
140 removed.append(fn)
140 removed.append(fn)
141 elif flag1 != flag2:
141 elif flag1 != flag2:
142 modified.append(fn)
142 modified.append(fn)
143 elif node2 not in wdirfilenodeids:
143 elif node2 not in wdirfilenodeids:
144 # When comparing files between two commits, we save time by
144 # When comparing files between two commits, we save time by
145 # not comparing the file contents when the nodeids differ.
145 # not comparing the file contents when the nodeids differ.
146 # Note that this means we incorrectly report a reverted change
146 # Note that this means we incorrectly report a reverted change
147 # to a file as a modification.
147 # to a file as a modification.
148 modified.append(fn)
148 modified.append(fn)
149 elif self[fn].cmp(other[fn]):
149 elif self[fn].cmp(other[fn]):
150 modified.append(fn)
150 modified.append(fn)
151 else:
151 else:
152 clean.append(fn)
152 clean.append(fn)
153
153
154 if removed:
154 if removed:
155 # need to filter files if they are already reported as removed
155 # need to filter files if they are already reported as removed
156 unknown = [
156 unknown = [
157 fn
157 fn
158 for fn in unknown
158 for fn in unknown
159 if fn not in mf1 and (not match or match(fn))
159 if fn not in mf1 and (not match or match(fn))
160 ]
160 ]
161 ignored = [
161 ignored = [
162 fn
162 fn
163 for fn in ignored
163 for fn in ignored
164 if fn not in mf1 and (not match or match(fn))
164 if fn not in mf1 and (not match or match(fn))
165 ]
165 ]
166 # if they're deleted, don't report them as removed
166 # if they're deleted, don't report them as removed
167 removed = [fn for fn in removed if fn not in deletedset]
167 removed = [fn for fn in removed if fn not in deletedset]
168
168
169 return scmutil.status(
169 return scmutil.status(
170 modified, added, removed, deleted, unknown, ignored, clean
170 modified, added, removed, deleted, unknown, ignored, clean
171 )
171 )
172
172
173 @propertycache
173 @propertycache
174 def substate(self):
174 def substate(self):
175 return subrepoutil.state(self, self._repo.ui)
175 return subrepoutil.state(self, self._repo.ui)
176
176
177 def subrev(self, subpath):
177 def subrev(self, subpath):
178 return self.substate[subpath][1]
178 return self.substate[subpath][1]
179
179
180 def rev(self):
180 def rev(self):
181 return self._rev
181 return self._rev
182
182
183 def node(self):
183 def node(self):
184 return self._node
184 return self._node
185
185
186 def hex(self):
186 def hex(self):
187 return hex(self.node())
187 return hex(self.node())
188
188
189 def manifest(self):
189 def manifest(self):
190 return self._manifest
190 return self._manifest
191
191
192 def manifestctx(self):
192 def manifestctx(self):
193 return self._manifestctx
193 return self._manifestctx
194
194
195 def repo(self):
195 def repo(self):
196 return self._repo
196 return self._repo
197
197
198 def phasestr(self):
198 def phasestr(self):
199 return phases.phasenames[self.phase()]
199 return phases.phasenames[self.phase()]
200
200
201 def mutable(self):
201 def mutable(self):
202 return self.phase() > phases.public
202 return self.phase() > phases.public
203
203
204 def matchfileset(self, cwd, expr, badfn=None):
204 def matchfileset(self, cwd, expr, badfn=None):
205 return fileset.match(self, cwd, expr, badfn=badfn)
205 return fileset.match(self, cwd, expr, badfn=badfn)
206
206
207 def obsolete(self):
207 def obsolete(self):
208 """True if the changeset is obsolete"""
208 """True if the changeset is obsolete"""
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
210
210
211 def extinct(self):
211 def extinct(self):
212 """True if the changeset is extinct"""
212 """True if the changeset is extinct"""
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
214
214
215 def orphan(self):
215 def orphan(self):
216 """True if the changeset is not obsolete, but its ancestor is"""
216 """True if the changeset is not obsolete, but its ancestor is"""
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
218
218
219 def phasedivergent(self):
219 def phasedivergent(self):
220 """True if the changeset tries to be a successor of a public changeset
220 """True if the changeset tries to be a successor of a public changeset
221
221
222 Only non-public and non-obsolete changesets may be phase-divergent.
222 Only non-public and non-obsolete changesets may be phase-divergent.
223 """
223 """
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
225
225
226 def contentdivergent(self):
226 def contentdivergent(self):
227 """Is a successor of a changeset with multiple possible successor sets
227 """Is a successor of a changeset with multiple possible successor sets
228
228
229 Only non-public and non-obsolete changesets may be content-divergent.
229 Only non-public and non-obsolete changesets may be content-divergent.
230 """
230 """
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
232
232
233 def isunstable(self):
233 def isunstable(self):
234 """True if the changeset is either orphan, phase-divergent or
234 """True if the changeset is either orphan, phase-divergent or
235 content-divergent"""
235 content-divergent"""
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236 return self.orphan() or self.phasedivergent() or self.contentdivergent()
237
237
238 def instabilities(self):
238 def instabilities(self):
239 """return the list of instabilities affecting this changeset.
239 """return the list of instabilities affecting this changeset.
240
240
241 Instabilities are returned as strings. possible values are:
241 Instabilities are returned as strings. possible values are:
242 - orphan,
242 - orphan,
243 - phase-divergent,
243 - phase-divergent,
244 - content-divergent.
244 - content-divergent.
245 """
245 """
246 instabilities = []
246 instabilities = []
247 if self.orphan():
247 if self.orphan():
248 instabilities.append(b'orphan')
248 instabilities.append(b'orphan')
249 if self.phasedivergent():
249 if self.phasedivergent():
250 instabilities.append(b'phase-divergent')
250 instabilities.append(b'phase-divergent')
251 if self.contentdivergent():
251 if self.contentdivergent():
252 instabilities.append(b'content-divergent')
252 instabilities.append(b'content-divergent')
253 return instabilities
253 return instabilities
254
254
255 def parents(self):
255 def parents(self):
256 """return contexts for each parent changeset"""
256 """return contexts for each parent changeset"""
257 return self._parents
257 return self._parents
258
258
259 def p1(self):
259 def p1(self):
260 return self._parents[0]
260 return self._parents[0]
261
261
262 def p2(self):
262 def p2(self):
263 parents = self._parents
263 parents = self._parents
264 if len(parents) == 2:
264 if len(parents) == 2:
265 return parents[1]
265 return parents[1]
266 return self._repo[nullrev]
266 return self._repo[nullrev]
267
267
268 def _fileinfo(self, path):
268 def _fileinfo(self, path):
269 if '_manifest' in self.__dict__:
269 if '_manifest' in self.__dict__:
270 try:
270 try:
271 return self._manifest.find(path)
271 return self._manifest.find(path)
272 except KeyError:
272 except KeyError:
273 raise error.ManifestLookupError(
273 raise error.ManifestLookupError(
274 self._node, path, _(b'not found in manifest')
274 self._node, path, _(b'not found in manifest')
275 )
275 )
276 if '_manifestdelta' in self.__dict__ or path in self.files():
276 if '_manifestdelta' in self.__dict__ or path in self.files():
277 if path in self._manifestdelta:
277 if path in self._manifestdelta:
278 return (
278 return (
279 self._manifestdelta[path],
279 self._manifestdelta[path],
280 self._manifestdelta.flags(path),
280 self._manifestdelta.flags(path),
281 )
281 )
282 mfl = self._repo.manifestlog
282 mfl = self._repo.manifestlog
283 try:
283 try:
284 node, flag = mfl[self._changeset.manifest].find(path)
284 node, flag = mfl[self._changeset.manifest].find(path)
285 except KeyError:
285 except KeyError:
286 raise error.ManifestLookupError(
286 raise error.ManifestLookupError(
287 self._node, path, _(b'not found in manifest')
287 self._node, path, _(b'not found in manifest')
288 )
288 )
289
289
290 return node, flag
290 return node, flag
291
291
292 def filenode(self, path):
292 def filenode(self, path):
293 return self._fileinfo(path)[0]
293 return self._fileinfo(path)[0]
294
294
295 def flags(self, path):
295 def flags(self, path):
296 try:
296 try:
297 return self._fileinfo(path)[1]
297 return self._fileinfo(path)[1]
298 except error.LookupError:
298 except error.LookupError:
299 return b''
299 return b''
300
300
301 @propertycache
301 @propertycache
302 def _copies(self):
302 def _copies(self):
303 return copies.computechangesetcopies(self)
303 return copies.computechangesetcopies(self)
304
304
305 def p1copies(self):
305 def p1copies(self):
306 return self._copies[0]
306 return self._copies[0]
307
307
308 def p2copies(self):
308 def p2copies(self):
309 return self._copies[1]
309 return self._copies[1]
310
310
311 def sub(self, path, allowcreate=True):
311 def sub(self, path, allowcreate=True):
312 '''return a subrepo for the stored revision of path, never wdir()'''
312 '''return a subrepo for the stored revision of path, never wdir()'''
313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313 return subrepo.subrepo(self, path, allowcreate=allowcreate)
314
314
315 def nullsub(self, path, pctx):
315 def nullsub(self, path, pctx):
316 return subrepo.nullsubrepo(self, path, pctx)
316 return subrepo.nullsubrepo(self, path, pctx)
317
317
318 def workingsub(self, path):
318 def workingsub(self, path):
319 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 '''return a subrepo for the stored revision, or wdir if this is a wdir
320 context.
320 context.
321 '''
321 '''
322 return subrepo.subrepo(self, path, allowwdir=True)
322 return subrepo.subrepo(self, path, allowwdir=True)
323
323
324 def match(
324 def match(
325 self,
325 self,
326 pats=None,
326 pats=None,
327 include=None,
327 include=None,
328 exclude=None,
328 exclude=None,
329 default=b'glob',
329 default=b'glob',
330 listsubrepos=False,
330 listsubrepos=False,
331 badfn=None,
331 badfn=None,
332 cwd=None,
332 cwd=None,
333 ):
333 ):
334 r = self._repo
334 r = self._repo
335 if not cwd:
335 if not cwd:
336 cwd = r.getcwd()
336 cwd = r.getcwd()
337 return matchmod.match(
337 return matchmod.match(
338 r.root,
338 r.root,
339 cwd,
339 cwd,
340 pats,
340 pats,
341 include,
341 include,
342 exclude,
342 exclude,
343 default,
343 default,
344 auditor=r.nofsauditor,
344 auditor=r.nofsauditor,
345 ctx=self,
345 ctx=self,
346 listsubrepos=listsubrepos,
346 listsubrepos=listsubrepos,
347 badfn=badfn,
347 badfn=badfn,
348 )
348 )
349
349
350 def diff(
350 def diff(
351 self,
351 self,
352 ctx2=None,
352 ctx2=None,
353 match=None,
353 match=None,
354 changes=None,
354 changes=None,
355 opts=None,
355 opts=None,
356 losedatafn=None,
356 losedatafn=None,
357 pathfn=None,
357 pathfn=None,
358 copy=None,
358 copy=None,
359 copysourcematch=None,
359 copysourcematch=None,
360 hunksfilterfn=None,
360 hunksfilterfn=None,
361 ):
361 ):
362 """Returns a diff generator for the given contexts and matcher"""
362 """Returns a diff generator for the given contexts and matcher"""
363 if ctx2 is None:
363 if ctx2 is None:
364 ctx2 = self.p1()
364 ctx2 = self.p1()
365 if ctx2 is not None:
365 if ctx2 is not None:
366 ctx2 = self._repo[ctx2]
366 ctx2 = self._repo[ctx2]
367 return patch.diff(
367 return patch.diff(
368 self._repo,
368 self._repo,
369 ctx2,
369 ctx2,
370 self,
370 self,
371 match=match,
371 match=match,
372 changes=changes,
372 changes=changes,
373 opts=opts,
373 opts=opts,
374 losedatafn=losedatafn,
374 losedatafn=losedatafn,
375 pathfn=pathfn,
375 pathfn=pathfn,
376 copy=copy,
376 copy=copy,
377 copysourcematch=copysourcematch,
377 copysourcematch=copysourcematch,
378 hunksfilterfn=hunksfilterfn,
378 hunksfilterfn=hunksfilterfn,
379 )
379 )
380
380
381 def dirs(self):
381 def dirs(self):
382 return self._manifest.dirs()
382 return self._manifest.dirs()
383
383
384 def hasdir(self, dir):
384 def hasdir(self, dir):
385 return self._manifest.hasdir(dir)
385 return self._manifest.hasdir(dir)
386
386
387 def status(
387 def status(
388 self,
388 self,
389 other=None,
389 other=None,
390 match=None,
390 match=None,
391 listignored=False,
391 listignored=False,
392 listclean=False,
392 listclean=False,
393 listunknown=False,
393 listunknown=False,
394 listsubrepos=False,
394 listsubrepos=False,
395 ):
395 ):
396 """return status of files between two nodes or node and working
396 """return status of files between two nodes or node and working
397 directory.
397 directory.
398
398
399 If other is None, compare this node with working directory.
399 If other is None, compare this node with working directory.
400
400
401 returns (modified, added, removed, deleted, unknown, ignored, clean)
401 returns (modified, added, removed, deleted, unknown, ignored, clean)
402 """
402 """
403
403
404 ctx1 = self
404 ctx1 = self
405 ctx2 = self._repo[other]
405 ctx2 = self._repo[other]
406
406
407 # This next code block is, admittedly, fragile logic that tests for
407 # This next code block is, admittedly, fragile logic that tests for
408 # reversing the contexts and wouldn't need to exist if it weren't for
408 # reversing the contexts and wouldn't need to exist if it weren't for
409 # the fast (and common) code path of comparing the working directory
409 # the fast (and common) code path of comparing the working directory
410 # with its first parent.
410 # with its first parent.
411 #
411 #
412 # What we're aiming for here is the ability to call:
412 # What we're aiming for here is the ability to call:
413 #
413 #
414 # workingctx.status(parentctx)
414 # workingctx.status(parentctx)
415 #
415 #
416 # If we always built the manifest for each context and compared those,
416 # If we always built the manifest for each context and compared those,
417 # then we'd be done. But the special case of the above call means we
417 # then we'd be done. But the special case of the above call means we
418 # just copy the manifest of the parent.
418 # just copy the manifest of the parent.
419 reversed = False
419 reversed = False
420 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
420 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
421 reversed = True
421 reversed = True
422 ctx1, ctx2 = ctx2, ctx1
422 ctx1, ctx2 = ctx2, ctx1
423
423
424 match = self._repo.narrowmatch(match)
424 match = self._repo.narrowmatch(match)
425 match = ctx2._matchstatus(ctx1, match)
425 match = ctx2._matchstatus(ctx1, match)
426 r = scmutil.status([], [], [], [], [], [], [])
426 r = scmutil.status([], [], [], [], [], [], [])
427 r = ctx2._buildstatus(
427 r = ctx2._buildstatus(
428 ctx1, r, match, listignored, listclean, listunknown
428 ctx1, r, match, listignored, listclean, listunknown
429 )
429 )
430
430
431 if reversed:
431 if reversed:
432 # Reverse added and removed. Clear deleted, unknown and ignored as
432 # Reverse added and removed. Clear deleted, unknown and ignored as
433 # these make no sense to reverse.
433 # these make no sense to reverse.
434 r = scmutil.status(
434 r = scmutil.status(
435 r.modified, r.removed, r.added, [], [], [], r.clean
435 r.modified, r.removed, r.added, [], [], [], r.clean
436 )
436 )
437
437
438 if listsubrepos:
438 if listsubrepos:
439 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
439 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
440 try:
440 try:
441 rev2 = ctx2.subrev(subpath)
441 rev2 = ctx2.subrev(subpath)
442 except KeyError:
442 except KeyError:
443 # A subrepo that existed in node1 was deleted between
443 # A subrepo that existed in node1 was deleted between
444 # node1 and node2 (inclusive). Thus, ctx2's substate
444 # node1 and node2 (inclusive). Thus, ctx2's substate
445 # won't contain that subpath. The best we can do ignore it.
445 # won't contain that subpath. The best we can do ignore it.
446 rev2 = None
446 rev2 = None
447 submatch = matchmod.subdirmatcher(subpath, match)
447 submatch = matchmod.subdirmatcher(subpath, match)
448 s = sub.status(
448 s = sub.status(
449 rev2,
449 rev2,
450 match=submatch,
450 match=submatch,
451 ignored=listignored,
451 ignored=listignored,
452 clean=listclean,
452 clean=listclean,
453 unknown=listunknown,
453 unknown=listunknown,
454 listsubrepos=True,
454 listsubrepos=True,
455 )
455 )
456 for k in (
456 for k in (
457 'modified',
457 'modified',
458 'added',
458 'added',
459 'removed',
459 'removed',
460 'deleted',
460 'deleted',
461 'unknown',
461 'unknown',
462 'ignored',
462 'ignored',
463 'clean',
463 'clean',
464 ):
464 ):
465 rfiles, sfiles = getattr(r, k), getattr(s, k)
465 rfiles, sfiles = getattr(r, k), getattr(s, k)
466 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
466 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
467
467
468 r.modified.sort()
468 r.modified.sort()
469 r.added.sort()
469 r.added.sort()
470 r.removed.sort()
470 r.removed.sort()
471 r.deleted.sort()
471 r.deleted.sort()
472 r.unknown.sort()
472 r.unknown.sort()
473 r.ignored.sort()
473 r.ignored.sort()
474 r.clean.sort()
474 r.clean.sort()
475
475
476 return r
476 return r
477
477
478 def mergestate(self, clean=False):
478 def mergestate(self, clean=False):
479 """Get a mergestate object for this context."""
479 """Get a mergestate object for this context."""
480 raise NotImplementedError(
480 raise NotImplementedError(
481 '%s does not implement mergestate()' % self.__class__
481 '%s does not implement mergestate()' % self.__class__
482 )
482 )
483
483
484
484
485 class changectx(basectx):
485 class changectx(basectx):
486 """A changecontext object makes access to data related to a particular
486 """A changecontext object makes access to data related to a particular
487 changeset convenient. It represents a read-only context already present in
487 changeset convenient. It represents a read-only context already present in
488 the repo."""
488 the repo."""
489
489
490 def __init__(self, repo, rev, node, maybe_filtered=True):
490 def __init__(self, repo, rev, node, maybe_filtered=True):
491 super(changectx, self).__init__(repo)
491 super(changectx, self).__init__(repo)
492 self._rev = rev
492 self._rev = rev
493 self._node = node
493 self._node = node
494 # When maybe_filtered is True, the revision might be affected by
494 # When maybe_filtered is True, the revision might be affected by
495 # changelog filtering and operation through the filtered changelog must be used.
495 # changelog filtering and operation through the filtered changelog must be used.
496 #
496 #
497 # When maybe_filtered is False, the revision has already been checked
497 # When maybe_filtered is False, the revision has already been checked
498 # against filtering and is not filtered. Operation through the
498 # against filtering and is not filtered. Operation through the
499 # unfiltered changelog might be used in some case.
499 # unfiltered changelog might be used in some case.
500 self._maybe_filtered = maybe_filtered
500 self._maybe_filtered = maybe_filtered
501
501
502 def __hash__(self):
502 def __hash__(self):
503 try:
503 try:
504 return hash(self._rev)
504 return hash(self._rev)
505 except AttributeError:
505 except AttributeError:
506 return id(self)
506 return id(self)
507
507
508 def __nonzero__(self):
508 def __nonzero__(self):
509 return self._rev != nullrev
509 return self._rev != nullrev
510
510
511 __bool__ = __nonzero__
511 __bool__ = __nonzero__
512
512
513 @propertycache
513 @propertycache
514 def _changeset(self):
514 def _changeset(self):
515 if self._maybe_filtered:
515 if self._maybe_filtered:
516 repo = self._repo
516 repo = self._repo
517 else:
517 else:
518 repo = self._repo.unfiltered()
518 repo = self._repo.unfiltered()
519 return repo.changelog.changelogrevision(self.rev())
519 return repo.changelog.changelogrevision(self.rev())
520
520
521 @propertycache
521 @propertycache
522 def _manifest(self):
522 def _manifest(self):
523 return self._manifestctx.read()
523 return self._manifestctx.read()
524
524
525 @property
525 @property
526 def _manifestctx(self):
526 def _manifestctx(self):
527 return self._repo.manifestlog[self._changeset.manifest]
527 return self._repo.manifestlog[self._changeset.manifest]
528
528
529 @propertycache
529 @propertycache
530 def _manifestdelta(self):
530 def _manifestdelta(self):
531 return self._manifestctx.readdelta()
531 return self._manifestctx.readdelta()
532
532
533 @propertycache
533 @propertycache
534 def _parents(self):
534 def _parents(self):
535 repo = self._repo
535 repo = self._repo
536 if self._maybe_filtered:
536 if self._maybe_filtered:
537 cl = repo.changelog
537 cl = repo.changelog
538 else:
538 else:
539 cl = repo.unfiltered().changelog
539 cl = repo.unfiltered().changelog
540
540
541 p1, p2 = cl.parentrevs(self._rev)
541 p1, p2 = cl.parentrevs(self._rev)
542 if p2 == nullrev:
542 if p2 == nullrev:
543 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
543 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
544 return [
544 return [
545 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
545 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
546 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
546 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
547 ]
547 ]
548
548
549 def changeset(self):
549 def changeset(self):
550 c = self._changeset
550 c = self._changeset
551 return (
551 return (
552 c.manifest,
552 c.manifest,
553 c.user,
553 c.user,
554 c.date,
554 c.date,
555 c.files,
555 c.files,
556 c.description,
556 c.description,
557 c.extra,
557 c.extra,
558 )
558 )
559
559
560 def manifestnode(self):
560 def manifestnode(self):
561 return self._changeset.manifest
561 return self._changeset.manifest
562
562
563 def user(self):
563 def user(self):
564 return self._changeset.user
564 return self._changeset.user
565
565
566 def date(self):
566 def date(self):
567 return self._changeset.date
567 return self._changeset.date
568
568
569 def files(self):
569 def files(self):
570 return self._changeset.files
570 return self._changeset.files
571
571
572 def filesmodified(self):
572 def filesmodified(self):
573 modified = set(self.files())
573 modified = set(self.files())
574 modified.difference_update(self.filesadded())
574 modified.difference_update(self.filesadded())
575 modified.difference_update(self.filesremoved())
575 modified.difference_update(self.filesremoved())
576 return sorted(modified)
576 return sorted(modified)
577
577
578 def filesadded(self):
578 def filesadded(self):
579 filesadded = self._changeset.filesadded
579 filesadded = self._changeset.filesadded
580 compute_on_none = True
580 compute_on_none = True
581 if self._repo.filecopiesmode == b'changeset-sidedata':
581 if self._repo.filecopiesmode == b'changeset-sidedata':
582 compute_on_none = False
582 compute_on_none = False
583 else:
583 else:
584 source = self._repo.ui.config(b'experimental', b'copies.read-from')
584 source = self._repo.ui.config(b'experimental', b'copies.read-from')
585 if source == b'changeset-only':
585 if source == b'changeset-only':
586 compute_on_none = False
586 compute_on_none = False
587 elif source != b'compatibility':
587 elif source != b'compatibility':
588 # filelog mode, ignore any changelog content
588 # filelog mode, ignore any changelog content
589 filesadded = None
589 filesadded = None
590 if filesadded is None:
590 if filesadded is None:
591 if compute_on_none:
591 if compute_on_none:
592 filesadded = copies.computechangesetfilesadded(self)
592 filesadded = copies.computechangesetfilesadded(self)
593 else:
593 else:
594 filesadded = []
594 filesadded = []
595 return filesadded
595 return filesadded
596
596
def filesremoved(self):
    """Return the list of files removed in this changeset.

    Mirrors filesadded(): the changeset-recorded value is used when the
    configured copy-tracing source allows it; otherwise it is recomputed
    (or an empty list is returned when recomputation is disabled).
    """
    recorded = self._changeset.filesremoved
    if self._repo.filecopiesmode == b'changeset-sidedata':
        # sidedata is authoritative: never fall back to recomputation
        fallback = False
    else:
        source = self._repo.ui.config(b'experimental', b'copies.read-from')
        fallback = source != b'changeset-only'
        if source not in (b'changeset-only', b'compatibility'):
            # filelog mode: ignore whatever the changelog recorded
            recorded = None
    if recorded is not None:
        return recorded
    if fallback:
        return copies.computechangesetfilesremoved(self)
    return []
615
615
@propertycache
def _copies(self):
    """Pair (p1copies, p2copies) of dicts mapping destination -> source.

    Prefers copy metadata recorded in the changeset when configuration
    allows it; otherwise falls back to the parent-class implementation,
    which reads the filelogs.
    """
    p1copies = self._changeset.p1copies
    p2copies = self._changeset.p2copies
    compute_on_none = True
    if self._repo.filecopiesmode == b'changeset-sidedata':
        compute_on_none = False
    else:
        source = self._repo.ui.config(b'experimental', b'copies.read-from')
        # If config says to get copy metadata only from changeset, then
        # return that, defaulting to {} if there was no copy metadata. In
        # compatibility mode, we return copy data from the changeset if it
        # was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        #
        # If we are in compatibility mode and there is no data in the
        # changeset, we get the copy metadata from the filelogs.
        #
        # Otherwise, when config said to read only from filelog, we get the
        # copy metadata from the filelogs.
        if source == b'changeset-only':
            compute_on_none = False
        elif source != b'compatibility':
            # filelog mode, ignore any changelog content
            p1copies = p2copies = None
    if p1copies is None:
        if compute_on_none:
            p1copies, p2copies = super(changectx, self)._copies
        else:
            # default each side independently to an empty dict
            if p1copies is None:
                p1copies = {}
            if p2copies is None:
                p2copies = {}
    return p1copies, p2copies
650
650
def description(self):
    """Return the commit message recorded in the changeset."""
    return self._changeset.description

def branch(self):
    """Return the branch name, converted to the local encoding."""
    return encoding.tolocal(self._changeset.extra.get(b"branch"))

def closesbranch(self):
    """True if this changeset closes its branch (b'close' in extras)."""
    return b'close' in self._changeset.extra

def extra(self):
    """Return a dict of extra information."""
    return self._changeset.extra

def tags(self):
    """Return a list of byte tag names"""
    return self._repo.nodetags(self._node)

def bookmarks(self):
    """Return a list of byte bookmark names."""
    return self._repo.nodebookmarks(self._node)

def phase(self):
    """Return the phase of this changeset (looked up via the phase cache)."""
    return self._repo._phasecache.phase(self._repo, self._rev)

def hidden(self):
    """True if this revision is filtered out of the 'visible' repo view."""
    return self._rev in repoview.filterrevs(self._repo, b'visible')

def isinmemory(self):
    """Changesets read from the changelog are never in-memory-only."""
    return False
680
680
def children(self):
    """return list of changectx contexts for each child changeset.

    This returns only the immediate child changesets. Use descendants() to
    recursively walk children.
    """
    c = self._repo.changelog.children(self._node)
    return [self._repo[x] for x in c]

def ancestors(self):
    """Lazily yield a changectx for every ancestor revision."""
    for a in self._repo.changelog.ancestors([self._rev]):
        yield self._repo[a]

def descendants(self):
    """Recursively yield all children of the changeset.

    For just the immediate children, use children()
    """
    for d in self._repo.changelog.descendants([self._rev]):
        yield self._repo[d]

def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset

    When fileid is not given, the file node is resolved from this
    changeset's manifest.
    """
    if fileid is None:
        fileid = self.filenode(path)
    return filectx(
        self._repo, path, fileid=fileid, changectx=self, filelog=filelog
    )
709
709
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor."""
    # deal with workingctxs
    n2 = c2._node
    if n2 is None:
        # c2 has no node of its own (working context); use its first parent
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        anc = nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # multiple candidates: let configuration pick one if it names a
        # revision that is among the candidate heads
        # experimental config: merge.preferancestor
        for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
            try:
                ctx = scmutil.revsymbol(self._repo, r)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        else:
            # no configured preference matched; fall back to revlog ancestor
            anc = self._repo.changelog.ancestor(self._node, n2)
        if warn:
            self._repo.ui.status(
                (
                    _(b"note: using %s as ancestor of %s and %s\n")
                    % (short(anc), short(self._node), short(n2))
                )
                + b''.join(
                    _(
                        b" alternatively, use --config "
                        b"merge.preferancestor=%s\n"
                    )
                    % short(n)
                    for n in sorted(cahs)
                    if n != anc
                )
            )
    return self._repo[anc]
754
754
def isancestorof(self, other):
    """True if this changeset is an ancestor of other"""
    return self._repo.changelog.isancestorrev(self._rev, other._rev)

def walk(self, match):
    '''Generates matching file names.'''

    # Wrap match.bad method to have message with nodeid
    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain about
        # paths into valid subrepos.
        if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
            return
        match.bad(fn, _(b'no such file in rev %s') % self)

    m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
    return self._manifest.walk(m)

def matches(self, match):
    """Alias for walk(): file names in this revision matching *match*."""
    return self.walk(match)
775
775
776
776
class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        # filelog (file revision log) for self._path, opened lazily
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        """Changelog revision this file context is associated with.

        Prefers an explicitly attached changectx; otherwise corrects the
        (possibly aliased) linkrev using a known descendant; finally falls
        back to the raw linkrev stored in the filelog.
        """
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        # node id of this file revision, looked up from either the explicit
        # file id or the associated changeset's manifest
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog revision number of this file revision
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        # repository-relative path; same as self._path for this class
        return self._path
815
815
def __nonzero__(self):
    """Truthiness: True when the file revision can be resolved."""
    try:
        self._filenode
        return True
    except error.LookupError:
        # file is missing
        return False

__bool__ = __nonzero__

def __bytes__(self):
    """Bytes form b"path@changeset" (b"path@???" when lookup fails)."""
    try:
        return b"%s@%s" % (self.path(), self._changectx)
    except error.LookupError:
        return b"%s@???" % self.path()

__str__ = encoding.strmethod(__bytes__)
833
833
def __repr__(self):
    """Native-string debugging representation: "<ClassName str(self)>"."""
    clsname = type(self).__name__
    return "<{} {}>".format(clsname, str(self))
836
836
def __hash__(self):
    """Hash on (path, filenode) when both resolve; identity otherwise."""
    try:
        key = (self._path, self._filenode)
    except AttributeError:
        # no usable identity for this context; fall back to object id
        return id(self)
    return hash(key)
842
842
def __eq__(self, other):
    """Equal when same concrete type with matching path and filenode."""
    try:
        if type(self) != type(other):
            return False
        return (
            self._path == other._path and self._filenode == other._filenode
        )
    except AttributeError:
        # either side lacks a path/filenode: not comparable, not equal
        return False
852
852
def __ne__(self, other):
    """Inverse of equality (delegates to ==)."""
    equal = self == other
    return not equal
855
855
def filerev(self):
    """Filelog revision number of this file revision."""
    return self._filerev

def filenode(self):
    """Filelog node id of this file revision."""
    return self._filenode

@propertycache
def _flags(self):
    # flags recorded for this path in the associated changeset
    return self._changectx.flags(self._path)

def flags(self):
    """Manifest flags for this file (contains b'x' and/or b'l')."""
    return self._flags

def filelog(self):
    """Filelog instance backing this file."""
    return self._filelog

def rev(self):
    """Changelog revision this file context is associated with."""
    return self._changeid

def linkrev(self):
    """Raw linkrev stored in the filelog.

    May point at a revision that shadows the real introduction; see
    introrev() for an ancestry-corrected value.
    """
    return self._filelog.linkrev(self._filerev)

def node(self):
    """Node id of the associated changeset."""
    return self._changectx.node()

def hex(self):
    """Hex id of the associated changeset."""
    return self._changectx.hex()

def user(self):
    """Author of the associated changeset."""
    return self._changectx.user()

def date(self):
    """Date of the associated changeset."""
    return self._changectx.date()

def files(self):
    """Files touched by the associated changeset."""
    return self._changectx.files()

def description(self):
    """Commit message of the associated changeset."""
    return self._changectx.description()

def branch(self):
    """Branch of the associated changeset."""
    return self._changectx.branch()

def extra(self):
    """Extra dict of the associated changeset."""
    return self._changectx.extra()

def phase(self):
    """Phase of the associated changeset."""
    return self._changectx.phase()

def phasestr(self):
    """Phase name of the associated changeset."""
    return self._changectx.phasestr()

def obsolete(self):
    """Whether the associated changeset is obsolete."""
    return self._changectx.obsolete()

def instabilities(self):
    """Instabilities of the associated changeset."""
    return self._changectx.instabilities()

def manifest(self):
    """Manifest of the associated changeset."""
    return self._changectx.manifest()

def changectx(self):
    """The change context this file context is attached to."""
    return self._changectx

def renamed(self):
    """Copy record for this file revision, falsy when not a copy.

    NOTE(review): judging by parents(), a truthy value looks like a
    (source path, file node) pair -- confirm against filelog.renamed().
    """
    return self._copied

def copysource(self):
    """Source path this file was copied from, or a falsy value."""
    return self._copied and self._copied[0]

def repo(self):
    """The repository this context belongs to."""
    return self._repo

def size(self):
    """Length of the file data in bytes."""
    return len(self.data())

def path(self):
    """Repository-relative path of this file."""
    return self._path
934
934
def isbinary(self):
    """True if the file content looks binary (best effort).

    An IOError while reading the data is treated as "not binary".
    """
    try:
        return stringutil.binary(self.data())
    except IOError:
        return False
940
940
def isexec(self):
    """True if the executable flag is set on this file."""
    return self.flags().find(b'x') != -1
943
943
def islink(self):
    """True if this file is a symbolic link."""
    fl = self.flags()
    return fl.find(b'l') != -1
946
946
def isabsent(self):
    """whether this filectx represents a file not in self._changectx

    This is mainly for merge code to detect change/delete conflicts. This is
    expected to be True for all subclasses of basectx."""
    return False

# When True on the *other* context, cmp() dispatches to that context's
# own comparison instead of the revlog-backed one below.
_customcmp = False
955
955
def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    if fctx._customcmp:
        # the other side implements its own comparison; defer to it
        return fctx.cmp(self)

    if self._filenode is None:
        raise error.ProgrammingError(
            b'filectx.cmp() must be reimplemented if not backed by revlog'
        )

    if fctx._filenode is None:
        if self._repo._encodefilterpats:
            # can't rely on size() because wdir content may be decoded
            return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() - 4 == fctx.size():
            # size() can match:
            # if file data starts with '\1\n', empty metadata block is
            # prepended, which adds 4 bytes to filelog.size().
            return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() == fctx.size():
            # size() matches: need to compare content
            return self._filelog.cmp(self._filenode, fctx.data())

    # size() differs
    return True
984
984
def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing
    this file revision.

    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    :stoprev: an optional revision to stop the walk at. If no introduction
              of this file content could be found before this floor
              revision, the function will returns "None" and stops its
              iteration.
    """
    repo = self._repo
    # use the unfiltered changelog so hidden revisions do not break the walk
    cl = repo.unfiltered().changelog
    mfl = repo.manifestlog
    # fetch the linkrev
    lkr = self.linkrev()
    if srcrev == lkr:
        # the raw linkrev is already the source revision: nothing to adjust
        return lkr
    # hack to reuse ancestor computation when searching for renames
    memberanc = getattr(self, '_ancestrycontext', None)
    iteranc = None
    if srcrev is None:
        # wctx case, used by workingfilectx during mergecopy
        revs = [p.rev() for p in self._repo[None].parents()]
        inclusive = True  # we skipped the real (revless) source
    else:
        revs = [srcrev]
    if memberanc is None:
        memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
    # check if this linkrev is an ancestor of srcrev
    if lkr not in memberanc:
        if iteranc is None:
            iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        fnode = self._filenode
        path = self._path
        for a in iteranc:
            if stoprev is not None and a < stoprev:
                # walked past the floor without finding the introduction
                return None
            ac = cl.read(a)  # get changeset data (we avoid object creation)
            if path in ac[3]:  # checking the 'files' field.
                # The file has been touched, check if the content is
                # similar to the one we search for.
                if fnode == mfl[ac[0]].readfast().get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if manifest uses a buggy file revision (not children of the
        # one it replaces) we could. Such a buggy situation will likely
        # result is crash somewhere else at to some point.
    return lkr
1037
1037
def isintroducedafter(self, changelogrev):
    """True if a filectx has been introduced after a given floor revision
    """
    # fast path: the raw linkrev already clears the floor
    if self.linkrev() >= changelogrev:
        return True
    intro = self._introrev(stoprev=changelogrev)
    return intro is not None and intro >= changelogrev
1047
1047
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it take into account the
    changeset the filectx was created from. It ensures the returned
    revision is one of its ancestors. This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    return self._introrev()

def _introrev(self, stoprev=None):
    """
    Same as `introrev` but, with an extra argument to limit changelog
    iteration range in some internal usecase.

    If `stoprev` is set, the `introrev` will not be searched past that
    `stoprev` revision and "None" might be returned. This is useful to
    limit the iteration range.
    """
    toprev = None
    attrs = vars(self)
    if '_changeid' in attrs:
        # We have a cached value already
        toprev = self._changeid
    elif '_changectx' in attrs:
        # We know which changelog entry we are coming from
        toprev = self._changectx.rev()

    if toprev is not None:
        return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
    elif '_descendantrev' in attrs:
        introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
        # be nice and cache the result of the computation
        if introrev is not None:
            self._changeid = introrev
        return introrev
    else:
        # no ancestry information at all: trust the raw linkrev
        return self.linkrev()
1087
1087
def introfilectx(self):
    """Return filectx having identical contents, but pointing to the
    changeset revision where this filectx was introduced"""
    introrev = self.introrev()
    if self.rev() == introrev:
        # already pointing at the introducing changeset
        return self
    return self.filectx(self.filenode(), changeid=introrev)
1095
1095
def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()"""
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    if '_changeid' in vars(self) or '_changectx' in vars(self):
        # If self is associated with a changeset (probably explicitly
        # fed), ensure the created filectx is associated with a
        # changeset that is an ancestor of self.changectx.
        # This lets us later use _adjustlinkrev to get a correct link.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in vars(self):
        # Otherwise propagate _descendantrev if we have one associated.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
1111
1111
def parents(self):
    """Return the parent filectxs of this file revision.

    Rename information recorded in the filelog replaces a null parent
    (see the comment below).
    """
    _path = self._path
    fl = self._filelog
    parents = self._filelog.parents(self._filenode)
    pl = [(_path, node, fl) for node in parents if node != nullid]

    r = fl.renamed(self._filenode)
    if r:
        # - In the simple rename case, both parent are nullid, pl is empty.
        # - In case of merge, only one of the parent is null id and should
        # be replaced with the rename information. This parent is -always-
        # the first one.
        #
        # As null id have always been filtered out in the previous list
        # comprehension, inserting to 0 will always result in "replacing
        # first nullid parent with rename information.
        pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

    return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1131
1131
1132 def p1(self):
1132 def p1(self):
1133 return self.parents()[0]
1133 return self.parents()[0]
1134
1134
1135 def p2(self):
1135 def p2(self):
1136 p = self.parents()
1136 p = self.parents()
1137 if len(p) == 2:
1137 if len(p) == 2:
1138 return p[1]
1138 return p[1]
1139 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1139 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1140
1140
1141 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1141 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1142 """Returns a list of annotateline objects for each line in the file
1142 """Returns a list of annotateline objects for each line in the file
1143
1143
1144 - line.fctx is the filectx of the node where that line was last changed
1144 - line.fctx is the filectx of the node where that line was last changed
1145 - line.lineno is the line number at the first appearance in the managed
1145 - line.lineno is the line number at the first appearance in the managed
1146 file
1146 file
1147 - line.text is the data on that line (including newline character)
1147 - line.text is the data on that line (including newline character)
1148 """
1148 """
1149 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1149 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1150
1150
1151 def parents(f):
1151 def parents(f):
1152 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1152 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1153 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1153 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1154 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1154 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1155 # isn't an ancestor of the srcrev.
1155 # isn't an ancestor of the srcrev.
1156 f._changeid
1156 f._changeid
1157 pl = f.parents()
1157 pl = f.parents()
1158
1158
1159 # Don't return renamed parents if we aren't following.
1159 # Don't return renamed parents if we aren't following.
1160 if not follow:
1160 if not follow:
1161 pl = [p for p in pl if p.path() == f.path()]
1161 pl = [p for p in pl if p.path() == f.path()]
1162
1162
1163 # renamed filectx won't have a filelog yet, so set it
1163 # renamed filectx won't have a filelog yet, so set it
1164 # from the cache to save time
1164 # from the cache to save time
1165 for p in pl:
1165 for p in pl:
1166 if not '_filelog' in p.__dict__:
1166 if not '_filelog' in p.__dict__:
1167 p._filelog = getlog(p.path())
1167 p._filelog = getlog(p.path())
1168
1168
1169 return pl
1169 return pl
1170
1170
1171 # use linkrev to find the first changeset where self appeared
1171 # use linkrev to find the first changeset where self appeared
1172 base = self.introfilectx()
1172 base = self.introfilectx()
1173 if getattr(base, '_ancestrycontext', None) is None:
1173 if getattr(base, '_ancestrycontext', None) is None:
1174 # it is safe to use an unfiltered repository here because we are
1174 # it is safe to use an unfiltered repository here because we are
1175 # walking ancestors only.
1175 # walking ancestors only.
1176 cl = self._repo.unfiltered().changelog
1176 cl = self._repo.unfiltered().changelog
1177 if base.rev() is None:
1177 if base.rev() is None:
1178 # wctx is not inclusive, but works because _ancestrycontext
1178 # wctx is not inclusive, but works because _ancestrycontext
1179 # is used to test filelog revisions
1179 # is used to test filelog revisions
1180 ac = cl.ancestors(
1180 ac = cl.ancestors(
1181 [p.rev() for p in base.parents()], inclusive=True
1181 [p.rev() for p in base.parents()], inclusive=True
1182 )
1182 )
1183 else:
1183 else:
1184 ac = cl.ancestors([base.rev()], inclusive=True)
1184 ac = cl.ancestors([base.rev()], inclusive=True)
1185 base._ancestrycontext = ac
1185 base._ancestrycontext = ac
1186
1186
1187 return dagop.annotate(
1187 return dagop.annotate(
1188 base, parents, skiprevs=skiprevs, diffopts=diffopts
1188 base, parents, skiprevs=skiprevs, diffopts=diffopts
1189 )
1189 )
1190
1190
1191 def ancestors(self, followfirst=False):
1191 def ancestors(self, followfirst=False):
1192 visit = {}
1192 visit = {}
1193 c = self
1193 c = self
1194 if followfirst:
1194 if followfirst:
1195 cut = 1
1195 cut = 1
1196 else:
1196 else:
1197 cut = None
1197 cut = None
1198
1198
1199 while True:
1199 while True:
1200 for parent in c.parents()[:cut]:
1200 for parent in c.parents()[:cut]:
1201 visit[(parent.linkrev(), parent.filenode())] = parent
1201 visit[(parent.linkrev(), parent.filenode())] = parent
1202 if not visit:
1202 if not visit:
1203 break
1203 break
1204 c = visit.pop(max(visit))
1204 c = visit.pop(max(visit))
1205 yield c
1205 yield c
1206
1206
1207 def decodeddata(self):
1207 def decodeddata(self):
1208 """Returns `data()` after running repository decoding filters.
1208 """Returns `data()` after running repository decoding filters.
1209
1209
1210 This is often equivalent to how the data would be expressed on disk.
1210 This is often equivalent to how the data would be expressed on disk.
1211 """
1211 """
1212 return self._repo.wwritedata(self.path(), self.data())
1212 return self._repo.wwritedata(self.path(), self.data())
1213
1213
1214
1214
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of pinning the revision must be supplied
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision. In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository. This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not ensure the behavior is correct. However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should
            # be considered when solving the linkrev issues is on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw revision data (revlog transforms not applied)."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy for
        censored nodes."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        path = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                # the same file node in a parent means this is no new copy
                if fnode == p.filenode(path):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in self._filelog.children(self._filenode)
        ]
1338
1338
1339
1339
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # only seed the lazily-computed attributes that were supplied;
        # the @propertycache fallbacks below handle the rest
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        # devel.default-date lets tests pin the commit date
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        st = self._status
        return sorted(st.modified + st.added + st.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Return the union of the parents' bookmarks."""
        marks = []
        for p in self.parents():
            marks.extend(p.bookmarks())
        return marks

    def phase(self):
        """Return the phase a commit of this context would get: the
        maximum of the configured new-commit phase and the parents'."""
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        # prefer the manifest when it has already been computed
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        """Yield the parent contexts, then every further ancestor."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1504
1504
1505
1505
1506 class workingctx(committablectx):
1506 class workingctx(committablectx):
1507 """A workingctx object makes access to data related to
1507 """A workingctx object makes access to data related to
1508 the current working directory convenient.
1508 the current working directory convenient.
1509 date - any valid date string or (unixtime, offset), or None.
1509 date - any valid date string or (unixtime, offset), or None.
1510 user - username string, or None.
1510 user - username string, or None.
1511 extra - a dictionary of extra values, or None.
1511 extra - a dictionary of extra values, or None.
1512 changes - a list of file lists as returned by localrepo.status()
1512 changes - a list of file lists as returned by localrepo.status()
1513 or None to use the repository status.
1513 or None to use the repository status.
1514 """
1514 """
1515
1515
1516 def __init__(
1516 def __init__(
1517 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1517 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1518 ):
1518 ):
1519 branch = None
1519 branch = None
1520 if not extra or b'branch' not in extra:
1520 if not extra or b'branch' not in extra:
1521 try:
1521 try:
1522 branch = repo.dirstate.branch()
1522 branch = repo.dirstate.branch()
1523 except UnicodeDecodeError:
1523 except UnicodeDecodeError:
1524 raise error.Abort(_(b'branch name not in UTF-8!'))
1524 raise error.Abort(_(b'branch name not in UTF-8!'))
1525 super(workingctx, self).__init__(
1525 super(workingctx, self).__init__(
1526 repo, text, user, date, extra, changes, branch=branch
1526 repo, text, user, date, extra, changes, branch=branch
1527 )
1527 )
1528
1528
1529 def __iter__(self):
1529 def __iter__(self):
1530 d = self._repo.dirstate
1530 d = self._repo.dirstate
1531 for f in d:
1531 for f in d:
1532 if d[f] != b'r':
1532 if d[f] != b'r':
1533 yield f
1533 yield f
1534
1534
1535 def __contains__(self, key):
1535 def __contains__(self, key):
1536 return self._repo.dirstate[key] not in b"?r"
1536 return self._repo.dirstate[key] not in b"?r"
1537
1537
1538 def hex(self):
1538 def hex(self):
1539 return wdirhex
1539 return wdirhex
1540
1540
1541 @propertycache
1541 @propertycache
1542 def _parents(self):
1542 def _parents(self):
1543 p = self._repo.dirstate.parents()
1543 p = self._repo.dirstate.parents()
1544 if p[1] == nullid:
1544 if p[1] == nullid:
1545 p = p[:-1]
1545 p = p[:-1]
1546 # use unfiltered repo to delay/avoid loading obsmarkers
1546 # use unfiltered repo to delay/avoid loading obsmarkers
1547 unfi = self._repo.unfiltered()
1547 unfi = self._repo.unfiltered()
1548 return [
1548 return [
1549 changectx(
1549 changectx(
1550 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1550 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1551 )
1551 )
1552 for n in p
1552 for n in p
1553 ]
1553 ]
1554
1554
1555 def setparents(self, p1node, p2node=nullid):
1555 def setparents(self, p1node, p2node=nullid):
1556 dirstate = self._repo.dirstate
1556 dirstate = self._repo.dirstate
1557 with dirstate.parentchange():
1557 with dirstate.parentchange():
1558 copies = dirstate.setparents(p1node, p2node)
1558 copies = dirstate.setparents(p1node, p2node)
1559 pctx = self._repo[p1node]
1559 pctx = self._repo[p1node]
1560 if copies:
1560 if copies:
1561 # Adjust copy records, the dirstate cannot do it, it
1561 # Adjust copy records, the dirstate cannot do it, it
1562 # requires access to parents manifests. Preserve them
1562 # requires access to parents manifests. Preserve them
1563 # only for entries added to first parent.
1563 # only for entries added to first parent.
1564 for f in copies:
1564 for f in copies:
1565 if f not in pctx and copies[f] in pctx:
1565 if f not in pctx and copies[f] in pctx:
1566 dirstate.copy(copies[f], f)
1566 dirstate.copy(copies[f], f)
1567 if p2node == nullid:
1567 if p2node == nullid:
1568 for f, s in sorted(dirstate.copies().items()):
1568 for f, s in sorted(dirstate.copies().items()):
1569 if f not in pctx and s not in pctx:
1569 if f not in pctx and s not in pctx:
1570 dirstate.copy(None, f)
1570 dirstate.copy(None, f)
1571
1571
1572 def _fileinfo(self, path):
1572 def _fileinfo(self, path):
1573 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1573 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1574 self._manifest
1574 self._manifest
1575 return super(workingctx, self)._fileinfo(path)
1575 return super(workingctx, self)._fileinfo(path)
1576
1576
1577 def _buildflagfunc(self):
1577 def _buildflagfunc(self):
1578 # Create a fallback function for getting file flags when the
1578 # Create a fallback function for getting file flags when the
1579 # filesystem doesn't support them
1579 # filesystem doesn't support them
1580
1580
1581 copiesget = self._repo.dirstate.copies().get
1581 copiesget = self._repo.dirstate.copies().get
1582 parents = self.parents()
1582 parents = self.parents()
1583 if len(parents) < 2:
1583 if len(parents) < 2:
1584 # when we have one parent, it's easy: copy from parent
1584 # when we have one parent, it's easy: copy from parent
1585 man = parents[0].manifest()
1585 man = parents[0].manifest()
1586
1586
1587 def func(f):
1587 def func(f):
1588 f = copiesget(f, f)
1588 f = copiesget(f, f)
1589 return man.flags(f)
1589 return man.flags(f)
1590
1590
1591 else:
1591 else:
1592 # merges are tricky: we try to reconstruct the unstored
1592 # merges are tricky: we try to reconstruct the unstored
1593 # result from the merge (issue1802)
1593 # result from the merge (issue1802)
1594 p1, p2 = parents
1594 p1, p2 = parents
1595 pa = p1.ancestor(p2)
1595 pa = p1.ancestor(p2)
1596 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1596 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1597
1597
1598 def func(f):
1598 def func(f):
1599 f = copiesget(f, f) # may be wrong for merges with copies
1599 f = copiesget(f, f) # may be wrong for merges with copies
1600 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1600 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1601 if fl1 == fl2:
1601 if fl1 == fl2:
1602 return fl1
1602 return fl1
1603 if fl1 == fla:
1603 if fl1 == fla:
1604 return fl2
1604 return fl2
1605 if fl2 == fla:
1605 if fl2 == fla:
1606 return fl1
1606 return fl1
1607 return b'' # punt for conflicts
1607 return b'' # punt for conflicts
1608
1608
1609 return func
1609 return func
1610
1610
1611 @propertycache
1611 @propertycache
1612 def _flagfunc(self):
1612 def _flagfunc(self):
1613 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1613 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1614
1614
1615 def flags(self, path):
1615 def flags(self, path):
1616 try:
1616 try:
1617 return self._flagfunc(path)
1617 return self._flagfunc(path)
1618 except OSError:
1618 except OSError:
1619 return b''
1619 return b''
1620
1620
1621 def filectx(self, path, filelog=None):
1621 def filectx(self, path, filelog=None):
1622 """get a file context from the working directory"""
1622 """get a file context from the working directory"""
1623 return workingfilectx(
1623 return workingfilectx(
1624 self._repo, path, workingctx=self, filelog=filelog
1624 self._repo, path, workingctx=self, filelog=filelog
1625 )
1625 )
1626
1626
1627 def dirty(self, missing=False, merge=True, branch=True):
1627 def dirty(self, missing=False, merge=True, branch=True):
1628 """check whether a working directory is modified"""
1628 """check whether a working directory is modified"""
1629 # check subrepos first
1629 # check subrepos first
1630 for s in sorted(self.substate):
1630 for s in sorted(self.substate):
1631 if self.sub(s).dirty(missing=missing):
1631 if self.sub(s).dirty(missing=missing):
1632 return True
1632 return True
1633 # check current working dir
1633 # check current working dir
1634 return (
1634 return (
1635 (merge and self.p2())
1635 (merge and self.p2())
1636 or (branch and self.branch() != self.p1().branch())
1636 or (branch and self.branch() != self.p1().branch())
1637 or self.modified()
1637 or self.modified()
1638 or self.added()
1638 or self.added()
1639 or self.removed()
1639 or self.removed()
1640 or (missing and self.deleted())
1640 or (missing and self.deleted())
1641 )
1641 )
1642
1642
1643 def add(self, list, prefix=b""):
1643 def add(self, list, prefix=b""):
1644 with self._repo.wlock():
1644 with self._repo.wlock():
1645 ui, ds = self._repo.ui, self._repo.dirstate
1645 ui, ds = self._repo.ui, self._repo.dirstate
1646 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1646 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1647 rejected = []
1647 rejected = []
1648 lstat = self._repo.wvfs.lstat
1648 lstat = self._repo.wvfs.lstat
1649 for f in list:
1649 for f in list:
1650 # ds.pathto() returns an absolute file when this is invoked from
1650 # ds.pathto() returns an absolute file when this is invoked from
1651 # the keyword extension. That gets flagged as non-portable on
1651 # the keyword extension. That gets flagged as non-portable on
1652 # Windows, since it contains the drive letter and colon.
1652 # Windows, since it contains the drive letter and colon.
1653 scmutil.checkportable(ui, os.path.join(prefix, f))
1653 scmutil.checkportable(ui, os.path.join(prefix, f))
1654 try:
1654 try:
1655 st = lstat(f)
1655 st = lstat(f)
1656 except OSError:
1656 except OSError:
1657 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1657 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1658 rejected.append(f)
1658 rejected.append(f)
1659 continue
1659 continue
1660 limit = ui.configbytes(b'ui', b'large-file-limit')
1660 limit = ui.configbytes(b'ui', b'large-file-limit')
1661 if limit != 0 and st.st_size > limit:
1661 if limit != 0 and st.st_size > limit:
1662 ui.warn(
1662 ui.warn(
1663 _(
1663 _(
1664 b"%s: up to %d MB of RAM may be required "
1664 b"%s: up to %d MB of RAM may be required "
1665 b"to manage this file\n"
1665 b"to manage this file\n"
1666 b"(use 'hg revert %s' to cancel the "
1666 b"(use 'hg revert %s' to cancel the "
1667 b"pending addition)\n"
1667 b"pending addition)\n"
1668 )
1668 )
1669 % (f, 3 * st.st_size // 1000000, uipath(f))
1669 % (f, 3 * st.st_size // 1000000, uipath(f))
1670 )
1670 )
1671 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1671 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1672 ui.warn(
1672 ui.warn(
1673 _(
1673 _(
1674 b"%s not added: only files and symlinks "
1674 b"%s not added: only files and symlinks "
1675 b"supported currently\n"
1675 b"supported currently\n"
1676 )
1676 )
1677 % uipath(f)
1677 % uipath(f)
1678 )
1678 )
1679 rejected.append(f)
1679 rejected.append(f)
1680 elif ds[f] in b'amn':
1680 elif ds[f] in b'amn':
1681 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1681 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1682 elif ds[f] == b'r':
1682 elif ds[f] == b'r':
1683 ds.normallookup(f)
1683 ds.normallookup(f)
1684 else:
1684 else:
1685 ds.add(f)
1685 ds.add(f)
1686 return rejected
1686 return rejected
1687
1687
1688 def forget(self, files, prefix=b""):
1688 def forget(self, files, prefix=b""):
1689 with self._repo.wlock():
1689 with self._repo.wlock():
1690 ds = self._repo.dirstate
1690 ds = self._repo.dirstate
1691 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1691 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1692 rejected = []
1692 rejected = []
1693 for f in files:
1693 for f in files:
1694 if f not in ds:
1694 if f not in ds:
1695 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1695 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1696 rejected.append(f)
1696 rejected.append(f)
1697 elif ds[f] != b'a':
1697 elif ds[f] != b'a':
1698 ds.remove(f)
1698 ds.remove(f)
1699 else:
1699 else:
1700 ds.drop(f)
1700 ds.drop(f)
1701 return rejected
1701 return rejected
1702
1702
1703 def copy(self, source, dest):
1703 def copy(self, source, dest):
1704 try:
1704 try:
1705 st = self._repo.wvfs.lstat(dest)
1705 st = self._repo.wvfs.lstat(dest)
1706 except OSError as err:
1706 except OSError as err:
1707 if err.errno != errno.ENOENT:
1707 if err.errno != errno.ENOENT:
1708 raise
1708 raise
1709 self._repo.ui.warn(
1709 self._repo.ui.warn(
1710 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1710 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1711 )
1711 )
1712 return
1712 return
1713 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1713 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1714 self._repo.ui.warn(
1714 self._repo.ui.warn(
1715 _(b"copy failed: %s is not a file or a symbolic link\n")
1715 _(b"copy failed: %s is not a file or a symbolic link\n")
1716 % self._repo.dirstate.pathto(dest)
1716 % self._repo.dirstate.pathto(dest)
1717 )
1717 )
1718 else:
1718 else:
1719 with self._repo.wlock():
1719 with self._repo.wlock():
1720 ds = self._repo.dirstate
1720 ds = self._repo.dirstate
1721 if ds[dest] in b'?':
1721 if ds[dest] in b'?':
1722 ds.add(dest)
1722 ds.add(dest)
1723 elif ds[dest] in b'r':
1723 elif ds[dest] in b'r':
1724 ds.normallookup(dest)
1724 ds.normallookup(dest)
1725 ds.copy(source, dest)
1725 ds.copy(source, dest)
1726
1726
1727 def match(
1727 def match(
1728 self,
1728 self,
1729 pats=None,
1729 pats=None,
1730 include=None,
1730 include=None,
1731 exclude=None,
1731 exclude=None,
1732 default=b'glob',
1732 default=b'glob',
1733 listsubrepos=False,
1733 listsubrepos=False,
1734 badfn=None,
1734 badfn=None,
1735 cwd=None,
1735 cwd=None,
1736 ):
1736 ):
1737 r = self._repo
1737 r = self._repo
1738 if not cwd:
1738 if not cwd:
1739 cwd = r.getcwd()
1739 cwd = r.getcwd()
1740
1740
1741 # Only a case insensitive filesystem needs magic to translate user input
1741 # Only a case insensitive filesystem needs magic to translate user input
1742 # to actual case in the filesystem.
1742 # to actual case in the filesystem.
1743 icasefs = not util.fscasesensitive(r.root)
1743 icasefs = not util.fscasesensitive(r.root)
1744 return matchmod.match(
1744 return matchmod.match(
1745 r.root,
1745 r.root,
1746 cwd,
1746 cwd,
1747 pats,
1747 pats,
1748 include,
1748 include,
1749 exclude,
1749 exclude,
1750 default,
1750 default,
1751 auditor=r.auditor,
1751 auditor=r.auditor,
1752 ctx=self,
1752 ctx=self,
1753 listsubrepos=listsubrepos,
1753 listsubrepos=listsubrepos,
1754 badfn=badfn,
1754 badfn=badfn,
1755 icasefs=icasefs,
1755 icasefs=icasefs,
1756 )
1756 )
1757
1757
1758 def _filtersuspectsymlink(self, files):
1758 def _filtersuspectsymlink(self, files):
1759 if not files or self._repo.dirstate._checklink:
1759 if not files or self._repo.dirstate._checklink:
1760 return files
1760 return files
1761
1761
1762 # Symlink placeholders may get non-symlink-like contents
1762 # Symlink placeholders may get non-symlink-like contents
1763 # via user error or dereferencing by NFS or Samba servers,
1763 # via user error or dereferencing by NFS or Samba servers,
1764 # so we filter out any placeholders that don't look like a
1764 # so we filter out any placeholders that don't look like a
1765 # symlink
1765 # symlink
1766 sane = []
1766 sane = []
1767 for f in files:
1767 for f in files:
1768 if self.flags(f) == b'l':
1768 if self.flags(f) == b'l':
1769 d = self[f].data()
1769 d = self[f].data()
1770 if (
1770 if (
1771 d == b''
1771 d == b''
1772 or len(d) >= 1024
1772 or len(d) >= 1024
1773 or b'\n' in d
1773 or b'\n' in d
1774 or stringutil.binary(d)
1774 or stringutil.binary(d)
1775 ):
1775 ):
1776 self._repo.ui.debug(
1776 self._repo.ui.debug(
1777 b'ignoring suspect symlink placeholder "%s"\n' % f
1777 b'ignoring suspect symlink placeholder "%s"\n' % f
1778 )
1778 )
1779 continue
1779 continue
1780 sane.append(f)
1780 sane.append(f)
1781 return sane
1781 return sane
1782
1782
1783 def _checklookup(self, files):
1783 def _checklookup(self, files):
1784 # check for any possibly clean files
1784 # check for any possibly clean files
1785 if not files:
1785 if not files:
1786 return [], [], []
1786 return [], [], []
1787
1787
1788 modified = []
1788 modified = []
1789 deleted = []
1789 deleted = []
1790 fixup = []
1790 fixup = []
1791 pctx = self._parents[0]
1791 pctx = self._parents[0]
1792 # do a full compare of any files that might have changed
1792 # do a full compare of any files that might have changed
1793 for f in sorted(files):
1793 for f in sorted(files):
1794 try:
1794 try:
1795 # This will return True for a file that got replaced by a
1795 # This will return True for a file that got replaced by a
1796 # directory in the interim, but fixing that is pretty hard.
1796 # directory in the interim, but fixing that is pretty hard.
1797 if (
1797 if (
1798 f not in pctx
1798 f not in pctx
1799 or self.flags(f) != pctx.flags(f)
1799 or self.flags(f) != pctx.flags(f)
1800 or pctx[f].cmp(self[f])
1800 or pctx[f].cmp(self[f])
1801 ):
1801 ):
1802 modified.append(f)
1802 modified.append(f)
1803 else:
1803 else:
1804 fixup.append(f)
1804 fixup.append(f)
1805 except (IOError, OSError):
1805 except (IOError, OSError):
1806 # A file become inaccessible in between? Mark it as deleted,
1806 # A file become inaccessible in between? Mark it as deleted,
1807 # matching dirstate behavior (issue5584).
1807 # matching dirstate behavior (issue5584).
1808 # The dirstate has more complex behavior around whether a
1808 # The dirstate has more complex behavior around whether a
1809 # missing file matches a directory, etc, but we don't need to
1809 # missing file matches a directory, etc, but we don't need to
1810 # bother with that: if f has made it to this point, we're sure
1810 # bother with that: if f has made it to this point, we're sure
1811 # it's in the dirstate.
1811 # it's in the dirstate.
1812 deleted.append(f)
1812 deleted.append(f)
1813
1813
1814 return modified, deleted, fixup
1814 return modified, deleted, fixup
1815
1815
1816 def _poststatusfixup(self, status, fixup):
1816 def _poststatusfixup(self, status, fixup):
1817 """update dirstate for files that are actually clean"""
1817 """update dirstate for files that are actually clean"""
1818 poststatus = self._repo.postdsstatus()
1818 poststatus = self._repo.postdsstatus()
1819 if fixup or poststatus:
1819 if fixup or poststatus:
1820 try:
1820 try:
1821 oldid = self._repo.dirstate.identity()
1821 oldid = self._repo.dirstate.identity()
1822
1822
1823 # updating the dirstate is optional
1823 # updating the dirstate is optional
1824 # so we don't wait on the lock
1824 # so we don't wait on the lock
1825 # wlock can invalidate the dirstate, so cache normal _after_
1825 # wlock can invalidate the dirstate, so cache normal _after_
1826 # taking the lock
1826 # taking the lock
1827 with self._repo.wlock(False):
1827 with self._repo.wlock(False):
1828 if self._repo.dirstate.identity() == oldid:
1828 if self._repo.dirstate.identity() == oldid:
1829 if fixup:
1829 if fixup:
1830 normal = self._repo.dirstate.normal
1830 normal = self._repo.dirstate.normal
1831 for f in fixup:
1831 for f in fixup:
1832 normal(f)
1832 normal(f)
1833 # write changes out explicitly, because nesting
1833 # write changes out explicitly, because nesting
1834 # wlock at runtime may prevent 'wlock.release()'
1834 # wlock at runtime may prevent 'wlock.release()'
1835 # after this block from doing so for subsequent
1835 # after this block from doing so for subsequent
1836 # changing files
1836 # changing files
1837 tr = self._repo.currenttransaction()
1837 tr = self._repo.currenttransaction()
1838 self._repo.dirstate.write(tr)
1838 self._repo.dirstate.write(tr)
1839
1839
1840 if poststatus:
1840 if poststatus:
1841 for ps in poststatus:
1841 for ps in poststatus:
1842 ps(self, status)
1842 ps(self, status)
1843 else:
1843 else:
1844 # in this case, writing changes out breaks
1844 # in this case, writing changes out breaks
1845 # consistency, because .hg/dirstate was
1845 # consistency, because .hg/dirstate was
1846 # already changed simultaneously after last
1846 # already changed simultaneously after last
1847 # caching (see also issue5584 for detail)
1847 # caching (see also issue5584 for detail)
1848 self._repo.ui.debug(
1848 self._repo.ui.debug(
1849 b'skip updating dirstate: identity mismatch\n'
1849 b'skip updating dirstate: identity mismatch\n'
1850 )
1850 )
1851 except error.LockError:
1851 except error.LockError:
1852 pass
1852 pass
1853 finally:
1853 finally:
1854 # Even if the wlock couldn't be grabbed, clear out the list.
1854 # Even if the wlock couldn't be grabbed, clear out the list.
1855 self._repo.clearpostdsstatus()
1855 self._repo.clearpostdsstatus()
1856
1856
1857 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1857 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1858 '''Gets the status from the dirstate -- internal use only.'''
1858 '''Gets the status from the dirstate -- internal use only.'''
1859 subrepos = []
1859 subrepos = []
1860 if b'.hgsub' in self:
1860 if b'.hgsub' in self:
1861 subrepos = sorted(self.substate)
1861 subrepos = sorted(self.substate)
1862 cmp, s = self._repo.dirstate.status(
1862 cmp, s = self._repo.dirstate.status(
1863 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1863 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1864 )
1864 )
1865
1865
1866 # check for any possibly clean files
1866 # check for any possibly clean files
1867 fixup = []
1867 fixup = []
1868 if cmp:
1868 if cmp:
1869 modified2, deleted2, fixup = self._checklookup(cmp)
1869 modified2, deleted2, fixup = self._checklookup(cmp)
1870 s.modified.extend(modified2)
1870 s.modified.extend(modified2)
1871 s.deleted.extend(deleted2)
1871 s.deleted.extend(deleted2)
1872
1872
1873 if fixup and clean:
1873 if fixup and clean:
1874 s.clean.extend(fixup)
1874 s.clean.extend(fixup)
1875
1875
1876 self._poststatusfixup(s, fixup)
1876 self._poststatusfixup(s, fixup)
1877
1877
1878 if match.always():
1878 if match.always():
1879 # cache for performance
1879 # cache for performance
1880 if s.unknown or s.ignored or s.clean:
1880 if s.unknown or s.ignored or s.clean:
1881 # "_status" is cached with list*=False in the normal route
1881 # "_status" is cached with list*=False in the normal route
1882 self._status = scmutil.status(
1882 self._status = scmutil.status(
1883 s.modified, s.added, s.removed, s.deleted, [], [], []
1883 s.modified, s.added, s.removed, s.deleted, [], [], []
1884 )
1884 )
1885 else:
1885 else:
1886 self._status = s
1886 self._status = s
1887
1887
1888 return s
1888 return s
1889
1889
1890 @propertycache
1890 @propertycache
1891 def _copies(self):
1891 def _copies(self):
1892 p1copies = {}
1892 p1copies = {}
1893 p2copies = {}
1893 p2copies = {}
1894 parents = self._repo.dirstate.parents()
1894 parents = self._repo.dirstate.parents()
1895 p1manifest = self._repo[parents[0]].manifest()
1895 p1manifest = self._repo[parents[0]].manifest()
1896 p2manifest = self._repo[parents[1]].manifest()
1896 p2manifest = self._repo[parents[1]].manifest()
1897 changedset = set(self.added()) | set(self.modified())
1897 changedset = set(self.added()) | set(self.modified())
1898 narrowmatch = self._repo.narrowmatch()
1898 narrowmatch = self._repo.narrowmatch()
1899 for dst, src in self._repo.dirstate.copies().items():
1899 for dst, src in self._repo.dirstate.copies().items():
1900 if dst not in changedset or not narrowmatch(dst):
1900 if dst not in changedset or not narrowmatch(dst):
1901 continue
1901 continue
1902 if src in p1manifest:
1902 if src in p1manifest:
1903 p1copies[dst] = src
1903 p1copies[dst] = src
1904 elif src in p2manifest:
1904 elif src in p2manifest:
1905 p2copies[dst] = src
1905 p2copies[dst] = src
1906 return p1copies, p2copies
1906 return p1copies, p2copies
1907
1907
1908 @propertycache
1908 @propertycache
1909 def _manifest(self):
1909 def _manifest(self):
1910 """generate a manifest corresponding to the values in self._status
1910 """generate a manifest corresponding to the values in self._status
1911
1911
1912 This reuse the file nodeid from parent, but we use special node
1912 This reuse the file nodeid from parent, but we use special node
1913 identifiers for added and modified files. This is used by manifests
1913 identifiers for added and modified files. This is used by manifests
1914 merge to see that files are different and by update logic to avoid
1914 merge to see that files are different and by update logic to avoid
1915 deleting newly added files.
1915 deleting newly added files.
1916 """
1916 """
1917 return self._buildstatusmanifest(self._status)
1917 return self._buildstatusmanifest(self._status)
1918
1918
1919 def _buildstatusmanifest(self, status):
1919 def _buildstatusmanifest(self, status):
1920 """Builds a manifest that includes the given status results."""
1920 """Builds a manifest that includes the given status results."""
1921 parents = self.parents()
1921 parents = self.parents()
1922
1922
1923 man = parents[0].manifest().copy()
1923 man = parents[0].manifest().copy()
1924
1924
1925 ff = self._flagfunc
1925 ff = self._flagfunc
1926 for i, l in (
1926 for i, l in (
1927 (addednodeid, status.added),
1927 (addednodeid, status.added),
1928 (modifiednodeid, status.modified),
1928 (modifiednodeid, status.modified),
1929 ):
1929 ):
1930 for f in l:
1930 for f in l:
1931 man[f] = i
1931 man[f] = i
1932 try:
1932 try:
1933 man.setflag(f, ff(f))
1933 man.setflag(f, ff(f))
1934 except OSError:
1934 except OSError:
1935 pass
1935 pass
1936
1936
1937 for f in status.deleted + status.removed:
1937 for f in status.deleted + status.removed:
1938 if f in man:
1938 if f in man:
1939 del man[f]
1939 del man[f]
1940
1940
1941 return man
1941 return man
1942
1942
1943 def _buildstatus(
1943 def _buildstatus(
1944 self, other, s, match, listignored, listclean, listunknown
1944 self, other, s, match, listignored, listclean, listunknown
1945 ):
1945 ):
1946 """build a status with respect to another context
1946 """build a status with respect to another context
1947
1947
1948 This includes logic for maintaining the fast path of status when
1948 This includes logic for maintaining the fast path of status when
1949 comparing the working directory against its parent, which is to skip
1949 comparing the working directory against its parent, which is to skip
1950 building a new manifest if self (working directory) is not comparing
1950 building a new manifest if self (working directory) is not comparing
1951 against its parent (repo['.']).
1951 against its parent (repo['.']).
1952 """
1952 """
1953 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1953 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1954 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1954 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1955 # might have accidentally ended up with the entire contents of the file
1955 # might have accidentally ended up with the entire contents of the file
1956 # they are supposed to be linking to.
1956 # they are supposed to be linking to.
1957 s.modified[:] = self._filtersuspectsymlink(s.modified)
1957 s.modified[:] = self._filtersuspectsymlink(s.modified)
1958 if other != self._repo[b'.']:
1958 if other != self._repo[b'.']:
1959 s = super(workingctx, self)._buildstatus(
1959 s = super(workingctx, self)._buildstatus(
1960 other, s, match, listignored, listclean, listunknown
1960 other, s, match, listignored, listclean, listunknown
1961 )
1961 )
1962 return s
1962 return s
1963
1963
1964 def _matchstatus(self, other, match):
1964 def _matchstatus(self, other, match):
1965 """override the match method with a filter for directory patterns
1965 """override the match method with a filter for directory patterns
1966
1966
1967 We use inheritance to customize the match.bad method only in cases of
1967 We use inheritance to customize the match.bad method only in cases of
1968 workingctx since it belongs only to the working directory when
1968 workingctx since it belongs only to the working directory when
1969 comparing against the parent changeset.
1969 comparing against the parent changeset.
1970
1970
1971 If we aren't comparing against the working directory's parent, then we
1971 If we aren't comparing against the working directory's parent, then we
1972 just use the default match object sent to us.
1972 just use the default match object sent to us.
1973 """
1973 """
1974 if other != self._repo[b'.']:
1974 if other != self._repo[b'.']:
1975
1975
1976 def bad(f, msg):
1976 def bad(f, msg):
1977 # 'f' may be a directory pattern from 'match.files()',
1977 # 'f' may be a directory pattern from 'match.files()',
1978 # so 'f not in ctx1' is not enough
1978 # so 'f not in ctx1' is not enough
1979 if f not in other and not other.hasdir(f):
1979 if f not in other and not other.hasdir(f):
1980 self._repo.ui.warn(
1980 self._repo.ui.warn(
1981 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1981 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1982 )
1982 )
1983
1983
1984 match.bad = bad
1984 match.bad = bad
1985 return match
1985 return match
1986
1986
1987 def walk(self, match):
1987 def walk(self, match):
1988 '''Generates matching file names.'''
1988 '''Generates matching file names.'''
1989 return sorted(
1989 return sorted(
1990 self._repo.dirstate.walk(
1990 self._repo.dirstate.walk(
1991 self._repo.narrowmatch(match),
1991 self._repo.narrowmatch(match),
1992 subrepos=sorted(self.substate),
1992 subrepos=sorted(self.substate),
1993 unknown=True,
1993 unknown=True,
1994 ignored=False,
1994 ignored=False,
1995 )
1995 )
1996 )
1996 )
1997
1997
1998 def matches(self, match):
1998 def matches(self, match):
1999 match = self._repo.narrowmatch(match)
1999 match = self._repo.narrowmatch(match)
2000 ds = self._repo.dirstate
2000 ds = self._repo.dirstate
2001 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2001 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
2002
2002
2003 def markcommitted(self, node):
2003 def markcommitted(self, node):
2004 with self._repo.dirstate.parentchange():
2004 with self._repo.dirstate.parentchange():
2005 for f in self.modified() + self.added():
2005 for f in self.modified() + self.added():
2006 self._repo.dirstate.normal(f)
2006 self._repo.dirstate.normal(f)
2007 for f in self.removed():
2007 for f in self.removed():
2008 self._repo.dirstate.drop(f)
2008 self._repo.dirstate.drop(f)
2009 self._repo.dirstate.setparents(node)
2009 self._repo.dirstate.setparents(node)
2010 self._repo._quick_access_changeid_invalidate()
2010 self._repo._quick_access_changeid_invalidate()
2011
2011
2012 # write changes out explicitly, because nesting wlock at
2012 # write changes out explicitly, because nesting wlock at
2013 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2013 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2014 # from immediately doing so for subsequent changing files
2014 # from immediately doing so for subsequent changing files
2015 self._repo.dirstate.write(self._repo.currenttransaction())
2015 self._repo.dirstate.write(self._repo.currenttransaction())
2016
2016
2017 sparse.aftercommit(self._repo, node)
2017 sparse.aftercommit(self._repo, node)
2018
2018
2019 def mergestate(self, clean=False):
2019 def mergestate(self, clean=False):
2020 if clean:
2020 if clean:
2021 return mergestatemod.mergestate.clean(self._repo)
2021 return mergestatemod.mergestate.clean(self._repo)
2022 return mergestatemod.mergestate.read(self._repo)
2022 return mergestatemod.mergestate.read(self._repo)
2023
2023
2024
2024
2025 class committablefilectx(basefilectx):
2025 class committablefilectx(basefilectx):
2026 """A committablefilectx provides common functionality for a file context
2026 """A committablefilectx provides common functionality for a file context
2027 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2027 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2028
2028
2029 def __init__(self, repo, path, filelog=None, ctx=None):
2029 def __init__(self, repo, path, filelog=None, ctx=None):
2030 self._repo = repo
2030 self._repo = repo
2031 self._path = path
2031 self._path = path
2032 self._changeid = None
2032 self._changeid = None
2033 self._filerev = self._filenode = None
2033 self._filerev = self._filenode = None
2034
2034
2035 if filelog is not None:
2035 if filelog is not None:
2036 self._filelog = filelog
2036 self._filelog = filelog
2037 if ctx:
2037 if ctx:
2038 self._changectx = ctx
2038 self._changectx = ctx
2039
2039
2040 def __nonzero__(self):
2040 def __nonzero__(self):
2041 return True
2041 return True
2042
2042
2043 __bool__ = __nonzero__
2043 __bool__ = __nonzero__
2044
2044
2045 def linkrev(self):
2045 def linkrev(self):
2046 # linked to self._changectx no matter if file is modified or not
2046 # linked to self._changectx no matter if file is modified or not
2047 return self.rev()
2047 return self.rev()
2048
2048
2049 def renamed(self):
2049 def renamed(self):
2050 path = self.copysource()
2050 path = self.copysource()
2051 if not path:
2051 if not path:
2052 return None
2052 return None
2053 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2053 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2054
2054
2055 def parents(self):
2055 def parents(self):
2056 '''return parent filectxs, following copies if necessary'''
2056 '''return parent filectxs, following copies if necessary'''
2057
2057
2058 def filenode(ctx, path):
2058 def filenode(ctx, path):
2059 return ctx._manifest.get(path, nullid)
2059 return ctx._manifest.get(path, nullid)
2060
2060
2061 path = self._path
2061 path = self._path
2062 fl = self._filelog
2062 fl = self._filelog
2063 pcl = self._changectx._parents
2063 pcl = self._changectx._parents
2064 renamed = self.renamed()
2064 renamed = self.renamed()
2065
2065
2066 if renamed:
2066 if renamed:
2067 pl = [renamed + (None,)]
2067 pl = [renamed + (None,)]
2068 else:
2068 else:
2069 pl = [(path, filenode(pcl[0], path), fl)]
2069 pl = [(path, filenode(pcl[0], path), fl)]
2070
2070
2071 for pc in pcl[1:]:
2071 for pc in pcl[1:]:
2072 pl.append((path, filenode(pc, path), fl))
2072 pl.append((path, filenode(pc, path), fl))
2073
2073
2074 return [
2074 return [
2075 self._parentfilectx(p, fileid=n, filelog=l)
2075 self._parentfilectx(p, fileid=n, filelog=l)
2076 for p, n, l in pl
2076 for p, n, l in pl
2077 if n != nullid
2077 if n != nullid
2078 ]
2078 ]
2079
2079
2080 def children(self):
2080 def children(self):
2081 return []
2081 return []
2082
2082
2083
2083
2084 class workingfilectx(committablefilectx):
2084 class workingfilectx(committablefilectx):
2085 """A workingfilectx object makes access to data related to a particular
2085 """A workingfilectx object makes access to data related to a particular
2086 file in the working directory convenient."""
2086 file in the working directory convenient."""
2087
2087
2088 def __init__(self, repo, path, filelog=None, workingctx=None):
2088 def __init__(self, repo, path, filelog=None, workingctx=None):
2089 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2089 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2090
2090
2091 @propertycache
2091 @propertycache
2092 def _changectx(self):
2092 def _changectx(self):
2093 return workingctx(self._repo)
2093 return workingctx(self._repo)
2094
2094
2095 def data(self):
2095 def data(self):
2096 return self._repo.wread(self._path)
2096 return self._repo.wread(self._path)
2097
2097
2098 def copysource(self):
2098 def copysource(self):
2099 return self._repo.dirstate.copied(self._path)
2099 return self._repo.dirstate.copied(self._path)
2100
2100
2101 def size(self):
2101 def size(self):
2102 return self._repo.wvfs.lstat(self._path).st_size
2102 return self._repo.wvfs.lstat(self._path).st_size
2103
2103
2104 def lstat(self):
2104 def lstat(self):
2105 return self._repo.wvfs.lstat(self._path)
2105 return self._repo.wvfs.lstat(self._path)
2106
2106
2107 def date(self):
2107 def date(self):
2108 t, tz = self._changectx.date()
2108 t, tz = self._changectx.date()
2109 try:
2109 try:
2110 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2110 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2111 except OSError as err:
2111 except OSError as err:
2112 if err.errno != errno.ENOENT:
2112 if err.errno != errno.ENOENT:
2113 raise
2113 raise
2114 return (t, tz)
2114 return (t, tz)
2115
2115
2116 def exists(self):
2116 def exists(self):
2117 return self._repo.wvfs.exists(self._path)
2117 return self._repo.wvfs.exists(self._path)
2118
2118
2119 def lexists(self):
2119 def lexists(self):
2120 return self._repo.wvfs.lexists(self._path)
2120 return self._repo.wvfs.lexists(self._path)
2121
2121
2122 def audit(self):
2122 def audit(self):
2123 return self._repo.wvfs.audit(self._path)
2123 return self._repo.wvfs.audit(self._path)
2124
2124
2125 def cmp(self, fctx):
2125 def cmp(self, fctx):
2126 """compare with other file context
2126 """compare with other file context
2127
2127
2128 returns True if different than fctx.
2128 returns True if different than fctx.
2129 """
2129 """
2130 # fctx should be a filectx (not a workingfilectx)
2130 # fctx should be a filectx (not a workingfilectx)
2131 # invert comparison to reuse the same code path
2131 # invert comparison to reuse the same code path
2132 return fctx.cmp(self)
2132 return fctx.cmp(self)
2133
2133
2134 def remove(self, ignoremissing=False):
2134 def remove(self, ignoremissing=False):
2135 """wraps unlink for a repo's working directory"""
2135 """wraps unlink for a repo's working directory"""
2136 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2136 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2137 self._repo.wvfs.unlinkpath(
2137 self._repo.wvfs.unlinkpath(
2138 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2138 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2139 )
2139 )
2140
2140
2141 def write(self, data, flags, backgroundclose=False, **kwargs):
2141 def write(self, data, flags, backgroundclose=False, **kwargs):
2142 """wraps repo.wwrite"""
2142 """wraps repo.wwrite"""
2143 return self._repo.wwrite(
2143 return self._repo.wwrite(
2144 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2144 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2145 )
2145 )
2146
2146
2147 def markcopied(self, src):
2147 def markcopied(self, src):
2148 """marks this file a copy of `src`"""
2148 """marks this file a copy of `src`"""
2149 self._repo.dirstate.copy(src, self._path)
2149 self._repo.dirstate.copy(src, self._path)
2150
2150
2151 def clearunknown(self):
2151 def clearunknown(self):
2152 """Removes conflicting items in the working directory so that
2152 """Removes conflicting items in the working directory so that
2153 ``write()`` can be called successfully.
2153 ``write()`` can be called successfully.
2154 """
2154 """
2155 wvfs = self._repo.wvfs
2155 wvfs = self._repo.wvfs
2156 f = self._path
2156 f = self._path
2157 wvfs.audit(f)
2157 wvfs.audit(f)
2158 if self._repo.ui.configbool(
2158 if self._repo.ui.configbool(
2159 b'experimental', b'merge.checkpathconflicts'
2159 b'experimental', b'merge.checkpathconflicts'
2160 ):
2160 ):
2161 # remove files under the directory as they should already be
2161 # remove files under the directory as they should already be
2162 # warned and backed up
2162 # warned and backed up
2163 if wvfs.isdir(f) and not wvfs.islink(f):
2163 if wvfs.isdir(f) and not wvfs.islink(f):
2164 wvfs.rmtree(f, forcibly=True)
2164 wvfs.rmtree(f, forcibly=True)
2165 for p in reversed(list(pathutil.finddirs(f))):
2165 for p in reversed(list(pathutil.finddirs(f))):
2166 if wvfs.isfileorlink(p):
2166 if wvfs.isfileorlink(p):
2167 wvfs.unlink(p)
2167 wvfs.unlink(p)
2168 break
2168 break
2169 else:
2169 else:
2170 # don't remove files if path conflicts are not processed
2170 # don't remove files if path conflicts are not processed
2171 if wvfs.isdir(f) and not wvfs.islink(f):
2171 if wvfs.isdir(f) and not wvfs.islink(f):
2172 wvfs.removedirs(f)
2172 wvfs.removedirs(f)
2173
2173
2174 def setflags(self, l, x):
2174 def setflags(self, l, x):
2175 self._repo.wvfs.setflags(self._path, l, x)
2175 self._repo.wvfs.setflags(self._path, l, x)
2176
2176
2177
2177
2178 class overlayworkingctx(committablectx):
2178 class overlayworkingctx(committablectx):
2179 """Wraps another mutable context with a write-back cache that can be
2179 """Wraps another mutable context with a write-back cache that can be
2180 converted into a commit context.
2180 converted into a commit context.
2181
2181
2182 self._cache[path] maps to a dict with keys: {
2182 self._cache[path] maps to a dict with keys: {
2183 'exists': bool?
2183 'exists': bool?
2184 'date': date?
2184 'date': date?
2185 'data': str?
2185 'data': str?
2186 'flags': str?
2186 'flags': str?
2187 'copied': str? (path or None)
2187 'copied': str? (path or None)
2188 }
2188 }
2189 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2189 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2190 is `False`, the file was deleted.
2190 is `False`, the file was deleted.
2191 """
2191 """
2192
2192
2193 def __init__(self, repo):
2193 def __init__(self, repo):
2194 super(overlayworkingctx, self).__init__(repo)
2194 super(overlayworkingctx, self).__init__(repo)
2195 self.clean()
2195 self.clean()
2196
2196
2197 def setbase(self, wrappedctx):
2197 def setbase(self, wrappedctx):
2198 self._wrappedctx = wrappedctx
2198 self._wrappedctx = wrappedctx
2199 self._parents = [wrappedctx]
2199 self._parents = [wrappedctx]
2200 # Drop old manifest cache as it is now out of date.
2200 # Drop old manifest cache as it is now out of date.
2201 # This is necessary when, e.g., rebasing several nodes with one
2201 # This is necessary when, e.g., rebasing several nodes with one
2202 # ``overlayworkingctx`` (e.g. with --collapse).
2202 # ``overlayworkingctx`` (e.g. with --collapse).
2203 util.clearcachedproperty(self, b'_manifest')
2203 util.clearcachedproperty(self, b'_manifest')
2204
2204
2205 def setparents(self, p1node, p2node=nullid):
2205 def setparents(self, p1node, p2node=nullid):
2206 assert p1node == self._wrappedctx.node()
2206 assert p1node == self._wrappedctx.node()
2207 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2207 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2208
2208
2209 def data(self, path):
2209 def data(self, path):
2210 if self.isdirty(path):
2210 if self.isdirty(path):
2211 if self._cache[path][b'exists']:
2211 if self._cache[path][b'exists']:
2212 if self._cache[path][b'data'] is not None:
2212 if self._cache[path][b'data'] is not None:
2213 return self._cache[path][b'data']
2213 return self._cache[path][b'data']
2214 else:
2214 else:
2215 # Must fallback here, too, because we only set flags.
2215 # Must fallback here, too, because we only set flags.
2216 return self._wrappedctx[path].data()
2216 return self._wrappedctx[path].data()
2217 else:
2217 else:
2218 raise error.ProgrammingError(
2218 raise error.ProgrammingError(
2219 b"No such file or directory: %s" % path
2219 b"No such file or directory: %s" % path
2220 )
2220 )
2221 else:
2221 else:
2222 return self._wrappedctx[path].data()
2222 return self._wrappedctx[path].data()
2223
2223
2224 @propertycache
2224 @propertycache
2225 def _manifest(self):
2225 def _manifest(self):
2226 parents = self.parents()
2226 parents = self.parents()
2227 man = parents[0].manifest().copy()
2227 man = parents[0].manifest().copy()
2228
2228
2229 flag = self._flagfunc
2229 flag = self._flagfunc
2230 for path in self.added():
2230 for path in self.added():
2231 man[path] = addednodeid
2231 man[path] = addednodeid
2232 man.setflag(path, flag(path))
2232 man.setflag(path, flag(path))
2233 for path in self.modified():
2233 for path in self.modified():
2234 man[path] = modifiednodeid
2234 man[path] = modifiednodeid
2235 man.setflag(path, flag(path))
2235 man.setflag(path, flag(path))
2236 for path in self.removed():
2236 for path in self.removed():
2237 del man[path]
2237 del man[path]
2238 return man
2238 return man
2239
2239
2240 @propertycache
2240 @propertycache
2241 def _flagfunc(self):
2241 def _flagfunc(self):
2242 def f(path):
2242 def f(path):
2243 return self._cache[path][b'flags']
2243 return self._cache[path][b'flags']
2244
2244
2245 return f
2245 return f
2246
2246
2247 def files(self):
2247 def files(self):
2248 return sorted(self.added() + self.modified() + self.removed())
2248 return sorted(self.added() + self.modified() + self.removed())
2249
2249
2250 def modified(self):
2250 def modified(self):
2251 return [
2251 return [
2252 f
2252 f
2253 for f in self._cache.keys()
2253 for f in self._cache.keys()
2254 if self._cache[f][b'exists'] and self._existsinparent(f)
2254 if self._cache[f][b'exists'] and self._existsinparent(f)
2255 ]
2255 ]
2256
2256
2257 def added(self):
2257 def added(self):
2258 return [
2258 return [
2259 f
2259 f
2260 for f in self._cache.keys()
2260 for f in self._cache.keys()
2261 if self._cache[f][b'exists'] and not self._existsinparent(f)
2261 if self._cache[f][b'exists'] and not self._existsinparent(f)
2262 ]
2262 ]
2263
2263
2264 def removed(self):
2264 def removed(self):
2265 return [
2265 return [
2266 f
2266 f
2267 for f in self._cache.keys()
2267 for f in self._cache.keys()
2268 if not self._cache[f][b'exists'] and self._existsinparent(f)
2268 if not self._cache[f][b'exists'] and self._existsinparent(f)
2269 ]
2269 ]
2270
2270
2271 def p1copies(self):
2271 def p1copies(self):
2272 copies = {}
2272 copies = {}
2273 narrowmatch = self._repo.narrowmatch()
2273 narrowmatch = self._repo.narrowmatch()
2274 for f in self._cache.keys():
2274 for f in self._cache.keys():
2275 if not narrowmatch(f):
2275 if not narrowmatch(f):
2276 continue
2276 continue
2277 copies.pop(f, None) # delete if it exists
2277 copies.pop(f, None) # delete if it exists
2278 source = self._cache[f][b'copied']
2278 source = self._cache[f][b'copied']
2279 if source:
2279 if source:
2280 copies[f] = source
2280 copies[f] = source
2281 return copies
2281 return copies
2282
2282
2283 def p2copies(self):
2283 def p2copies(self):
2284 copies = {}
2284 copies = {}
2285 narrowmatch = self._repo.narrowmatch()
2285 narrowmatch = self._repo.narrowmatch()
2286 for f in self._cache.keys():
2286 for f in self._cache.keys():
2287 if not narrowmatch(f):
2287 if not narrowmatch(f):
2288 continue
2288 continue
2289 copies.pop(f, None) # delete if it exists
2289 copies.pop(f, None) # delete if it exists
2290 source = self._cache[f][b'copied']
2290 source = self._cache[f][b'copied']
2291 if source:
2291 if source:
2292 copies[f] = source
2292 copies[f] = source
2293 return copies
2293 return copies
2294
2294
2295 def isinmemory(self):
2295 def isinmemory(self):
2296 return True
2296 return True
2297
2297
2298 def filedate(self, path):
2298 def filedate(self, path):
2299 if self.isdirty(path):
2299 if self.isdirty(path):
2300 return self._cache[path][b'date']
2300 return self._cache[path][b'date']
2301 else:
2301 else:
2302 return self._wrappedctx[path].date()
2302 return self._wrappedctx[path].date()
2303
2303
2304 def markcopied(self, path, origin):
2304 def markcopied(self, path, origin):
2305 self._markdirty(
2305 self._markdirty(
2306 path,
2306 path,
2307 exists=True,
2307 exists=True,
2308 date=self.filedate(path),
2308 date=self.filedate(path),
2309 flags=self.flags(path),
2309 flags=self.flags(path),
2310 copied=origin,
2310 copied=origin,
2311 )
2311 )
2312
2312
2313 def copydata(self, path):
2313 def copydata(self, path):
2314 if self.isdirty(path):
2314 if self.isdirty(path):
2315 return self._cache[path][b'copied']
2315 return self._cache[path][b'copied']
2316 else:
2316 else:
2317 return None
2317 return None
2318
2318
2319 def flags(self, path):
2319 def flags(self, path):
2320 if self.isdirty(path):
2320 if self.isdirty(path):
2321 if self._cache[path][b'exists']:
2321 if self._cache[path][b'exists']:
2322 return self._cache[path][b'flags']
2322 return self._cache[path][b'flags']
2323 else:
2323 else:
2324 raise error.ProgrammingError(
2324 raise error.ProgrammingError(
2325 b"No such file or directory: %s" % self._path
2325 b"No such file or directory: %s" % path
2326 )
2326 )
2327 else:
2327 else:
2328 return self._wrappedctx[path].flags()
2328 return self._wrappedctx[path].flags()
2329
2329
2330 def __contains__(self, key):
2330 def __contains__(self, key):
2331 if key in self._cache:
2331 if key in self._cache:
2332 return self._cache[key][b'exists']
2332 return self._cache[key][b'exists']
2333 return key in self.p1()
2333 return key in self.p1()
2334
2334
2335 def _existsinparent(self, path):
2335 def _existsinparent(self, path):
2336 try:
2336 try:
2337 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2337 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2338 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2338 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2339 # with an ``exists()`` function.
2339 # with an ``exists()`` function.
2340 self._wrappedctx[path]
2340 self._wrappedctx[path]
2341 return True
2341 return True
2342 except error.ManifestLookupError:
2342 except error.ManifestLookupError:
2343 return False
2343 return False
2344
2344
2345 def _auditconflicts(self, path):
2345 def _auditconflicts(self, path):
2346 """Replicates conflict checks done by wvfs.write().
2346 """Replicates conflict checks done by wvfs.write().
2347
2347
2348 Since we never write to the filesystem and never call `applyupdates` in
2348 Since we never write to the filesystem and never call `applyupdates` in
2349 IMM, we'll never check that a path is actually writable -- e.g., because
2349 IMM, we'll never check that a path is actually writable -- e.g., because
2350 it adds `a/foo`, but `a` is actually a file in the other commit.
2350 it adds `a/foo`, but `a` is actually a file in the other commit.
2351 """
2351 """
2352
2352
2353 def fail(path, component):
2353 def fail(path, component):
2354 # p1() is the base and we're receiving "writes" for p2()'s
2354 # p1() is the base and we're receiving "writes" for p2()'s
2355 # files.
2355 # files.
2356 if b'l' in self.p1()[component].flags():
2356 if b'l' in self.p1()[component].flags():
2357 raise error.Abort(
2357 raise error.Abort(
2358 b"error: %s conflicts with symlink %s "
2358 b"error: %s conflicts with symlink %s "
2359 b"in %d." % (path, component, self.p1().rev())
2359 b"in %d." % (path, component, self.p1().rev())
2360 )
2360 )
2361 else:
2361 else:
2362 raise error.Abort(
2362 raise error.Abort(
2363 b"error: '%s' conflicts with file '%s' in "
2363 b"error: '%s' conflicts with file '%s' in "
2364 b"%d." % (path, component, self.p1().rev())
2364 b"%d." % (path, component, self.p1().rev())
2365 )
2365 )
2366
2366
2367 # Test that each new directory to be created to write this path from p2
2367 # Test that each new directory to be created to write this path from p2
2368 # is not a file in p1.
2368 # is not a file in p1.
2369 components = path.split(b'/')
2369 components = path.split(b'/')
2370 for i in pycompat.xrange(len(components)):
2370 for i in pycompat.xrange(len(components)):
2371 component = b"/".join(components[0:i])
2371 component = b"/".join(components[0:i])
2372 if component in self:
2372 if component in self:
2373 fail(path, component)
2373 fail(path, component)
2374
2374
2375 # Test the other direction -- that this path from p2 isn't a directory
2375 # Test the other direction -- that this path from p2 isn't a directory
2376 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2376 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2377 match = self.match([path], default=b'path')
2377 match = self.match([path], default=b'path')
2378 mfiles = list(self.p1().manifest().walk(match))
2378 mfiles = list(self.p1().manifest().walk(match))
2379 if len(mfiles) > 0:
2379 if len(mfiles) > 0:
2380 if len(mfiles) == 1 and mfiles[0] == path:
2380 if len(mfiles) == 1 and mfiles[0] == path:
2381 return
2381 return
2382 # omit the files which are deleted in current IMM wctx
2382 # omit the files which are deleted in current IMM wctx
2383 mfiles = [m for m in mfiles if m in self]
2383 mfiles = [m for m in mfiles if m in self]
2384 if not mfiles:
2384 if not mfiles:
2385 return
2385 return
2386 raise error.Abort(
2386 raise error.Abort(
2387 b"error: file '%s' cannot be written because "
2387 b"error: file '%s' cannot be written because "
2388 b" '%s/' is a directory in %s (containing %d "
2388 b" '%s/' is a directory in %s (containing %d "
2389 b"entries: %s)"
2389 b"entries: %s)"
2390 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2390 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2391 )
2391 )
2392
2392
2393 def write(self, path, data, flags=b'', **kwargs):
2393 def write(self, path, data, flags=b'', **kwargs):
2394 if data is None:
2394 if data is None:
2395 raise error.ProgrammingError(b"data must be non-None")
2395 raise error.ProgrammingError(b"data must be non-None")
2396 self._auditconflicts(path)
2396 self._auditconflicts(path)
2397 self._markdirty(
2397 self._markdirty(
2398 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2398 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2399 )
2399 )
2400
2400
2401 def setflags(self, path, l, x):
2401 def setflags(self, path, l, x):
2402 flag = b''
2402 flag = b''
2403 if l:
2403 if l:
2404 flag = b'l'
2404 flag = b'l'
2405 elif x:
2405 elif x:
2406 flag = b'x'
2406 flag = b'x'
2407 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2407 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2408
2408
2409 def remove(self, path):
2409 def remove(self, path):
2410 self._markdirty(path, exists=False)
2410 self._markdirty(path, exists=False)
2411
2411
2412 def exists(self, path):
2412 def exists(self, path):
2413 """exists behaves like `lexists`, but needs to follow symlinks and
2413 """exists behaves like `lexists`, but needs to follow symlinks and
2414 return False if they are broken.
2414 return False if they are broken.
2415 """
2415 """
2416 if self.isdirty(path):
2416 if self.isdirty(path):
2417 # If this path exists and is a symlink, "follow" it by calling
2417 # If this path exists and is a symlink, "follow" it by calling
2418 # exists on the destination path.
2418 # exists on the destination path.
2419 if (
2419 if (
2420 self._cache[path][b'exists']
2420 self._cache[path][b'exists']
2421 and b'l' in self._cache[path][b'flags']
2421 and b'l' in self._cache[path][b'flags']
2422 ):
2422 ):
2423 return self.exists(self._cache[path][b'data'].strip())
2423 return self.exists(self._cache[path][b'data'].strip())
2424 else:
2424 else:
2425 return self._cache[path][b'exists']
2425 return self._cache[path][b'exists']
2426
2426
2427 return self._existsinparent(path)
2427 return self._existsinparent(path)
2428
2428
2429 def lexists(self, path):
2429 def lexists(self, path):
2430 """lexists returns True if the path exists"""
2430 """lexists returns True if the path exists"""
2431 if self.isdirty(path):
2431 if self.isdirty(path):
2432 return self._cache[path][b'exists']
2432 return self._cache[path][b'exists']
2433
2433
2434 return self._existsinparent(path)
2434 return self._existsinparent(path)
2435
2435
2436 def size(self, path):
2436 def size(self, path):
2437 if self.isdirty(path):
2437 if self.isdirty(path):
2438 if self._cache[path][b'exists']:
2438 if self._cache[path][b'exists']:
2439 return len(self._cache[path][b'data'])
2439 return len(self._cache[path][b'data'])
2440 else:
2440 else:
2441 raise error.ProgrammingError(
2441 raise error.ProgrammingError(
2442 b"No such file or directory: %s" % self._path
2442 b"No such file or directory: %s" % path
2443 )
2443 )
2444 return self._wrappedctx[path].size()
2444 return self._wrappedctx[path].size()
2445
2445
2446 def tomemctx(
2446 def tomemctx(
2447 self,
2447 self,
2448 text,
2448 text,
2449 branch=None,
2449 branch=None,
2450 extra=None,
2450 extra=None,
2451 date=None,
2451 date=None,
2452 parents=None,
2452 parents=None,
2453 user=None,
2453 user=None,
2454 editor=None,
2454 editor=None,
2455 ):
2455 ):
2456 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2456 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2457 committed.
2457 committed.
2458
2458
2459 ``text`` is the commit message.
2459 ``text`` is the commit message.
2460 ``parents`` (optional) are rev numbers.
2460 ``parents`` (optional) are rev numbers.
2461 """
2461 """
2462 # Default parents to the wrapped context if not passed.
2462 # Default parents to the wrapped context if not passed.
2463 if parents is None:
2463 if parents is None:
2464 parents = self.parents()
2464 parents = self.parents()
2465 if len(parents) == 1:
2465 if len(parents) == 1:
2466 parents = (parents[0], None)
2466 parents = (parents[0], None)
2467
2467
2468 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2468 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2469 if parents[1] is None:
2469 if parents[1] is None:
2470 parents = (self._repo[parents[0]], None)
2470 parents = (self._repo[parents[0]], None)
2471 else:
2471 else:
2472 parents = (self._repo[parents[0]], self._repo[parents[1]])
2472 parents = (self._repo[parents[0]], self._repo[parents[1]])
2473
2473
2474 files = self.files()
2474 files = self.files()
2475
2475
2476 def getfile(repo, memctx, path):
2476 def getfile(repo, memctx, path):
2477 if self._cache[path][b'exists']:
2477 if self._cache[path][b'exists']:
2478 return memfilectx(
2478 return memfilectx(
2479 repo,
2479 repo,
2480 memctx,
2480 memctx,
2481 path,
2481 path,
2482 self._cache[path][b'data'],
2482 self._cache[path][b'data'],
2483 b'l' in self._cache[path][b'flags'],
2483 b'l' in self._cache[path][b'flags'],
2484 b'x' in self._cache[path][b'flags'],
2484 b'x' in self._cache[path][b'flags'],
2485 self._cache[path][b'copied'],
2485 self._cache[path][b'copied'],
2486 )
2486 )
2487 else:
2487 else:
2488 # Returning None, but including the path in `files`, is
2488 # Returning None, but including the path in `files`, is
2489 # necessary for memctx to register a deletion.
2489 # necessary for memctx to register a deletion.
2490 return None
2490 return None
2491
2491
2492 if branch is None:
2492 if branch is None:
2493 branch = self._wrappedctx.branch()
2493 branch = self._wrappedctx.branch()
2494
2494
2495 return memctx(
2495 return memctx(
2496 self._repo,
2496 self._repo,
2497 parents,
2497 parents,
2498 text,
2498 text,
2499 files,
2499 files,
2500 getfile,
2500 getfile,
2501 date=date,
2501 date=date,
2502 extra=extra,
2502 extra=extra,
2503 user=user,
2503 user=user,
2504 branch=branch,
2504 branch=branch,
2505 editor=editor,
2505 editor=editor,
2506 )
2506 )
2507
2507
2508 def tomemctx_for_amend(self, precursor):
2508 def tomemctx_for_amend(self, precursor):
2509 extra = precursor.extra().copy()
2509 extra = precursor.extra().copy()
2510 extra[b'amend_source'] = precursor.hex()
2510 extra[b'amend_source'] = precursor.hex()
2511 return self.tomemctx(
2511 return self.tomemctx(
2512 text=precursor.description(),
2512 text=precursor.description(),
2513 branch=precursor.branch(),
2513 branch=precursor.branch(),
2514 extra=extra,
2514 extra=extra,
2515 date=precursor.date(),
2515 date=precursor.date(),
2516 user=precursor.user(),
2516 user=precursor.user(),
2517 )
2517 )
2518
2518
2519 def isdirty(self, path):
2519 def isdirty(self, path):
2520 return path in self._cache
2520 return path in self._cache
2521
2521
2522 def isempty(self):
2522 def isempty(self):
2523 # We need to discard any keys that are actually clean before the empty
2523 # We need to discard any keys that are actually clean before the empty
2524 # commit check.
2524 # commit check.
2525 self._compact()
2525 self._compact()
2526 return len(self._cache) == 0
2526 return len(self._cache) == 0
2527
2527
2528 def clean(self):
2528 def clean(self):
2529 self._cache = {}
2529 self._cache = {}
2530
2530
2531 def _compact(self):
2531 def _compact(self):
2532 """Removes keys from the cache that are actually clean, by comparing
2532 """Removes keys from the cache that are actually clean, by comparing
2533 them with the underlying context.
2533 them with the underlying context.
2534
2534
2535 This can occur during the merge process, e.g. by passing --tool :local
2535 This can occur during the merge process, e.g. by passing --tool :local
2536 to resolve a conflict.
2536 to resolve a conflict.
2537 """
2537 """
2538 keys = []
2538 keys = []
2539 # This won't be perfect, but can help performance significantly when
2539 # This won't be perfect, but can help performance significantly when
2540 # using things like remotefilelog.
2540 # using things like remotefilelog.
2541 scmutil.prefetchfiles(
2541 scmutil.prefetchfiles(
2542 self.repo(),
2542 self.repo(),
2543 [self.p1().rev()],
2543 [self.p1().rev()],
2544 scmutil.matchfiles(self.repo(), self._cache.keys()),
2544 scmutil.matchfiles(self.repo(), self._cache.keys()),
2545 )
2545 )
2546
2546
2547 for path in self._cache.keys():
2547 for path in self._cache.keys():
2548 cache = self._cache[path]
2548 cache = self._cache[path]
2549 try:
2549 try:
2550 underlying = self._wrappedctx[path]
2550 underlying = self._wrappedctx[path]
2551 if (
2551 if (
2552 underlying.data() == cache[b'data']
2552 underlying.data() == cache[b'data']
2553 and underlying.flags() == cache[b'flags']
2553 and underlying.flags() == cache[b'flags']
2554 ):
2554 ):
2555 keys.append(path)
2555 keys.append(path)
2556 except error.ManifestLookupError:
2556 except error.ManifestLookupError:
2557 # Path not in the underlying manifest (created).
2557 # Path not in the underlying manifest (created).
2558 continue
2558 continue
2559
2559
2560 for path in keys:
2560 for path in keys:
2561 del self._cache[path]
2561 del self._cache[path]
2562 return keys
2562 return keys
2563
2563
2564 def _markdirty(
2564 def _markdirty(
2565 self, path, exists, data=None, date=None, flags=b'', copied=None
2565 self, path, exists, data=None, date=None, flags=b'', copied=None
2566 ):
2566 ):
2567 # data not provided, let's see if we already have some; if not, let's
2567 # data not provided, let's see if we already have some; if not, let's
2568 # grab it from our underlying context, so that we always have data if
2568 # grab it from our underlying context, so that we always have data if
2569 # the file is marked as existing.
2569 # the file is marked as existing.
2570 if exists and data is None:
2570 if exists and data is None:
2571 oldentry = self._cache.get(path) or {}
2571 oldentry = self._cache.get(path) or {}
2572 data = oldentry.get(b'data')
2572 data = oldentry.get(b'data')
2573 if data is None:
2573 if data is None:
2574 data = self._wrappedctx[path].data()
2574 data = self._wrappedctx[path].data()
2575
2575
2576 self._cache[path] = {
2576 self._cache[path] = {
2577 b'exists': exists,
2577 b'exists': exists,
2578 b'data': data,
2578 b'data': data,
2579 b'date': date,
2579 b'date': date,
2580 b'flags': flags,
2580 b'flags': flags,
2581 b'copied': copied,
2581 b'copied': copied,
2582 }
2582 }
2583
2583
2584 def filectx(self, path, filelog=None):
2584 def filectx(self, path, filelog=None):
2585 return overlayworkingfilectx(
2585 return overlayworkingfilectx(
2586 self._repo, path, parent=self, filelog=filelog
2586 self._repo, path, parent=self, filelog=filelog
2587 )
2587 )
2588
2588
2589
2589
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Every operation delegates to the parent ``overlayworkingctx`` keyed by
    this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True if our content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        """The owning ``overlayworkingctx``."""
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # Symlink-following existence is handled by the parent context.
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No filesystem to audit for an in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing "unknown" can exist in an in-memory overlay.
        pass
2644
2644
2645
2645
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            cleanfiles = [f for f in self._manifest if f not in self._changedset]
        else:
            cleanfiles = []
        status = self._status
        return scmutil.status(
            [f for f in status.modified if match(f)],
            [f for f in status.added if match(f)],
            [f for f in status.removed if match(f)],
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2689
2689
2690
2690
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2706
2706
2707
2707
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # Resolve the copy source before building the memfilectx.
        copysource = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=copysource,
        )

    return getfilectx
2729
2729
2730
2730
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # The patch removed this file; memctx records a deletion.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2753
2753
2754
2754
2755 class memctx(committablectx):
2755 class memctx(committablectx):
2756 """Use memctx to perform in-memory commits via localrepo.commitctx().
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism. 'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True
2787
2787
2788 def __init__(
2788 def __init__(
2789 self,
2789 self,
2790 repo,
2790 repo,
2791 parents,
2791 parents,
2792 text,
2792 text,
2793 files,
2793 files,
2794 filectxfn,
2794 filectxfn,
2795 user=None,
2795 user=None,
2796 date=None,
2796 date=None,
2797 extra=None,
2797 extra=None,
2798 branch=None,
2798 branch=None,
2799 editor=None,
2799 editor=None,
2800 ):
2800 ):
2801 super(memctx, self).__init__(
2801 super(memctx, self).__init__(
2802 repo, text, user, date, extra, branch=branch
2802 repo, text, user, date, extra, branch=branch
2803 )
2803 )
2804 self._rev = None
2804 self._rev = None
2805 self._node = None
2805 self._node = None
2806 parents = [(p or nullid) for p in parents]
2806 parents = [(p or nullid) for p in parents]
2807 p1, p2 = parents
2807 p1, p2 = parents
2808 self._parents = [self._repo[p] for p in (p1, p2)]
2808 self._parents = [self._repo[p] for p in (p1, p2)]
2809 files = sorted(set(files))
2809 files = sorted(set(files))
2810 self._files = files
2810 self._files = files
2811 self.substate = {}
2811 self.substate = {}
2812
2812
2813 if isinstance(filectxfn, patch.filestore):
2813 if isinstance(filectxfn, patch.filestore):
2814 filectxfn = memfilefrompatch(filectxfn)
2814 filectxfn = memfilefrompatch(filectxfn)
2815 elif not callable(filectxfn):
2815 elif not callable(filectxfn):
2816 # if store is not callable, wrap it in a function
2816 # if store is not callable, wrap it in a function
2817 filectxfn = memfilefromctx(filectxfn)
2817 filectxfn = memfilefromctx(filectxfn)
2818
2818
2819 # memoizing increases performance for e.g. vcs convert scenarios.
2819 # memoizing increases performance for e.g. vcs convert scenarios.
2820 self._filectxfn = makecachingfilectxfn(filectxfn)
2820 self._filectxfn = makecachingfilectxfn(filectxfn)
2821
2821
2822 if editor:
2822 if editor:
2823 self._text = editor(self._repo, self, [])
2823 self._text = editor(self._repo, self, [])
2824 self._repo.savecommitmessage(self._text)
2824 self._repo.savecommitmessage(self._text)
2825
2825
2826 def filectx(self, path, filelog=None):
2826 def filectx(self, path, filelog=None):
2827 """get a file context from the working directory
2827 """get a file context from the working directory
2828
2828
2829 Returns None if file doesn't exist and should be removed."""
2829 Returns None if file doesn't exist and should be removed."""
2830 return self._filectxfn(self._repo, self, path)
2830 return self._filectxfn(self._repo, self, path)
2831
2831
2832 def commit(self):
2832 def commit(self):
2833 """commit context to the repo"""
2833 """commit context to the repo"""
2834 return self._repo.commitctx(self)
2834 return self._repo.commitctx(self)
2835
2835
2836 @propertycache
2836 @propertycache
2837 def _manifest(self):
2837 def _manifest(self):
2838 """generate a manifest based on the return values of filectxfn"""
2838 """generate a manifest based on the return values of filectxfn"""
2839
2839
2840 # keep this simple for now; just worry about p1
2840 # keep this simple for now; just worry about p1
2841 pctx = self._parents[0]
2841 pctx = self._parents[0]
2842 man = pctx.manifest().copy()
2842 man = pctx.manifest().copy()
2843
2843
2844 for f in self._status.modified:
2844 for f in self._status.modified:
2845 man[f] = modifiednodeid
2845 man[f] = modifiednodeid
2846
2846
2847 for f in self._status.added:
2847 for f in self._status.added:
2848 man[f] = addednodeid
2848 man[f] = addednodeid
2849
2849
2850 for f in self._status.removed:
2850 for f in self._status.removed:
2851 if f in man:
2851 if f in man:
2852 del man[f]
2852 del man[f]
2853
2853
2854 return man
2854 return man
2855
2855
2856 @propertycache
2856 @propertycache
2857 def _status(self):
2857 def _status(self):
2858 """Calculate exact status from ``files`` specified at construction
2858 """Calculate exact status from ``files`` specified at construction
2859 """
2859 """
2860 man1 = self.p1().manifest()
2860 man1 = self.p1().manifest()
2861 p2 = self._parents[1]
2861 p2 = self._parents[1]
2862 # "1 < len(self._parents)" can't be used for checking
2862 # "1 < len(self._parents)" can't be used for checking
2863 # existence of the 2nd parent, because "memctx._parents" is
2863 # existence of the 2nd parent, because "memctx._parents" is
2864 # explicitly initialized by the list, of which length is 2.
2864 # explicitly initialized by the list, of which length is 2.
2865 if p2.node() != nullid:
2865 if p2.node() != nullid:
2866 man2 = p2.manifest()
2866 man2 = p2.manifest()
2867 managing = lambda f: f in man1 or f in man2
2867 managing = lambda f: f in man1 or f in man2
2868 else:
2868 else:
2869 managing = lambda f: f in man1
2869 managing = lambda f: f in man1
2870
2870
2871 modified, added, removed = [], [], []
2871 modified, added, removed = [], [], []
2872 for f in self._files:
2872 for f in self._files:
2873 if not managing(f):
2873 if not managing(f):
2874 added.append(f)
2874 added.append(f)
2875 elif self[f]:
2875 elif self[f]:
2876 modified.append(f)
2876 modified.append(f)
2877 else:
2877 else:
2878 removed.append(f)
2878 removed.append(f)
2879
2879
2880 return scmutil.status(modified, added, removed, [], [], [], [])
2880 return scmutil.status(modified, added, removed, [], [], [], [])
2881
2881
2882
2882
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode link/exec status as the usual one-byte manifest flag
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        else:
            flags = b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2933
2934
2934
class metadataonlyctx(committablectx):
    """Like memctx but reusing the manifest of a different commit.

    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we are reusing, 'parents' is a sequence of two parent revision
    identifiers (pass None for every missing parent), 'text' is the commit
    message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary
    of metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Fall back to the original commit's message when none is given.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # pad with the null revision so ``self._parents`` always has
        # exactly two entries
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        # NOTE(review): p1/p2 are context objects compared against the
        # nullid node here; presumably the comparison is always unequal for
        # contexts, making the manifestnode check decisive — verify against
        # changectx.__eq__ semantics.
        mp1, mp2 = self.manifestctx().parents
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the manifest being reused."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # file data comes straight from the original revision
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3043
3043
3044
3044
class arbitraryfilectx(object):
    """Filectx-like access to a file at an arbitrary location on disk,
    possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        use_fast_path = (
            not has_symlink
            and isinstance(fctx, workingfilectx)
            and self._repo
        )
        if use_fast_path:
            # Both sides are disk-backed: compare the files directly. Note
            # that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now