##// END OF EJS Templates
commitablectx: fix the default phase...
marmoute -
r44415:bbcf78c4 default
parent child Browse files
Show More
@@ -1,3021 +1,3021 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56
56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        # Subclasses are expected to also set self._rev and self._node.
        self._repo = repo

    def __bytes__(self):
        # Short (abbreviated hex) form of the node hash.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal only when they are of the same concrete type
        # and reference the same revision number.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # True if the given path is tracked in this revision's manifest.
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path at this revision.
        return self.filectx(key)

    def __iter__(self):
        # Iterate over the file paths in the manifest.
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                # Deleted files are reported via s.deleted, not here.
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Parsed subrepository state (.hgsub/.hgsubstate) for this revision.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded for the given subrepo path in the substate.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # Human-readable name of this changeset's phase.
        return phases.phasenames[self.phase()]

    def mutable(self):
        # Any non-public changeset may still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # Second parent, or the null context when there is only one parent.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, using the cheapest source
        # available: a cached full manifest, a cached manifest delta, or a
        # targeted lookup in the manifest log.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            # Unknown paths have no flags.
            return b''

    @propertycache
    def _copies(self):
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        # Build a matcher rooted at the repo for the given patterns.
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            # Default to diffing against the first parent.
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                # Merge subrepo results into the top-level status, prefixing
                # each file with its subrepo path.
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        # Present deterministic, sorted results to callers.
        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
473
473
474
474
475 class changectx(basectx):
475 class changectx(basectx):
476 """A changecontext object makes access to data related to a particular
476 """A changecontext object makes access to data related to a particular
477 changeset convenient. It represents a read-only context already present in
477 changeset convenient. It represents a read-only context already present in
478 the repo."""
478 the repo."""
479
479
    def __init__(self, repo, rev, node, maybe_filtered=True):
        """Initialize a read-only context for revision ``rev``/``node``."""
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered
491
491
    def __hash__(self):
        # Hash on the revision number, matching __eq__'s comparison key.
        try:
            return hash(self._rev)
        except AttributeError:
            # _rev not set yet; fall back to identity so the object stays
            # hashable.
            return id(self)
497
497
    def __nonzero__(self):
        # The null revision is the only "false" changectx.
        return self._rev != nullrev

    # Python 3 truthiness protocol.
    __bool__ = __nonzero__
502
502
    @propertycache
    def _changeset(self):
        # Parsed changelog entry for this revision (cached).
        if self._maybe_filtered:
            repo = self._repo
        else:
            # The revision is known to be unfiltered; going through the
            # unfiltered changelog avoids repoview filtering overhead.
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())
510
510
    @propertycache
    def _manifest(self):
        # Fully parsed manifest for this revision (cached on first use).
        return self._manifestctx.read()
514
514
    @property
    def _manifestctx(self):
        # Manifest context for this changeset's manifest node. Deliberately
        # not cached: the manifestlog handles its own caching.
        return self._repo.manifestlog[self._changeset.manifest]
518
518
    @propertycache
    def _manifestdelta(self):
        # Manifest delta against the parent; cheaper than a full read when
        # only a few entries are needed.
        return self._manifestctx.readdelta()
522
522
    @propertycache
    def _parents(self):
        # Parent contexts, computed from changelog parent revisions.
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            # Known-unfiltered revision: skip repoview filtering.
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            # Single-parent changeset: return a one-element list.
            return [repo[p1]]
        return [repo[p1], repo[p2]]
535
535
536 def changeset(self):
536 def changeset(self):
537 c = self._changeset
537 c = self._changeset
538 return (
538 return (
539 c.manifest,
539 c.manifest,
540 c.user,
540 c.user,
541 c.date,
541 c.date,
542 c.files,
542 c.files,
543 c.description,
543 c.description,
544 c.extra,
544 c.extra,
545 )
545 )
546
546
    def manifestnode(self):
        # Node id of this changeset's manifest.
        return self._changeset.manifest
549
549
    def user(self):
        # Committer identity recorded in the changeset.
        return self._changeset.user
552
552
    def date(self):
        # Commit date recorded in the changeset.
        return self._changeset.date
555
555
    def files(self):
        # List of files touched by this changeset.
        return self._changeset.files
558
558
559 def filesmodified(self):
559 def filesmodified(self):
560 modified = set(self.files())
560 modified = set(self.files())
561 modified.difference_update(self.filesadded())
561 modified.difference_update(self.filesadded())
562 modified.difference_update(self.filesremoved())
562 modified.difference_update(self.filesremoved())
563 return sorted(modified)
563 return sorted(modified)
564
564
    def filesadded(self):
        """Return the list of files added by this changeset.

        The source of the information depends on the repo's copy-metadata
        configuration: changeset-recorded data may be used as-is, computed
        on demand, or ignored in favor of filelog-derived data.
        """
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # Sidedata repos always record this; never recompute.
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
583
583
    def filesremoved(self):
        """Return the list of files removed by this changeset.

        Mirrors filesadded(): the data source follows the repo's
        copy-metadata configuration (changeset-recorded, computed, or
        filelog-derived).
        """
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            # Sidedata repos always record this; never recompute.
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved
602
602
    @propertycache
    def _copies(self):
        """(p1copies, p2copies) dicts of copy metadata for this changeset.

        Where the data comes from depends on the repo's copy-metadata
        configuration; see the inline comments below.
        """
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                # fall back to the filelog-based computation in the base class
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
637
637
    def description(self):
        """Return the commit message recorded in the changeset."""
        return self._changeset.description
640
640
    def branch(self):
        """Return the branch name from the changeset's extra dict,
        converted to the local encoding."""
        return encoding.tolocal(self._changeset.extra.get(b"branch"))
643
643
    def closesbranch(self):
        """True if this changeset closes its branch (b'close' in extra)."""
        return b'close' in self._changeset.extra
646
646
    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra
650
650
    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)
654
654
    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
658
658
    def phase(self):
        """Return the phase of this revision, via the repo's phase cache."""
        return self._repo._phasecache.phase(self._repo, self._rev)
661
661
    def hidden(self):
        """True if this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')
664
664
    def isinmemory(self):
        """A committed changeset is never in-memory only."""
        return False
667
667
668 def children(self):
668 def children(self):
669 """return list of changectx contexts for each child changeset.
669 """return list of changectx contexts for each child changeset.
670
670
671 This returns only the immediate child changesets. Use descendants() to
671 This returns only the immediate child changesets. Use descendants() to
672 recursively walk children.
672 recursively walk children.
673 """
673 """
674 c = self._repo.changelog.children(self._node)
674 c = self._repo.changelog.children(self._node)
675 return [self._repo[x] for x in c]
675 return [self._repo[x] for x in c]
676
676
    def ancestors(self):
        """Yield a changectx for every ancestor revision of this one."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]
680
680
    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]
688
688
    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset

        When fileid is not given, it is looked up from this changeset's
        manifest via filenode(path).
        """
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
696
696
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs: they have no node, so use their first
        # parent's node instead
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            # no common ancestor at all
            anc = nullid
        elif len(cahs) == 1:
            # unambiguous: a single common-ancestor head
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            # first candidate from the config that is a common-ancestor
            # head wins
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched: fall back to the
                # revlog's own ancestor choice, optionally telling the user
                # which alternatives existed
                anc = self._repo.changelog.ancestor(self._node, n2)
                if warn:
                    self._repo.ui.status(
                        (
                            _(b"note: using %s as ancestor of %s and %s\n")
                            % (short(anc), short(self._node), short(n2))
                        )
                        + b''.join(
                            _(
                                b" alternatively, use --config "
                                b"merge.preferancestor=%s\n"
                            )
                            % short(n)
                            for n in sorted(cahs)
                            if n != anc
                        )
                    )
        return self._repo[anc]
741
741
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)
745
745
    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)
759
759
    def matches(self, match):
        """Alias for walk(): generate matching file names."""
        return self.walk(match)
762
762
763
763
764 class basefilectx(object):
764 class basefilectx(object):
765 """A filecontext object represents the common logic for its children:
765 """A filecontext object represents the common logic for its children:
766 filectx: read-only access to a filerevision that is already present
766 filectx: read-only access to a filerevision that is already present
767 in the repo,
767 in the repo,
768 workingfilectx: a filecontext that represents files from the working
768 workingfilectx: a filecontext that represents files from the working
769 directory,
769 directory,
770 memfilectx: a filecontext that represents files in-memory,
770 memfilectx: a filecontext that represents files in-memory,
771 """
771 """
772
772
    @propertycache
    def _filelog(self):
        # lazily-opened filelog for this file's path
        return self._repo.file(self._path)
776
776
    @propertycache
    def _changeid(self):
        """Changelog revision this filectx is associated with (lazy).

        Resolution order: an explicit changectx wins; otherwise a known
        descendant revision lets us correct for linkrev aliasing; as a
        last resort we trust the filelog's linkrev.
        """
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)
787
787
    @propertycache
    def _filenode(self):
        # resolve the file node either from an explicit fileid or from the
        # associated changeset's manifest
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)
794
794
    @propertycache
    def _filerev(self):
        # filelog revision number for this file node
        return self._filelog.rev(self._filenode)
798
798
    @propertycache
    def _repopath(self):
        # repository-relative path; same as _path for this base class
        return self._path
802
802
    def __nonzero__(self):
        """Truthiness: True when the file exists in the associated
        changeset (i.e. its filenode can be resolved)."""
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    # Python 3 spelling of the truthiness hook
    __bool__ = __nonzero__
812
812
    def __bytes__(self):
        """b"path@changeset", or b"path@???" when the changeset lookup
        fails."""
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
820
820
    def __repr__(self):
        # e.g. <filectx foo.py@abcdef123456>
        return "<%s %s>" % (type(self).__name__, str(self))
823
823
    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            # filenode could not be resolved; fall back to identity hash
            return id(self)
829
829
    def __eq__(self, other):
        """Equal when other is the same context type for the same path and
        file node; any missing attribute makes the comparison False."""
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False
839
839
    def __ne__(self, other):
        # defined in terms of __eq__ so both stay consistent
        return not (self == other)
842
842
    def filerev(self):
        """Return the filelog revision number of this file revision."""
        return self._filerev
845
845
    def filenode(self):
        """Return the file node id of this file revision."""
        return self._filenode
848
848
    @propertycache
    def _flags(self):
        # flags come from the associated changeset's manifest
        return self._changectx.flags(self._path)
852
852
    def flags(self):
        """Return the flags string for this file (see isexec/islink)."""
        return self._flags
855
855
    def filelog(self):
        """Return the filelog backing this file revision."""
        return self._filelog
858
858
    def rev(self):
        """Return the (possibly linkrev-adjusted) changelog revision."""
        return self._changeid
861
861
    def linkrev(self):
        """Return the raw linkrev stored in the filelog (unadjusted;
        compare introrev())."""
        return self._filelog.linkrev(self._filerev)
864
864
    def node(self):
        """Return the node of the associated changeset."""
        return self._changectx.node()
867
867
    def hex(self):
        """Return the hex node of the associated changeset."""
        return self._changectx.hex()
870
870
    def user(self):
        """Return the user of the associated changeset."""
        return self._changectx.user()
873
873
    def date(self):
        """Return the date of the associated changeset."""
        return self._changectx.date()
876
876
    def files(self):
        """Return the files touched by the associated changeset."""
        return self._changectx.files()
879
879
    def description(self):
        """Return the description of the associated changeset."""
        return self._changectx.description()
882
882
    def branch(self):
        """Return the branch of the associated changeset."""
        return self._changectx.branch()
885
885
    def extra(self):
        """Return the extra dict of the associated changeset."""
        return self._changectx.extra()
888
888
    def phase(self):
        """Return the phase of the associated changeset."""
        return self._changectx.phase()
891
891
    def phasestr(self):
        """Return the phase name of the associated changeset."""
        return self._changectx.phasestr()
894
894
    def obsolete(self):
        """Return whether the associated changeset is obsolete."""
        return self._changectx.obsolete()
897
897
    def instabilities(self):
        """Return the instabilities of the associated changeset."""
        return self._changectx.instabilities()
900
900
    def manifest(self):
        """Return the manifest of the associated changeset."""
        return self._changectx.manifest()
903
903
    def changectx(self):
        """Return the changectx this filectx belongs to."""
        return self._changectx
906
906
    def renamed(self):
        """Return the copy record (source path, node) or a false value."""
        return self._copied
909
909
    def copysource(self):
        """Return the path this file was copied from, or a false value."""
        return self._copied and self._copied[0]
912
912
    def repo(self):
        """Return the repository this filectx belongs to."""
        return self._repo
915
915
    def size(self):
        """Return the size of the file content in bytes."""
        return len(self.data())
918
918
    def path(self):
        """Return the repository-relative path of this file."""
        return self._path
921
921
    def isbinary(self):
        """True if the file content looks binary; unreadable data counts
        as not binary."""
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False
927
927
    def isexec(self):
        """True if the executable flag (b'x') is set."""
        return b'x' in self.flags()
930
930
    def islink(self):
        """True if the symlink flag (b'l') is set."""
        return b'l' in self.flags()
933
933
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False
940
940
    # When True on the *other* context, cmp() below delegates the
    # comparison to that context instead of the revlog-based path.
    _customcmp = False
942
942
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # let contexts with a custom comparison (see _customcmp) decide
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
971
971
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the linkrev is the revision we start from
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    # hit the floor before finding an introduction
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1024
1024
    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        # cheap check first: an unadjusted linkrev at/after the floor is enough
        if self.linkrev() >= changelogrev:
            return True
        # otherwise compute the real introduction, bounded by the floor
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev
1034
1034
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it take into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1045
1045
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no ancestry information available: trust the raw linkrev
            return self.linkrev()
1074
1074
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already pointing at the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)
1082
1082
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1098
1098
    def parents(self):
        """Return the parent filectxs of this file revision, folding in
        rename information when present."""
        _path = self._path
        fl = self._filelog
        parents = self._filelog.parents(self._filenode)
        pl = [(_path, node, fl) for node in parents if node != nullid]

        r = fl.renamed(self._filenode)
        if r:
            # - In the simple rename case, both parent are nullid, pl is empty.
            # - In case of merge, only one of the parent is null id and should
            # be replaced with the rename information. This parent is -always-
            # the first one.
            #
            # As null id have always been filtered out in the previous list
            # comprehension, inserting to 0 will always result in "replacing
            # first nullid parent with rename information".
            pl.insert(0, (r[0], r[1], self._repo.file(r[0])))

        return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1118
1118
    def p1(self):
        """Return the first parent filectx."""
        return self.parents()[0]
1121
1121
1122 def p2(self):
1122 def p2(self):
1123 p = self.parents()
1123 p = self.parents()
1124 if len(p) == 2:
1124 if len(p) == 2:
1125 return p[1]
1125 return p[1]
1126 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1126 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1127
1127
1128 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1128 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1129 """Returns a list of annotateline objects for each line in the file
1129 """Returns a list of annotateline objects for each line in the file
1130
1130
1131 - line.fctx is the filectx of the node where that line was last changed
1131 - line.fctx is the filectx of the node where that line was last changed
1132 - line.lineno is the line number at the first appearance in the managed
1132 - line.lineno is the line number at the first appearance in the managed
1133 file
1133 file
1134 - line.text is the data on that line (including newline character)
1134 - line.text is the data on that line (including newline character)
1135 """
1135 """
1136 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1136 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1137
1137
1138 def parents(f):
1138 def parents(f):
1139 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1139 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1140 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1140 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1141 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1141 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1142 # isn't an ancestor of the srcrev.
1142 # isn't an ancestor of the srcrev.
1143 f._changeid
1143 f._changeid
1144 pl = f.parents()
1144 pl = f.parents()
1145
1145
1146 # Don't return renamed parents if we aren't following.
1146 # Don't return renamed parents if we aren't following.
1147 if not follow:
1147 if not follow:
1148 pl = [p for p in pl if p.path() == f.path()]
1148 pl = [p for p in pl if p.path() == f.path()]
1149
1149
1150 # renamed filectx won't have a filelog yet, so set it
1150 # renamed filectx won't have a filelog yet, so set it
1151 # from the cache to save time
1151 # from the cache to save time
1152 for p in pl:
1152 for p in pl:
1153 if not '_filelog' in p.__dict__:
1153 if not '_filelog' in p.__dict__:
1154 p._filelog = getlog(p.path())
1154 p._filelog = getlog(p.path())
1155
1155
1156 return pl
1156 return pl
1157
1157
1158 # use linkrev to find the first changeset where self appeared
1158 # use linkrev to find the first changeset where self appeared
1159 base = self.introfilectx()
1159 base = self.introfilectx()
1160 if getattr(base, '_ancestrycontext', None) is None:
1160 if getattr(base, '_ancestrycontext', None) is None:
1161 cl = self._repo.changelog
1161 cl = self._repo.changelog
1162 if base.rev() is None:
1162 if base.rev() is None:
1163 # wctx is not inclusive, but works because _ancestrycontext
1163 # wctx is not inclusive, but works because _ancestrycontext
1164 # is used to test filelog revisions
1164 # is used to test filelog revisions
1165 ac = cl.ancestors(
1165 ac = cl.ancestors(
1166 [p.rev() for p in base.parents()], inclusive=True
1166 [p.rev() for p in base.parents()], inclusive=True
1167 )
1167 )
1168 else:
1168 else:
1169 ac = cl.ancestors([base.rev()], inclusive=True)
1169 ac = cl.ancestors([base.rev()], inclusive=True)
1170 base._ancestrycontext = ac
1170 base._ancestrycontext = ac
1171
1171
1172 return dagop.annotate(
1172 return dagop.annotate(
1173 base, parents, skiprevs=skiprevs, diffopts=diffopts
1173 base, parents, skiprevs=skiprevs, diffopts=diffopts
1174 )
1174 )
1175
1175
1176 def ancestors(self, followfirst=False):
1176 def ancestors(self, followfirst=False):
1177 visit = {}
1177 visit = {}
1178 c = self
1178 c = self
1179 if followfirst:
1179 if followfirst:
1180 cut = 1
1180 cut = 1
1181 else:
1181 else:
1182 cut = None
1182 cut = None
1183
1183
1184 while True:
1184 while True:
1185 for parent in c.parents()[:cut]:
1185 for parent in c.parents()[:cut]:
1186 visit[(parent.linkrev(), parent.filenode())] = parent
1186 visit[(parent.linkrev(), parent.filenode())] = parent
1187 if not visit:
1187 if not visit:
1188 break
1188 break
1189 c = visit.pop(max(visit))
1189 c = visit.pop(max(visit))
1190 yield c
1190 yield c
1191
1191
1192 def decodeddata(self):
1192 def decodeddata(self):
1193 """Returns `data()` after running repository decoding filters.
1193 """Returns `data()` after running repository decoding filters.
1194
1194
1195 This is often equivalent to how the data would be expressed on disk.
1195 This is often equivalent to how the data would be expressed on disk.
1196 """
1196 """
1197 return self._repo.wwritedata(self.path(), self.data())
1197 return self._repo.wwritedata(self.path(), self.data())
1198
1198
1199
1199
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw (undecoded) revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, aborting on censored nodes unless the
        censor.policy config is set to ignore them."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision as recorded in the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1323
1323
1324
1324
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # fall back to the full repository status when no explicit
        # `changes` was provided at construction time
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # Start from the configured new-commit phase (phases.new-commit)
        # rather than hard-coding phases.draft, then never go below the
        # phase of any parent.
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1477
1477
1478
1478
1479 class workingctx(committablectx):
1479 class workingctx(committablectx):
1480 """A workingctx object makes access to data related to
1480 """A workingctx object makes access to data related to
1481 the current working directory convenient.
1481 the current working directory convenient.
1482 date - any valid date string or (unixtime, offset), or None.
1482 date - any valid date string or (unixtime, offset), or None.
1483 user - username string, or None.
1483 user - username string, or None.
1484 extra - a dictionary of extra values, or None.
1484 extra - a dictionary of extra values, or None.
1485 changes - a list of file lists as returned by localrepo.status()
1485 changes - a list of file lists as returned by localrepo.status()
1486 or None to use the repository status.
1486 or None to use the repository status.
1487 """
1487 """
1488
1488
1489 def __init__(
1489 def __init__(
1490 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1490 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1491 ):
1491 ):
1492 branch = None
1492 branch = None
1493 if not extra or b'branch' not in extra:
1493 if not extra or b'branch' not in extra:
1494 try:
1494 try:
1495 branch = repo.dirstate.branch()
1495 branch = repo.dirstate.branch()
1496 except UnicodeDecodeError:
1496 except UnicodeDecodeError:
1497 raise error.Abort(_(b'branch name not in UTF-8!'))
1497 raise error.Abort(_(b'branch name not in UTF-8!'))
1498 super(workingctx, self).__init__(
1498 super(workingctx, self).__init__(
1499 repo, text, user, date, extra, changes, branch=branch
1499 repo, text, user, date, extra, changes, branch=branch
1500 )
1500 )
1501
1501
1502 def __iter__(self):
1502 def __iter__(self):
1503 d = self._repo.dirstate
1503 d = self._repo.dirstate
1504 for f in d:
1504 for f in d:
1505 if d[f] != b'r':
1505 if d[f] != b'r':
1506 yield f
1506 yield f
1507
1507
1508 def __contains__(self, key):
1508 def __contains__(self, key):
1509 return self._repo.dirstate[key] not in b"?r"
1509 return self._repo.dirstate[key] not in b"?r"
1510
1510
1511 def hex(self):
1511 def hex(self):
1512 return wdirhex
1512 return wdirhex
1513
1513
1514 @propertycache
1514 @propertycache
1515 def _parents(self):
1515 def _parents(self):
1516 p = self._repo.dirstate.parents()
1516 p = self._repo.dirstate.parents()
1517 if p[1] == nullid:
1517 if p[1] == nullid:
1518 p = p[:-1]
1518 p = p[:-1]
1519 # use unfiltered repo to delay/avoid loading obsmarkers
1519 # use unfiltered repo to delay/avoid loading obsmarkers
1520 unfi = self._repo.unfiltered()
1520 unfi = self._repo.unfiltered()
1521 return [
1521 return [
1522 changectx(
1522 changectx(
1523 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1523 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1524 )
1524 )
1525 for n in p
1525 for n in p
1526 ]
1526 ]
1527
1527
1528 def _fileinfo(self, path):
1528 def _fileinfo(self, path):
1529 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1529 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1530 self._manifest
1530 self._manifest
1531 return super(workingctx, self)._fileinfo(path)
1531 return super(workingctx, self)._fileinfo(path)
1532
1532
1533 def _buildflagfunc(self):
1533 def _buildflagfunc(self):
1534 # Create a fallback function for getting file flags when the
1534 # Create a fallback function for getting file flags when the
1535 # filesystem doesn't support them
1535 # filesystem doesn't support them
1536
1536
1537 copiesget = self._repo.dirstate.copies().get
1537 copiesget = self._repo.dirstate.copies().get
1538 parents = self.parents()
1538 parents = self.parents()
1539 if len(parents) < 2:
1539 if len(parents) < 2:
1540 # when we have one parent, it's easy: copy from parent
1540 # when we have one parent, it's easy: copy from parent
1541 man = parents[0].manifest()
1541 man = parents[0].manifest()
1542
1542
1543 def func(f):
1543 def func(f):
1544 f = copiesget(f, f)
1544 f = copiesget(f, f)
1545 return man.flags(f)
1545 return man.flags(f)
1546
1546
1547 else:
1547 else:
1548 # merges are tricky: we try to reconstruct the unstored
1548 # merges are tricky: we try to reconstruct the unstored
1549 # result from the merge (issue1802)
1549 # result from the merge (issue1802)
1550 p1, p2 = parents
1550 p1, p2 = parents
1551 pa = p1.ancestor(p2)
1551 pa = p1.ancestor(p2)
1552 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1552 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1553
1553
1554 def func(f):
1554 def func(f):
1555 f = copiesget(f, f) # may be wrong for merges with copies
1555 f = copiesget(f, f) # may be wrong for merges with copies
1556 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1556 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1557 if fl1 == fl2:
1557 if fl1 == fl2:
1558 return fl1
1558 return fl1
1559 if fl1 == fla:
1559 if fl1 == fla:
1560 return fl2
1560 return fl2
1561 if fl2 == fla:
1561 if fl2 == fla:
1562 return fl1
1562 return fl1
1563 return b'' # punt for conflicts
1563 return b'' # punt for conflicts
1564
1564
1565 return func
1565 return func
1566
1566
1567 @propertycache
1567 @propertycache
1568 def _flagfunc(self):
1568 def _flagfunc(self):
1569 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1569 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1570
1570
1571 def flags(self, path):
1571 def flags(self, path):
1572 if '_manifest' in self.__dict__:
1572 if '_manifest' in self.__dict__:
1573 try:
1573 try:
1574 return self._manifest.flags(path)
1574 return self._manifest.flags(path)
1575 except KeyError:
1575 except KeyError:
1576 return b''
1576 return b''
1577
1577
1578 try:
1578 try:
1579 return self._flagfunc(path)
1579 return self._flagfunc(path)
1580 except OSError:
1580 except OSError:
1581 return b''
1581 return b''
1582
1582
1583 def filectx(self, path, filelog=None):
1583 def filectx(self, path, filelog=None):
1584 """get a file context from the working directory"""
1584 """get a file context from the working directory"""
1585 return workingfilectx(
1585 return workingfilectx(
1586 self._repo, path, workingctx=self, filelog=filelog
1586 self._repo, path, workingctx=self, filelog=filelog
1587 )
1587 )
1588
1588
1589 def dirty(self, missing=False, merge=True, branch=True):
1589 def dirty(self, missing=False, merge=True, branch=True):
1590 """check whether a working directory is modified"""
1590 """check whether a working directory is modified"""
1591 # check subrepos first
1591 # check subrepos first
1592 for s in sorted(self.substate):
1592 for s in sorted(self.substate):
1593 if self.sub(s).dirty(missing=missing):
1593 if self.sub(s).dirty(missing=missing):
1594 return True
1594 return True
1595 # check current working dir
1595 # check current working dir
1596 return (
1596 return (
1597 (merge and self.p2())
1597 (merge and self.p2())
1598 or (branch and self.branch() != self.p1().branch())
1598 or (branch and self.branch() != self.p1().branch())
1599 or self.modified()
1599 or self.modified()
1600 or self.added()
1600 or self.added()
1601 or self.removed()
1601 or self.removed()
1602 or (missing and self.deleted())
1602 or (missing and self.deleted())
1603 )
1603 )
1604
1604
1605 def add(self, list, prefix=b""):
1605 def add(self, list, prefix=b""):
1606 with self._repo.wlock():
1606 with self._repo.wlock():
1607 ui, ds = self._repo.ui, self._repo.dirstate
1607 ui, ds = self._repo.ui, self._repo.dirstate
1608 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1608 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1609 rejected = []
1609 rejected = []
1610 lstat = self._repo.wvfs.lstat
1610 lstat = self._repo.wvfs.lstat
1611 for f in list:
1611 for f in list:
1612 # ds.pathto() returns an absolute file when this is invoked from
1612 # ds.pathto() returns an absolute file when this is invoked from
1613 # the keyword extension. That gets flagged as non-portable on
1613 # the keyword extension. That gets flagged as non-portable on
1614 # Windows, since it contains the drive letter and colon.
1614 # Windows, since it contains the drive letter and colon.
1615 scmutil.checkportable(ui, os.path.join(prefix, f))
1615 scmutil.checkportable(ui, os.path.join(prefix, f))
1616 try:
1616 try:
1617 st = lstat(f)
1617 st = lstat(f)
1618 except OSError:
1618 except OSError:
1619 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1619 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1620 rejected.append(f)
1620 rejected.append(f)
1621 continue
1621 continue
1622 limit = ui.configbytes(b'ui', b'large-file-limit')
1622 limit = ui.configbytes(b'ui', b'large-file-limit')
1623 if limit != 0 and st.st_size > limit:
1623 if limit != 0 and st.st_size > limit:
1624 ui.warn(
1624 ui.warn(
1625 _(
1625 _(
1626 b"%s: up to %d MB of RAM may be required "
1626 b"%s: up to %d MB of RAM may be required "
1627 b"to manage this file\n"
1627 b"to manage this file\n"
1628 b"(use 'hg revert %s' to cancel the "
1628 b"(use 'hg revert %s' to cancel the "
1629 b"pending addition)\n"
1629 b"pending addition)\n"
1630 )
1630 )
1631 % (f, 3 * st.st_size // 1000000, uipath(f))
1631 % (f, 3 * st.st_size // 1000000, uipath(f))
1632 )
1632 )
1633 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1633 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1634 ui.warn(
1634 ui.warn(
1635 _(
1635 _(
1636 b"%s not added: only files and symlinks "
1636 b"%s not added: only files and symlinks "
1637 b"supported currently\n"
1637 b"supported currently\n"
1638 )
1638 )
1639 % uipath(f)
1639 % uipath(f)
1640 )
1640 )
1641 rejected.append(f)
1641 rejected.append(f)
1642 elif ds[f] in b'amn':
1642 elif ds[f] in b'amn':
1643 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1643 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1644 elif ds[f] == b'r':
1644 elif ds[f] == b'r':
1645 ds.normallookup(f)
1645 ds.normallookup(f)
1646 else:
1646 else:
1647 ds.add(f)
1647 ds.add(f)
1648 return rejected
1648 return rejected
1649
1649
1650 def forget(self, files, prefix=b""):
1650 def forget(self, files, prefix=b""):
1651 with self._repo.wlock():
1651 with self._repo.wlock():
1652 ds = self._repo.dirstate
1652 ds = self._repo.dirstate
1653 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1653 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1654 rejected = []
1654 rejected = []
1655 for f in files:
1655 for f in files:
1656 if f not in ds:
1656 if f not in ds:
1657 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1657 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1658 rejected.append(f)
1658 rejected.append(f)
1659 elif ds[f] != b'a':
1659 elif ds[f] != b'a':
1660 ds.remove(f)
1660 ds.remove(f)
1661 else:
1661 else:
1662 ds.drop(f)
1662 ds.drop(f)
1663 return rejected
1663 return rejected
1664
1664
1665 def copy(self, source, dest):
1665 def copy(self, source, dest):
1666 try:
1666 try:
1667 st = self._repo.wvfs.lstat(dest)
1667 st = self._repo.wvfs.lstat(dest)
1668 except OSError as err:
1668 except OSError as err:
1669 if err.errno != errno.ENOENT:
1669 if err.errno != errno.ENOENT:
1670 raise
1670 raise
1671 self._repo.ui.warn(
1671 self._repo.ui.warn(
1672 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1672 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1673 )
1673 )
1674 return
1674 return
1675 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1675 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1676 self._repo.ui.warn(
1676 self._repo.ui.warn(
1677 _(b"copy failed: %s is not a file or a symbolic link\n")
1677 _(b"copy failed: %s is not a file or a symbolic link\n")
1678 % self._repo.dirstate.pathto(dest)
1678 % self._repo.dirstate.pathto(dest)
1679 )
1679 )
1680 else:
1680 else:
1681 with self._repo.wlock():
1681 with self._repo.wlock():
1682 ds = self._repo.dirstate
1682 ds = self._repo.dirstate
1683 if ds[dest] in b'?':
1683 if ds[dest] in b'?':
1684 ds.add(dest)
1684 ds.add(dest)
1685 elif ds[dest] in b'r':
1685 elif ds[dest] in b'r':
1686 ds.normallookup(dest)
1686 ds.normallookup(dest)
1687 ds.copy(source, dest)
1687 ds.copy(source, dest)
1688
1688
1689 def match(
1689 def match(
1690 self,
1690 self,
1691 pats=None,
1691 pats=None,
1692 include=None,
1692 include=None,
1693 exclude=None,
1693 exclude=None,
1694 default=b'glob',
1694 default=b'glob',
1695 listsubrepos=False,
1695 listsubrepos=False,
1696 badfn=None,
1696 badfn=None,
1697 ):
1697 ):
1698 r = self._repo
1698 r = self._repo
1699
1699
1700 # Only a case insensitive filesystem needs magic to translate user input
1700 # Only a case insensitive filesystem needs magic to translate user input
1701 # to actual case in the filesystem.
1701 # to actual case in the filesystem.
1702 icasefs = not util.fscasesensitive(r.root)
1702 icasefs = not util.fscasesensitive(r.root)
1703 return matchmod.match(
1703 return matchmod.match(
1704 r.root,
1704 r.root,
1705 r.getcwd(),
1705 r.getcwd(),
1706 pats,
1706 pats,
1707 include,
1707 include,
1708 exclude,
1708 exclude,
1709 default,
1709 default,
1710 auditor=r.auditor,
1710 auditor=r.auditor,
1711 ctx=self,
1711 ctx=self,
1712 listsubrepos=listsubrepos,
1712 listsubrepos=listsubrepos,
1713 badfn=badfn,
1713 badfn=badfn,
1714 icasefs=icasefs,
1714 icasefs=icasefs,
1715 )
1715 )
1716
1716
1717 def _filtersuspectsymlink(self, files):
1717 def _filtersuspectsymlink(self, files):
1718 if not files or self._repo.dirstate._checklink:
1718 if not files or self._repo.dirstate._checklink:
1719 return files
1719 return files
1720
1720
1721 # Symlink placeholders may get non-symlink-like contents
1721 # Symlink placeholders may get non-symlink-like contents
1722 # via user error or dereferencing by NFS or Samba servers,
1722 # via user error or dereferencing by NFS or Samba servers,
1723 # so we filter out any placeholders that don't look like a
1723 # so we filter out any placeholders that don't look like a
1724 # symlink
1724 # symlink
1725 sane = []
1725 sane = []
1726 for f in files:
1726 for f in files:
1727 if self.flags(f) == b'l':
1727 if self.flags(f) == b'l':
1728 d = self[f].data()
1728 d = self[f].data()
1729 if (
1729 if (
1730 d == b''
1730 d == b''
1731 or len(d) >= 1024
1731 or len(d) >= 1024
1732 or b'\n' in d
1732 or b'\n' in d
1733 or stringutil.binary(d)
1733 or stringutil.binary(d)
1734 ):
1734 ):
1735 self._repo.ui.debug(
1735 self._repo.ui.debug(
1736 b'ignoring suspect symlink placeholder "%s"\n' % f
1736 b'ignoring suspect symlink placeholder "%s"\n' % f
1737 )
1737 )
1738 continue
1738 continue
1739 sane.append(f)
1739 sane.append(f)
1740 return sane
1740 return sane
1741
1741
    def _checklookup(self, files):
        """Re-examine files whose dirstate status was ambiguous ("lookup").

        Returns a 3-tuple of lists ``(modified, deleted, fixup)``:
        ``modified`` really differ from the first parent, ``deleted``
        became unreadable in the meantime, and ``fixup`` are actually
        clean (the caller may refresh their dirstate entries).
        """
        # check for any possibly clean files
        if not files:
            return [], [], []

        modified = []
        deleted = []
        fixup = []
        pctx = self._parents[0]
        # do a full compare of any files that might have changed
        for f in sorted(files):
            try:
                # This will return True for a file that got replaced by a
                # directory in the interim, but fixing that is pretty hard.
                if (
                    f not in pctx
                    or self.flags(f) != pctx.flags(f)
                    or pctx[f].cmp(self[f])
                ):
                    modified.append(f)
                else:
                    fixup.append(f)
            except (IOError, OSError):
                # A file become inaccessible in between? Mark it as deleted,
                # matching dirstate behavior (issue5584).
                # The dirstate has more complex behavior around whether a
                # missing file matches a directory, etc, but we don't need to
                # bother with that: if f has made it to this point, we're sure
                # it's in the dirstate.
                deleted.append(f)

        return modified, deleted, fixup
1774
1774
    def _poststatusfixup(self, status, fixup):
        """update dirstate for files that are actually clean

        ``fixup`` is the list of paths found clean by ``_checklookup``;
        their dirstate entries are refreshed under the wlock.  Registered
        post-dirstate-status hooks are then invoked.  The dirstate is only
        written back if its on-disk identity is unchanged since we read it
        (otherwise another process raced us; see issue5584).
        """
        poststatus = self._repo.postdsstatus()
        if fixup or poststatus:
            try:
                oldid = self._repo.dirstate.identity()

                # updating the dirstate is optional
                # so we don't wait on the lock
                # wlock can invalidate the dirstate, so cache normal _after_
                # taking the lock
                with self._repo.wlock(False):
                    if self._repo.dirstate.identity() == oldid:
                        if fixup:
                            normal = self._repo.dirstate.normal
                            for f in fixup:
                                normal(f)
                            # write changes out explicitly, because nesting
                            # wlock at runtime may prevent 'wlock.release()'
                            # after this block from doing so for subsequent
                            # changing files
                            tr = self._repo.currenttransaction()
                            self._repo.dirstate.write(tr)

                        if poststatus:
                            for ps in poststatus:
                                ps(self, status)
                    else:
                        # in this case, writing changes out breaks
                        # consistency, because .hg/dirstate was
                        # already changed simultaneously after last
                        # caching (see also issue5584 for detail)
                        self._repo.ui.debug(
                            b'skip updating dirstate: identity mismatch\n'
                        )
            except error.LockError:
                # best-effort update: somebody else holds the wlock
                pass
            finally:
                # Even if the wlock couldn't be grabbed, clear out the list.
                self._repo.clearpostdsstatus()
1815
1815
    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        '''Gets the status from the dirstate -- internal use only.

        Files the dirstate could not classify by stat data alone ("cmp")
        are re-checked via ``_checklookup`` and folded into the result.
        When ``match`` matches everything, the result is also cached on
        ``self._status`` for later reuse.
        '''
        subrepos = []
        if b'.hgsub' in self:
            subrepos = sorted(self.substate)
        cmp, s = self._repo.dirstate.status(
            match, subrepos, ignored=ignored, clean=clean, unknown=unknown
        )

        # check for any possibly clean files
        fixup = []
        if cmp:
            modified2, deleted2, fixup = self._checklookup(cmp)
            s.modified.extend(modified2)
            s.deleted.extend(deleted2)

        if fixup and clean:
            s.clean.extend(fixup)

        self._poststatusfixup(s, fixup)

        if match.always():
            # cache for performance
            if s.unknown or s.ignored or s.clean:
                # "_status" is cached with list*=False in the normal route
                self._status = scmutil.status(
                    s.modified, s.added, s.removed, s.deleted, [], [], []
                )
            else:
                self._status = s

        return s
1848
1848
1849 @propertycache
1849 @propertycache
1850 def _copies(self):
1850 def _copies(self):
1851 p1copies = {}
1851 p1copies = {}
1852 p2copies = {}
1852 p2copies = {}
1853 parents = self._repo.dirstate.parents()
1853 parents = self._repo.dirstate.parents()
1854 p1manifest = self._repo[parents[0]].manifest()
1854 p1manifest = self._repo[parents[0]].manifest()
1855 p2manifest = self._repo[parents[1]].manifest()
1855 p2manifest = self._repo[parents[1]].manifest()
1856 changedset = set(self.added()) | set(self.modified())
1856 changedset = set(self.added()) | set(self.modified())
1857 narrowmatch = self._repo.narrowmatch()
1857 narrowmatch = self._repo.narrowmatch()
1858 for dst, src in self._repo.dirstate.copies().items():
1858 for dst, src in self._repo.dirstate.copies().items():
1859 if dst not in changedset or not narrowmatch(dst):
1859 if dst not in changedset or not narrowmatch(dst):
1860 continue
1860 continue
1861 if src in p1manifest:
1861 if src in p1manifest:
1862 p1copies[dst] = src
1862 p1copies[dst] = src
1863 elif src in p2manifest:
1863 elif src in p2manifest:
1864 p2copies[dst] = src
1864 p2copies[dst] = src
1865 return p1copies, p2copies
1865 return p1copies, p2copies
1866
1866
1867 @propertycache
1867 @propertycache
1868 def _manifest(self):
1868 def _manifest(self):
1869 """generate a manifest corresponding to the values in self._status
1869 """generate a manifest corresponding to the values in self._status
1870
1870
1871 This reuse the file nodeid from parent, but we use special node
1871 This reuse the file nodeid from parent, but we use special node
1872 identifiers for added and modified files. This is used by manifests
1872 identifiers for added and modified files. This is used by manifests
1873 merge to see that files are different and by update logic to avoid
1873 merge to see that files are different and by update logic to avoid
1874 deleting newly added files.
1874 deleting newly added files.
1875 """
1875 """
1876 return self._buildstatusmanifest(self._status)
1876 return self._buildstatusmanifest(self._status)
1877
1877
1878 def _buildstatusmanifest(self, status):
1878 def _buildstatusmanifest(self, status):
1879 """Builds a manifest that includes the given status results."""
1879 """Builds a manifest that includes the given status results."""
1880 parents = self.parents()
1880 parents = self.parents()
1881
1881
1882 man = parents[0].manifest().copy()
1882 man = parents[0].manifest().copy()
1883
1883
1884 ff = self._flagfunc
1884 ff = self._flagfunc
1885 for i, l in (
1885 for i, l in (
1886 (addednodeid, status.added),
1886 (addednodeid, status.added),
1887 (modifiednodeid, status.modified),
1887 (modifiednodeid, status.modified),
1888 ):
1888 ):
1889 for f in l:
1889 for f in l:
1890 man[f] = i
1890 man[f] = i
1891 try:
1891 try:
1892 man.setflag(f, ff(f))
1892 man.setflag(f, ff(f))
1893 except OSError:
1893 except OSError:
1894 pass
1894 pass
1895
1895
1896 for f in status.deleted + status.removed:
1896 for f in status.deleted + status.removed:
1897 if f in man:
1897 if f in man:
1898 del man[f]
1898 del man[f]
1899
1899
1900 return man
1900 return man
1901
1901
    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        This includes logic for maintaining the fast path of status when
        comparing the working directory against its parent, which is to skip
        building a new manifest if self (working directory) is not comparing
        against its parent (repo['.']).
        """
        s = self._dirstatestatus(match, listignored, listclean, listunknown)
        # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
        # might have accidentally ended up with the entire contents of the file
        # they are supposed to be linking to.
        s.modified[:] = self._filtersuspectsymlink(s.modified)
        if other != self._repo[b'.']:
            # comparing against something other than the parent: fall back
            # to the generic manifest-based status computation
            s = super(workingctx, self)._buildstatus(
                other, s, match, listignored, listclean, listunknown
            )
        return s
1922
1922
1923 def _matchstatus(self, other, match):
1923 def _matchstatus(self, other, match):
1924 """override the match method with a filter for directory patterns
1924 """override the match method with a filter for directory patterns
1925
1925
1926 We use inheritance to customize the match.bad method only in cases of
1926 We use inheritance to customize the match.bad method only in cases of
1927 workingctx since it belongs only to the working directory when
1927 workingctx since it belongs only to the working directory when
1928 comparing against the parent changeset.
1928 comparing against the parent changeset.
1929
1929
1930 If we aren't comparing against the working directory's parent, then we
1930 If we aren't comparing against the working directory's parent, then we
1931 just use the default match object sent to us.
1931 just use the default match object sent to us.
1932 """
1932 """
1933 if other != self._repo[b'.']:
1933 if other != self._repo[b'.']:
1934
1934
1935 def bad(f, msg):
1935 def bad(f, msg):
1936 # 'f' may be a directory pattern from 'match.files()',
1936 # 'f' may be a directory pattern from 'match.files()',
1937 # so 'f not in ctx1' is not enough
1937 # so 'f not in ctx1' is not enough
1938 if f not in other and not other.hasdir(f):
1938 if f not in other and not other.hasdir(f):
1939 self._repo.ui.warn(
1939 self._repo.ui.warn(
1940 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1940 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1941 )
1941 )
1942
1942
1943 match.bad = bad
1943 match.bad = bad
1944 return match
1944 return match
1945
1945
1946 def walk(self, match):
1946 def walk(self, match):
1947 '''Generates matching file names.'''
1947 '''Generates matching file names.'''
1948 return sorted(
1948 return sorted(
1949 self._repo.dirstate.walk(
1949 self._repo.dirstate.walk(
1950 self._repo.narrowmatch(match),
1950 self._repo.narrowmatch(match),
1951 subrepos=sorted(self.substate),
1951 subrepos=sorted(self.substate),
1952 unknown=True,
1952 unknown=True,
1953 ignored=False,
1953 ignored=False,
1954 )
1954 )
1955 )
1955 )
1956
1956
1957 def matches(self, match):
1957 def matches(self, match):
1958 match = self._repo.narrowmatch(match)
1958 match = self._repo.narrowmatch(match)
1959 ds = self._repo.dirstate
1959 ds = self._repo.dirstate
1960 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1960 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1961
1961
    def markcommitted(self, node):
        """Sync the dirstate with a freshly created commit `node`.

        Modified/added files become "normal", removed files are dropped,
        and the dirstate parent is moved to `node`.
        """
        with self._repo.dirstate.parentchange():
            for f in self.modified() + self.added():
                self._repo.dirstate.normal(f)
            for f in self.removed():
                self._repo.dirstate.drop(f)
            self._repo.dirstate.setparents(node)

        # write changes out explicitly, because nesting wlock at
        # runtime may prevent 'wlock.release()' in 'repo.commit()'
        # from immediately doing so for subsequent changing files
        self._repo.dirstate.write(self._repo.currenttransaction())

        sparse.aftercommit(self._repo, node)
1976
1976
1977
1977
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        # not committed yet: no changeset id and no filelog position
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        # a committable file context always "exists"
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` if this file was
        copied/renamed, else None.  The filenode is looked up in the first
        parent's manifest (nullid when absent there)."""
        path = self.copysource()
        if not path:
            return None
        return path, self._changectx._parents[0]._manifest.get(path, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''

        def filenode(ctx, path):
            # filenode of `path` in ctx's manifest, nullid if absent
            return ctx._manifest.get(path, nullid)

        path = self._path
        fl = self._filelog
        pcl = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copied file: first parent is the copy source
            pl = [renamed + (None,)]
        else:
            pl = [(path, filenode(pcl[0], path), fl)]

        for pc in pcl[1:]:
            pl.append((path, filenode(pc, path), fl))

        # drop entries whose filenode is nullid (file absent in that parent)
        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in pl
            if n != nullid
        ]

    def children(self):
        # an uncommitted file context has no committed children
        return []
2035
2035
2036
2036
class workingfilectx(committablefilectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""

    def __init__(self, repo, path, filelog=None, workingctx=None):
        super(workingfilectx, self).__init__(repo, path, filelog, workingctx)

    @propertycache
    def _changectx(self):
        # lazily bind to a fresh working directory context
        return workingctx(self._repo)

    def data(self):
        """Return the file's current content from the working directory."""
        return self._repo.wread(self._path)

    def copysource(self):
        """Return the copy source recorded in the dirstate, if any."""
        return self._repo.dirstate.copied(self._path)

    def size(self):
        # lstat (not stat): a symlink's own size, not its target's
        return self._repo.wvfs.lstat(self._path).st_size

    def lstat(self):
        return self._repo.wvfs.lstat(self._path)

    def date(self):
        """Return ``(mtime, tz)`` for the on-disk file; fall back to the
        changectx's date when the file is missing (ENOENT)."""
        t, tz = self._changectx.date()
        try:
            return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            return (t, tz)

    def exists(self):
        return self._repo.wvfs.exists(self._path)

    def lexists(self):
        return self._repo.wvfs.lexists(self._path)

    def audit(self):
        # delegate path sanity checking to the working vfs auditor
        return self._repo.wvfs.audit(self._path)

    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # fctx should be a filectx (not a workingfilectx)
        # invert comparison to reuse the same code path
        return fctx.cmp(self)

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
        self._repo.wvfs.unlinkpath(
            self._path, ignoremissing=ignoremissing, rmdir=rmdir
        )

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """wraps repo.wwrite"""
        return self._repo.wwrite(
            self._path, data, flags, backgroundclose=backgroundclose, **kwargs
        )

    def markcopied(self, src):
        """marks this file a copy of `src`"""
        self._repo.dirstate.copy(src, self._path)

    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)

    def setflags(self, l, x):
        # l: symlink flag, x: executable flag
        self._repo.wvfs.setflags(self._path, l, x)
2129
2129
2130
2130
2131 class overlayworkingctx(committablectx):
2131 class overlayworkingctx(committablectx):
2132 """Wraps another mutable context with a write-back cache that can be
2132 """Wraps another mutable context with a write-back cache that can be
2133 converted into a commit context.
2133 converted into a commit context.
2134
2134
2135 self._cache[path] maps to a dict with keys: {
2135 self._cache[path] maps to a dict with keys: {
2136 'exists': bool?
2136 'exists': bool?
2137 'date': date?
2137 'date': date?
2138 'data': str?
2138 'data': str?
2139 'flags': str?
2139 'flags': str?
2140 'copied': str? (path or None)
2140 'copied': str? (path or None)
2141 }
2141 }
2142 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2142 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2143 is `False`, the file was deleted.
2143 is `False`, the file was deleted.
2144 """
2144 """
2145
2145
    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # reset the write-back cache; a base context still has to be
        # installed via setbase() before file access works
        self.clean()
2149
2149
    def setbase(self, wrappedctx):
        """Install (or replace) the context this overlay applies on top of."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')
2157
2157
2158 def data(self, path):
2158 def data(self, path):
2159 if self.isdirty(path):
2159 if self.isdirty(path):
2160 if self._cache[path][b'exists']:
2160 if self._cache[path][b'exists']:
2161 if self._cache[path][b'data'] is not None:
2161 if self._cache[path][b'data'] is not None:
2162 return self._cache[path][b'data']
2162 return self._cache[path][b'data']
2163 else:
2163 else:
2164 # Must fallback here, too, because we only set flags.
2164 # Must fallback here, too, because we only set flags.
2165 return self._wrappedctx[path].data()
2165 return self._wrappedctx[path].data()
2166 else:
2166 else:
2167 raise error.ProgrammingError(
2167 raise error.ProgrammingError(
2168 b"No such file or directory: %s" % path
2168 b"No such file or directory: %s" % path
2169 )
2169 )
2170 else:
2170 else:
2171 return self._wrappedctx[path].data()
2171 return self._wrappedctx[path].data()
2172
2172
2173 @propertycache
2173 @propertycache
2174 def _manifest(self):
2174 def _manifest(self):
2175 parents = self.parents()
2175 parents = self.parents()
2176 man = parents[0].manifest().copy()
2176 man = parents[0].manifest().copy()
2177
2177
2178 flag = self._flagfunc
2178 flag = self._flagfunc
2179 for path in self.added():
2179 for path in self.added():
2180 man[path] = addednodeid
2180 man[path] = addednodeid
2181 man.setflag(path, flag(path))
2181 man.setflag(path, flag(path))
2182 for path in self.modified():
2182 for path in self.modified():
2183 man[path] = modifiednodeid
2183 man[path] = modifiednodeid
2184 man.setflag(path, flag(path))
2184 man.setflag(path, flag(path))
2185 for path in self.removed():
2185 for path in self.removed():
2186 del man[path]
2186 del man[path]
2187 return man
2187 return man
2188
2188
2189 @propertycache
2189 @propertycache
2190 def _flagfunc(self):
2190 def _flagfunc(self):
2191 def f(path):
2191 def f(path):
2192 return self._cache[path][b'flags']
2192 return self._cache[path][b'flags']
2193
2193
2194 return f
2194 return f
2195
2195
2196 def files(self):
2196 def files(self):
2197 return sorted(self.added() + self.modified() + self.removed())
2197 return sorted(self.added() + self.modified() + self.removed())
2198
2198
2199 def modified(self):
2199 def modified(self):
2200 return [
2200 return [
2201 f
2201 f
2202 for f in self._cache.keys()
2202 for f in self._cache.keys()
2203 if self._cache[f][b'exists'] and self._existsinparent(f)
2203 if self._cache[f][b'exists'] and self._existsinparent(f)
2204 ]
2204 ]
2205
2205
2206 def added(self):
2206 def added(self):
2207 return [
2207 return [
2208 f
2208 f
2209 for f in self._cache.keys()
2209 for f in self._cache.keys()
2210 if self._cache[f][b'exists'] and not self._existsinparent(f)
2210 if self._cache[f][b'exists'] and not self._existsinparent(f)
2211 ]
2211 ]
2212
2212
2213 def removed(self):
2213 def removed(self):
2214 return [
2214 return [
2215 f
2215 f
2216 for f in self._cache.keys()
2216 for f in self._cache.keys()
2217 if not self._cache[f][b'exists'] and self._existsinparent(f)
2217 if not self._cache[f][b'exists'] and self._existsinparent(f)
2218 ]
2218 ]
2219
2219
def p1copies(self):
    """Return p1 copy metadata, overlaid with this context's own copies.

    Files tracked by our cache override whatever the wrapped context
    recorded for them; paths outside the narrowspec are left untouched.
    """
    result = self._repo._wrappedctx.p1copies().copy()
    narrowmatch = self._repo.narrowmatch()
    for path, entry in self._cache.items():
        if not narrowmatch(path):
            continue
        # Drop the wrapped context's record for this file, if any.
        result.pop(path, None)
        origin = entry[b'copied']
        if origin:
            result[path] = origin
    return result
2231
2231
def p2copies(self):
    """Return p2 copy metadata, overlaid with this context's own copies.

    Mirrors ``p1copies``: cache entries win over the wrapped context,
    and paths outside the narrowspec are ignored.
    """
    result = self._repo._wrappedctx.p2copies().copy()
    narrowmatch = self._repo.narrowmatch()
    for path, entry in self._cache.items():
        if not narrowmatch(path):
            continue
        # Drop the wrapped context's record for this file, if any.
        result.pop(path, None)
        origin = entry[b'copied']
        if origin:
            result[path] = origin
    return result
2243
2243
def isinmemory(self):
    """This context never touches the on-disk working directory."""
    return True
2246
2246
def filedate(self, path):
    """Return the date recorded for ``path``.

    Dirty files report the cached date; clean files defer to the
    wrapped context.
    """
    if not self.isdirty(path):
        return self._wrappedctx[path].date()
    return self._cache[path][b'date']
2252
2252
def markcopied(self, path, origin):
    """Record that ``path`` was copied from ``origin``.

    The file's current date and flags are preserved; only the copy
    source changes.
    """
    date = self.filedate(path)
    flags = self.flags(path)
    self._markdirty(path, exists=True, date=date, flags=flags, copied=origin)
2261
2261
def copydata(self, path):
    """Return the copy source recorded for ``path``, or None when clean."""
    if not self.isdirty(path):
        return None
    return self._cache[path][b'copied']
2267
2267
def flags(self, path):
    """Return the flags (b'l'/b'x'/b'') recorded for ``path``.

    Dirty files report the cached flags; asking for the flags of a
    dirty-but-deleted file is a programming error.  Clean files defer
    to the wrapped context.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return self._cache[path][b'flags']
        else:
            # BUG FIX: the original formatted ``self._path``, an attribute
            # this class does not have (it belongs to filectx objects), so
            # this branch raised AttributeError instead of the intended
            # ProgrammingError.  Use the ``path`` argument.
            raise error.ProgrammingError(
                b"No such file or directory: %s" % path
            )
    else:
        return self._wrappedctx[path].flags()
2278
2278
2279 def __contains__(self, key):
2279 def __contains__(self, key):
2280 if key in self._cache:
2280 if key in self._cache:
2281 return self._cache[key][b'exists']
2281 return self._cache[key][b'exists']
2282 return key in self.p1()
2282 return key in self.p1()
2283
2283
2284 def _existsinparent(self, path):
2284 def _existsinparent(self, path):
2285 try:
2285 try:
2286 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2286 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2287 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2287 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2288 # with an ``exists()`` function.
2288 # with an ``exists()`` function.
2289 self._wrappedctx[path]
2289 self._wrappedctx[path]
2290 return True
2290 return True
2291 except error.ManifestLookupError:
2291 except error.ManifestLookupError:
2292 return False
2292 return False
2293
2293
def _auditconflicts(self, path):
    """Replicates conflict checks done by wvfs.write().

    Since we never write to the filesystem and never call `applyupdates` in
    IMM, we'll never check that a path is actually writable -- e.g., because
    it adds `a/foo`, but `a` is actually a file in the other commit.

    Raises ``error.Abort`` on a path/directory conflict; returns None
    when the write is safe.
    """

    def fail(path, component):
        # p1() is the base and we're receiving "writes" for p2()'s
        # files.
        if b'l' in self.p1()[component].flags():
            raise error.Abort(
                b"error: %s conflicts with symlink %s "
                b"in %d." % (path, component, self.p1().rev())
            )
        else:
            raise error.Abort(
                b"error: '%s' conflicts with file '%s' in "
                b"%d." % (path, component, self.p1().rev())
            )

    # Test that each new directory to be created to write this path from p2
    # is not a file in p1.
    # Each iteration joins a strictly shorter prefix of ``path`` (the first
    # iteration yields the empty prefix; the file itself is never included).
    components = path.split(b'/')
    for i in pycompat.xrange(len(components)):
        component = b"/".join(components[0:i])
        if component in self:
            fail(path, component)

    # Test the other direction -- that this path from p2 isn't a directory
    # in p1 (test that p1 doesn't have any paths matching `path/*`).
    match = self.match([path], default=b'path')
    matches = self.p1().manifest().matches(match)
    mfiles = matches.keys()
    if len(mfiles) > 0:
        # A single match that is the path itself is a plain file, not a
        # directory collision.
        if len(mfiles) == 1 and mfiles[0] == path:
            return
        # omit the files which are deleted in current IMM wctx
        mfiles = [m for m in mfiles if m in self]
        if not mfiles:
            return
        raise error.Abort(
            b"error: file '%s' cannot be written because "
            b" '%s/' is a directory in %s (containing %d "
            b"entries: %s)"
            % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
        )
2342
2342
def write(self, path, data, flags=b'', **kwargs):
    """Stage ``data`` for ``path`` in the in-memory cache.

    Aborts on path/directory conflicts (see ``_auditconflicts``);
    ``data`` must not be None.
    """
    if data is None:
        raise error.ProgrammingError(b"data must be non-None")
    self._auditconflicts(path)
    now = dateutil.makedate()
    self._markdirty(path, exists=True, data=data, date=now, flags=flags)
2350
2350
def setflags(self, path, l, x):
    """Record new link/exec flags for ``path`` (``l`` takes precedence)."""
    if l:
        flag = b'l'
    elif x:
        flag = b'x'
    else:
        flag = b''
    self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2358
2358
def remove(self, path):
    """Mark ``path`` as deleted in the overlay."""
    self._markdirty(path, exists=False)
2361
2361
def exists(self, path):
    """exists behaves like `lexists`, but needs to follow symlinks and
    return False if they are broken.
    """
    if not self.isdirty(path):
        return self._existsinparent(path)
    entry = self._cache[path]
    if entry[b'exists'] and b'l' in entry[b'flags']:
        # A cached symlink: "follow" it by testing existence of the
        # link target (the cached data) instead.
        return self.exists(entry[b'data'].strip())
    return entry[b'exists']
2378
2378
def lexists(self, path):
    """lexists returns True if the path exists (links not followed)."""
    if not self.isdirty(path):
        return self._existsinparent(path)
    return self._cache[path][b'exists']
2385
2385
def size(self, path):
    """Return the size in bytes of ``path``.

    Dirty files report the length of the cached data; asking for the
    size of a dirty-but-deleted file is a programming error.  Clean
    files defer to the wrapped context.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return len(self._cache[path][b'data'])
        else:
            # BUG FIX: the original formatted ``self._path``, an attribute
            # this class does not have (it belongs to filectx objects), so
            # this branch raised AttributeError instead of the intended
            # ProgrammingError.  Use the ``path`` argument.
            raise error.ProgrammingError(
                b"No such file or directory: %s" % path
            )
    return self._wrappedctx[path].size()
2395
2395
def tomemctx(
    self,
    text,
    branch=None,
    extra=None,
    date=None,
    parents=None,
    user=None,
    editor=None,
):
    """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
    committed.

    ``text`` is the commit message.
    ``parents`` (optional) are rev numbers.

    ``branch``, ``extra``, ``date``, ``user`` and ``editor`` are passed
    straight through to the ``memctx`` constructor.
    """
    # Default parents to the wrapped contexts' if not passed.
    if parents is None:
        parents = self._wrappedctx.parents()
        if len(parents) == 1:
            parents = (parents[0], None)

    # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
    if parents[1] is None:
        parents = (self._repo[parents[0]], None)
    else:
        parents = (self._repo[parents[0]], self._repo[parents[1]])

    files = self.files()

    # Build each file's memfilectx straight from our cache.
    def getfile(repo, memctx, path):
        if self._cache[path][b'exists']:
            return memfilectx(
                repo,
                memctx,
                path,
                self._cache[path][b'data'],
                b'l' in self._cache[path][b'flags'],
                b'x' in self._cache[path][b'flags'],
                self._cache[path][b'copied'],
            )
        else:
            # Returning None, but including the path in `files`, is
            # necessary for memctx to register a deletion.
            return None

    return memctx(
        self._repo,
        parents,
        text,
        files,
        getfile,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
2454
2454
def isdirty(self, path):
    """True when ``path`` has a pending in-memory change."""
    return self._cache.get(path) is not None
2457
2457
def isempty(self):
    """True when no dirty files remain after dropping clean entries."""
    # We need to discard any keys that are actually clean before the
    # empty commit check (e.g. entries left by ``--tool :local``).
    self._compact()
    return not self._cache
2463
2463
def clean(self):
    """Forget every pending in-memory change."""
    self._cache = {}
2466
2466
def _compact(self):
    """Removes keys from the cache that are actually clean, by comparing
    them with the underlying context.

    This can occur during the merge process, e.g. by passing --tool :local
    to resolve a conflict.

    Returns the list of dropped (clean) paths.
    """
    keys = []
    # This won't be perfect, but can help performance significantly when
    # using things like remotefilelog.
    scmutil.prefetchfiles(
        self.repo(),
        [self.p1().rev()],
        scmutil.matchfiles(self.repo(), self._cache.keys()),
    )

    # Collect clean paths first, then delete: we must not mutate
    # ``self._cache`` while iterating over it.
    for path in self._cache.keys():
        cache = self._cache[path]
        try:
            underlying = self._wrappedctx[path]
            # Only identical data AND flags make an entry redundant.
            if (
                underlying.data() == cache[b'data']
                and underlying.flags() == cache[b'flags']
            ):
                keys.append(path)
        except error.ManifestLookupError:
            # Path not in the underlying manifest (created).
            continue

    for path in keys:
        del self._cache[path]
    return keys
2499
2499
2500 def _markdirty(
2500 def _markdirty(
2501 self, path, exists, data=None, date=None, flags=b'', copied=None
2501 self, path, exists, data=None, date=None, flags=b'', copied=None
2502 ):
2502 ):
2503 # data not provided, let's see if we already have some; if not, let's
2503 # data not provided, let's see if we already have some; if not, let's
2504 # grab it from our underlying context, so that we always have data if
2504 # grab it from our underlying context, so that we always have data if
2505 # the file is marked as existing.
2505 # the file is marked as existing.
2506 if exists and data is None:
2506 if exists and data is None:
2507 oldentry = self._cache.get(path) or {}
2507 oldentry = self._cache.get(path) or {}
2508 data = oldentry.get(b'data')
2508 data = oldentry.get(b'data')
2509 if data is None:
2509 if data is None:
2510 data = self._wrappedctx[path].data()
2510 data = self._wrappedctx[path].data()
2511
2511
2512 self._cache[path] = {
2512 self._cache[path] = {
2513 b'exists': exists,
2513 b'exists': exists,
2514 b'data': data,
2514 b'data': data,
2515 b'date': date,
2515 b'date': date,
2516 b'flags': flags,
2516 b'flags': flags,
2517 b'copied': copied,
2517 b'copied': copied,
2518 }
2518 }
2519
2519
def filectx(self, path, filelog=None):
    """Return an overlay file context for ``path`` backed by this ctx."""
    return overlayworkingfilectx(
        self._repo, path, filelog=filelog, parent=self
    )
2524
2524
2525
2525
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Every operation is delegated to ``self._parent`` (the owning
    overlayworkingctx), keyed by ``self._path``.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        # ``parent`` is the owning overlayworkingctx; all reads and
        # writes below are delegated to it.
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when the contents differ from ``fctx`` (a bool, not a
        # three-way comparison).
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        # NOTE(review): delegates to the parent's ``exists`` (which
        # follows symlinks), not ``lexists`` -- confirm intended.
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing on disk to audit for in-memory files.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility
        # but unused: nothing is written to disk.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        # ``ignoremissing`` is accepted for interface compatibility but
        # unused by the in-memory parent.
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No working directory, hence no unknown files to clear.
        pass
2580
2580
2581
2581
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        # ``changes`` restricts this context to the files being committed;
        # it is passed through to workingctx as the ``changes`` argument.
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything in the manifest that is not part of this commit
            # is reported as clean.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        # The remaining status buckets are deliberately empty: this
        # context only describes the files being committed.
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2625
2625
2626
2626
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a miss populates the cache; ``func`` runs at most once
        # per path (its result -- even None -- is cached).
        try:
            return cache[path]
        except KeyError:
            cache[path] = func(repo, memctx, path)
            return cache[path]

    return getfilectx
2642
2642
2643
2643
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2665
2665
2666
2666
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # A removed file: memctx records deletions as None.
            return None
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=mode[0],
            isexec=mode[1],
            copysource=copysource,
        )

    return getfilectx
2689
2689
2690
2690
2691 class memctx(committablectx):
2691 class memctx(committablectx):
2692 """Use memctx to perform in-memory commits via localrepo.commitctx().
2692 """Use memctx to perform in-memory commits via localrepo.commitctx().
2693
2693
2694 Revision information is supplied at initialization time while
2694 Revision information is supplied at initialization time while
2695 related files data and is made available through a callback
2695 related files data and is made available through a callback
2696 mechanism. 'repo' is the current localrepo, 'parents' is a
2696 mechanism. 'repo' is the current localrepo, 'parents' is a
2697 sequence of two parent revisions identifiers (pass None for every
2697 sequence of two parent revisions identifiers (pass None for every
2698 missing parent), 'text' is the commit message and 'files' lists
2698 missing parent), 'text' is the commit message and 'files' lists
2699 names of files touched by the revision (normalized and relative to
2699 names of files touched by the revision (normalized and relative to
2700 repository root).
2700 repository root).
2701
2701
2702 filectxfn(repo, memctx, path) is a callable receiving the
2702 filectxfn(repo, memctx, path) is a callable receiving the
2703 repository, the current memctx object and the normalized path of
2703 repository, the current memctx object and the normalized path of
2704 requested file, relative to repository root. It is fired by the
2704 requested file, relative to repository root. It is fired by the
2705 commit function for every file in 'files', but calls order is
2705 commit function for every file in 'files', but calls order is
2706 undefined. If the file is available in the revision being
2706 undefined. If the file is available in the revision being
2707 committed (updated or added), filectxfn returns a memfilectx
2707 committed (updated or added), filectxfn returns a memfilectx
2708 object. If the file was removed, filectxfn return None for recent
2708 object. If the file was removed, filectxfn return None for recent
2709 Mercurial. Moved files are represented by marking the source file
2709 Mercurial. Moved files are represented by marking the source file
2710 removed and the new file added with copy information (see
2710 removed and the new file added with copy information (see
2711 memfilectx).
2711 memfilectx).
2712
2712
2713 user receives the committer name and defaults to current
2713 user receives the committer name and defaults to current
2714 repository username, date is the commit date in any format
2714 repository username, date is the commit date in any format
2715 supported by dateutil.parsedate() and defaults to current date, extra
2715 supported by dateutil.parsedate() and defaults to current date, extra
2716 is a dictionary of metadata or is left empty.
2716 is a dictionary of metadata or is left empty.
2717 """
2717 """
2718
2718
2719 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2719 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2720 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2720 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2721 # this field to determine what to do in filectxfn.
2721 # this field to determine what to do in filectxfn.
2722 _returnnoneformissingfiles = True
2722 _returnnoneformissingfiles = True
2723
2723
2724 def __init__(
2724 def __init__(
2725 self,
2725 self,
2726 repo,
2726 repo,
2727 parents,
2727 parents,
2728 text,
2728 text,
2729 files,
2729 files,
2730 filectxfn,
2730 filectxfn,
2731 user=None,
2731 user=None,
2732 date=None,
2732 date=None,
2733 extra=None,
2733 extra=None,
2734 branch=None,
2734 branch=None,
2735 editor=False,
2735 editor=False,
2736 ):
2736 ):
2737 super(memctx, self).__init__(
2737 super(memctx, self).__init__(
2738 repo, text, user, date, extra, branch=branch
2738 repo, text, user, date, extra, branch=branch
2739 )
2739 )
2740 self._rev = None
2740 self._rev = None
2741 self._node = None
2741 self._node = None
2742 parents = [(p or nullid) for p in parents]
2742 parents = [(p or nullid) for p in parents]
2743 p1, p2 = parents
2743 p1, p2 = parents
2744 self._parents = [self._repo[p] for p in (p1, p2)]
2744 self._parents = [self._repo[p] for p in (p1, p2)]
2745 files = sorted(set(files))
2745 files = sorted(set(files))
2746 self._files = files
2746 self._files = files
2747 self.substate = {}
2747 self.substate = {}
2748
2748
2749 if isinstance(filectxfn, patch.filestore):
2749 if isinstance(filectxfn, patch.filestore):
2750 filectxfn = memfilefrompatch(filectxfn)
2750 filectxfn = memfilefrompatch(filectxfn)
2751 elif not callable(filectxfn):
2751 elif not callable(filectxfn):
2752 # if store is not callable, wrap it in a function
2752 # if store is not callable, wrap it in a function
2753 filectxfn = memfilefromctx(filectxfn)
2753 filectxfn = memfilefromctx(filectxfn)
2754
2754
2755 # memoizing increases performance for e.g. vcs convert scenarios.
2755 # memoizing increases performance for e.g. vcs convert scenarios.
2756 self._filectxfn = makecachingfilectxfn(filectxfn)
2756 self._filectxfn = makecachingfilectxfn(filectxfn)
2757
2757
2758 if editor:
2758 if editor:
2759 self._text = editor(self._repo, self, [])
2759 self._text = editor(self._repo, self, [])
2760 self._repo.savecommitmessage(self._text)
2760 self._repo.savecommitmessage(self._text)
2761
2761
2762 def filectx(self, path, filelog=None):
2762 def filectx(self, path, filelog=None):
2763 """get a file context from the working directory
2763 """get a file context from the working directory
2764
2764
2765 Returns None if file doesn't exist and should be removed."""
2765 Returns None if file doesn't exist and should be removed."""
2766 return self._filectxfn(self._repo, self, path)
2766 return self._filectxfn(self._repo, self, path)
2767
2767
2768 def commit(self):
2768 def commit(self):
2769 """commit context to the repo"""
2769 """commit context to the repo"""
2770 return self._repo.commitctx(self)
2770 return self._repo.commitctx(self)
2771
2771
2772 @propertycache
2772 @propertycache
2773 def _manifest(self):
2773 def _manifest(self):
2774 """generate a manifest based on the return values of filectxfn"""
2774 """generate a manifest based on the return values of filectxfn"""
2775
2775
2776 # keep this simple for now; just worry about p1
2776 # keep this simple for now; just worry about p1
2777 pctx = self._parents[0]
2777 pctx = self._parents[0]
2778 man = pctx.manifest().copy()
2778 man = pctx.manifest().copy()
2779
2779
2780 for f in self._status.modified:
2780 for f in self._status.modified:
2781 man[f] = modifiednodeid
2781 man[f] = modifiednodeid
2782
2782
2783 for f in self._status.added:
2783 for f in self._status.added:
2784 man[f] = addednodeid
2784 man[f] = addednodeid
2785
2785
2786 for f in self._status.removed:
2786 for f in self._status.removed:
2787 if f in man:
2787 if f in man:
2788 del man[f]
2788 del man[f]
2789
2789
2790 return man
2790 return man
2791
2791
2792 @propertycache
2792 @propertycache
2793 def _status(self):
2793 def _status(self):
2794 """Calculate exact status from ``files`` specified at construction
2794 """Calculate exact status from ``files`` specified at construction
2795 """
2795 """
2796 man1 = self.p1().manifest()
2796 man1 = self.p1().manifest()
2797 p2 = self._parents[1]
2797 p2 = self._parents[1]
2798 # "1 < len(self._parents)" can't be used for checking
2798 # "1 < len(self._parents)" can't be used for checking
2799 # existence of the 2nd parent, because "memctx._parents" is
2799 # existence of the 2nd parent, because "memctx._parents" is
2800 # explicitly initialized by the list, of which length is 2.
2800 # explicitly initialized by the list, of which length is 2.
2801 if p2.node() != nullid:
2801 if p2.node() != nullid:
2802 man2 = p2.manifest()
2802 man2 = p2.manifest()
2803 managing = lambda f: f in man1 or f in man2
2803 managing = lambda f: f in man1 or f in man2
2804 else:
2804 else:
2805 managing = lambda f: f in man1
2805 managing = lambda f: f in man1
2806
2806
2807 modified, added, removed = [], [], []
2807 modified, added, removed = [], [], []
2808 for f in self._files:
2808 for f in self._files:
2809 if not managing(f):
2809 if not managing(f):
2810 added.append(f)
2810 added.append(f)
2811 elif self[f]:
2811 elif self[f]:
2812 modified.append(f)
2812 modified.append(f)
2813 else:
2813 else:
2814 removed.append(f)
2814 removed.append(f)
2815
2815
2816 return scmutil.status(modified, added, removed, [], [], [], [])
2816 return scmutil.status(modified, added, removed, [], [], [], [])
2817
2817
2818
2818
class memfilectx(committablefilectx):
    """An in-memory file, ready to be committed.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None.
        """
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode link/exec status in the usual one-character flag form.
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        # Path this file was copied from, or None.
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the raw in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2869
2869
2870
2870
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revisions
    identifiers (pass None for every missing parent), 'text' is the commit
    message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=False,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        # Pad with null-revision contexts so _parents always has length 2.
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # Compare node ids, not contexts: a context object never equals
        # the raw ``nullid`` bytes, so the former "p1 != nullid" guard
        # could never skip the check for a null parent.
        if p1.node() != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2.node() != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the node id of the reused manifest."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File data is untouched, so delegate to the original revision.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2979
2979
2980
2980
class arbitraryfilectx(object):
    """Expose filectx-like operations for a file at an arbitrary
    location on disk, possibly outside the working directory.
    """

    def __init__(self, path, repo=None):
        # repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so take the
        # slow path whenever either side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Fast path for merges when both sides are disk-backed.
            # filecmp returns True when the files are identical — the
            # opposite of our cmp convention (True means different).
            same = filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
            return not same
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        """Arbitrary files carry no link/exec flags."""
        return b''

    def data(self):
        """Return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content read in binary mode."""
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        """Delete the file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Write ``data`` to disk; flags are not supported here."""
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now