# Source provenance: Mercurial `context.py` as of changeset r44568:98349edd
# ("changectx: mark parent of changesets as non filtered", by marmoute,
# default branch). Recovered from a code-review diff view.
@@ -1,3054 +1,3057 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
# Shorthand alias used throughout this module for the caching property
# decorator provided by util.
propertycache = util.propertycache
55
55
56
56
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        # Short (abbreviated) hex form of this context's node id.
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts are equal when they are the same subclass and point at
        # the same revision; anything lacking a _rev is never equal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Parsed .hgsub/.hgsubstate mapping for this context.
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # Revision recorded in substate for the given subrepo path.
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # Human-readable name of this changeset's phase.
        return phases.phasenames[self.phase()]

    def mutable(self):
        # Anything past the public phase can still be rewritten.
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        # A single-parent changeset's second parent is the null revision.
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Return (filenode, flags) for path, preferring whichever manifest
        # data is already cached to avoid a full manifest read.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        # Build a matcher rooted at the repo, relative to cwd (defaulting to
        # the repo's current working directory).
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r
476
476
477
477
478 class changectx(basectx):
478 class changectx(basectx):
479 """A changecontext object makes access to data related to a particular
479 """A changecontext object makes access to data related to a particular
480 changeset convenient. It represents a read-only context already present in
480 changeset convenient. It represents a read-only context already present in
481 the repo."""
481 the repo."""
482
482
483 def __init__(self, repo, rev, node, maybe_filtered=True):
483 def __init__(self, repo, rev, node, maybe_filtered=True):
484 super(changectx, self).__init__(repo)
484 super(changectx, self).__init__(repo)
485 self._rev = rev
485 self._rev = rev
486 self._node = node
486 self._node = node
487 # When maybe_filtered is True, the revision might be affected by
487 # When maybe_filtered is True, the revision might be affected by
488 # changelog filtering and operation through the filtered changelog must be used.
488 # changelog filtering and operation through the filtered changelog must be used.
489 #
489 #
490 # When maybe_filtered is False, the revision has already been checked
490 # When maybe_filtered is False, the revision has already been checked
491 # against filtering and is not filtered. Operation through the
491 # against filtering and is not filtered. Operation through the
492 # unfiltered changelog might be used in some case.
492 # unfiltered changelog might be used in some case.
493 self._maybe_filtered = maybe_filtered
493 self._maybe_filtered = maybe_filtered
494
494
495 def __hash__(self):
495 def __hash__(self):
496 try:
496 try:
497 return hash(self._rev)
497 return hash(self._rev)
498 except AttributeError:
498 except AttributeError:
499 return id(self)
499 return id(self)
500
500
501 def __nonzero__(self):
501 def __nonzero__(self):
502 return self._rev != nullrev
502 return self._rev != nullrev
503
503
504 __bool__ = __nonzero__
504 __bool__ = __nonzero__
505
505
506 @propertycache
506 @propertycache
507 def _changeset(self):
507 def _changeset(self):
508 if self._maybe_filtered:
508 if self._maybe_filtered:
509 repo = self._repo
509 repo = self._repo
510 else:
510 else:
511 repo = self._repo.unfiltered()
511 repo = self._repo.unfiltered()
512 return repo.changelog.changelogrevision(self.rev())
512 return repo.changelog.changelogrevision(self.rev())
513
513
514 @propertycache
514 @propertycache
515 def _manifest(self):
515 def _manifest(self):
516 return self._manifestctx.read()
516 return self._manifestctx.read()
517
517
518 @property
518 @property
519 def _manifestctx(self):
519 def _manifestctx(self):
520 return self._repo.manifestlog[self._changeset.manifest]
520 return self._repo.manifestlog[self._changeset.manifest]
521
521
522 @propertycache
522 @propertycache
523 def _manifestdelta(self):
523 def _manifestdelta(self):
524 return self._manifestctx.readdelta()
524 return self._manifestctx.readdelta()
525
525
526 @propertycache
526 @propertycache
527 def _parents(self):
527 def _parents(self):
528 repo = self._repo
528 repo = self._repo
529 if self._maybe_filtered:
529 if self._maybe_filtered:
530 cl = repo.changelog
530 cl = repo.changelog
531 else:
531 else:
532 cl = repo.unfiltered().changelog
532 cl = repo.unfiltered().changelog
533
533
534 p1, p2 = cl.parentrevs(self._rev)
534 p1, p2 = cl.parentrevs(self._rev)
535 if p2 == nullrev:
535 if p2 == nullrev:
536 return [repo[p1]]
536 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
537 return [repo[p1], repo[p2]]
537 return [
538 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
539 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
540 ]
538
541
539 def changeset(self):
542 def changeset(self):
540 c = self._changeset
543 c = self._changeset
541 return (
544 return (
542 c.manifest,
545 c.manifest,
543 c.user,
546 c.user,
544 c.date,
547 c.date,
545 c.files,
548 c.files,
546 c.description,
549 c.description,
547 c.extra,
550 c.extra,
548 )
551 )
549
552
550 def manifestnode(self):
553 def manifestnode(self):
551 return self._changeset.manifest
554 return self._changeset.manifest
552
555
553 def user(self):
556 def user(self):
554 return self._changeset.user
557 return self._changeset.user
555
558
556 def date(self):
559 def date(self):
557 return self._changeset.date
560 return self._changeset.date
558
561
559 def files(self):
562 def files(self):
560 return self._changeset.files
563 return self._changeset.files
561
564
562 def filesmodified(self):
565 def filesmodified(self):
563 modified = set(self.files())
566 modified = set(self.files())
564 modified.difference_update(self.filesadded())
567 modified.difference_update(self.filesadded())
565 modified.difference_update(self.filesremoved())
568 modified.difference_update(self.filesremoved())
566 return sorted(modified)
569 return sorted(modified)
567
570
568 def filesadded(self):
571 def filesadded(self):
569 filesadded = self._changeset.filesadded
572 filesadded = self._changeset.filesadded
570 compute_on_none = True
573 compute_on_none = True
571 if self._repo.filecopiesmode == b'changeset-sidedata':
574 if self._repo.filecopiesmode == b'changeset-sidedata':
572 compute_on_none = False
575 compute_on_none = False
573 else:
576 else:
574 source = self._repo.ui.config(b'experimental', b'copies.read-from')
577 source = self._repo.ui.config(b'experimental', b'copies.read-from')
575 if source == b'changeset-only':
578 if source == b'changeset-only':
576 compute_on_none = False
579 compute_on_none = False
577 elif source != b'compatibility':
580 elif source != b'compatibility':
578 # filelog mode, ignore any changelog content
581 # filelog mode, ignore any changelog content
579 filesadded = None
582 filesadded = None
580 if filesadded is None:
583 if filesadded is None:
581 if compute_on_none:
584 if compute_on_none:
582 filesadded = copies.computechangesetfilesadded(self)
585 filesadded = copies.computechangesetfilesadded(self)
583 else:
586 else:
584 filesadded = []
587 filesadded = []
585 return filesadded
588 return filesadded
586
589
587 def filesremoved(self):
590 def filesremoved(self):
588 filesremoved = self._changeset.filesremoved
591 filesremoved = self._changeset.filesremoved
589 compute_on_none = True
592 compute_on_none = True
590 if self._repo.filecopiesmode == b'changeset-sidedata':
593 if self._repo.filecopiesmode == b'changeset-sidedata':
591 compute_on_none = False
594 compute_on_none = False
592 else:
595 else:
593 source = self._repo.ui.config(b'experimental', b'copies.read-from')
596 source = self._repo.ui.config(b'experimental', b'copies.read-from')
594 if source == b'changeset-only':
597 if source == b'changeset-only':
595 compute_on_none = False
598 compute_on_none = False
596 elif source != b'compatibility':
599 elif source != b'compatibility':
597 # filelog mode, ignore any changelog content
600 # filelog mode, ignore any changelog content
598 filesremoved = None
601 filesremoved = None
599 if filesremoved is None:
602 if filesremoved is None:
600 if compute_on_none:
603 if compute_on_none:
601 filesremoved = copies.computechangesetfilesremoved(self)
604 filesremoved = copies.computechangesetfilesremoved(self)
602 else:
605 else:
603 filesremoved = []
606 filesremoved = []
604 return filesremoved
607 return filesremoved
605
608
606 @propertycache
609 @propertycache
607 def _copies(self):
610 def _copies(self):
608 p1copies = self._changeset.p1copies
611 p1copies = self._changeset.p1copies
609 p2copies = self._changeset.p2copies
612 p2copies = self._changeset.p2copies
610 compute_on_none = True
613 compute_on_none = True
611 if self._repo.filecopiesmode == b'changeset-sidedata':
614 if self._repo.filecopiesmode == b'changeset-sidedata':
612 compute_on_none = False
615 compute_on_none = False
613 else:
616 else:
614 source = self._repo.ui.config(b'experimental', b'copies.read-from')
617 source = self._repo.ui.config(b'experimental', b'copies.read-from')
615 # If config says to get copy metadata only from changeset, then
618 # If config says to get copy metadata only from changeset, then
616 # return that, defaulting to {} if there was no copy metadata. In
619 # return that, defaulting to {} if there was no copy metadata. In
617 # compatibility mode, we return copy data from the changeset if it
620 # compatibility mode, we return copy data from the changeset if it
618 # was recorded there, and otherwise we fall back to getting it from
621 # was recorded there, and otherwise we fall back to getting it from
619 # the filelogs (below).
622 # the filelogs (below).
620 #
623 #
621 # If we are in compatiblity mode and there is not data in the
624 # If we are in compatiblity mode and there is not data in the
622 # changeset), we get the copy metadata from the filelogs.
625 # changeset), we get the copy metadata from the filelogs.
623 #
626 #
624 # otherwise, when config said to read only from filelog, we get the
627 # otherwise, when config said to read only from filelog, we get the
625 # copy metadata from the filelogs.
628 # copy metadata from the filelogs.
626 if source == b'changeset-only':
629 if source == b'changeset-only':
627 compute_on_none = False
630 compute_on_none = False
628 elif source != b'compatibility':
631 elif source != b'compatibility':
629 # filelog mode, ignore any changelog content
632 # filelog mode, ignore any changelog content
630 p1copies = p2copies = None
633 p1copies = p2copies = None
631 if p1copies is None:
634 if p1copies is None:
632 if compute_on_none:
635 if compute_on_none:
633 p1copies, p2copies = super(changectx, self)._copies
636 p1copies, p2copies = super(changectx, self)._copies
634 else:
637 else:
635 if p1copies is None:
638 if p1copies is None:
636 p1copies = {}
639 p1copies = {}
637 if p2copies is None:
640 if p2copies is None:
638 p2copies = {}
641 p2copies = {}
639 return p1copies, p2copies
642 return p1copies, p2copies
640
643
641 def description(self):
644 def description(self):
642 return self._changeset.description
645 return self._changeset.description
643
646
644 def branch(self):
647 def branch(self):
645 return encoding.tolocal(self._changeset.extra.get(b"branch"))
648 return encoding.tolocal(self._changeset.extra.get(b"branch"))
646
649
647 def closesbranch(self):
650 def closesbranch(self):
648 return b'close' in self._changeset.extra
651 return b'close' in self._changeset.extra
649
652
650 def extra(self):
653 def extra(self):
651 """Return a dict of extra information."""
654 """Return a dict of extra information."""
652 return self._changeset.extra
655 return self._changeset.extra
653
656
654 def tags(self):
657 def tags(self):
655 """Return a list of byte tag names"""
658 """Return a list of byte tag names"""
656 return self._repo.nodetags(self._node)
659 return self._repo.nodetags(self._node)
657
660
658 def bookmarks(self):
661 def bookmarks(self):
659 """Return a list of byte bookmark names."""
662 """Return a list of byte bookmark names."""
660 return self._repo.nodebookmarks(self._node)
663 return self._repo.nodebookmarks(self._node)
661
664
662 def phase(self):
665 def phase(self):
663 return self._repo._phasecache.phase(self._repo, self._rev)
666 return self._repo._phasecache.phase(self._repo, self._rev)
664
667
665 def hidden(self):
668 def hidden(self):
666 return self._rev in repoview.filterrevs(self._repo, b'visible')
669 return self._rev in repoview.filterrevs(self._repo, b'visible')
667
670
668 def isinmemory(self):
671 def isinmemory(self):
669 return False
672 return False
670
673
671 def children(self):
674 def children(self):
672 """return list of changectx contexts for each child changeset.
675 """return list of changectx contexts for each child changeset.
673
676
674 This returns only the immediate child changesets. Use descendants() to
677 This returns only the immediate child changesets. Use descendants() to
675 recursively walk children.
678 recursively walk children.
676 """
679 """
677 c = self._repo.changelog.children(self._node)
680 c = self._repo.changelog.children(self._node)
678 return [self._repo[x] for x in c]
681 return [self._repo[x] for x in c]
679
682
680 def ancestors(self):
683 def ancestors(self):
681 for a in self._repo.changelog.ancestors([self._rev]):
684 for a in self._repo.changelog.ancestors([self._rev]):
682 yield self._repo[a]
685 yield self._repo[a]
683
686
684 def descendants(self):
687 def descendants(self):
685 """Recursively yield all children of the changeset.
688 """Recursively yield all children of the changeset.
686
689
687 For just the immediate children, use children()
690 For just the immediate children, use children()
688 """
691 """
689 for d in self._repo.changelog.descendants([self._rev]):
692 for d in self._repo.changelog.descendants([self._rev]):
690 yield self._repo[d]
693 yield self._repo[d]
691
694
692 def filectx(self, path, fileid=None, filelog=None):
695 def filectx(self, path, fileid=None, filelog=None):
693 """get a file context from this changeset"""
696 """get a file context from this changeset"""
694 if fileid is None:
697 if fileid is None:
695 fileid = self.filenode(path)
698 fileid = self.filenode(path)
696 return filectx(
699 return filectx(
697 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
700 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
698 )
701 )
699
702
700 def ancestor(self, c2, warn=False):
703 def ancestor(self, c2, warn=False):
701 """return the "best" ancestor context of self and c2
704 """return the "best" ancestor context of self and c2
702
705
703 If there are multiple candidates, it will show a message and check
706 If there are multiple candidates, it will show a message and check
704 merge.preferancestor configuration before falling back to the
707 merge.preferancestor configuration before falling back to the
705 revlog ancestor."""
708 revlog ancestor."""
706 # deal with workingctxs
709 # deal with workingctxs
707 n2 = c2._node
710 n2 = c2._node
708 if n2 is None:
711 if n2 is None:
709 n2 = c2._parents[0]._node
712 n2 = c2._parents[0]._node
710 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
713 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
711 if not cahs:
714 if not cahs:
712 anc = nullid
715 anc = nullid
713 elif len(cahs) == 1:
716 elif len(cahs) == 1:
714 anc = cahs[0]
717 anc = cahs[0]
715 else:
718 else:
716 # experimental config: merge.preferancestor
719 # experimental config: merge.preferancestor
717 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
720 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
718 try:
721 try:
719 ctx = scmutil.revsymbol(self._repo, r)
722 ctx = scmutil.revsymbol(self._repo, r)
720 except error.RepoLookupError:
723 except error.RepoLookupError:
721 continue
724 continue
722 anc = ctx.node()
725 anc = ctx.node()
723 if anc in cahs:
726 if anc in cahs:
724 break
727 break
725 else:
728 else:
726 anc = self._repo.changelog.ancestor(self._node, n2)
729 anc = self._repo.changelog.ancestor(self._node, n2)
727 if warn:
730 if warn:
728 self._repo.ui.status(
731 self._repo.ui.status(
729 (
732 (
730 _(b"note: using %s as ancestor of %s and %s\n")
733 _(b"note: using %s as ancestor of %s and %s\n")
731 % (short(anc), short(self._node), short(n2))
734 % (short(anc), short(self._node), short(n2))
732 )
735 )
733 + b''.join(
736 + b''.join(
734 _(
737 _(
735 b" alternatively, use --config "
738 b" alternatively, use --config "
736 b"merge.preferancestor=%s\n"
739 b"merge.preferancestor=%s\n"
737 )
740 )
738 % short(n)
741 % short(n)
739 for n in sorted(cahs)
742 for n in sorted(cahs)
740 if n != anc
743 if n != anc
741 )
744 )
742 )
745 )
743 return self._repo[anc]
746 return self._repo[anc]
744
747
745 def isancestorof(self, other):
748 def isancestorof(self, other):
746 """True if this changeset is an ancestor of other"""
749 """True if this changeset is an ancestor of other"""
747 return self._repo.changelog.isancestorrev(self._rev, other._rev)
750 return self._repo.changelog.isancestorrev(self._rev, other._rev)
748
751
749 def walk(self, match):
752 def walk(self, match):
750 '''Generates matching file names.'''
753 '''Generates matching file names.'''
751
754
752 # Wrap match.bad method to have message with nodeid
755 # Wrap match.bad method to have message with nodeid
753 def bad(fn, msg):
756 def bad(fn, msg):
754 # The manifest doesn't know about subrepos, so don't complain about
757 # The manifest doesn't know about subrepos, so don't complain about
755 # paths into valid subrepos.
758 # paths into valid subrepos.
756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
759 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
757 return
760 return
758 match.bad(fn, _(b'no such file in rev %s') % self)
761 match.bad(fn, _(b'no such file in rev %s') % self)
759
762
760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
763 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
761 return self._manifest.walk(m)
764 return self._manifest.walk(m)
762
765
763 def matches(self, match):
766 def matches(self, match):
764 return self.walk(match)
767 return self.walk(match)
765
768
766
769
767 class basefilectx(object):
770 class basefilectx(object):
768 """A filecontext object represents the common logic for its children:
771 """A filecontext object represents the common logic for its children:
769 filectx: read-only access to a filerevision that is already present
772 filectx: read-only access to a filerevision that is already present
770 in the repo,
773 in the repo,
771 workingfilectx: a filecontext that represents files from the working
774 workingfilectx: a filecontext that represents files from the working
772 directory,
775 directory,
773 memfilectx: a filecontext that represents files in-memory,
776 memfilectx: a filecontext that represents files in-memory,
774 """
777 """
775
778
776 @propertycache
779 @propertycache
777 def _filelog(self):
780 def _filelog(self):
778 return self._repo.file(self._path)
781 return self._repo.file(self._path)
779
782
780 @propertycache
783 @propertycache
781 def _changeid(self):
784 def _changeid(self):
782 if '_changectx' in self.__dict__:
785 if '_changectx' in self.__dict__:
783 return self._changectx.rev()
786 return self._changectx.rev()
784 elif '_descendantrev' in self.__dict__:
787 elif '_descendantrev' in self.__dict__:
785 # this file context was created from a revision with a known
788 # this file context was created from a revision with a known
786 # descendant, we can (lazily) correct for linkrev aliases
789 # descendant, we can (lazily) correct for linkrev aliases
787 return self._adjustlinkrev(self._descendantrev)
790 return self._adjustlinkrev(self._descendantrev)
788 else:
791 else:
789 return self._filelog.linkrev(self._filerev)
792 return self._filelog.linkrev(self._filerev)
790
793
791 @propertycache
794 @propertycache
792 def _filenode(self):
795 def _filenode(self):
793 if '_fileid' in self.__dict__:
796 if '_fileid' in self.__dict__:
794 return self._filelog.lookup(self._fileid)
797 return self._filelog.lookup(self._fileid)
795 else:
798 else:
796 return self._changectx.filenode(self._path)
799 return self._changectx.filenode(self._path)
797
800
798 @propertycache
801 @propertycache
799 def _filerev(self):
802 def _filerev(self):
800 return self._filelog.rev(self._filenode)
803 return self._filelog.rev(self._filenode)
801
804
802 @propertycache
805 @propertycache
803 def _repopath(self):
806 def _repopath(self):
804 return self._path
807 return self._path
805
808
806 def __nonzero__(self):
809 def __nonzero__(self):
807 try:
810 try:
808 self._filenode
811 self._filenode
809 return True
812 return True
810 except error.LookupError:
813 except error.LookupError:
811 # file is missing
814 # file is missing
812 return False
815 return False
813
816
814 __bool__ = __nonzero__
817 __bool__ = __nonzero__
815
818
816 def __bytes__(self):
819 def __bytes__(self):
817 try:
820 try:
818 return b"%s@%s" % (self.path(), self._changectx)
821 return b"%s@%s" % (self.path(), self._changectx)
819 except error.LookupError:
822 except error.LookupError:
820 return b"%s@???" % self.path()
823 return b"%s@???" % self.path()
821
824
822 __str__ = encoding.strmethod(__bytes__)
825 __str__ = encoding.strmethod(__bytes__)
823
826
824 def __repr__(self):
827 def __repr__(self):
825 return "<%s %s>" % (type(self).__name__, str(self))
828 return "<%s %s>" % (type(self).__name__, str(self))
826
829
827 def __hash__(self):
830 def __hash__(self):
828 try:
831 try:
829 return hash((self._path, self._filenode))
832 return hash((self._path, self._filenode))
830 except AttributeError:
833 except AttributeError:
831 return id(self)
834 return id(self)
832
835
833 def __eq__(self, other):
836 def __eq__(self, other):
834 try:
837 try:
835 return (
838 return (
836 type(self) == type(other)
839 type(self) == type(other)
837 and self._path == other._path
840 and self._path == other._path
838 and self._filenode == other._filenode
841 and self._filenode == other._filenode
839 )
842 )
840 except AttributeError:
843 except AttributeError:
841 return False
844 return False
842
845
843 def __ne__(self, other):
846 def __ne__(self, other):
844 return not (self == other)
847 return not (self == other)
845
848
846 def filerev(self):
849 def filerev(self):
847 return self._filerev
850 return self._filerev
848
851
849 def filenode(self):
852 def filenode(self):
850 return self._filenode
853 return self._filenode
851
854
852 @propertycache
855 @propertycache
853 def _flags(self):
856 def _flags(self):
854 return self._changectx.flags(self._path)
857 return self._changectx.flags(self._path)
855
858
856 def flags(self):
859 def flags(self):
857 return self._flags
860 return self._flags
858
861
859 def filelog(self):
862 def filelog(self):
860 return self._filelog
863 return self._filelog
861
864
862 def rev(self):
865 def rev(self):
863 return self._changeid
866 return self._changeid
864
867
865 def linkrev(self):
868 def linkrev(self):
866 return self._filelog.linkrev(self._filerev)
869 return self._filelog.linkrev(self._filerev)
867
870
868 def node(self):
871 def node(self):
869 return self._changectx.node()
872 return self._changectx.node()
870
873
871 def hex(self):
874 def hex(self):
872 return self._changectx.hex()
875 return self._changectx.hex()
873
876
874 def user(self):
877 def user(self):
875 return self._changectx.user()
878 return self._changectx.user()
876
879
877 def date(self):
880 def date(self):
878 return self._changectx.date()
881 return self._changectx.date()
879
882
880 def files(self):
883 def files(self):
881 return self._changectx.files()
884 return self._changectx.files()
882
885
883 def description(self):
886 def description(self):
884 return self._changectx.description()
887 return self._changectx.description()
885
888
886 def branch(self):
889 def branch(self):
887 return self._changectx.branch()
890 return self._changectx.branch()
888
891
889 def extra(self):
892 def extra(self):
890 return self._changectx.extra()
893 return self._changectx.extra()
891
894
892 def phase(self):
895 def phase(self):
893 return self._changectx.phase()
896 return self._changectx.phase()
894
897
895 def phasestr(self):
898 def phasestr(self):
896 return self._changectx.phasestr()
899 return self._changectx.phasestr()
897
900
898 def obsolete(self):
901 def obsolete(self):
899 return self._changectx.obsolete()
902 return self._changectx.obsolete()
900
903
901 def instabilities(self):
904 def instabilities(self):
902 return self._changectx.instabilities()
905 return self._changectx.instabilities()
903
906
904 def manifest(self):
907 def manifest(self):
905 return self._changectx.manifest()
908 return self._changectx.manifest()
906
909
907 def changectx(self):
910 def changectx(self):
908 return self._changectx
911 return self._changectx
909
912
910 def renamed(self):
913 def renamed(self):
911 return self._copied
914 return self._copied
912
915
913 def copysource(self):
916 def copysource(self):
914 return self._copied and self._copied[0]
917 return self._copied and self._copied[0]
915
918
916 def repo(self):
919 def repo(self):
917 return self._repo
920 return self._repo
918
921
919 def size(self):
922 def size(self):
920 return len(self.data())
923 return len(self.data())
921
924
922 def path(self):
925 def path(self):
923 return self._path
926 return self._path
924
927
925 def isbinary(self):
928 def isbinary(self):
926 try:
929 try:
927 return stringutil.binary(self.data())
930 return stringutil.binary(self.data())
928 except IOError:
931 except IOError:
929 return False
932 return False
930
933
931 def isexec(self):
934 def isexec(self):
932 return b'x' in self.flags()
935 return b'x' in self.flags()
933
936
934 def islink(self):
937 def islink(self):
935 return b'l' in self.flags()
938 return b'l' in self.flags()
936
939
937 def isabsent(self):
940 def isabsent(self):
938 """whether this filectx represents a file not in self._changectx
941 """whether this filectx represents a file not in self._changectx
939
942
940 This is mainly for merge code to detect change/delete conflicts. This is
943 This is mainly for merge code to detect change/delete conflicts. This is
941 expected to be True for all subclasses of basectx."""
944 expected to be True for all subclasses of basectx."""
942 return False
945 return False
943
946
944 _customcmp = False
947 _customcmp = False
945
948
946 def cmp(self, fctx):
949 def cmp(self, fctx):
947 """compare with other file context
950 """compare with other file context
948
951
949 returns True if different than fctx.
952 returns True if different than fctx.
950 """
953 """
951 if fctx._customcmp:
954 if fctx._customcmp:
952 return fctx.cmp(self)
955 return fctx.cmp(self)
953
956
954 if self._filenode is None:
957 if self._filenode is None:
955 raise error.ProgrammingError(
958 raise error.ProgrammingError(
956 b'filectx.cmp() must be reimplemented if not backed by revlog'
959 b'filectx.cmp() must be reimplemented if not backed by revlog'
957 )
960 )
958
961
959 if fctx._filenode is None:
962 if fctx._filenode is None:
960 if self._repo._encodefilterpats:
963 if self._repo._encodefilterpats:
961 # can't rely on size() because wdir content may be decoded
964 # can't rely on size() because wdir content may be decoded
962 return self._filelog.cmp(self._filenode, fctx.data())
965 return self._filelog.cmp(self._filenode, fctx.data())
963 if self.size() - 4 == fctx.size():
966 if self.size() - 4 == fctx.size():
964 # size() can match:
967 # size() can match:
965 # if file data starts with '\1\n', empty metadata block is
968 # if file data starts with '\1\n', empty metadata block is
966 # prepended, which adds 4 bytes to filelog.size().
969 # prepended, which adds 4 bytes to filelog.size().
967 return self._filelog.cmp(self._filenode, fctx.data())
970 return self._filelog.cmp(self._filenode, fctx.data())
968 if self.size() == fctx.size():
971 if self.size() == fctx.size():
969 # size() matches: need to compare content
972 # size() matches: need to compare content
970 return self._filelog.cmp(self._filenode, fctx.data())
973 return self._filelog.cmp(self._filenode, fctx.data())
971
974
972 # size() differs
975 # size() differs
973 return True
976 return True
974
977
975 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
978 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
976 """return the first ancestor of <srcrev> introducing <fnode>
979 """return the first ancestor of <srcrev> introducing <fnode>
977
980
978 If the linkrev of the file revision does not point to an ancestor of
981 If the linkrev of the file revision does not point to an ancestor of
979 srcrev, we'll walk down the ancestors until we find one introducing
982 srcrev, we'll walk down the ancestors until we find one introducing
980 this file revision.
983 this file revision.
981
984
982 :srcrev: the changeset revision we search ancestors from
985 :srcrev: the changeset revision we search ancestors from
983 :inclusive: if true, the src revision will also be checked
986 :inclusive: if true, the src revision will also be checked
984 :stoprev: an optional revision to stop the walk at. If no introduction
987 :stoprev: an optional revision to stop the walk at. If no introduction
985 of this file content could be found before this floor
988 of this file content could be found before this floor
986 revision, the function will returns "None" and stops its
989 revision, the function will returns "None" and stops its
987 iteration.
990 iteration.
988 """
991 """
989 repo = self._repo
992 repo = self._repo
990 cl = repo.unfiltered().changelog
993 cl = repo.unfiltered().changelog
991 mfl = repo.manifestlog
994 mfl = repo.manifestlog
992 # fetch the linkrev
995 # fetch the linkrev
993 lkr = self.linkrev()
996 lkr = self.linkrev()
994 if srcrev == lkr:
997 if srcrev == lkr:
995 return lkr
998 return lkr
996 # hack to reuse ancestor computation when searching for renames
999 # hack to reuse ancestor computation when searching for renames
997 memberanc = getattr(self, '_ancestrycontext', None)
1000 memberanc = getattr(self, '_ancestrycontext', None)
998 iteranc = None
1001 iteranc = None
999 if srcrev is None:
1002 if srcrev is None:
1000 # wctx case, used by workingfilectx during mergecopy
1003 # wctx case, used by workingfilectx during mergecopy
1001 revs = [p.rev() for p in self._repo[None].parents()]
1004 revs = [p.rev() for p in self._repo[None].parents()]
1002 inclusive = True # we skipped the real (revless) source
1005 inclusive = True # we skipped the real (revless) source
1003 else:
1006 else:
1004 revs = [srcrev]
1007 revs = [srcrev]
1005 if memberanc is None:
1008 if memberanc is None:
1006 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1009 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1007 # check if this linkrev is an ancestor of srcrev
1010 # check if this linkrev is an ancestor of srcrev
1008 if lkr not in memberanc:
1011 if lkr not in memberanc:
1009 if iteranc is None:
1012 if iteranc is None:
1010 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1013 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1011 fnode = self._filenode
1014 fnode = self._filenode
1012 path = self._path
1015 path = self._path
1013 for a in iteranc:
1016 for a in iteranc:
1014 if stoprev is not None and a < stoprev:
1017 if stoprev is not None and a < stoprev:
1015 return None
1018 return None
1016 ac = cl.read(a) # get changeset data (we avoid object creation)
1019 ac = cl.read(a) # get changeset data (we avoid object creation)
1017 if path in ac[3]: # checking the 'files' field.
1020 if path in ac[3]: # checking the 'files' field.
1018 # The file has been touched, check if the content is
1021 # The file has been touched, check if the content is
1019 # similar to the one we search for.
1022 # similar to the one we search for.
1020 if fnode == mfl[ac[0]].readfast().get(path):
1023 if fnode == mfl[ac[0]].readfast().get(path):
1021 return a
1024 return a
1022 # In theory, we should never get out of that loop without a result.
1025 # In theory, we should never get out of that loop without a result.
1023 # But if manifest uses a buggy file revision (not children of the
1026 # But if manifest uses a buggy file revision (not children of the
1024 # one it replaces) we could. Such a buggy situation will likely
1027 # one it replaces) we could. Such a buggy situation will likely
1025 # result is crash somewhere else at to some point.
1028 # result is crash somewhere else at to some point.
1026 return lkr
1029 return lkr
1027
1030
1028 def isintroducedafter(self, changelogrev):
1031 def isintroducedafter(self, changelogrev):
1029 """True if a filectx has been introduced after a given floor revision
1032 """True if a filectx has been introduced after a given floor revision
1030 """
1033 """
1031 if self.linkrev() >= changelogrev:
1034 if self.linkrev() >= changelogrev:
1032 return True
1035 return True
1033 introrev = self._introrev(stoprev=changelogrev)
1036 introrev = self._introrev(stoprev=changelogrev)
1034 if introrev is None:
1037 if introrev is None:
1035 return False
1038 return False
1036 return introrev >= changelogrev
1039 return introrev >= changelogrev
1037
1040
1038 def introrev(self):
1041 def introrev(self):
1039 """return the rev of the changeset which introduced this file revision
1042 """return the rev of the changeset which introduced this file revision
1040
1043
1041 This method is different from linkrev because it take into account the
1044 This method is different from linkrev because it take into account the
1042 changeset the filectx was created from. It ensures the returned
1045 changeset the filectx was created from. It ensures the returned
1043 revision is one of its ancestors. This prevents bugs from
1046 revision is one of its ancestors. This prevents bugs from
1044 'linkrev-shadowing' when a file revision is used by multiple
1047 'linkrev-shadowing' when a file revision is used by multiple
1045 changesets.
1048 changesets.
1046 """
1049 """
1047 return self._introrev()
1050 return self._introrev()
1048
1051
1049 def _introrev(self, stoprev=None):
1052 def _introrev(self, stoprev=None):
1050 """
1053 """
1051 Same as `introrev` but, with an extra argument to limit changelog
1054 Same as `introrev` but, with an extra argument to limit changelog
1052 iteration range in some internal usecase.
1055 iteration range in some internal usecase.
1053
1056
1054 If `stoprev` is set, the `introrev` will not be searched past that
1057 If `stoprev` is set, the `introrev` will not be searched past that
1055 `stoprev` revision and "None" might be returned. This is useful to
1058 `stoprev` revision and "None" might be returned. This is useful to
1056 limit the iteration range.
1059 limit the iteration range.
1057 """
1060 """
1058 toprev = None
1061 toprev = None
1059 attrs = vars(self)
1062 attrs = vars(self)
1060 if '_changeid' in attrs:
1063 if '_changeid' in attrs:
1061 # We have a cached value already
1064 # We have a cached value already
1062 toprev = self._changeid
1065 toprev = self._changeid
1063 elif '_changectx' in attrs:
1066 elif '_changectx' in attrs:
1064 # We know which changelog entry we are coming from
1067 # We know which changelog entry we are coming from
1065 toprev = self._changectx.rev()
1068 toprev = self._changectx.rev()
1066
1069
1067 if toprev is not None:
1070 if toprev is not None:
1068 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1071 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1069 elif '_descendantrev' in attrs:
1072 elif '_descendantrev' in attrs:
1070 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1073 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1071 # be nice and cache the result of the computation
1074 # be nice and cache the result of the computation
1072 if introrev is not None:
1075 if introrev is not None:
1073 self._changeid = introrev
1076 self._changeid = introrev
1074 return introrev
1077 return introrev
1075 else:
1078 else:
1076 return self.linkrev()
1079 return self.linkrev()
1077
1080
1078 def introfilectx(self):
1081 def introfilectx(self):
1079 """Return filectx having identical contents, but pointing to the
1082 """Return filectx having identical contents, but pointing to the
1080 changeset revision where this filectx was introduced"""
1083 changeset revision where this filectx was introduced"""
1081 introrev = self.introrev()
1084 introrev = self.introrev()
1082 if self.rev() == introrev:
1085 if self.rev() == introrev:
1083 return self
1086 return self
1084 return self.filectx(self.filenode(), changeid=introrev)
1087 return self.filectx(self.filenode(), changeid=introrev)
1085
1088
1086 def _parentfilectx(self, path, fileid, filelog):
1089 def _parentfilectx(self, path, fileid, filelog):
1087 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1090 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1088 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1091 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1089 if '_changeid' in vars(self) or '_changectx' in vars(self):
1092 if '_changeid' in vars(self) or '_changectx' in vars(self):
1090 # If self is associated with a changeset (probably explicitly
1093 # If self is associated with a changeset (probably explicitly
1091 # fed), ensure the created filectx is associated with a
1094 # fed), ensure the created filectx is associated with a
1092 # changeset that is an ancestor of self.changectx.
1095 # changeset that is an ancestor of self.changectx.
1093 # This lets us later use _adjustlinkrev to get a correct link.
1096 # This lets us later use _adjustlinkrev to get a correct link.
1094 fctx._descendantrev = self.rev()
1097 fctx._descendantrev = self.rev()
1095 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1098 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1096 elif '_descendantrev' in vars(self):
1099 elif '_descendantrev' in vars(self):
1097 # Otherwise propagate _descendantrev if we have one associated.
1100 # Otherwise propagate _descendantrev if we have one associated.
1098 fctx._descendantrev = self._descendantrev
1101 fctx._descendantrev = self._descendantrev
1099 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1102 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1100 return fctx
1103 return fctx
1101
1104
1102 def parents(self):
1105 def parents(self):
1103 _path = self._path
1106 _path = self._path
1104 fl = self._filelog
1107 fl = self._filelog
1105 parents = self._filelog.parents(self._filenode)
1108 parents = self._filelog.parents(self._filenode)
1106 pl = [(_path, node, fl) for node in parents if node != nullid]
1109 pl = [(_path, node, fl) for node in parents if node != nullid]
1107
1110
1108 r = fl.renamed(self._filenode)
1111 r = fl.renamed(self._filenode)
1109 if r:
1112 if r:
1110 # - In the simple rename case, both parent are nullid, pl is empty.
1113 # - In the simple rename case, both parent are nullid, pl is empty.
1111 # - In case of merge, only one of the parent is null id and should
1114 # - In case of merge, only one of the parent is null id and should
1112 # be replaced with the rename information. This parent is -always-
1115 # be replaced with the rename information. This parent is -always-
1113 # the first one.
1116 # the first one.
1114 #
1117 #
1115 # As null id have always been filtered out in the previous list
1118 # As null id have always been filtered out in the previous list
1116 # comprehension, inserting to 0 will always result in "replacing
1119 # comprehension, inserting to 0 will always result in "replacing
1117 # first nullid parent with rename information.
1120 # first nullid parent with rename information.
1118 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1121 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1119
1122
1120 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1123 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1121
1124
1122 def p1(self):
1125 def p1(self):
1123 return self.parents()[0]
1126 return self.parents()[0]
1124
1127
1125 def p2(self):
1128 def p2(self):
1126 p = self.parents()
1129 p = self.parents()
1127 if len(p) == 2:
1130 if len(p) == 2:
1128 return p[1]
1131 return p[1]
1129 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1132 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1130
1133
1131 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1134 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1132 """Returns a list of annotateline objects for each line in the file
1135 """Returns a list of annotateline objects for each line in the file
1133
1136
1134 - line.fctx is the filectx of the node where that line was last changed
1137 - line.fctx is the filectx of the node where that line was last changed
1135 - line.lineno is the line number at the first appearance in the managed
1138 - line.lineno is the line number at the first appearance in the managed
1136 file
1139 file
1137 - line.text is the data on that line (including newline character)
1140 - line.text is the data on that line (including newline character)
1138 """
1141 """
1139 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1142 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1140
1143
1141 def parents(f):
1144 def parents(f):
1142 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1145 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1143 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1146 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1144 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1147 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1145 # isn't an ancestor of the srcrev.
1148 # isn't an ancestor of the srcrev.
1146 f._changeid
1149 f._changeid
1147 pl = f.parents()
1150 pl = f.parents()
1148
1151
1149 # Don't return renamed parents if we aren't following.
1152 # Don't return renamed parents if we aren't following.
1150 if not follow:
1153 if not follow:
1151 pl = [p for p in pl if p.path() == f.path()]
1154 pl = [p for p in pl if p.path() == f.path()]
1152
1155
1153 # renamed filectx won't have a filelog yet, so set it
1156 # renamed filectx won't have a filelog yet, so set it
1154 # from the cache to save time
1157 # from the cache to save time
1155 for p in pl:
1158 for p in pl:
1156 if not '_filelog' in p.__dict__:
1159 if not '_filelog' in p.__dict__:
1157 p._filelog = getlog(p.path())
1160 p._filelog = getlog(p.path())
1158
1161
1159 return pl
1162 return pl
1160
1163
1161 # use linkrev to find the first changeset where self appeared
1164 # use linkrev to find the first changeset where self appeared
1162 base = self.introfilectx()
1165 base = self.introfilectx()
1163 if getattr(base, '_ancestrycontext', None) is None:
1166 if getattr(base, '_ancestrycontext', None) is None:
1164 # it is safe to use an unfiltered repository here because we are
1167 # it is safe to use an unfiltered repository here because we are
1165 # walking ancestors only.
1168 # walking ancestors only.
1166 cl = self._repo.unfiltered().changelog
1169 cl = self._repo.unfiltered().changelog
1167 if base.rev() is None:
1170 if base.rev() is None:
1168 # wctx is not inclusive, but works because _ancestrycontext
1171 # wctx is not inclusive, but works because _ancestrycontext
1169 # is used to test filelog revisions
1172 # is used to test filelog revisions
1170 ac = cl.ancestors(
1173 ac = cl.ancestors(
1171 [p.rev() for p in base.parents()], inclusive=True
1174 [p.rev() for p in base.parents()], inclusive=True
1172 )
1175 )
1173 else:
1176 else:
1174 ac = cl.ancestors([base.rev()], inclusive=True)
1177 ac = cl.ancestors([base.rev()], inclusive=True)
1175 base._ancestrycontext = ac
1178 base._ancestrycontext = ac
1176
1179
1177 return dagop.annotate(
1180 return dagop.annotate(
1178 base, parents, skiprevs=skiprevs, diffopts=diffopts
1181 base, parents, skiprevs=skiprevs, diffopts=diffopts
1179 )
1182 )
1180
1183
1181 def ancestors(self, followfirst=False):
1184 def ancestors(self, followfirst=False):
1182 visit = {}
1185 visit = {}
1183 c = self
1186 c = self
1184 if followfirst:
1187 if followfirst:
1185 cut = 1
1188 cut = 1
1186 else:
1189 else:
1187 cut = None
1190 cut = None
1188
1191
1189 while True:
1192 while True:
1190 for parent in c.parents()[:cut]:
1193 for parent in c.parents()[:cut]:
1191 visit[(parent.linkrev(), parent.filenode())] = parent
1194 visit[(parent.linkrev(), parent.filenode())] = parent
1192 if not visit:
1195 if not visit:
1193 break
1196 break
1194 c = visit.pop(max(visit))
1197 c = visit.pop(max(visit))
1195 yield c
1198 yield c
1196
1199
1197 def decodeddata(self):
1200 def decodeddata(self):
1198 """Returns `data()` after running repository decoding filters.
1201 """Returns `data()` after running repository decoding filters.
1199
1202
1200 This is often equivalent to how the data would be expressed on disk.
1203 This is often equivalent to how the data would be expressed on disk.
1201 """
1204 """
1202 return self._repo.wwritedata(self.path(), self.data())
1205 return self._repo.wwritedata(self.path(), self.data())
1203
1206
1204
1207
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # at least one way of locating the revision must be supplied
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog

        # only pre-seed the caches the caller supplied; everything else is
        # derived lazily through propertycaches on basefilectx
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository.  When the
            # repository is filtered this may lead to `filectx` trying to
            # build `changectx` for a filtered revision.  In such a case we
            # fall back to creating `changectx` on the unfiltered version of
            # the repository.  This fallback should not be an issue because
            # `changectx` from `filectx` are not used in complex operations
            # that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes.  It does not ensure the behavior is correct.  However
            # the behavior was not correct before filtering either and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve.  Proper handling of the issue here should
            # be considered when solving of the linkrev issues is on the
            # table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw (undecoded) revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        A censored node yields b"" when censor.policy is "ignore" and
        aborts otherwise.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the stored size of this file revision."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    # one parent already has this exact file revision, so
                    # this is not a copy introduced by this changeset
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        kids = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in kids
        ]
1328
1331
1329
1332
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # user/date/status fall back to propertycaches below; only pin the
        # attributes the caller supplied explicitly
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        # pending commits are rendered as "<p1>+"
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # default: current repository status
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        st = self._status
        return sorted(st.modified + st.added + st.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        """Return the union of the parents' bookmarks."""
        marks = []
        for p in self.parents():
            marks.extend(p.bookmarks())
        return marks

    def phase(self):
        # never commit in a lower phase than any parent
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        """Yield the parents, then all their changelog ancestors."""
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1482
1485
1483
1486
1484 class workingctx(committablectx):
1487 class workingctx(committablectx):
1485 """A workingctx object makes access to data related to
1488 """A workingctx object makes access to data related to
1486 the current working directory convenient.
1489 the current working directory convenient.
1487 date - any valid date string or (unixtime, offset), or None.
1490 date - any valid date string or (unixtime, offset), or None.
1488 user - username string, or None.
1491 user - username string, or None.
1489 extra - a dictionary of extra values, or None.
1492 extra - a dictionary of extra values, or None.
1490 changes - a list of file lists as returned by localrepo.status()
1493 changes - a list of file lists as returned by localrepo.status()
1491 or None to use the repository status.
1494 or None to use the repository status.
1492 """
1495 """
1493
1496
1494 def __init__(
1497 def __init__(
1495 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1498 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1496 ):
1499 ):
1497 branch = None
1500 branch = None
1498 if not extra or b'branch' not in extra:
1501 if not extra or b'branch' not in extra:
1499 try:
1502 try:
1500 branch = repo.dirstate.branch()
1503 branch = repo.dirstate.branch()
1501 except UnicodeDecodeError:
1504 except UnicodeDecodeError:
1502 raise error.Abort(_(b'branch name not in UTF-8!'))
1505 raise error.Abort(_(b'branch name not in UTF-8!'))
1503 super(workingctx, self).__init__(
1506 super(workingctx, self).__init__(
1504 repo, text, user, date, extra, changes, branch=branch
1507 repo, text, user, date, extra, changes, branch=branch
1505 )
1508 )
1506
1509
1507 def __iter__(self):
1510 def __iter__(self):
1508 d = self._repo.dirstate
1511 d = self._repo.dirstate
1509 for f in d:
1512 for f in d:
1510 if d[f] != b'r':
1513 if d[f] != b'r':
1511 yield f
1514 yield f
1512
1515
1513 def __contains__(self, key):
1516 def __contains__(self, key):
1514 return self._repo.dirstate[key] not in b"?r"
1517 return self._repo.dirstate[key] not in b"?r"
1515
1518
1516 def hex(self):
1519 def hex(self):
1517 return wdirhex
1520 return wdirhex
1518
1521
1519 @propertycache
1522 @propertycache
1520 def _parents(self):
1523 def _parents(self):
1521 p = self._repo.dirstate.parents()
1524 p = self._repo.dirstate.parents()
1522 if p[1] == nullid:
1525 if p[1] == nullid:
1523 p = p[:-1]
1526 p = p[:-1]
1524 # use unfiltered repo to delay/avoid loading obsmarkers
1527 # use unfiltered repo to delay/avoid loading obsmarkers
1525 unfi = self._repo.unfiltered()
1528 unfi = self._repo.unfiltered()
1526 return [
1529 return [
1527 changectx(
1530 changectx(
1528 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1531 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1529 )
1532 )
1530 for n in p
1533 for n in p
1531 ]
1534 ]
1532
1535
1533 def setparents(self, p1node, p2node=nullid):
1536 def setparents(self, p1node, p2node=nullid):
1534 dirstate = self._repo.dirstate
1537 dirstate = self._repo.dirstate
1535 with dirstate.parentchange():
1538 with dirstate.parentchange():
1536 copies = dirstate.setparents(p1node, p2node)
1539 copies = dirstate.setparents(p1node, p2node)
1537 pctx = self._repo[p1node]
1540 pctx = self._repo[p1node]
1538 if copies:
1541 if copies:
1539 # Adjust copy records, the dirstate cannot do it, it
1542 # Adjust copy records, the dirstate cannot do it, it
1540 # requires access to parents manifests. Preserve them
1543 # requires access to parents manifests. Preserve them
1541 # only for entries added to first parent.
1544 # only for entries added to first parent.
1542 for f in copies:
1545 for f in copies:
1543 if f not in pctx and copies[f] in pctx:
1546 if f not in pctx and copies[f] in pctx:
1544 dirstate.copy(copies[f], f)
1547 dirstate.copy(copies[f], f)
1545 if p2node == nullid:
1548 if p2node == nullid:
1546 for f, s in sorted(dirstate.copies().items()):
1549 for f, s in sorted(dirstate.copies().items()):
1547 if f not in pctx and s not in pctx:
1550 if f not in pctx and s not in pctx:
1548 dirstate.copy(None, f)
1551 dirstate.copy(None, f)
1549
1552
1550 def _fileinfo(self, path):
1553 def _fileinfo(self, path):
1551 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1554 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1552 self._manifest
1555 self._manifest
1553 return super(workingctx, self)._fileinfo(path)
1556 return super(workingctx, self)._fileinfo(path)
1554
1557
1555 def _buildflagfunc(self):
1558 def _buildflagfunc(self):
1556 # Create a fallback function for getting file flags when the
1559 # Create a fallback function for getting file flags when the
1557 # filesystem doesn't support them
1560 # filesystem doesn't support them
1558
1561
1559 copiesget = self._repo.dirstate.copies().get
1562 copiesget = self._repo.dirstate.copies().get
1560 parents = self.parents()
1563 parents = self.parents()
1561 if len(parents) < 2:
1564 if len(parents) < 2:
1562 # when we have one parent, it's easy: copy from parent
1565 # when we have one parent, it's easy: copy from parent
1563 man = parents[0].manifest()
1566 man = parents[0].manifest()
1564
1567
1565 def func(f):
1568 def func(f):
1566 f = copiesget(f, f)
1569 f = copiesget(f, f)
1567 return man.flags(f)
1570 return man.flags(f)
1568
1571
1569 else:
1572 else:
1570 # merges are tricky: we try to reconstruct the unstored
1573 # merges are tricky: we try to reconstruct the unstored
1571 # result from the merge (issue1802)
1574 # result from the merge (issue1802)
1572 p1, p2 = parents
1575 p1, p2 = parents
1573 pa = p1.ancestor(p2)
1576 pa = p1.ancestor(p2)
1574 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1577 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1575
1578
1576 def func(f):
1579 def func(f):
1577 f = copiesget(f, f) # may be wrong for merges with copies
1580 f = copiesget(f, f) # may be wrong for merges with copies
1578 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1581 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1579 if fl1 == fl2:
1582 if fl1 == fl2:
1580 return fl1
1583 return fl1
1581 if fl1 == fla:
1584 if fl1 == fla:
1582 return fl2
1585 return fl2
1583 if fl2 == fla:
1586 if fl2 == fla:
1584 return fl1
1587 return fl1
1585 return b'' # punt for conflicts
1588 return b'' # punt for conflicts
1586
1589
1587 return func
1590 return func
1588
1591
1589 @propertycache
1592 @propertycache
1590 def _flagfunc(self):
1593 def _flagfunc(self):
1591 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1594 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1592
1595
1593 def flags(self, path):
1596 def flags(self, path):
1594 if '_manifest' in self.__dict__:
1597 if '_manifest' in self.__dict__:
1595 try:
1598 try:
1596 return self._manifest.flags(path)
1599 return self._manifest.flags(path)
1597 except KeyError:
1600 except KeyError:
1598 return b''
1601 return b''
1599
1602
1600 try:
1603 try:
1601 return self._flagfunc(path)
1604 return self._flagfunc(path)
1602 except OSError:
1605 except OSError:
1603 return b''
1606 return b''
1604
1607
1605 def filectx(self, path, filelog=None):
1608 def filectx(self, path, filelog=None):
1606 """get a file context from the working directory"""
1609 """get a file context from the working directory"""
1607 return workingfilectx(
1610 return workingfilectx(
1608 self._repo, path, workingctx=self, filelog=filelog
1611 self._repo, path, workingctx=self, filelog=filelog
1609 )
1612 )
1610
1613
1611 def dirty(self, missing=False, merge=True, branch=True):
1614 def dirty(self, missing=False, merge=True, branch=True):
1612 """check whether a working directory is modified"""
1615 """check whether a working directory is modified"""
1613 # check subrepos first
1616 # check subrepos first
1614 for s in sorted(self.substate):
1617 for s in sorted(self.substate):
1615 if self.sub(s).dirty(missing=missing):
1618 if self.sub(s).dirty(missing=missing):
1616 return True
1619 return True
1617 # check current working dir
1620 # check current working dir
1618 return (
1621 return (
1619 (merge and self.p2())
1622 (merge and self.p2())
1620 or (branch and self.branch() != self.p1().branch())
1623 or (branch and self.branch() != self.p1().branch())
1621 or self.modified()
1624 or self.modified()
1622 or self.added()
1625 or self.added()
1623 or self.removed()
1626 or self.removed()
1624 or (missing and self.deleted())
1627 or (missing and self.deleted())
1625 )
1628 )
1626
1629
1627 def add(self, list, prefix=b""):
1630 def add(self, list, prefix=b""):
1628 with self._repo.wlock():
1631 with self._repo.wlock():
1629 ui, ds = self._repo.ui, self._repo.dirstate
1632 ui, ds = self._repo.ui, self._repo.dirstate
1630 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1633 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1631 rejected = []
1634 rejected = []
1632 lstat = self._repo.wvfs.lstat
1635 lstat = self._repo.wvfs.lstat
1633 for f in list:
1636 for f in list:
1634 # ds.pathto() returns an absolute file when this is invoked from
1637 # ds.pathto() returns an absolute file when this is invoked from
1635 # the keyword extension. That gets flagged as non-portable on
1638 # the keyword extension. That gets flagged as non-portable on
1636 # Windows, since it contains the drive letter and colon.
1639 # Windows, since it contains the drive letter and colon.
1637 scmutil.checkportable(ui, os.path.join(prefix, f))
1640 scmutil.checkportable(ui, os.path.join(prefix, f))
1638 try:
1641 try:
1639 st = lstat(f)
1642 st = lstat(f)
1640 except OSError:
1643 except OSError:
1641 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1644 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1642 rejected.append(f)
1645 rejected.append(f)
1643 continue
1646 continue
1644 limit = ui.configbytes(b'ui', b'large-file-limit')
1647 limit = ui.configbytes(b'ui', b'large-file-limit')
1645 if limit != 0 and st.st_size > limit:
1648 if limit != 0 and st.st_size > limit:
1646 ui.warn(
1649 ui.warn(
1647 _(
1650 _(
1648 b"%s: up to %d MB of RAM may be required "
1651 b"%s: up to %d MB of RAM may be required "
1649 b"to manage this file\n"
1652 b"to manage this file\n"
1650 b"(use 'hg revert %s' to cancel the "
1653 b"(use 'hg revert %s' to cancel the "
1651 b"pending addition)\n"
1654 b"pending addition)\n"
1652 )
1655 )
1653 % (f, 3 * st.st_size // 1000000, uipath(f))
1656 % (f, 3 * st.st_size // 1000000, uipath(f))
1654 )
1657 )
1655 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1658 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1656 ui.warn(
1659 ui.warn(
1657 _(
1660 _(
1658 b"%s not added: only files and symlinks "
1661 b"%s not added: only files and symlinks "
1659 b"supported currently\n"
1662 b"supported currently\n"
1660 )
1663 )
1661 % uipath(f)
1664 % uipath(f)
1662 )
1665 )
1663 rejected.append(f)
1666 rejected.append(f)
1664 elif ds[f] in b'amn':
1667 elif ds[f] in b'amn':
1665 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1668 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1666 elif ds[f] == b'r':
1669 elif ds[f] == b'r':
1667 ds.normallookup(f)
1670 ds.normallookup(f)
1668 else:
1671 else:
1669 ds.add(f)
1672 ds.add(f)
1670 return rejected
1673 return rejected
1671
1674
1672 def forget(self, files, prefix=b""):
1675 def forget(self, files, prefix=b""):
1673 with self._repo.wlock():
1676 with self._repo.wlock():
1674 ds = self._repo.dirstate
1677 ds = self._repo.dirstate
1675 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1678 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1676 rejected = []
1679 rejected = []
1677 for f in files:
1680 for f in files:
1678 if f not in ds:
1681 if f not in ds:
1679 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1682 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1680 rejected.append(f)
1683 rejected.append(f)
1681 elif ds[f] != b'a':
1684 elif ds[f] != b'a':
1682 ds.remove(f)
1685 ds.remove(f)
1683 else:
1686 else:
1684 ds.drop(f)
1687 ds.drop(f)
1685 return rejected
1688 return rejected
1686
1689
1687 def copy(self, source, dest):
1690 def copy(self, source, dest):
1688 try:
1691 try:
1689 st = self._repo.wvfs.lstat(dest)
1692 st = self._repo.wvfs.lstat(dest)
1690 except OSError as err:
1693 except OSError as err:
1691 if err.errno != errno.ENOENT:
1694 if err.errno != errno.ENOENT:
1692 raise
1695 raise
1693 self._repo.ui.warn(
1696 self._repo.ui.warn(
1694 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1697 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1695 )
1698 )
1696 return
1699 return
1697 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1700 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1698 self._repo.ui.warn(
1701 self._repo.ui.warn(
1699 _(b"copy failed: %s is not a file or a symbolic link\n")
1702 _(b"copy failed: %s is not a file or a symbolic link\n")
1700 % self._repo.dirstate.pathto(dest)
1703 % self._repo.dirstate.pathto(dest)
1701 )
1704 )
1702 else:
1705 else:
1703 with self._repo.wlock():
1706 with self._repo.wlock():
1704 ds = self._repo.dirstate
1707 ds = self._repo.dirstate
1705 if ds[dest] in b'?':
1708 if ds[dest] in b'?':
1706 ds.add(dest)
1709 ds.add(dest)
1707 elif ds[dest] in b'r':
1710 elif ds[dest] in b'r':
1708 ds.normallookup(dest)
1711 ds.normallookup(dest)
1709 ds.copy(source, dest)
1712 ds.copy(source, dest)
1710
1713
1711 def match(
1714 def match(
1712 self,
1715 self,
1713 pats=None,
1716 pats=None,
1714 include=None,
1717 include=None,
1715 exclude=None,
1718 exclude=None,
1716 default=b'glob',
1719 default=b'glob',
1717 listsubrepos=False,
1720 listsubrepos=False,
1718 badfn=None,
1721 badfn=None,
1719 cwd=None,
1722 cwd=None,
1720 ):
1723 ):
1721 r = self._repo
1724 r = self._repo
1722 if not cwd:
1725 if not cwd:
1723 cwd = r.getcwd()
1726 cwd = r.getcwd()
1724
1727
1725 # Only a case insensitive filesystem needs magic to translate user input
1728 # Only a case insensitive filesystem needs magic to translate user input
1726 # to actual case in the filesystem.
1729 # to actual case in the filesystem.
1727 icasefs = not util.fscasesensitive(r.root)
1730 icasefs = not util.fscasesensitive(r.root)
1728 return matchmod.match(
1731 return matchmod.match(
1729 r.root,
1732 r.root,
1730 cwd,
1733 cwd,
1731 pats,
1734 pats,
1732 include,
1735 include,
1733 exclude,
1736 exclude,
1734 default,
1737 default,
1735 auditor=r.auditor,
1738 auditor=r.auditor,
1736 ctx=self,
1739 ctx=self,
1737 listsubrepos=listsubrepos,
1740 listsubrepos=listsubrepos,
1738 badfn=badfn,
1741 badfn=badfn,
1739 icasefs=icasefs,
1742 icasefs=icasefs,
1740 )
1743 )
1741
1744
1742 def _filtersuspectsymlink(self, files):
1745 def _filtersuspectsymlink(self, files):
1743 if not files or self._repo.dirstate._checklink:
1746 if not files or self._repo.dirstate._checklink:
1744 return files
1747 return files
1745
1748
1746 # Symlink placeholders may get non-symlink-like contents
1749 # Symlink placeholders may get non-symlink-like contents
1747 # via user error or dereferencing by NFS or Samba servers,
1750 # via user error or dereferencing by NFS or Samba servers,
1748 # so we filter out any placeholders that don't look like a
1751 # so we filter out any placeholders that don't look like a
1749 # symlink
1752 # symlink
1750 sane = []
1753 sane = []
1751 for f in files:
1754 for f in files:
1752 if self.flags(f) == b'l':
1755 if self.flags(f) == b'l':
1753 d = self[f].data()
1756 d = self[f].data()
1754 if (
1757 if (
1755 d == b''
1758 d == b''
1756 or len(d) >= 1024
1759 or len(d) >= 1024
1757 or b'\n' in d
1760 or b'\n' in d
1758 or stringutil.binary(d)
1761 or stringutil.binary(d)
1759 ):
1762 ):
1760 self._repo.ui.debug(
1763 self._repo.ui.debug(
1761 b'ignoring suspect symlink placeholder "%s"\n' % f
1764 b'ignoring suspect symlink placeholder "%s"\n' % f
1762 )
1765 )
1763 continue
1766 continue
1764 sane.append(f)
1767 sane.append(f)
1765 return sane
1768 return sane
1766
1769
1767 def _checklookup(self, files):
1770 def _checklookup(self, files):
1768 # check for any possibly clean files
1771 # check for any possibly clean files
1769 if not files:
1772 if not files:
1770 return [], [], []
1773 return [], [], []
1771
1774
1772 modified = []
1775 modified = []
1773 deleted = []
1776 deleted = []
1774 fixup = []
1777 fixup = []
1775 pctx = self._parents[0]
1778 pctx = self._parents[0]
1776 # do a full compare of any files that might have changed
1779 # do a full compare of any files that might have changed
1777 for f in sorted(files):
1780 for f in sorted(files):
1778 try:
1781 try:
1779 # This will return True for a file that got replaced by a
1782 # This will return True for a file that got replaced by a
1780 # directory in the interim, but fixing that is pretty hard.
1783 # directory in the interim, but fixing that is pretty hard.
1781 if (
1784 if (
1782 f not in pctx
1785 f not in pctx
1783 or self.flags(f) != pctx.flags(f)
1786 or self.flags(f) != pctx.flags(f)
1784 or pctx[f].cmp(self[f])
1787 or pctx[f].cmp(self[f])
1785 ):
1788 ):
1786 modified.append(f)
1789 modified.append(f)
1787 else:
1790 else:
1788 fixup.append(f)
1791 fixup.append(f)
1789 except (IOError, OSError):
1792 except (IOError, OSError):
1790 # A file become inaccessible in between? Mark it as deleted,
1793 # A file become inaccessible in between? Mark it as deleted,
1791 # matching dirstate behavior (issue5584).
1794 # matching dirstate behavior (issue5584).
1792 # The dirstate has more complex behavior around whether a
1795 # The dirstate has more complex behavior around whether a
1793 # missing file matches a directory, etc, but we don't need to
1796 # missing file matches a directory, etc, but we don't need to
1794 # bother with that: if f has made it to this point, we're sure
1797 # bother with that: if f has made it to this point, we're sure
1795 # it's in the dirstate.
1798 # it's in the dirstate.
1796 deleted.append(f)
1799 deleted.append(f)
1797
1800
1798 return modified, deleted, fixup
1801 return modified, deleted, fixup
1799
1802
1800 def _poststatusfixup(self, status, fixup):
1803 def _poststatusfixup(self, status, fixup):
1801 """update dirstate for files that are actually clean"""
1804 """update dirstate for files that are actually clean"""
1802 poststatus = self._repo.postdsstatus()
1805 poststatus = self._repo.postdsstatus()
1803 if fixup or poststatus:
1806 if fixup or poststatus:
1804 try:
1807 try:
1805 oldid = self._repo.dirstate.identity()
1808 oldid = self._repo.dirstate.identity()
1806
1809
1807 # updating the dirstate is optional
1810 # updating the dirstate is optional
1808 # so we don't wait on the lock
1811 # so we don't wait on the lock
1809 # wlock can invalidate the dirstate, so cache normal _after_
1812 # wlock can invalidate the dirstate, so cache normal _after_
1810 # taking the lock
1813 # taking the lock
1811 with self._repo.wlock(False):
1814 with self._repo.wlock(False):
1812 if self._repo.dirstate.identity() == oldid:
1815 if self._repo.dirstate.identity() == oldid:
1813 if fixup:
1816 if fixup:
1814 normal = self._repo.dirstate.normal
1817 normal = self._repo.dirstate.normal
1815 for f in fixup:
1818 for f in fixup:
1816 normal(f)
1819 normal(f)
1817 # write changes out explicitly, because nesting
1820 # write changes out explicitly, because nesting
1818 # wlock at runtime may prevent 'wlock.release()'
1821 # wlock at runtime may prevent 'wlock.release()'
1819 # after this block from doing so for subsequent
1822 # after this block from doing so for subsequent
1820 # changing files
1823 # changing files
1821 tr = self._repo.currenttransaction()
1824 tr = self._repo.currenttransaction()
1822 self._repo.dirstate.write(tr)
1825 self._repo.dirstate.write(tr)
1823
1826
1824 if poststatus:
1827 if poststatus:
1825 for ps in poststatus:
1828 for ps in poststatus:
1826 ps(self, status)
1829 ps(self, status)
1827 else:
1830 else:
1828 # in this case, writing changes out breaks
1831 # in this case, writing changes out breaks
1829 # consistency, because .hg/dirstate was
1832 # consistency, because .hg/dirstate was
1830 # already changed simultaneously after last
1833 # already changed simultaneously after last
1831 # caching (see also issue5584 for detail)
1834 # caching (see also issue5584 for detail)
1832 self._repo.ui.debug(
1835 self._repo.ui.debug(
1833 b'skip updating dirstate: identity mismatch\n'
1836 b'skip updating dirstate: identity mismatch\n'
1834 )
1837 )
1835 except error.LockError:
1838 except error.LockError:
1836 pass
1839 pass
1837 finally:
1840 finally:
1838 # Even if the wlock couldn't be grabbed, clear out the list.
1841 # Even if the wlock couldn't be grabbed, clear out the list.
1839 self._repo.clearpostdsstatus()
1842 self._repo.clearpostdsstatus()
1840
1843
1841 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1844 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1842 '''Gets the status from the dirstate -- internal use only.'''
1845 '''Gets the status from the dirstate -- internal use only.'''
1843 subrepos = []
1846 subrepos = []
1844 if b'.hgsub' in self:
1847 if b'.hgsub' in self:
1845 subrepos = sorted(self.substate)
1848 subrepos = sorted(self.substate)
1846 cmp, s = self._repo.dirstate.status(
1849 cmp, s = self._repo.dirstate.status(
1847 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1850 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1848 )
1851 )
1849
1852
1850 # check for any possibly clean files
1853 # check for any possibly clean files
1851 fixup = []
1854 fixup = []
1852 if cmp:
1855 if cmp:
1853 modified2, deleted2, fixup = self._checklookup(cmp)
1856 modified2, deleted2, fixup = self._checklookup(cmp)
1854 s.modified.extend(modified2)
1857 s.modified.extend(modified2)
1855 s.deleted.extend(deleted2)
1858 s.deleted.extend(deleted2)
1856
1859
1857 if fixup and clean:
1860 if fixup and clean:
1858 s.clean.extend(fixup)
1861 s.clean.extend(fixup)
1859
1862
1860 self._poststatusfixup(s, fixup)
1863 self._poststatusfixup(s, fixup)
1861
1864
1862 if match.always():
1865 if match.always():
1863 # cache for performance
1866 # cache for performance
1864 if s.unknown or s.ignored or s.clean:
1867 if s.unknown or s.ignored or s.clean:
1865 # "_status" is cached with list*=False in the normal route
1868 # "_status" is cached with list*=False in the normal route
1866 self._status = scmutil.status(
1869 self._status = scmutil.status(
1867 s.modified, s.added, s.removed, s.deleted, [], [], []
1870 s.modified, s.added, s.removed, s.deleted, [], [], []
1868 )
1871 )
1869 else:
1872 else:
1870 self._status = s
1873 self._status = s
1871
1874
1872 return s
1875 return s
1873
1876
1874 @propertycache
1877 @propertycache
1875 def _copies(self):
1878 def _copies(self):
1876 p1copies = {}
1879 p1copies = {}
1877 p2copies = {}
1880 p2copies = {}
1878 parents = self._repo.dirstate.parents()
1881 parents = self._repo.dirstate.parents()
1879 p1manifest = self._repo[parents[0]].manifest()
1882 p1manifest = self._repo[parents[0]].manifest()
1880 p2manifest = self._repo[parents[1]].manifest()
1883 p2manifest = self._repo[parents[1]].manifest()
1881 changedset = set(self.added()) | set(self.modified())
1884 changedset = set(self.added()) | set(self.modified())
1882 narrowmatch = self._repo.narrowmatch()
1885 narrowmatch = self._repo.narrowmatch()
1883 for dst, src in self._repo.dirstate.copies().items():
1886 for dst, src in self._repo.dirstate.copies().items():
1884 if dst not in changedset or not narrowmatch(dst):
1887 if dst not in changedset or not narrowmatch(dst):
1885 continue
1888 continue
1886 if src in p1manifest:
1889 if src in p1manifest:
1887 p1copies[dst] = src
1890 p1copies[dst] = src
1888 elif src in p2manifest:
1891 elif src in p2manifest:
1889 p2copies[dst] = src
1892 p2copies[dst] = src
1890 return p1copies, p2copies
1893 return p1copies, p2copies
1891
1894
1892 @propertycache
1895 @propertycache
1893 def _manifest(self):
1896 def _manifest(self):
1894 """generate a manifest corresponding to the values in self._status
1897 """generate a manifest corresponding to the values in self._status
1895
1898
1896 This reuse the file nodeid from parent, but we use special node
1899 This reuse the file nodeid from parent, but we use special node
1897 identifiers for added and modified files. This is used by manifests
1900 identifiers for added and modified files. This is used by manifests
1898 merge to see that files are different and by update logic to avoid
1901 merge to see that files are different and by update logic to avoid
1899 deleting newly added files.
1902 deleting newly added files.
1900 """
1903 """
1901 return self._buildstatusmanifest(self._status)
1904 return self._buildstatusmanifest(self._status)
1902
1905
1903 def _buildstatusmanifest(self, status):
1906 def _buildstatusmanifest(self, status):
1904 """Builds a manifest that includes the given status results."""
1907 """Builds a manifest that includes the given status results."""
1905 parents = self.parents()
1908 parents = self.parents()
1906
1909
1907 man = parents[0].manifest().copy()
1910 man = parents[0].manifest().copy()
1908
1911
1909 ff = self._flagfunc
1912 ff = self._flagfunc
1910 for i, l in (
1913 for i, l in (
1911 (addednodeid, status.added),
1914 (addednodeid, status.added),
1912 (modifiednodeid, status.modified),
1915 (modifiednodeid, status.modified),
1913 ):
1916 ):
1914 for f in l:
1917 for f in l:
1915 man[f] = i
1918 man[f] = i
1916 try:
1919 try:
1917 man.setflag(f, ff(f))
1920 man.setflag(f, ff(f))
1918 except OSError:
1921 except OSError:
1919 pass
1922 pass
1920
1923
1921 for f in status.deleted + status.removed:
1924 for f in status.deleted + status.removed:
1922 if f in man:
1925 if f in man:
1923 del man[f]
1926 del man[f]
1924
1927
1925 return man
1928 return man
1926
1929
1927 def _buildstatus(
1930 def _buildstatus(
1928 self, other, s, match, listignored, listclean, listunknown
1931 self, other, s, match, listignored, listclean, listunknown
1929 ):
1932 ):
1930 """build a status with respect to another context
1933 """build a status with respect to another context
1931
1934
1932 This includes logic for maintaining the fast path of status when
1935 This includes logic for maintaining the fast path of status when
1933 comparing the working directory against its parent, which is to skip
1936 comparing the working directory against its parent, which is to skip
1934 building a new manifest if self (working directory) is not comparing
1937 building a new manifest if self (working directory) is not comparing
1935 against its parent (repo['.']).
1938 against its parent (repo['.']).
1936 """
1939 """
1937 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1940 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1938 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1941 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1939 # might have accidentally ended up with the entire contents of the file
1942 # might have accidentally ended up with the entire contents of the file
1940 # they are supposed to be linking to.
1943 # they are supposed to be linking to.
1941 s.modified[:] = self._filtersuspectsymlink(s.modified)
1944 s.modified[:] = self._filtersuspectsymlink(s.modified)
1942 if other != self._repo[b'.']:
1945 if other != self._repo[b'.']:
1943 s = super(workingctx, self)._buildstatus(
1946 s = super(workingctx, self)._buildstatus(
1944 other, s, match, listignored, listclean, listunknown
1947 other, s, match, listignored, listclean, listunknown
1945 )
1948 )
1946 return s
1949 return s
1947
1950
1948 def _matchstatus(self, other, match):
1951 def _matchstatus(self, other, match):
1949 """override the match method with a filter for directory patterns
1952 """override the match method with a filter for directory patterns
1950
1953
1951 We use inheritance to customize the match.bad method only in cases of
1954 We use inheritance to customize the match.bad method only in cases of
1952 workingctx since it belongs only to the working directory when
1955 workingctx since it belongs only to the working directory when
1953 comparing against the parent changeset.
1956 comparing against the parent changeset.
1954
1957
1955 If we aren't comparing against the working directory's parent, then we
1958 If we aren't comparing against the working directory's parent, then we
1956 just use the default match object sent to us.
1959 just use the default match object sent to us.
1957 """
1960 """
1958 if other != self._repo[b'.']:
1961 if other != self._repo[b'.']:
1959
1962
1960 def bad(f, msg):
1963 def bad(f, msg):
1961 # 'f' may be a directory pattern from 'match.files()',
1964 # 'f' may be a directory pattern from 'match.files()',
1962 # so 'f not in ctx1' is not enough
1965 # so 'f not in ctx1' is not enough
1963 if f not in other and not other.hasdir(f):
1966 if f not in other and not other.hasdir(f):
1964 self._repo.ui.warn(
1967 self._repo.ui.warn(
1965 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1968 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1966 )
1969 )
1967
1970
1968 match.bad = bad
1971 match.bad = bad
1969 return match
1972 return match
1970
1973
1971 def walk(self, match):
1974 def walk(self, match):
1972 '''Generates matching file names.'''
1975 '''Generates matching file names.'''
1973 return sorted(
1976 return sorted(
1974 self._repo.dirstate.walk(
1977 self._repo.dirstate.walk(
1975 self._repo.narrowmatch(match),
1978 self._repo.narrowmatch(match),
1976 subrepos=sorted(self.substate),
1979 subrepos=sorted(self.substate),
1977 unknown=True,
1980 unknown=True,
1978 ignored=False,
1981 ignored=False,
1979 )
1982 )
1980 )
1983 )
1981
1984
1982 def matches(self, match):
1985 def matches(self, match):
1983 match = self._repo.narrowmatch(match)
1986 match = self._repo.narrowmatch(match)
1984 ds = self._repo.dirstate
1987 ds = self._repo.dirstate
1985 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1988 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1986
1989
1987 def markcommitted(self, node):
1990 def markcommitted(self, node):
1988 with self._repo.dirstate.parentchange():
1991 with self._repo.dirstate.parentchange():
1989 for f in self.modified() + self.added():
1992 for f in self.modified() + self.added():
1990 self._repo.dirstate.normal(f)
1993 self._repo.dirstate.normal(f)
1991 for f in self.removed():
1994 for f in self.removed():
1992 self._repo.dirstate.drop(f)
1995 self._repo.dirstate.drop(f)
1993 self._repo.dirstate.setparents(node)
1996 self._repo.dirstate.setparents(node)
1994 self._repo._quick_access_changeid_invalidate()
1997 self._repo._quick_access_changeid_invalidate()
1995
1998
1996 # write changes out explicitly, because nesting wlock at
1999 # write changes out explicitly, because nesting wlock at
1997 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2000 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1998 # from immediately doing so for subsequent changing files
2001 # from immediately doing so for subsequent changing files
1999 self._repo.dirstate.write(self._repo.currenttransaction())
2002 self._repo.dirstate.write(self._repo.currenttransaction())
2000
2003
2001 sparse.aftercommit(self._repo, node)
2004 sparse.aftercommit(self._repo, node)
2002
2005
2003
2006
2004 class committablefilectx(basefilectx):
2007 class committablefilectx(basefilectx):
2005 """A committablefilectx provides common functionality for a file context
2008 """A committablefilectx provides common functionality for a file context
2006 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2009 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2007
2010
2008 def __init__(self, repo, path, filelog=None, ctx=None):
2011 def __init__(self, repo, path, filelog=None, ctx=None):
2009 self._repo = repo
2012 self._repo = repo
2010 self._path = path
2013 self._path = path
2011 self._changeid = None
2014 self._changeid = None
2012 self._filerev = self._filenode = None
2015 self._filerev = self._filenode = None
2013
2016
2014 if filelog is not None:
2017 if filelog is not None:
2015 self._filelog = filelog
2018 self._filelog = filelog
2016 if ctx:
2019 if ctx:
2017 self._changectx = ctx
2020 self._changectx = ctx
2018
2021
2019 def __nonzero__(self):
2022 def __nonzero__(self):
2020 return True
2023 return True
2021
2024
2022 __bool__ = __nonzero__
2025 __bool__ = __nonzero__
2023
2026
2024 def linkrev(self):
2027 def linkrev(self):
2025 # linked to self._changectx no matter if file is modified or not
2028 # linked to self._changectx no matter if file is modified or not
2026 return self.rev()
2029 return self.rev()
2027
2030
2028 def renamed(self):
2031 def renamed(self):
2029 path = self.copysource()
2032 path = self.copysource()
2030 if not path:
2033 if not path:
2031 return None
2034 return None
2032 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2035 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2033
2036
2034 def parents(self):
2037 def parents(self):
2035 '''return parent filectxs, following copies if necessary'''
2038 '''return parent filectxs, following copies if necessary'''
2036
2039
2037 def filenode(ctx, path):
2040 def filenode(ctx, path):
2038 return ctx._manifest.get(path, nullid)
2041 return ctx._manifest.get(path, nullid)
2039
2042
2040 path = self._path
2043 path = self._path
2041 fl = self._filelog
2044 fl = self._filelog
2042 pcl = self._changectx._parents
2045 pcl = self._changectx._parents
2043 renamed = self.renamed()
2046 renamed = self.renamed()
2044
2047
2045 if renamed:
2048 if renamed:
2046 pl = [renamed + (None,)]
2049 pl = [renamed + (None,)]
2047 else:
2050 else:
2048 pl = [(path, filenode(pcl[0], path), fl)]
2051 pl = [(path, filenode(pcl[0], path), fl)]
2049
2052
2050 for pc in pcl[1:]:
2053 for pc in pcl[1:]:
2051 pl.append((path, filenode(pc, path), fl))
2054 pl.append((path, filenode(pc, path), fl))
2052
2055
2053 return [
2056 return [
2054 self._parentfilectx(p, fileid=n, filelog=l)
2057 self._parentfilectx(p, fileid=n, filelog=l)
2055 for p, n, l in pl
2058 for p, n, l in pl
2056 if n != nullid
2059 if n != nullid
2057 ]
2060 ]
2058
2061
2059 def children(self):
2062 def children(self):
2060 return []
2063 return []
2061
2064
2062
2065
2063 class workingfilectx(committablefilectx):
2066 class workingfilectx(committablefilectx):
2064 """A workingfilectx object makes access to data related to a particular
2067 """A workingfilectx object makes access to data related to a particular
2065 file in the working directory convenient."""
2068 file in the working directory convenient."""
2066
2069
2067 def __init__(self, repo, path, filelog=None, workingctx=None):
2070 def __init__(self, repo, path, filelog=None, workingctx=None):
2068 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2071 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2069
2072
2070 @propertycache
2073 @propertycache
2071 def _changectx(self):
2074 def _changectx(self):
2072 return workingctx(self._repo)
2075 return workingctx(self._repo)
2073
2076
2074 def data(self):
2077 def data(self):
2075 return self._repo.wread(self._path)
2078 return self._repo.wread(self._path)
2076
2079
2077 def copysource(self):
2080 def copysource(self):
2078 return self._repo.dirstate.copied(self._path)
2081 return self._repo.dirstate.copied(self._path)
2079
2082
2080 def size(self):
2083 def size(self):
2081 return self._repo.wvfs.lstat(self._path).st_size
2084 return self._repo.wvfs.lstat(self._path).st_size
2082
2085
2083 def lstat(self):
2086 def lstat(self):
2084 return self._repo.wvfs.lstat(self._path)
2087 return self._repo.wvfs.lstat(self._path)
2085
2088
2086 def date(self):
2089 def date(self):
2087 t, tz = self._changectx.date()
2090 t, tz = self._changectx.date()
2088 try:
2091 try:
2089 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2092 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2090 except OSError as err:
2093 except OSError as err:
2091 if err.errno != errno.ENOENT:
2094 if err.errno != errno.ENOENT:
2092 raise
2095 raise
2093 return (t, tz)
2096 return (t, tz)
2094
2097
2095 def exists(self):
2098 def exists(self):
2096 return self._repo.wvfs.exists(self._path)
2099 return self._repo.wvfs.exists(self._path)
2097
2100
2098 def lexists(self):
2101 def lexists(self):
2099 return self._repo.wvfs.lexists(self._path)
2102 return self._repo.wvfs.lexists(self._path)
2100
2103
2101 def audit(self):
2104 def audit(self):
2102 return self._repo.wvfs.audit(self._path)
2105 return self._repo.wvfs.audit(self._path)
2103
2106
2104 def cmp(self, fctx):
2107 def cmp(self, fctx):
2105 """compare with other file context
2108 """compare with other file context
2106
2109
2107 returns True if different than fctx.
2110 returns True if different than fctx.
2108 """
2111 """
2109 # fctx should be a filectx (not a workingfilectx)
2112 # fctx should be a filectx (not a workingfilectx)
2110 # invert comparison to reuse the same code path
2113 # invert comparison to reuse the same code path
2111 return fctx.cmp(self)
2114 return fctx.cmp(self)
2112
2115
2113 def remove(self, ignoremissing=False):
2116 def remove(self, ignoremissing=False):
2114 """wraps unlink for a repo's working directory"""
2117 """wraps unlink for a repo's working directory"""
2115 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2118 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2116 self._repo.wvfs.unlinkpath(
2119 self._repo.wvfs.unlinkpath(
2117 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2120 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2118 )
2121 )
2119
2122
2120 def write(self, data, flags, backgroundclose=False, **kwargs):
2123 def write(self, data, flags, backgroundclose=False, **kwargs):
2121 """wraps repo.wwrite"""
2124 """wraps repo.wwrite"""
2122 return self._repo.wwrite(
2125 return self._repo.wwrite(
2123 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2126 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2124 )
2127 )
2125
2128
2126 def markcopied(self, src):
2129 def markcopied(self, src):
2127 """marks this file a copy of `src`"""
2130 """marks this file a copy of `src`"""
2128 self._repo.dirstate.copy(src, self._path)
2131 self._repo.dirstate.copy(src, self._path)
2129
2132
2130 def clearunknown(self):
2133 def clearunknown(self):
2131 """Removes conflicting items in the working directory so that
2134 """Removes conflicting items in the working directory so that
2132 ``write()`` can be called successfully.
2135 ``write()`` can be called successfully.
2133 """
2136 """
2134 wvfs = self._repo.wvfs
2137 wvfs = self._repo.wvfs
2135 f = self._path
2138 f = self._path
2136 wvfs.audit(f)
2139 wvfs.audit(f)
2137 if self._repo.ui.configbool(
2140 if self._repo.ui.configbool(
2138 b'experimental', b'merge.checkpathconflicts'
2141 b'experimental', b'merge.checkpathconflicts'
2139 ):
2142 ):
2140 # remove files under the directory as they should already be
2143 # remove files under the directory as they should already be
2141 # warned and backed up
2144 # warned and backed up
2142 if wvfs.isdir(f) and not wvfs.islink(f):
2145 if wvfs.isdir(f) and not wvfs.islink(f):
2143 wvfs.rmtree(f, forcibly=True)
2146 wvfs.rmtree(f, forcibly=True)
2144 for p in reversed(list(pathutil.finddirs(f))):
2147 for p in reversed(list(pathutil.finddirs(f))):
2145 if wvfs.isfileorlink(p):
2148 if wvfs.isfileorlink(p):
2146 wvfs.unlink(p)
2149 wvfs.unlink(p)
2147 break
2150 break
2148 else:
2151 else:
2149 # don't remove files if path conflicts are not processed
2152 # don't remove files if path conflicts are not processed
2150 if wvfs.isdir(f) and not wvfs.islink(f):
2153 if wvfs.isdir(f) and not wvfs.islink(f):
2151 wvfs.removedirs(f)
2154 wvfs.removedirs(f)
2152
2155
2153 def setflags(self, l, x):
2156 def setflags(self, l, x):
2154 self._repo.wvfs.setflags(self._path, l, x)
2157 self._repo.wvfs.setflags(self._path, l, x)
2155
2158
2156
2159
2157 class overlayworkingctx(committablectx):
2160 class overlayworkingctx(committablectx):
2158 """Wraps another mutable context with a write-back cache that can be
2161 """Wraps another mutable context with a write-back cache that can be
2159 converted into a commit context.
2162 converted into a commit context.
2160
2163
2161 self._cache[path] maps to a dict with keys: {
2164 self._cache[path] maps to a dict with keys: {
2162 'exists': bool?
2165 'exists': bool?
2163 'date': date?
2166 'date': date?
2164 'data': str?
2167 'data': str?
2165 'flags': str?
2168 'flags': str?
2166 'copied': str? (path or None)
2169 'copied': str? (path or None)
2167 }
2170 }
2168 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2171 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2169 is `False`, the file was deleted.
2172 is `False`, the file was deleted.
2170 """
2173 """
2171
2174
2172 def __init__(self, repo):
2175 def __init__(self, repo):
2173 super(overlayworkingctx, self).__init__(repo)
2176 super(overlayworkingctx, self).__init__(repo)
2174 self.clean()
2177 self.clean()
2175
2178
2176 def setbase(self, wrappedctx):
2179 def setbase(self, wrappedctx):
2177 self._wrappedctx = wrappedctx
2180 self._wrappedctx = wrappedctx
2178 self._parents = [wrappedctx]
2181 self._parents = [wrappedctx]
2179 # Drop old manifest cache as it is now out of date.
2182 # Drop old manifest cache as it is now out of date.
2180 # This is necessary when, e.g., rebasing several nodes with one
2183 # This is necessary when, e.g., rebasing several nodes with one
2181 # ``overlayworkingctx`` (e.g. with --collapse).
2184 # ``overlayworkingctx`` (e.g. with --collapse).
2182 util.clearcachedproperty(self, b'_manifest')
2185 util.clearcachedproperty(self, b'_manifest')
2183
2186
2184 def setparents(self, p1node, p2node=nullid):
2187 def setparents(self, p1node, p2node=nullid):
2185 assert p1node == self._wrappedctx.node()
2188 assert p1node == self._wrappedctx.node()
2186 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2189 self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]
2187
2190
2188 def data(self, path):
2191 def data(self, path):
2189 if self.isdirty(path):
2192 if self.isdirty(path):
2190 if self._cache[path][b'exists']:
2193 if self._cache[path][b'exists']:
2191 if self._cache[path][b'data'] is not None:
2194 if self._cache[path][b'data'] is not None:
2192 return self._cache[path][b'data']
2195 return self._cache[path][b'data']
2193 else:
2196 else:
2194 # Must fallback here, too, because we only set flags.
2197 # Must fallback here, too, because we only set flags.
2195 return self._wrappedctx[path].data()
2198 return self._wrappedctx[path].data()
2196 else:
2199 else:
2197 raise error.ProgrammingError(
2200 raise error.ProgrammingError(
2198 b"No such file or directory: %s" % path
2201 b"No such file or directory: %s" % path
2199 )
2202 )
2200 else:
2203 else:
2201 return self._wrappedctx[path].data()
2204 return self._wrappedctx[path].data()
2202
2205
@propertycache
def _manifest(self):
    """Build a manifest from p1's manifest plus the in-memory changes."""
    man = self.parents()[0].manifest().copy()

    flag = self._flagfunc
    # Added files get the sentinel "added" node; modified files get the
    # sentinel "modified" node; removed files are dropped entirely.
    for path in self.added():
        man[path] = addednodeid
        man.setflag(path, flag(path))
    for path in self.modified():
        man[path] = modifiednodeid
        man.setflag(path, flag(path))
    for path in self.removed():
        del man[path]
    return man
2218
2221
@propertycache
def _flagfunc(self):
    """Return a callable mapping a cached path to its recorded flags."""

    def lookup(path):
        return self._cache[path][b'flags']

    return lookup
2225
2228
def files(self):
    """Return every touched file (added, modified, removed), sorted."""
    touched = self.added() + self.modified() + self.removed()
    return sorted(touched)
2228
2231
def modified(self):
    """Dirty files that exist both here and in the parent context."""
    return [
        f
        for f, entry in self._cache.items()
        if entry[b'exists'] and self._existsinparent(f)
    ]
2235
2238
def added(self):
    """Dirty files that exist here but not in the parent context."""
    return [
        f
        for f, entry in self._cache.items()
        if entry[b'exists'] and not self._existsinparent(f)
    ]
2242
2245
def removed(self):
    """Dirty files that were deleted here but exist in the parent."""
    return [
        f
        for f, entry in self._cache.items()
        if not entry[b'exists'] and self._existsinparent(f)
    ]
2249
2252
def p1copies(self):
    """Return ``{dest: source}`` copy info, limited to the narrow match."""
    result = {}
    narrowmatch = self._repo.narrowmatch()
    for f in self._cache:
        if not narrowmatch(f):
            continue
        result.pop(f, None)  # drop any stale entry for this path
        source = self._cache[f][b'copied']
        if source:
            result[f] = source
    return result
2261
2264
def p2copies(self):
    """Return ``{dest: source}`` copy info, limited to the narrow match.

    NOTE(review): identical to ``p1copies`` — the cache stores a single
    ``copied`` source per file with no per-parent distinction.
    """
    result = {}
    narrowmatch = self._repo.narrowmatch()
    for f in self._cache:
        if not narrowmatch(f):
            continue
        result.pop(f, None)  # drop any stale entry for this path
        source = self._cache[f][b'copied']
        if source:
            result[f] = source
    return result
2273
2276
def isinmemory(self):
    """This context never touches the working copy on disk."""
    return True
2276
2279
def filedate(self, path):
    """Return the recorded date for ``path`` (cache first, then parent)."""
    if not self.isdirty(path):
        return self._wrappedctx[path].date()
    return self._cache[path][b'date']
2282
2285
def markcopied(self, path, origin):
    """Record that ``path`` was copied/renamed from ``origin``."""
    # Preserve the path's current date and flags while attaching the
    # copy source.
    date = self.filedate(path)
    flags = self.flags(path)
    self._markdirty(path, exists=True, date=date, flags=flags, copied=origin)
2291
2294
def copydata(self, path):
    """Return the copy source recorded for ``path``, or None."""
    if not self.isdirty(path):
        return None
    return self._cache[path][b'copied']
2297
2300
def flags(self, path):
    """Return the flags (b'l', b'x' or b'') recorded for ``path``.

    Raises ``error.ProgrammingError`` if the path is marked as deleted.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return self._cache[path][b'flags']
        # Bug fix: this context class has no ``_path`` attribute (that
        # belongs to filectx objects), so formatting ``self._path`` here
        # raised AttributeError instead of the intended error; use the
        # requested ``path``.
        raise error.ProgrammingError(
            b"No such file or directory: %s" % path
        )
    return self._wrappedctx[path].flags()
2308
2311
def __contains__(self, key):
    """True if ``key`` exists in this context (cache first, then p1)."""
    entry = self._cache.get(key)
    if entry is not None:
        return entry[b'exists']
    return key in self.p1()
2313
2316
def _existsinparent(self, path):
    """Return True if ``path`` exists in the wrapped (parent) context."""
    try:
        # ``commitctx`` raises ``ManifestLookupError`` for a missing
        # path, unlike ``workingctx``, which returns a filectx with an
        # ``exists()`` method.
        self._wrappedctx[path]
    except error.ManifestLookupError:
        return False
    return True
2323
2326
def _auditconflicts(self, path):
    """Replicates conflict checks done by wvfs.write().

    Since we never write to the filesystem and never call `applyupdates` in
    IMM, we'll never check that a path is actually writable -- e.g., because
    it adds `a/foo`, but `a` is actually a file in the other commit.
    """

    def fail(path, component):
        # p1() is the base and we're receiving "writes" for p2()'s
        # files.
        if b'l' in self.p1()[component].flags():
            raise error.Abort(
                b"error: %s conflicts with symlink %s "
                b"in %d." % (path, component, self.p1().rev())
            )
        raise error.Abort(
            b"error: '%s' conflicts with file '%s' in "
            b"%d." % (path, component, self.p1().rev())
        )

    # Every new directory implied by writing ``path`` must not be an
    # existing file in p1.
    components = path.split(b'/')
    for i in pycompat.xrange(len(components)):
        prefix = b"/".join(components[0:i])
        if prefix in self:
            fail(path, prefix)

    # Test the other direction -- that this path from p2 isn't a directory
    # in p1 (test that p1 doesn't have any paths matching `path/*`).
    match = self.match([path], default=b'path')
    mfiles = self.p1().manifest().matches(match).keys()
    if not mfiles:
        return
    if len(mfiles) == 1 and mfiles[0] == path:
        return
    # omit the files which are deleted in current IMM wctx
    mfiles = [m for m in mfiles if m in self]
    if not mfiles:
        return
    raise error.Abort(
        b"error: file '%s' cannot be written because "
        b" '%s/' is a directory in %s (containing %d "
        b"entries: %s)"
        % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
    )
2372
2375
def write(self, path, data, flags=b'', **kwargs):
    """Record ``data`` for ``path`` in memory after conflict auditing.

    Raises ``error.ProgrammingError`` when ``data`` is None.
    """
    if data is None:
        raise error.ProgrammingError(b"data must be non-None")
    self._auditconflicts(path)
    now = dateutil.makedate()
    self._markdirty(path, exists=True, data=data, date=now, flags=flags)
2380
2383
def setflags(self, path, l, x):
    """Mark ``path`` as a symlink (``l``) or executable (``x``).

    The symlink flag wins when both are requested, matching the single
    flag slot kept per cache entry.
    """
    if l:
        flag = b'l'
    elif x:
        flag = b'x'
    else:
        flag = b''
    self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2388
2391
def remove(self, path):
    """Mark ``path`` as removed in the in-memory cache."""
    self._markdirty(path, exists=False)
2391
2394
def exists(self, path):
    """Like ``lexists`` but follows symlinks; broken links report False."""
    if not self.isdirty(path):
        return self._existsinparent(path)
    entry = self._cache[path]
    # An in-memory symlink is "followed" by recursing on its target,
    # which is stored as the entry's data.
    if entry[b'exists'] and b'l' in entry[b'flags']:
        return self.exists(entry[b'data'].strip())
    return entry[b'exists']
2408
2411
def lexists(self, path):
    """Return True if ``path`` exists (symlinks are not followed)."""
    if not self.isdirty(path):
        return self._existsinparent(path)
    return self._cache[path][b'exists']
2415
2418
def size(self, path):
    """Return the size of ``path`` in bytes.

    Raises ``error.ProgrammingError`` if the path is marked as deleted.
    """
    if self.isdirty(path):
        if self._cache[path][b'exists']:
            return len(self._cache[path][b'data'])
        # Bug fix: this context class has no ``_path`` attribute, so the
        # old message formatted ``self._path`` and raised AttributeError
        # instead of the intended error; use the requested ``path``.
        raise error.ProgrammingError(
            b"No such file or directory: %s" % path
        )
    return self._wrappedctx[path].size()
2425
2428
def tomemctx(
    self,
    text,
    branch=None,
    extra=None,
    date=None,
    parents=None,
    user=None,
    editor=None,
):
    """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
    committed.

    ``text`` is the commit message.
    ``parents`` (optional) are rev numbers.
    """
    # Default parents to the wrapped context if not passed.
    if parents is None:
        parents = self.parents()
        if len(parents) == 1:
            parents = (parents[0], None)

    # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
    if parents[1] is None:
        parents = (self._repo[parents[0]], None)
    else:
        parents = (self._repo[parents[0]], self._repo[parents[1]])

    files = self.files()

    def getfile(repo, memctx, path):
        entry = self._cache[path]
        if not entry[b'exists']:
            # Returning None, but including the path in `files`, is
            # necessary for memctx to register a deletion.
            return None
        return memfilectx(
            repo,
            memctx,
            path,
            entry[b'data'],
            b'l' in entry[b'flags'],
            b'x' in entry[b'flags'],
            entry[b'copied'],
        )

    if branch is None:
        branch = self._wrappedctx.branch()

    return memctx(
        self._repo,
        parents,
        text,
        files,
        getfile,
        date=date,
        extra=extra,
        user=user,
        branch=branch,
        editor=editor,
    )
2487
2490
def isdirty(self, path):
    """True if ``path`` has an in-memory (dirty) cache entry."""
    return path in self._cache
2490
2493
def isempty(self):
    """True if, after compaction, no dirty entries remain."""
    # Entries identical to the underlying context must be discarded
    # before the empty-commit check.
    self._compact()
    return not self._cache
2496
2499
def clean(self):
    """Drop every dirty entry, reverting to the wrapped context."""
    self._cache = {}
2499
2502
def _compact(self):
    """Removes keys from the cache that are actually clean, by comparing
    them with the underlying context.

    This can occur during the merge process, e.g. by passing --tool :local
    to resolve a conflict.
    """
    # Batch-prefetch the compared files; not perfect, but it helps a lot
    # with e.g. remotefilelog.
    scmutil.prefetchfiles(
        self.repo(),
        [self.p1().rev()],
        scmutil.matchfiles(self.repo(), self._cache.keys()),
    )

    cleankeys = []
    for path, entry in self._cache.items():
        try:
            underlying = self._wrappedctx[path]
            if (
                underlying.data() == entry[b'data']
                and underlying.flags() == entry[b'flags']
            ):
                cleankeys.append(path)
        except error.ManifestLookupError:
            # Path not in the underlying manifest (created).
            continue

    for path in cleankeys:
        del self._cache[path]
    return cleankeys
2532
2535
def _markdirty(
    self, path, exists, data=None, date=None, flags=b'', copied=None
):
    """Record a dirty cache entry for ``path``."""
    # When no data is supplied for an existing file, reuse any cached
    # data, falling back to the underlying context -- so every existing
    # entry always carries data.
    if exists and data is None:
        previous = self._cache.get(path) or {}
        data = previous.get(b'data')
        if data is None:
            data = self._wrappedctx[path].data()

    self._cache[path] = {
        b'exists': exists,
        b'data': data,
        b'date': date,
        b'flags': flags,
        b'copied': copied,
    }
2552
2555
def filectx(self, path, filelog=None):
    """Return an ``overlayworkingfilectx`` bound to this context."""
    return overlayworkingfilectx(
        self._repo, path, parent=self, filelog=filelog
    )
2557
2560
2558
2561
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Nearly every operation delegates to the owning ``overlayworkingctx``
    (``self._parent``), keyed by this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True if this file's contents differ from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        """Return the owning in-memory changectx."""
        return self._parent

    def data(self):
        """Return the file contents from the in-memory context."""
        return self._parent.data(self._path)

    def date(self):
        """Return the date recorded for this file."""
        return self._parent.filedate(self._path)

    def exists(self):
        """Alias of ``lexists`` for this in-memory file."""
        return self.lexists()

    def lexists(self):
        """True if the file exists in the in-memory context."""
        return self._parent.exists(self._path)

    def copysource(self):
        """Return the recorded copy source, or None."""
        return self._parent.copydata(self._path)

    def size(self):
        """Return the file size in bytes."""
        return self._parent.size(self._path)

    def markcopied(self, origin):
        """Record that this file was copied from ``origin``."""
        self._parent.markcopied(self._path, origin)

    def audit(self):
        """No-op: in-memory paths need no filesystem auditing."""
        pass

    def flags(self):
        """Return the flags recorded for this file."""
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        """Set symlink/executable flags on this file."""
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        """Write ``data`` into the in-memory cache."""
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        """Mark this file as removed in the in-memory cache."""
        return self._parent.remove(self._path)

    def clearunknown(self):
        """No-op: there are no unknown on-disk files to clear."""
        pass
2613
2616
2614
2617
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            cleanfiles = [
                f for f in self._manifest if f not in self._changedset
            ]
        else:
            cleanfiles = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            cleanfiles,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        return (
            set(self._status.modified)
            | set(self._status.added)
            | set(self._status.removed)
        )
2658
2661
2659
2662
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2675
2678
2676
2679
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        # Propagate link/exec flags and copy info from the source fctx.
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2698
2701
2699
2702
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        # A None payload means the patch deletes this file.
        if data is None:
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2722
2725
2723
2726
2724 class memctx(committablectx):
2727 class memctx(committablectx):
2725 """Use memctx to perform in-memory commits via localrepo.commitctx().
2728 """Use memctx to perform in-memory commits via localrepo.commitctx().
2726
2729
2727 Revision information is supplied at initialization time while
2730 Revision information is supplied at initialization time while
2728 related files data and is made available through a callback
2731 related files data and is made available through a callback
2729 mechanism. 'repo' is the current localrepo, 'parents' is a
2732 mechanism. 'repo' is the current localrepo, 'parents' is a
2730 sequence of two parent revisions identifiers (pass None for every
2733 sequence of two parent revisions identifiers (pass None for every
2731 missing parent), 'text' is the commit message and 'files' lists
2734 missing parent), 'text' is the commit message and 'files' lists
2732 names of files touched by the revision (normalized and relative to
2735 names of files touched by the revision (normalized and relative to
2733 repository root).
2736 repository root).
2734
2737
2735 filectxfn(repo, memctx, path) is a callable receiving the
2738 filectxfn(repo, memctx, path) is a callable receiving the
2736 repository, the current memctx object and the normalized path of
2739 repository, the current memctx object and the normalized path of
2737 requested file, relative to repository root. It is fired by the
2740 requested file, relative to repository root. It is fired by the
2738 commit function for every file in 'files', but calls order is
2741 commit function for every file in 'files', but calls order is
2739 undefined. If the file is available in the revision being
2742 undefined. If the file is available in the revision being
2740 committed (updated or added), filectxfn returns a memfilectx
2743 committed (updated or added), filectxfn returns a memfilectx
2741 object. If the file was removed, filectxfn return None for recent
2744 object. If the file was removed, filectxfn return None for recent
2742 Mercurial. Moved files are represented by marking the source file
2745 Mercurial. Moved files are represented by marking the source file
2743 removed and the new file added with copy information (see
2746 removed and the new file added with copy information (see
2744 memfilectx).
2747 memfilectx).
2745
2748
2746 user receives the committer name and defaults to current
2749 user receives the committer name and defaults to current
2747 repository username, date is the commit date in any format
2750 repository username, date is the commit date in any format
2748 supported by dateutil.parsedate() and defaults to current date, extra
2751 supported by dateutil.parsedate() and defaults to current date, extra
2749 is a dictionary of metadata or is left empty.
2752 is a dictionary of metadata or is left empty.
2750 """
2753 """
2751
2754
2752 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2755 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2753 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2756 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2754 # this field to determine what to do in filectxfn.
2757 # this field to determine what to do in filectxfn.
2755 _returnnoneformissingfiles = True
2758 _returnnoneformissingfiles = True
2756
2759
def __init__(
    self,
    repo,
    parents,
    text,
    files,
    filectxfn,
    user=None,
    date=None,
    extra=None,
    branch=None,
    editor=None,
):
    """Initialize an in-memory commit context; see the class docstring."""
    super(memctx, self).__init__(
        repo, text, user, date, extra, branch=branch
    )
    self._rev = None
    self._node = None
    # Missing parents default to the null revision.
    p1, p2 = [(p or nullid) for p in parents]
    self._parents = [self._repo[p] for p in (p1, p2)]
    self._files = sorted(set(files))
    self.substate = {}

    if isinstance(filectxfn, patch.filestore):
        filectxfn = memfilefrompatch(filectxfn)
    elif not callable(filectxfn):
        # if store is not callable, wrap it in a function
        filectxfn = memfilefromctx(filectxfn)

    # memoizing increases performance for e.g. vcs convert scenarios.
    self._filectxfn = makecachingfilectxfn(filectxfn)

    if editor:
        self._text = editor(self._repo, self, [])
        self._repo.savecommitmessage(self._text)
2794
2797
2795 def filectx(self, path, filelog=None):
2798 def filectx(self, path, filelog=None):
2796 """get a file context from the working directory
2799 """get a file context from the working directory
2797
2800
2798 Returns None if file doesn't exist and should be removed."""
2801 Returns None if file doesn't exist and should be removed."""
2799 return self._filectxfn(self._repo, self, path)
2802 return self._filectxfn(self._repo, self, path)
2800
2803
2801 def commit(self):
2804 def commit(self):
2802 """commit context to the repo"""
2805 """commit context to the repo"""
2803 return self._repo.commitctx(self)
2806 return self._repo.commitctx(self)
2804
2807
2805 @propertycache
2808 @propertycache
2806 def _manifest(self):
2809 def _manifest(self):
2807 """generate a manifest based on the return values of filectxfn"""
2810 """generate a manifest based on the return values of filectxfn"""
2808
2811
2809 # keep this simple for now; just worry about p1
2812 # keep this simple for now; just worry about p1
2810 pctx = self._parents[0]
2813 pctx = self._parents[0]
2811 man = pctx.manifest().copy()
2814 man = pctx.manifest().copy()
2812
2815
2813 for f in self._status.modified:
2816 for f in self._status.modified:
2814 man[f] = modifiednodeid
2817 man[f] = modifiednodeid
2815
2818
2816 for f in self._status.added:
2819 for f in self._status.added:
2817 man[f] = addednodeid
2820 man[f] = addednodeid
2818
2821
2819 for f in self._status.removed:
2822 for f in self._status.removed:
2820 if f in man:
2823 if f in man:
2821 del man[f]
2824 del man[f]
2822
2825
2823 return man
2826 return man
2824
2827
2825 @propertycache
2828 @propertycache
2826 def _status(self):
2829 def _status(self):
2827 """Calculate exact status from ``files`` specified at construction
2830 """Calculate exact status from ``files`` specified at construction
2828 """
2831 """
2829 man1 = self.p1().manifest()
2832 man1 = self.p1().manifest()
2830 p2 = self._parents[1]
2833 p2 = self._parents[1]
2831 # "1 < len(self._parents)" can't be used for checking
2834 # "1 < len(self._parents)" can't be used for checking
2832 # existence of the 2nd parent, because "memctx._parents" is
2835 # existence of the 2nd parent, because "memctx._parents" is
2833 # explicitly initialized by the list, of which length is 2.
2836 # explicitly initialized by the list, of which length is 2.
2834 if p2.node() != nullid:
2837 if p2.node() != nullid:
2835 man2 = p2.manifest()
2838 man2 = p2.manifest()
2836 managing = lambda f: f in man1 or f in man2
2839 managing = lambda f: f in man1 or f in man2
2837 else:
2840 else:
2838 managing = lambda f: f in man1
2841 managing = lambda f: f in man1
2839
2842
2840 modified, added, removed = [], [], []
2843 modified, added, removed = [], [], []
2841 for f in self._files:
2844 for f in self._files:
2842 if not managing(f):
2845 if not managing(f):
2843 added.append(f)
2846 added.append(f)
2844 elif self[f]:
2847 elif self[f]:
2845 modified.append(f)
2848 modified.append(f)
2846 else:
2849 else:
2847 removed.append(f)
2850 removed.append(f)
2848
2851
2849 return scmutil.status(modified, added, removed, [], [], [], [])
2852 return scmutil.status(modified, added, removed, [], [], [], [])
2850
2853
2851
2854
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if current file was copied in
        the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the file type as a manifest-style flag string.
        if islink:
            self._flags = b'l'
        elif isexec:
            self._flags = b'x'
        else:
            self._flags = b''
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path, or None when not a copy."""
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        return fctx.data() != self.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2902
2905
2903
2906
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default to the original commit message when none is supplied.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list is not mutated.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are changectx objects while nullid is bytes, so
        # "p1 != nullid" looks always-true here — confirm intended comparison.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # The editor may rewrite the commit message; persist it so it can
            # be recovered if the commit fails.
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # Return the node of the manifest being reused.
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # File contents are unchanged; delegate to the original revision.
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3012
3015
3013
3016
class arbitraryfilectx(object):
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the fast path when neither side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if self._repo and isinstance(fctx, workingfilectx) and not has_symlink:
            # Fast path for merge when both sides are disk-backed.
            # filecmp returns True when the files are identical, while our
            # cmp functions return True when they differ — hence the "not".
            other = self._repo.wjoin(fctx.path())
            return not filecmp.cmp(self.path(), other)
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        # An arbitrary on-disk file carries no manifest flags.
        return b''

    def data(self):
        """Return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content read directly, without filter decoding."""
        with open(self._path, b"rb") as f:
            return f.read()

    def remove(self):
        """Delete the underlying file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Overwrite the underlying file with ``data`` (no flags allowed)."""
        assert not flags
        with open(self._path, b"wb") as f:
            f.write(data)
General Comments 0
You need to be logged in to leave comments. Login now