##// END OF EJS Templates
overlayworkingctx: remove doubly bad reference to wrapped ctx for copies...
Martin von Zweigbergk -
r44491:f652b7dd default
parent child Browse files
Show More
@@ -1,3027 +1,3027 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 addednodeid,
17 addednodeid,
18 hex,
18 hex,
19 modifiednodeid,
19 modifiednodeid,
20 nullid,
20 nullid,
21 nullrev,
21 nullrev,
22 short,
22 short,
23 wdirfilenodeids,
23 wdirfilenodeids,
24 wdirhex,
24 wdirhex,
25 )
25 )
26 from .pycompat import (
26 from .pycompat import (
27 getattr,
27 getattr,
28 open,
28 open,
29 )
29 )
30 from . import (
30 from . import (
31 copies,
31 copies,
32 dagop,
32 dagop,
33 encoding,
33 encoding,
34 error,
34 error,
35 fileset,
35 fileset,
36 match as matchmod,
36 match as matchmod,
37 obsolete as obsmod,
37 obsolete as obsmod,
38 patch,
38 patch,
39 pathutil,
39 pathutil,
40 phases,
40 phases,
41 pycompat,
41 pycompat,
42 repoview,
42 repoview,
43 scmutil,
43 scmutil,
44 sparse,
44 sparse,
45 subrepo,
45 subrepo,
46 subrepoutil,
46 subrepoutil,
47 util,
47 util,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 propertycache = util.propertycache
54 propertycache = util.propertycache
55
55
56
56
57 class basectx(object):
57 class basectx(object):
58 """A basectx object represents the common logic for its children:
58 """A basectx object represents the common logic for its children:
59 changectx: read-only context that is already present in the repo,
59 changectx: read-only context that is already present in the repo,
60 workingctx: a context that represents the working directory and can
60 workingctx: a context that represents the working directory and can
61 be committed,
61 be committed,
62 memctx: a context that represents changes in-memory and can also
62 memctx: a context that represents changes in-memory and can also
63 be committed."""
63 be committed."""
64
64
65 def __init__(self, repo):
65 def __init__(self, repo):
66 self._repo = repo
66 self._repo = repo
67
67
68 def __bytes__(self):
68 def __bytes__(self):
69 return short(self.node())
69 return short(self.node())
70
70
71 __str__ = encoding.strmethod(__bytes__)
71 __str__ = encoding.strmethod(__bytes__)
72
72
73 def __repr__(self):
73 def __repr__(self):
74 return "<%s %s>" % (type(self).__name__, str(self))
74 return "<%s %s>" % (type(self).__name__, str(self))
75
75
76 def __eq__(self, other):
76 def __eq__(self, other):
77 try:
77 try:
78 return type(self) == type(other) and self._rev == other._rev
78 return type(self) == type(other) and self._rev == other._rev
79 except AttributeError:
79 except AttributeError:
80 return False
80 return False
81
81
82 def __ne__(self, other):
82 def __ne__(self, other):
83 return not (self == other)
83 return not (self == other)
84
84
85 def __contains__(self, key):
85 def __contains__(self, key):
86 return key in self._manifest
86 return key in self._manifest
87
87
88 def __getitem__(self, key):
88 def __getitem__(self, key):
89 return self.filectx(key)
89 return self.filectx(key)
90
90
91 def __iter__(self):
91 def __iter__(self):
92 return iter(self._manifest)
92 return iter(self._manifest)
93
93
94 def _buildstatusmanifest(self, status):
94 def _buildstatusmanifest(self, status):
95 """Builds a manifest that includes the given status results, if this is
95 """Builds a manifest that includes the given status results, if this is
96 a working copy context. For non-working copy contexts, it just returns
96 a working copy context. For non-working copy contexts, it just returns
97 the normal manifest."""
97 the normal manifest."""
98 return self.manifest()
98 return self.manifest()
99
99
100 def _matchstatus(self, other, match):
100 def _matchstatus(self, other, match):
101 """This internal method provides a way for child objects to override the
101 """This internal method provides a way for child objects to override the
102 match operator.
102 match operator.
103 """
103 """
104 return match
104 return match
105
105
106 def _buildstatus(
106 def _buildstatus(
107 self, other, s, match, listignored, listclean, listunknown
107 self, other, s, match, listignored, listclean, listunknown
108 ):
108 ):
109 """build a status with respect to another context"""
109 """build a status with respect to another context"""
110 # Load earliest manifest first for caching reasons. More specifically,
110 # Load earliest manifest first for caching reasons. More specifically,
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
111 # if you have revisions 1000 and 1001, 1001 is probably stored as a
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
112 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
113 # 1000 and cache it so that when you read 1001, we just need to apply a
113 # 1000 and cache it so that when you read 1001, we just need to apply a
114 # delta to what's in the cache. So that's one full reconstruction + one
114 # delta to what's in the cache. So that's one full reconstruction + one
115 # delta application.
115 # delta application.
116 mf2 = None
116 mf2 = None
117 if self.rev() is not None and self.rev() < other.rev():
117 if self.rev() is not None and self.rev() < other.rev():
118 mf2 = self._buildstatusmanifest(s)
118 mf2 = self._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
119 mf1 = other._buildstatusmanifest(s)
120 if mf2 is None:
120 if mf2 is None:
121 mf2 = self._buildstatusmanifest(s)
121 mf2 = self._buildstatusmanifest(s)
122
122
123 modified, added = [], []
123 modified, added = [], []
124 removed = []
124 removed = []
125 clean = []
125 clean = []
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
126 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
127 deletedset = set(deleted)
127 deletedset = set(deleted)
128 d = mf1.diff(mf2, match=match, clean=listclean)
128 d = mf1.diff(mf2, match=match, clean=listclean)
129 for fn, value in pycompat.iteritems(d):
129 for fn, value in pycompat.iteritems(d):
130 if fn in deletedset:
130 if fn in deletedset:
131 continue
131 continue
132 if value is None:
132 if value is None:
133 clean.append(fn)
133 clean.append(fn)
134 continue
134 continue
135 (node1, flag1), (node2, flag2) = value
135 (node1, flag1), (node2, flag2) = value
136 if node1 is None:
136 if node1 is None:
137 added.append(fn)
137 added.append(fn)
138 elif node2 is None:
138 elif node2 is None:
139 removed.append(fn)
139 removed.append(fn)
140 elif flag1 != flag2:
140 elif flag1 != flag2:
141 modified.append(fn)
141 modified.append(fn)
142 elif node2 not in wdirfilenodeids:
142 elif node2 not in wdirfilenodeids:
143 # When comparing files between two commits, we save time by
143 # When comparing files between two commits, we save time by
144 # not comparing the file contents when the nodeids differ.
144 # not comparing the file contents when the nodeids differ.
145 # Note that this means we incorrectly report a reverted change
145 # Note that this means we incorrectly report a reverted change
146 # to a file as a modification.
146 # to a file as a modification.
147 modified.append(fn)
147 modified.append(fn)
148 elif self[fn].cmp(other[fn]):
148 elif self[fn].cmp(other[fn]):
149 modified.append(fn)
149 modified.append(fn)
150 else:
150 else:
151 clean.append(fn)
151 clean.append(fn)
152
152
153 if removed:
153 if removed:
154 # need to filter files if they are already reported as removed
154 # need to filter files if they are already reported as removed
155 unknown = [
155 unknown = [
156 fn
156 fn
157 for fn in unknown
157 for fn in unknown
158 if fn not in mf1 and (not match or match(fn))
158 if fn not in mf1 and (not match or match(fn))
159 ]
159 ]
160 ignored = [
160 ignored = [
161 fn
161 fn
162 for fn in ignored
162 for fn in ignored
163 if fn not in mf1 and (not match or match(fn))
163 if fn not in mf1 and (not match or match(fn))
164 ]
164 ]
165 # if they're deleted, don't report them as removed
165 # if they're deleted, don't report them as removed
166 removed = [fn for fn in removed if fn not in deletedset]
166 removed = [fn for fn in removed if fn not in deletedset]
167
167
168 return scmutil.status(
168 return scmutil.status(
169 modified, added, removed, deleted, unknown, ignored, clean
169 modified, added, removed, deleted, unknown, ignored, clean
170 )
170 )
171
171
172 @propertycache
172 @propertycache
173 def substate(self):
173 def substate(self):
174 return subrepoutil.state(self, self._repo.ui)
174 return subrepoutil.state(self, self._repo.ui)
175
175
176 def subrev(self, subpath):
176 def subrev(self, subpath):
177 return self.substate[subpath][1]
177 return self.substate[subpath][1]
178
178
179 def rev(self):
179 def rev(self):
180 return self._rev
180 return self._rev
181
181
182 def node(self):
182 def node(self):
183 return self._node
183 return self._node
184
184
185 def hex(self):
185 def hex(self):
186 return hex(self.node())
186 return hex(self.node())
187
187
188 def manifest(self):
188 def manifest(self):
189 return self._manifest
189 return self._manifest
190
190
191 def manifestctx(self):
191 def manifestctx(self):
192 return self._manifestctx
192 return self._manifestctx
193
193
194 def repo(self):
194 def repo(self):
195 return self._repo
195 return self._repo
196
196
197 def phasestr(self):
197 def phasestr(self):
198 return phases.phasenames[self.phase()]
198 return phases.phasenames[self.phase()]
199
199
200 def mutable(self):
200 def mutable(self):
201 return self.phase() > phases.public
201 return self.phase() > phases.public
202
202
203 def matchfileset(self, cwd, expr, badfn=None):
203 def matchfileset(self, cwd, expr, badfn=None):
204 return fileset.match(self, cwd, expr, badfn=badfn)
204 return fileset.match(self, cwd, expr, badfn=badfn)
205
205
206 def obsolete(self):
206 def obsolete(self):
207 """True if the changeset is obsolete"""
207 """True if the changeset is obsolete"""
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
208 return self.rev() in obsmod.getrevs(self._repo, b'obsolete')
209
209
210 def extinct(self):
210 def extinct(self):
211 """True if the changeset is extinct"""
211 """True if the changeset is extinct"""
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
212 return self.rev() in obsmod.getrevs(self._repo, b'extinct')
213
213
214 def orphan(self):
214 def orphan(self):
215 """True if the changeset is not obsolete, but its ancestor is"""
215 """True if the changeset is not obsolete, but its ancestor is"""
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
216 return self.rev() in obsmod.getrevs(self._repo, b'orphan')
217
217
218 def phasedivergent(self):
218 def phasedivergent(self):
219 """True if the changeset tries to be a successor of a public changeset
219 """True if the changeset tries to be a successor of a public changeset
220
220
221 Only non-public and non-obsolete changesets may be phase-divergent.
221 Only non-public and non-obsolete changesets may be phase-divergent.
222 """
222 """
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
223 return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')
224
224
225 def contentdivergent(self):
225 def contentdivergent(self):
226 """Is a successor of a changeset with multiple possible successor sets
226 """Is a successor of a changeset with multiple possible successor sets
227
227
228 Only non-public and non-obsolete changesets may be content-divergent.
228 Only non-public and non-obsolete changesets may be content-divergent.
229 """
229 """
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
230 return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')
231
231
232 def isunstable(self):
232 def isunstable(self):
233 """True if the changeset is either orphan, phase-divergent or
233 """True if the changeset is either orphan, phase-divergent or
234 content-divergent"""
234 content-divergent"""
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
235 return self.orphan() or self.phasedivergent() or self.contentdivergent()
236
236
237 def instabilities(self):
237 def instabilities(self):
238 """return the list of instabilities affecting this changeset.
238 """return the list of instabilities affecting this changeset.
239
239
240 Instabilities are returned as strings. possible values are:
240 Instabilities are returned as strings. possible values are:
241 - orphan,
241 - orphan,
242 - phase-divergent,
242 - phase-divergent,
243 - content-divergent.
243 - content-divergent.
244 """
244 """
245 instabilities = []
245 instabilities = []
246 if self.orphan():
246 if self.orphan():
247 instabilities.append(b'orphan')
247 instabilities.append(b'orphan')
248 if self.phasedivergent():
248 if self.phasedivergent():
249 instabilities.append(b'phase-divergent')
249 instabilities.append(b'phase-divergent')
250 if self.contentdivergent():
250 if self.contentdivergent():
251 instabilities.append(b'content-divergent')
251 instabilities.append(b'content-divergent')
252 return instabilities
252 return instabilities
253
253
254 def parents(self):
254 def parents(self):
255 """return contexts for each parent changeset"""
255 """return contexts for each parent changeset"""
256 return self._parents
256 return self._parents
257
257
258 def p1(self):
258 def p1(self):
259 return self._parents[0]
259 return self._parents[0]
260
260
261 def p2(self):
261 def p2(self):
262 parents = self._parents
262 parents = self._parents
263 if len(parents) == 2:
263 if len(parents) == 2:
264 return parents[1]
264 return parents[1]
265 return self._repo[nullrev]
265 return self._repo[nullrev]
266
266
267 def _fileinfo(self, path):
267 def _fileinfo(self, path):
268 if '_manifest' in self.__dict__:
268 if '_manifest' in self.__dict__:
269 try:
269 try:
270 return self._manifest[path], self._manifest.flags(path)
270 return self._manifest[path], self._manifest.flags(path)
271 except KeyError:
271 except KeyError:
272 raise error.ManifestLookupError(
272 raise error.ManifestLookupError(
273 self._node, path, _(b'not found in manifest')
273 self._node, path, _(b'not found in manifest')
274 )
274 )
275 if '_manifestdelta' in self.__dict__ or path in self.files():
275 if '_manifestdelta' in self.__dict__ or path in self.files():
276 if path in self._manifestdelta:
276 if path in self._manifestdelta:
277 return (
277 return (
278 self._manifestdelta[path],
278 self._manifestdelta[path],
279 self._manifestdelta.flags(path),
279 self._manifestdelta.flags(path),
280 )
280 )
281 mfl = self._repo.manifestlog
281 mfl = self._repo.manifestlog
282 try:
282 try:
283 node, flag = mfl[self._changeset.manifest].find(path)
283 node, flag = mfl[self._changeset.manifest].find(path)
284 except KeyError:
284 except KeyError:
285 raise error.ManifestLookupError(
285 raise error.ManifestLookupError(
286 self._node, path, _(b'not found in manifest')
286 self._node, path, _(b'not found in manifest')
287 )
287 )
288
288
289 return node, flag
289 return node, flag
290
290
291 def filenode(self, path):
291 def filenode(self, path):
292 return self._fileinfo(path)[0]
292 return self._fileinfo(path)[0]
293
293
294 def flags(self, path):
294 def flags(self, path):
295 try:
295 try:
296 return self._fileinfo(path)[1]
296 return self._fileinfo(path)[1]
297 except error.LookupError:
297 except error.LookupError:
298 return b''
298 return b''
299
299
300 @propertycache
300 @propertycache
301 def _copies(self):
301 def _copies(self):
302 return copies.computechangesetcopies(self)
302 return copies.computechangesetcopies(self)
303
303
304 def p1copies(self):
304 def p1copies(self):
305 return self._copies[0]
305 return self._copies[0]
306
306
307 def p2copies(self):
307 def p2copies(self):
308 return self._copies[1]
308 return self._copies[1]
309
309
310 def sub(self, path, allowcreate=True):
310 def sub(self, path, allowcreate=True):
311 '''return a subrepo for the stored revision of path, never wdir()'''
311 '''return a subrepo for the stored revision of path, never wdir()'''
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
312 return subrepo.subrepo(self, path, allowcreate=allowcreate)
313
313
314 def nullsub(self, path, pctx):
314 def nullsub(self, path, pctx):
315 return subrepo.nullsubrepo(self, path, pctx)
315 return subrepo.nullsubrepo(self, path, pctx)
316
316
317 def workingsub(self, path):
317 def workingsub(self, path):
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
318 '''return a subrepo for the stored revision, or wdir if this is a wdir
319 context.
319 context.
320 '''
320 '''
321 return subrepo.subrepo(self, path, allowwdir=True)
321 return subrepo.subrepo(self, path, allowwdir=True)
322
322
323 def match(
323 def match(
324 self,
324 self,
325 pats=None,
325 pats=None,
326 include=None,
326 include=None,
327 exclude=None,
327 exclude=None,
328 default=b'glob',
328 default=b'glob',
329 listsubrepos=False,
329 listsubrepos=False,
330 badfn=None,
330 badfn=None,
331 cwd=None,
331 cwd=None,
332 ):
332 ):
333 r = self._repo
333 r = self._repo
334 if not cwd:
334 if not cwd:
335 cwd = r.getcwd()
335 cwd = r.getcwd()
336 return matchmod.match(
336 return matchmod.match(
337 r.root,
337 r.root,
338 cwd,
338 cwd,
339 pats,
339 pats,
340 include,
340 include,
341 exclude,
341 exclude,
342 default,
342 default,
343 auditor=r.nofsauditor,
343 auditor=r.nofsauditor,
344 ctx=self,
344 ctx=self,
345 listsubrepos=listsubrepos,
345 listsubrepos=listsubrepos,
346 badfn=badfn,
346 badfn=badfn,
347 )
347 )
348
348
349 def diff(
349 def diff(
350 self,
350 self,
351 ctx2=None,
351 ctx2=None,
352 match=None,
352 match=None,
353 changes=None,
353 changes=None,
354 opts=None,
354 opts=None,
355 losedatafn=None,
355 losedatafn=None,
356 pathfn=None,
356 pathfn=None,
357 copy=None,
357 copy=None,
358 copysourcematch=None,
358 copysourcematch=None,
359 hunksfilterfn=None,
359 hunksfilterfn=None,
360 ):
360 ):
361 """Returns a diff generator for the given contexts and matcher"""
361 """Returns a diff generator for the given contexts and matcher"""
362 if ctx2 is None:
362 if ctx2 is None:
363 ctx2 = self.p1()
363 ctx2 = self.p1()
364 if ctx2 is not None:
364 if ctx2 is not None:
365 ctx2 = self._repo[ctx2]
365 ctx2 = self._repo[ctx2]
366 return patch.diff(
366 return patch.diff(
367 self._repo,
367 self._repo,
368 ctx2,
368 ctx2,
369 self,
369 self,
370 match=match,
370 match=match,
371 changes=changes,
371 changes=changes,
372 opts=opts,
372 opts=opts,
373 losedatafn=losedatafn,
373 losedatafn=losedatafn,
374 pathfn=pathfn,
374 pathfn=pathfn,
375 copy=copy,
375 copy=copy,
376 copysourcematch=copysourcematch,
376 copysourcematch=copysourcematch,
377 hunksfilterfn=hunksfilterfn,
377 hunksfilterfn=hunksfilterfn,
378 )
378 )
379
379
380 def dirs(self):
380 def dirs(self):
381 return self._manifest.dirs()
381 return self._manifest.dirs()
382
382
383 def hasdir(self, dir):
383 def hasdir(self, dir):
384 return self._manifest.hasdir(dir)
384 return self._manifest.hasdir(dir)
385
385
386 def status(
386 def status(
387 self,
387 self,
388 other=None,
388 other=None,
389 match=None,
389 match=None,
390 listignored=False,
390 listignored=False,
391 listclean=False,
391 listclean=False,
392 listunknown=False,
392 listunknown=False,
393 listsubrepos=False,
393 listsubrepos=False,
394 ):
394 ):
395 """return status of files between two nodes or node and working
395 """return status of files between two nodes or node and working
396 directory.
396 directory.
397
397
398 If other is None, compare this node with working directory.
398 If other is None, compare this node with working directory.
399
399
400 returns (modified, added, removed, deleted, unknown, ignored, clean)
400 returns (modified, added, removed, deleted, unknown, ignored, clean)
401 """
401 """
402
402
403 ctx1 = self
403 ctx1 = self
404 ctx2 = self._repo[other]
404 ctx2 = self._repo[other]
405
405
406 # This next code block is, admittedly, fragile logic that tests for
406 # This next code block is, admittedly, fragile logic that tests for
407 # reversing the contexts and wouldn't need to exist if it weren't for
407 # reversing the contexts and wouldn't need to exist if it weren't for
408 # the fast (and common) code path of comparing the working directory
408 # the fast (and common) code path of comparing the working directory
409 # with its first parent.
409 # with its first parent.
410 #
410 #
411 # What we're aiming for here is the ability to call:
411 # What we're aiming for here is the ability to call:
412 #
412 #
413 # workingctx.status(parentctx)
413 # workingctx.status(parentctx)
414 #
414 #
415 # If we always built the manifest for each context and compared those,
415 # If we always built the manifest for each context and compared those,
416 # then we'd be done. But the special case of the above call means we
416 # then we'd be done. But the special case of the above call means we
417 # just copy the manifest of the parent.
417 # just copy the manifest of the parent.
418 reversed = False
418 reversed = False
419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
419 if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
420 reversed = True
420 reversed = True
421 ctx1, ctx2 = ctx2, ctx1
421 ctx1, ctx2 = ctx2, ctx1
422
422
423 match = self._repo.narrowmatch(match)
423 match = self._repo.narrowmatch(match)
424 match = ctx2._matchstatus(ctx1, match)
424 match = ctx2._matchstatus(ctx1, match)
425 r = scmutil.status([], [], [], [], [], [], [])
425 r = scmutil.status([], [], [], [], [], [], [])
426 r = ctx2._buildstatus(
426 r = ctx2._buildstatus(
427 ctx1, r, match, listignored, listclean, listunknown
427 ctx1, r, match, listignored, listclean, listunknown
428 )
428 )
429
429
430 if reversed:
430 if reversed:
431 # Reverse added and removed. Clear deleted, unknown and ignored as
431 # Reverse added and removed. Clear deleted, unknown and ignored as
432 # these make no sense to reverse.
432 # these make no sense to reverse.
433 r = scmutil.status(
433 r = scmutil.status(
434 r.modified, r.removed, r.added, [], [], [], r.clean
434 r.modified, r.removed, r.added, [], [], [], r.clean
435 )
435 )
436
436
437 if listsubrepos:
437 if listsubrepos:
438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
438 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
439 try:
439 try:
440 rev2 = ctx2.subrev(subpath)
440 rev2 = ctx2.subrev(subpath)
441 except KeyError:
441 except KeyError:
442 # A subrepo that existed in node1 was deleted between
442 # A subrepo that existed in node1 was deleted between
443 # node1 and node2 (inclusive). Thus, ctx2's substate
443 # node1 and node2 (inclusive). Thus, ctx2's substate
444 # won't contain that subpath. The best we can do ignore it.
444 # won't contain that subpath. The best we can do ignore it.
445 rev2 = None
445 rev2 = None
446 submatch = matchmod.subdirmatcher(subpath, match)
446 submatch = matchmod.subdirmatcher(subpath, match)
447 s = sub.status(
447 s = sub.status(
448 rev2,
448 rev2,
449 match=submatch,
449 match=submatch,
450 ignored=listignored,
450 ignored=listignored,
451 clean=listclean,
451 clean=listclean,
452 unknown=listunknown,
452 unknown=listunknown,
453 listsubrepos=True,
453 listsubrepos=True,
454 )
454 )
455 for k in (
455 for k in (
456 'modified',
456 'modified',
457 'added',
457 'added',
458 'removed',
458 'removed',
459 'deleted',
459 'deleted',
460 'unknown',
460 'unknown',
461 'ignored',
461 'ignored',
462 'clean',
462 'clean',
463 ):
463 ):
464 rfiles, sfiles = getattr(r, k), getattr(s, k)
464 rfiles, sfiles = getattr(r, k), getattr(s, k)
465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
465 rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)
466
466
467 r.modified.sort()
467 r.modified.sort()
468 r.added.sort()
468 r.added.sort()
469 r.removed.sort()
469 r.removed.sort()
470 r.deleted.sort()
470 r.deleted.sort()
471 r.unknown.sort()
471 r.unknown.sort()
472 r.ignored.sort()
472 r.ignored.sort()
473 r.clean.sort()
473 r.clean.sort()
474
474
475 return r
475 return r
476
476
477
477
478 class changectx(basectx):
478 class changectx(basectx):
479 """A changecontext object makes access to data related to a particular
479 """A changecontext object makes access to data related to a particular
480 changeset convenient. It represents a read-only context already present in
480 changeset convenient. It represents a read-only context already present in
481 the repo."""
481 the repo."""
482
482
483 def __init__(self, repo, rev, node, maybe_filtered=True):
483 def __init__(self, repo, rev, node, maybe_filtered=True):
484 super(changectx, self).__init__(repo)
484 super(changectx, self).__init__(repo)
485 self._rev = rev
485 self._rev = rev
486 self._node = node
486 self._node = node
487 # When maybe_filtered is True, the revision might be affected by
487 # When maybe_filtered is True, the revision might be affected by
488 # changelog filtering and operation through the filtered changelog must be used.
488 # changelog filtering and operation through the filtered changelog must be used.
489 #
489 #
490 # When maybe_filtered is False, the revision has already been checked
490 # When maybe_filtered is False, the revision has already been checked
491 # against filtering and is not filtered. Operation through the
491 # against filtering and is not filtered. Operation through the
492 # unfiltered changelog might be used in some case.
492 # unfiltered changelog might be used in some case.
493 self._maybe_filtered = maybe_filtered
493 self._maybe_filtered = maybe_filtered
494
494
495 def __hash__(self):
495 def __hash__(self):
496 try:
496 try:
497 return hash(self._rev)
497 return hash(self._rev)
498 except AttributeError:
498 except AttributeError:
499 return id(self)
499 return id(self)
500
500
501 def __nonzero__(self):
501 def __nonzero__(self):
502 return self._rev != nullrev
502 return self._rev != nullrev
503
503
504 __bool__ = __nonzero__
504 __bool__ = __nonzero__
505
505
506 @propertycache
506 @propertycache
507 def _changeset(self):
507 def _changeset(self):
508 if self._maybe_filtered:
508 if self._maybe_filtered:
509 repo = self._repo
509 repo = self._repo
510 else:
510 else:
511 repo = self._repo.unfiltered()
511 repo = self._repo.unfiltered()
512 return repo.changelog.changelogrevision(self.rev())
512 return repo.changelog.changelogrevision(self.rev())
513
513
514 @propertycache
514 @propertycache
515 def _manifest(self):
515 def _manifest(self):
516 return self._manifestctx.read()
516 return self._manifestctx.read()
517
517
518 @property
518 @property
519 def _manifestctx(self):
519 def _manifestctx(self):
520 return self._repo.manifestlog[self._changeset.manifest]
520 return self._repo.manifestlog[self._changeset.manifest]
521
521
522 @propertycache
522 @propertycache
523 def _manifestdelta(self):
523 def _manifestdelta(self):
524 return self._manifestctx.readdelta()
524 return self._manifestctx.readdelta()
525
525
526 @propertycache
526 @propertycache
527 def _parents(self):
527 def _parents(self):
528 repo = self._repo
528 repo = self._repo
529 if self._maybe_filtered:
529 if self._maybe_filtered:
530 cl = repo.changelog
530 cl = repo.changelog
531 else:
531 else:
532 cl = repo.unfiltered().changelog
532 cl = repo.unfiltered().changelog
533
533
534 p1, p2 = cl.parentrevs(self._rev)
534 p1, p2 = cl.parentrevs(self._rev)
535 if p2 == nullrev:
535 if p2 == nullrev:
536 return [repo[p1]]
536 return [repo[p1]]
537 return [repo[p1], repo[p2]]
537 return [repo[p1], repo[p2]]
538
538
539 def changeset(self):
539 def changeset(self):
540 c = self._changeset
540 c = self._changeset
541 return (
541 return (
542 c.manifest,
542 c.manifest,
543 c.user,
543 c.user,
544 c.date,
544 c.date,
545 c.files,
545 c.files,
546 c.description,
546 c.description,
547 c.extra,
547 c.extra,
548 )
548 )
549
549
550 def manifestnode(self):
550 def manifestnode(self):
551 return self._changeset.manifest
551 return self._changeset.manifest
552
552
553 def user(self):
553 def user(self):
554 return self._changeset.user
554 return self._changeset.user
555
555
556 def date(self):
556 def date(self):
557 return self._changeset.date
557 return self._changeset.date
558
558
559 def files(self):
559 def files(self):
560 return self._changeset.files
560 return self._changeset.files
561
561
562 def filesmodified(self):
562 def filesmodified(self):
563 modified = set(self.files())
563 modified = set(self.files())
564 modified.difference_update(self.filesadded())
564 modified.difference_update(self.filesadded())
565 modified.difference_update(self.filesremoved())
565 modified.difference_update(self.filesremoved())
566 return sorted(modified)
566 return sorted(modified)
567
567
568 def filesadded(self):
568 def filesadded(self):
569 filesadded = self._changeset.filesadded
569 filesadded = self._changeset.filesadded
570 compute_on_none = True
570 compute_on_none = True
571 if self._repo.filecopiesmode == b'changeset-sidedata':
571 if self._repo.filecopiesmode == b'changeset-sidedata':
572 compute_on_none = False
572 compute_on_none = False
573 else:
573 else:
574 source = self._repo.ui.config(b'experimental', b'copies.read-from')
574 source = self._repo.ui.config(b'experimental', b'copies.read-from')
575 if source == b'changeset-only':
575 if source == b'changeset-only':
576 compute_on_none = False
576 compute_on_none = False
577 elif source != b'compatibility':
577 elif source != b'compatibility':
578 # filelog mode, ignore any changelog content
578 # filelog mode, ignore any changelog content
579 filesadded = None
579 filesadded = None
580 if filesadded is None:
580 if filesadded is None:
581 if compute_on_none:
581 if compute_on_none:
582 filesadded = copies.computechangesetfilesadded(self)
582 filesadded = copies.computechangesetfilesadded(self)
583 else:
583 else:
584 filesadded = []
584 filesadded = []
585 return filesadded
585 return filesadded
586
586
587 def filesremoved(self):
587 def filesremoved(self):
588 filesremoved = self._changeset.filesremoved
588 filesremoved = self._changeset.filesremoved
589 compute_on_none = True
589 compute_on_none = True
590 if self._repo.filecopiesmode == b'changeset-sidedata':
590 if self._repo.filecopiesmode == b'changeset-sidedata':
591 compute_on_none = False
591 compute_on_none = False
592 else:
592 else:
593 source = self._repo.ui.config(b'experimental', b'copies.read-from')
593 source = self._repo.ui.config(b'experimental', b'copies.read-from')
594 if source == b'changeset-only':
594 if source == b'changeset-only':
595 compute_on_none = False
595 compute_on_none = False
596 elif source != b'compatibility':
596 elif source != b'compatibility':
597 # filelog mode, ignore any changelog content
597 # filelog mode, ignore any changelog content
598 filesremoved = None
598 filesremoved = None
599 if filesremoved is None:
599 if filesremoved is None:
600 if compute_on_none:
600 if compute_on_none:
601 filesremoved = copies.computechangesetfilesremoved(self)
601 filesremoved = copies.computechangesetfilesremoved(self)
602 else:
602 else:
603 filesremoved = []
603 filesremoved = []
604 return filesremoved
604 return filesremoved
605
605
606 @propertycache
606 @propertycache
607 def _copies(self):
607 def _copies(self):
608 p1copies = self._changeset.p1copies
608 p1copies = self._changeset.p1copies
609 p2copies = self._changeset.p2copies
609 p2copies = self._changeset.p2copies
610 compute_on_none = True
610 compute_on_none = True
611 if self._repo.filecopiesmode == b'changeset-sidedata':
611 if self._repo.filecopiesmode == b'changeset-sidedata':
612 compute_on_none = False
612 compute_on_none = False
613 else:
613 else:
614 source = self._repo.ui.config(b'experimental', b'copies.read-from')
614 source = self._repo.ui.config(b'experimental', b'copies.read-from')
615 # If config says to get copy metadata only from changeset, then
615 # If config says to get copy metadata only from changeset, then
616 # return that, defaulting to {} if there was no copy metadata. In
616 # return that, defaulting to {} if there was no copy metadata. In
617 # compatibility mode, we return copy data from the changeset if it
617 # compatibility mode, we return copy data from the changeset if it
618 # was recorded there, and otherwise we fall back to getting it from
618 # was recorded there, and otherwise we fall back to getting it from
619 # the filelogs (below).
619 # the filelogs (below).
620 #
620 #
621 # If we are in compatiblity mode and there is not data in the
621 # If we are in compatiblity mode and there is not data in the
622 # changeset), we get the copy metadata from the filelogs.
622 # changeset), we get the copy metadata from the filelogs.
623 #
623 #
624 # otherwise, when config said to read only from filelog, we get the
624 # otherwise, when config said to read only from filelog, we get the
625 # copy metadata from the filelogs.
625 # copy metadata from the filelogs.
626 if source == b'changeset-only':
626 if source == b'changeset-only':
627 compute_on_none = False
627 compute_on_none = False
628 elif source != b'compatibility':
628 elif source != b'compatibility':
629 # filelog mode, ignore any changelog content
629 # filelog mode, ignore any changelog content
630 p1copies = p2copies = None
630 p1copies = p2copies = None
631 if p1copies is None:
631 if p1copies is None:
632 if compute_on_none:
632 if compute_on_none:
633 p1copies, p2copies = super(changectx, self)._copies
633 p1copies, p2copies = super(changectx, self)._copies
634 else:
634 else:
635 if p1copies is None:
635 if p1copies is None:
636 p1copies = {}
636 p1copies = {}
637 if p2copies is None:
637 if p2copies is None:
638 p2copies = {}
638 p2copies = {}
639 return p1copies, p2copies
639 return p1copies, p2copies
640
640
641 def description(self):
641 def description(self):
642 return self._changeset.description
642 return self._changeset.description
643
643
644 def branch(self):
644 def branch(self):
645 return encoding.tolocal(self._changeset.extra.get(b"branch"))
645 return encoding.tolocal(self._changeset.extra.get(b"branch"))
646
646
647 def closesbranch(self):
647 def closesbranch(self):
648 return b'close' in self._changeset.extra
648 return b'close' in self._changeset.extra
649
649
650 def extra(self):
650 def extra(self):
651 """Return a dict of extra information."""
651 """Return a dict of extra information."""
652 return self._changeset.extra
652 return self._changeset.extra
653
653
654 def tags(self):
654 def tags(self):
655 """Return a list of byte tag names"""
655 """Return a list of byte tag names"""
656 return self._repo.nodetags(self._node)
656 return self._repo.nodetags(self._node)
657
657
658 def bookmarks(self):
658 def bookmarks(self):
659 """Return a list of byte bookmark names."""
659 """Return a list of byte bookmark names."""
660 return self._repo.nodebookmarks(self._node)
660 return self._repo.nodebookmarks(self._node)
661
661
662 def phase(self):
662 def phase(self):
663 return self._repo._phasecache.phase(self._repo, self._rev)
663 return self._repo._phasecache.phase(self._repo, self._rev)
664
664
665 def hidden(self):
665 def hidden(self):
666 return self._rev in repoview.filterrevs(self._repo, b'visible')
666 return self._rev in repoview.filterrevs(self._repo, b'visible')
667
667
668 def isinmemory(self):
668 def isinmemory(self):
669 return False
669 return False
670
670
671 def children(self):
671 def children(self):
672 """return list of changectx contexts for each child changeset.
672 """return list of changectx contexts for each child changeset.
673
673
674 This returns only the immediate child changesets. Use descendants() to
674 This returns only the immediate child changesets. Use descendants() to
675 recursively walk children.
675 recursively walk children.
676 """
676 """
677 c = self._repo.changelog.children(self._node)
677 c = self._repo.changelog.children(self._node)
678 return [self._repo[x] for x in c]
678 return [self._repo[x] for x in c]
679
679
680 def ancestors(self):
680 def ancestors(self):
681 for a in self._repo.changelog.ancestors([self._rev]):
681 for a in self._repo.changelog.ancestors([self._rev]):
682 yield self._repo[a]
682 yield self._repo[a]
683
683
684 def descendants(self):
684 def descendants(self):
685 """Recursively yield all children of the changeset.
685 """Recursively yield all children of the changeset.
686
686
687 For just the immediate children, use children()
687 For just the immediate children, use children()
688 """
688 """
689 for d in self._repo.changelog.descendants([self._rev]):
689 for d in self._repo.changelog.descendants([self._rev]):
690 yield self._repo[d]
690 yield self._repo[d]
691
691
692 def filectx(self, path, fileid=None, filelog=None):
692 def filectx(self, path, fileid=None, filelog=None):
693 """get a file context from this changeset"""
693 """get a file context from this changeset"""
694 if fileid is None:
694 if fileid is None:
695 fileid = self.filenode(path)
695 fileid = self.filenode(path)
696 return filectx(
696 return filectx(
697 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
697 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
698 )
698 )
699
699
700 def ancestor(self, c2, warn=False):
700 def ancestor(self, c2, warn=False):
701 """return the "best" ancestor context of self and c2
701 """return the "best" ancestor context of self and c2
702
702
703 If there are multiple candidates, it will show a message and check
703 If there are multiple candidates, it will show a message and check
704 merge.preferancestor configuration before falling back to the
704 merge.preferancestor configuration before falling back to the
705 revlog ancestor."""
705 revlog ancestor."""
706 # deal with workingctxs
706 # deal with workingctxs
707 n2 = c2._node
707 n2 = c2._node
708 if n2 is None:
708 if n2 is None:
709 n2 = c2._parents[0]._node
709 n2 = c2._parents[0]._node
710 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
710 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
711 if not cahs:
711 if not cahs:
712 anc = nullid
712 anc = nullid
713 elif len(cahs) == 1:
713 elif len(cahs) == 1:
714 anc = cahs[0]
714 anc = cahs[0]
715 else:
715 else:
716 # experimental config: merge.preferancestor
716 # experimental config: merge.preferancestor
717 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
717 for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
718 try:
718 try:
719 ctx = scmutil.revsymbol(self._repo, r)
719 ctx = scmutil.revsymbol(self._repo, r)
720 except error.RepoLookupError:
720 except error.RepoLookupError:
721 continue
721 continue
722 anc = ctx.node()
722 anc = ctx.node()
723 if anc in cahs:
723 if anc in cahs:
724 break
724 break
725 else:
725 else:
726 anc = self._repo.changelog.ancestor(self._node, n2)
726 anc = self._repo.changelog.ancestor(self._node, n2)
727 if warn:
727 if warn:
728 self._repo.ui.status(
728 self._repo.ui.status(
729 (
729 (
730 _(b"note: using %s as ancestor of %s and %s\n")
730 _(b"note: using %s as ancestor of %s and %s\n")
731 % (short(anc), short(self._node), short(n2))
731 % (short(anc), short(self._node), short(n2))
732 )
732 )
733 + b''.join(
733 + b''.join(
734 _(
734 _(
735 b" alternatively, use --config "
735 b" alternatively, use --config "
736 b"merge.preferancestor=%s\n"
736 b"merge.preferancestor=%s\n"
737 )
737 )
738 % short(n)
738 % short(n)
739 for n in sorted(cahs)
739 for n in sorted(cahs)
740 if n != anc
740 if n != anc
741 )
741 )
742 )
742 )
743 return self._repo[anc]
743 return self._repo[anc]
744
744
745 def isancestorof(self, other):
745 def isancestorof(self, other):
746 """True if this changeset is an ancestor of other"""
746 """True if this changeset is an ancestor of other"""
747 return self._repo.changelog.isancestorrev(self._rev, other._rev)
747 return self._repo.changelog.isancestorrev(self._rev, other._rev)
748
748
749 def walk(self, match):
749 def walk(self, match):
750 '''Generates matching file names.'''
750 '''Generates matching file names.'''
751
751
752 # Wrap match.bad method to have message with nodeid
752 # Wrap match.bad method to have message with nodeid
753 def bad(fn, msg):
753 def bad(fn, msg):
754 # The manifest doesn't know about subrepos, so don't complain about
754 # The manifest doesn't know about subrepos, so don't complain about
755 # paths into valid subrepos.
755 # paths into valid subrepos.
756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
756 if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
757 return
757 return
758 match.bad(fn, _(b'no such file in rev %s') % self)
758 match.bad(fn, _(b'no such file in rev %s') % self)
759
759
760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
760 m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
761 return self._manifest.walk(m)
761 return self._manifest.walk(m)
762
762
763 def matches(self, match):
763 def matches(self, match):
764 return self.walk(match)
764 return self.walk(match)
765
765
766
766
767 class basefilectx(object):
767 class basefilectx(object):
768 """A filecontext object represents the common logic for its children:
768 """A filecontext object represents the common logic for its children:
769 filectx: read-only access to a filerevision that is already present
769 filectx: read-only access to a filerevision that is already present
770 in the repo,
770 in the repo,
771 workingfilectx: a filecontext that represents files from the working
771 workingfilectx: a filecontext that represents files from the working
772 directory,
772 directory,
773 memfilectx: a filecontext that represents files in-memory,
773 memfilectx: a filecontext that represents files in-memory,
774 """
774 """
775
775
776 @propertycache
776 @propertycache
777 def _filelog(self):
777 def _filelog(self):
778 return self._repo.file(self._path)
778 return self._repo.file(self._path)
779
779
780 @propertycache
780 @propertycache
781 def _changeid(self):
781 def _changeid(self):
782 if '_changectx' in self.__dict__:
782 if '_changectx' in self.__dict__:
783 return self._changectx.rev()
783 return self._changectx.rev()
784 elif '_descendantrev' in self.__dict__:
784 elif '_descendantrev' in self.__dict__:
785 # this file context was created from a revision with a known
785 # this file context was created from a revision with a known
786 # descendant, we can (lazily) correct for linkrev aliases
786 # descendant, we can (lazily) correct for linkrev aliases
787 return self._adjustlinkrev(self._descendantrev)
787 return self._adjustlinkrev(self._descendantrev)
788 else:
788 else:
789 return self._filelog.linkrev(self._filerev)
789 return self._filelog.linkrev(self._filerev)
790
790
791 @propertycache
791 @propertycache
792 def _filenode(self):
792 def _filenode(self):
793 if '_fileid' in self.__dict__:
793 if '_fileid' in self.__dict__:
794 return self._filelog.lookup(self._fileid)
794 return self._filelog.lookup(self._fileid)
795 else:
795 else:
796 return self._changectx.filenode(self._path)
796 return self._changectx.filenode(self._path)
797
797
798 @propertycache
798 @propertycache
799 def _filerev(self):
799 def _filerev(self):
800 return self._filelog.rev(self._filenode)
800 return self._filelog.rev(self._filenode)
801
801
802 @propertycache
802 @propertycache
803 def _repopath(self):
803 def _repopath(self):
804 return self._path
804 return self._path
805
805
806 def __nonzero__(self):
806 def __nonzero__(self):
807 try:
807 try:
808 self._filenode
808 self._filenode
809 return True
809 return True
810 except error.LookupError:
810 except error.LookupError:
811 # file is missing
811 # file is missing
812 return False
812 return False
813
813
814 __bool__ = __nonzero__
814 __bool__ = __nonzero__
815
815
816 def __bytes__(self):
816 def __bytes__(self):
817 try:
817 try:
818 return b"%s@%s" % (self.path(), self._changectx)
818 return b"%s@%s" % (self.path(), self._changectx)
819 except error.LookupError:
819 except error.LookupError:
820 return b"%s@???" % self.path()
820 return b"%s@???" % self.path()
821
821
822 __str__ = encoding.strmethod(__bytes__)
822 __str__ = encoding.strmethod(__bytes__)
823
823
824 def __repr__(self):
824 def __repr__(self):
825 return "<%s %s>" % (type(self).__name__, str(self))
825 return "<%s %s>" % (type(self).__name__, str(self))
826
826
827 def __hash__(self):
827 def __hash__(self):
828 try:
828 try:
829 return hash((self._path, self._filenode))
829 return hash((self._path, self._filenode))
830 except AttributeError:
830 except AttributeError:
831 return id(self)
831 return id(self)
832
832
833 def __eq__(self, other):
833 def __eq__(self, other):
834 try:
834 try:
835 return (
835 return (
836 type(self) == type(other)
836 type(self) == type(other)
837 and self._path == other._path
837 and self._path == other._path
838 and self._filenode == other._filenode
838 and self._filenode == other._filenode
839 )
839 )
840 except AttributeError:
840 except AttributeError:
841 return False
841 return False
842
842
843 def __ne__(self, other):
843 def __ne__(self, other):
844 return not (self == other)
844 return not (self == other)
845
845
846 def filerev(self):
846 def filerev(self):
847 return self._filerev
847 return self._filerev
848
848
849 def filenode(self):
849 def filenode(self):
850 return self._filenode
850 return self._filenode
851
851
852 @propertycache
852 @propertycache
853 def _flags(self):
853 def _flags(self):
854 return self._changectx.flags(self._path)
854 return self._changectx.flags(self._path)
855
855
856 def flags(self):
856 def flags(self):
857 return self._flags
857 return self._flags
858
858
859 def filelog(self):
859 def filelog(self):
860 return self._filelog
860 return self._filelog
861
861
862 def rev(self):
862 def rev(self):
863 return self._changeid
863 return self._changeid
864
864
865 def linkrev(self):
865 def linkrev(self):
866 return self._filelog.linkrev(self._filerev)
866 return self._filelog.linkrev(self._filerev)
867
867
868 def node(self):
868 def node(self):
869 return self._changectx.node()
869 return self._changectx.node()
870
870
871 def hex(self):
871 def hex(self):
872 return self._changectx.hex()
872 return self._changectx.hex()
873
873
874 def user(self):
874 def user(self):
875 return self._changectx.user()
875 return self._changectx.user()
876
876
877 def date(self):
877 def date(self):
878 return self._changectx.date()
878 return self._changectx.date()
879
879
880 def files(self):
880 def files(self):
881 return self._changectx.files()
881 return self._changectx.files()
882
882
883 def description(self):
883 def description(self):
884 return self._changectx.description()
884 return self._changectx.description()
885
885
886 def branch(self):
886 def branch(self):
887 return self._changectx.branch()
887 return self._changectx.branch()
888
888
889 def extra(self):
889 def extra(self):
890 return self._changectx.extra()
890 return self._changectx.extra()
891
891
892 def phase(self):
892 def phase(self):
893 return self._changectx.phase()
893 return self._changectx.phase()
894
894
895 def phasestr(self):
895 def phasestr(self):
896 return self._changectx.phasestr()
896 return self._changectx.phasestr()
897
897
898 def obsolete(self):
898 def obsolete(self):
899 return self._changectx.obsolete()
899 return self._changectx.obsolete()
900
900
901 def instabilities(self):
901 def instabilities(self):
902 return self._changectx.instabilities()
902 return self._changectx.instabilities()
903
903
904 def manifest(self):
904 def manifest(self):
905 return self._changectx.manifest()
905 return self._changectx.manifest()
906
906
907 def changectx(self):
907 def changectx(self):
908 return self._changectx
908 return self._changectx
909
909
910 def renamed(self):
910 def renamed(self):
911 return self._copied
911 return self._copied
912
912
913 def copysource(self):
913 def copysource(self):
914 return self._copied and self._copied[0]
914 return self._copied and self._copied[0]
915
915
916 def repo(self):
916 def repo(self):
917 return self._repo
917 return self._repo
918
918
919 def size(self):
919 def size(self):
920 return len(self.data())
920 return len(self.data())
921
921
922 def path(self):
922 def path(self):
923 return self._path
923 return self._path
924
924
925 def isbinary(self):
925 def isbinary(self):
926 try:
926 try:
927 return stringutil.binary(self.data())
927 return stringutil.binary(self.data())
928 except IOError:
928 except IOError:
929 return False
929 return False
930
930
931 def isexec(self):
931 def isexec(self):
932 return b'x' in self.flags()
932 return b'x' in self.flags()
933
933
934 def islink(self):
934 def islink(self):
935 return b'l' in self.flags()
935 return b'l' in self.flags()
936
936
937 def isabsent(self):
937 def isabsent(self):
938 """whether this filectx represents a file not in self._changectx
938 """whether this filectx represents a file not in self._changectx
939
939
940 This is mainly for merge code to detect change/delete conflicts. This is
940 This is mainly for merge code to detect change/delete conflicts. This is
941 expected to be True for all subclasses of basectx."""
941 expected to be True for all subclasses of basectx."""
942 return False
942 return False
943
943
944 _customcmp = False
944 _customcmp = False
945
945
946 def cmp(self, fctx):
946 def cmp(self, fctx):
947 """compare with other file context
947 """compare with other file context
948
948
949 returns True if different than fctx.
949 returns True if different than fctx.
950 """
950 """
951 if fctx._customcmp:
951 if fctx._customcmp:
952 return fctx.cmp(self)
952 return fctx.cmp(self)
953
953
954 if self._filenode is None:
954 if self._filenode is None:
955 raise error.ProgrammingError(
955 raise error.ProgrammingError(
956 b'filectx.cmp() must be reimplemented if not backed by revlog'
956 b'filectx.cmp() must be reimplemented if not backed by revlog'
957 )
957 )
958
958
959 if fctx._filenode is None:
959 if fctx._filenode is None:
960 if self._repo._encodefilterpats:
960 if self._repo._encodefilterpats:
961 # can't rely on size() because wdir content may be decoded
961 # can't rely on size() because wdir content may be decoded
962 return self._filelog.cmp(self._filenode, fctx.data())
962 return self._filelog.cmp(self._filenode, fctx.data())
963 if self.size() - 4 == fctx.size():
963 if self.size() - 4 == fctx.size():
964 # size() can match:
964 # size() can match:
965 # if file data starts with '\1\n', empty metadata block is
965 # if file data starts with '\1\n', empty metadata block is
966 # prepended, which adds 4 bytes to filelog.size().
966 # prepended, which adds 4 bytes to filelog.size().
967 return self._filelog.cmp(self._filenode, fctx.data())
967 return self._filelog.cmp(self._filenode, fctx.data())
968 if self.size() == fctx.size():
968 if self.size() == fctx.size():
969 # size() matches: need to compare content
969 # size() matches: need to compare content
970 return self._filelog.cmp(self._filenode, fctx.data())
970 return self._filelog.cmp(self._filenode, fctx.data())
971
971
972 # size() differs
972 # size() differs
973 return True
973 return True
974
974
975 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
975 def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
976 """return the first ancestor of <srcrev> introducing <fnode>
976 """return the first ancestor of <srcrev> introducing <fnode>
977
977
978 If the linkrev of the file revision does not point to an ancestor of
978 If the linkrev of the file revision does not point to an ancestor of
979 srcrev, we'll walk down the ancestors until we find one introducing
979 srcrev, we'll walk down the ancestors until we find one introducing
980 this file revision.
980 this file revision.
981
981
982 :srcrev: the changeset revision we search ancestors from
982 :srcrev: the changeset revision we search ancestors from
983 :inclusive: if true, the src revision will also be checked
983 :inclusive: if true, the src revision will also be checked
984 :stoprev: an optional revision to stop the walk at. If no introduction
984 :stoprev: an optional revision to stop the walk at. If no introduction
985 of this file content could be found before this floor
985 of this file content could be found before this floor
986 revision, the function will returns "None" and stops its
986 revision, the function will returns "None" and stops its
987 iteration.
987 iteration.
988 """
988 """
989 repo = self._repo
989 repo = self._repo
990 cl = repo.unfiltered().changelog
990 cl = repo.unfiltered().changelog
991 mfl = repo.manifestlog
991 mfl = repo.manifestlog
992 # fetch the linkrev
992 # fetch the linkrev
993 lkr = self.linkrev()
993 lkr = self.linkrev()
994 if srcrev == lkr:
994 if srcrev == lkr:
995 return lkr
995 return lkr
996 # hack to reuse ancestor computation when searching for renames
996 # hack to reuse ancestor computation when searching for renames
997 memberanc = getattr(self, '_ancestrycontext', None)
997 memberanc = getattr(self, '_ancestrycontext', None)
998 iteranc = None
998 iteranc = None
999 if srcrev is None:
999 if srcrev is None:
1000 # wctx case, used by workingfilectx during mergecopy
1000 # wctx case, used by workingfilectx during mergecopy
1001 revs = [p.rev() for p in self._repo[None].parents()]
1001 revs = [p.rev() for p in self._repo[None].parents()]
1002 inclusive = True # we skipped the real (revless) source
1002 inclusive = True # we skipped the real (revless) source
1003 else:
1003 else:
1004 revs = [srcrev]
1004 revs = [srcrev]
1005 if memberanc is None:
1005 if memberanc is None:
1006 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1006 memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1007 # check if this linkrev is an ancestor of srcrev
1007 # check if this linkrev is an ancestor of srcrev
1008 if lkr not in memberanc:
1008 if lkr not in memberanc:
1009 if iteranc is None:
1009 if iteranc is None:
1010 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1010 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
1011 fnode = self._filenode
1011 fnode = self._filenode
1012 path = self._path
1012 path = self._path
1013 for a in iteranc:
1013 for a in iteranc:
1014 if stoprev is not None and a < stoprev:
1014 if stoprev is not None and a < stoprev:
1015 return None
1015 return None
1016 ac = cl.read(a) # get changeset data (we avoid object creation)
1016 ac = cl.read(a) # get changeset data (we avoid object creation)
1017 if path in ac[3]: # checking the 'files' field.
1017 if path in ac[3]: # checking the 'files' field.
1018 # The file has been touched, check if the content is
1018 # The file has been touched, check if the content is
1019 # similar to the one we search for.
1019 # similar to the one we search for.
1020 if fnode == mfl[ac[0]].readfast().get(path):
1020 if fnode == mfl[ac[0]].readfast().get(path):
1021 return a
1021 return a
1022 # In theory, we should never get out of that loop without a result.
1022 # In theory, we should never get out of that loop without a result.
1023 # But if manifest uses a buggy file revision (not children of the
1023 # But if manifest uses a buggy file revision (not children of the
1024 # one it replaces) we could. Such a buggy situation will likely
1024 # one it replaces) we could. Such a buggy situation will likely
1025 # result is crash somewhere else at to some point.
1025 # result is crash somewhere else at to some point.
1026 return lkr
1026 return lkr
1027
1027
1028 def isintroducedafter(self, changelogrev):
1028 def isintroducedafter(self, changelogrev):
1029 """True if a filectx has been introduced after a given floor revision
1029 """True if a filectx has been introduced after a given floor revision
1030 """
1030 """
1031 if self.linkrev() >= changelogrev:
1031 if self.linkrev() >= changelogrev:
1032 return True
1032 return True
1033 introrev = self._introrev(stoprev=changelogrev)
1033 introrev = self._introrev(stoprev=changelogrev)
1034 if introrev is None:
1034 if introrev is None:
1035 return False
1035 return False
1036 return introrev >= changelogrev
1036 return introrev >= changelogrev
1037
1037
1038 def introrev(self):
1038 def introrev(self):
1039 """return the rev of the changeset which introduced this file revision
1039 """return the rev of the changeset which introduced this file revision
1040
1040
1041 This method is different from linkrev because it take into account the
1041 This method is different from linkrev because it take into account the
1042 changeset the filectx was created from. It ensures the returned
1042 changeset the filectx was created from. It ensures the returned
1043 revision is one of its ancestors. This prevents bugs from
1043 revision is one of its ancestors. This prevents bugs from
1044 'linkrev-shadowing' when a file revision is used by multiple
1044 'linkrev-shadowing' when a file revision is used by multiple
1045 changesets.
1045 changesets.
1046 """
1046 """
1047 return self._introrev()
1047 return self._introrev()
1048
1048
1049 def _introrev(self, stoprev=None):
1049 def _introrev(self, stoprev=None):
1050 """
1050 """
1051 Same as `introrev` but, with an extra argument to limit changelog
1051 Same as `introrev` but, with an extra argument to limit changelog
1052 iteration range in some internal usecase.
1052 iteration range in some internal usecase.
1053
1053
1054 If `stoprev` is set, the `introrev` will not be searched past that
1054 If `stoprev` is set, the `introrev` will not be searched past that
1055 `stoprev` revision and "None" might be returned. This is useful to
1055 `stoprev` revision and "None" might be returned. This is useful to
1056 limit the iteration range.
1056 limit the iteration range.
1057 """
1057 """
1058 toprev = None
1058 toprev = None
1059 attrs = vars(self)
1059 attrs = vars(self)
1060 if '_changeid' in attrs:
1060 if '_changeid' in attrs:
1061 # We have a cached value already
1061 # We have a cached value already
1062 toprev = self._changeid
1062 toprev = self._changeid
1063 elif '_changectx' in attrs:
1063 elif '_changectx' in attrs:
1064 # We know which changelog entry we are coming from
1064 # We know which changelog entry we are coming from
1065 toprev = self._changectx.rev()
1065 toprev = self._changectx.rev()
1066
1066
1067 if toprev is not None:
1067 if toprev is not None:
1068 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1068 return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
1069 elif '_descendantrev' in attrs:
1069 elif '_descendantrev' in attrs:
1070 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1070 introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
1071 # be nice and cache the result of the computation
1071 # be nice and cache the result of the computation
1072 if introrev is not None:
1072 if introrev is not None:
1073 self._changeid = introrev
1073 self._changeid = introrev
1074 return introrev
1074 return introrev
1075 else:
1075 else:
1076 return self.linkrev()
1076 return self.linkrev()
1077
1077
1078 def introfilectx(self):
1078 def introfilectx(self):
1079 """Return filectx having identical contents, but pointing to the
1079 """Return filectx having identical contents, but pointing to the
1080 changeset revision where this filectx was introduced"""
1080 changeset revision where this filectx was introduced"""
1081 introrev = self.introrev()
1081 introrev = self.introrev()
1082 if self.rev() == introrev:
1082 if self.rev() == introrev:
1083 return self
1083 return self
1084 return self.filectx(self.filenode(), changeid=introrev)
1084 return self.filectx(self.filenode(), changeid=introrev)
1085
1085
1086 def _parentfilectx(self, path, fileid, filelog):
1086 def _parentfilectx(self, path, fileid, filelog):
1087 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1087 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1088 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1088 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1089 if '_changeid' in vars(self) or '_changectx' in vars(self):
1089 if '_changeid' in vars(self) or '_changectx' in vars(self):
1090 # If self is associated with a changeset (probably explicitly
1090 # If self is associated with a changeset (probably explicitly
1091 # fed), ensure the created filectx is associated with a
1091 # fed), ensure the created filectx is associated with a
1092 # changeset that is an ancestor of self.changectx.
1092 # changeset that is an ancestor of self.changectx.
1093 # This lets us later use _adjustlinkrev to get a correct link.
1093 # This lets us later use _adjustlinkrev to get a correct link.
1094 fctx._descendantrev = self.rev()
1094 fctx._descendantrev = self.rev()
1095 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1095 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1096 elif '_descendantrev' in vars(self):
1096 elif '_descendantrev' in vars(self):
1097 # Otherwise propagate _descendantrev if we have one associated.
1097 # Otherwise propagate _descendantrev if we have one associated.
1098 fctx._descendantrev = self._descendantrev
1098 fctx._descendantrev = self._descendantrev
1099 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1099 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1100 return fctx
1100 return fctx
1101
1101
1102 def parents(self):
1102 def parents(self):
1103 _path = self._path
1103 _path = self._path
1104 fl = self._filelog
1104 fl = self._filelog
1105 parents = self._filelog.parents(self._filenode)
1105 parents = self._filelog.parents(self._filenode)
1106 pl = [(_path, node, fl) for node in parents if node != nullid]
1106 pl = [(_path, node, fl) for node in parents if node != nullid]
1107
1107
1108 r = fl.renamed(self._filenode)
1108 r = fl.renamed(self._filenode)
1109 if r:
1109 if r:
1110 # - In the simple rename case, both parent are nullid, pl is empty.
1110 # - In the simple rename case, both parent are nullid, pl is empty.
1111 # - In case of merge, only one of the parent is null id and should
1111 # - In case of merge, only one of the parent is null id and should
1112 # be replaced with the rename information. This parent is -always-
1112 # be replaced with the rename information. This parent is -always-
1113 # the first one.
1113 # the first one.
1114 #
1114 #
1115 # As null id have always been filtered out in the previous list
1115 # As null id have always been filtered out in the previous list
1116 # comprehension, inserting to 0 will always result in "replacing
1116 # comprehension, inserting to 0 will always result in "replacing
1117 # first nullid parent with rename information.
1117 # first nullid parent with rename information.
1118 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1118 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1119
1119
1120 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1120 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1121
1121
def p1(self):
    """Return the first parent filectx."""
    parents = self.parents()
    return parents[0]
1124
1124
def p2(self):
    """Return the second parent filectx, or a null filectx if there is
    only one parent."""
    parents = self.parents()
    if len(parents) == 2:
        return parents[1]
    return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1130
1130
def annotate(self, follow=False, skiprevs=None, diffopts=None):
    """Returns a list of annotateline objects for each line in the file

    - line.fctx is the filectx of the node where that line was last changed
    - line.lineno is the line number at the first appearance in the managed
      file
    - line.text is the data on that line (including newline character)
    """
    getlog = util.lrucachefunc(lambda x: self._repo.file(x))

    def parents(f):
        # Cut _descendantrev here to mitigate the penalty of lazy linkrev
        # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
        # from the topmost introrev (= srcrev) down to p.linkrev() if it
        # isn't an ancestor of the srcrev.
        f._changeid
        pl = f.parents()

        # Don't return renamed parents if we aren't following.
        if not follow:
            pl = [p for p in pl if p.path() == f.path()]

        # renamed filectx won't have a filelog yet, so set it
        # from the cache to save time
        for p in pl:
            # PEP 8 idiom: `x not in y`, not `not x in y`
            if '_filelog' not in p.__dict__:
                p._filelog = getlog(p.path())

        return pl

    # use linkrev to find the first changeset where self appeared
    base = self.introfilectx()
    if getattr(base, '_ancestrycontext', None) is None:
        cl = self._repo.changelog
        if base.rev() is None:
            # wctx is not inclusive, but works because _ancestrycontext
            # is used to test filelog revisions
            ac = cl.ancestors(
                [p.rev() for p in base.parents()], inclusive=True
            )
        else:
            ac = cl.ancestors([base.rev()], inclusive=True)
        base._ancestrycontext = ac

    return dagop.annotate(
        base, parents, skiprevs=skiprevs, diffopts=diffopts
    )
1178
1178
def ancestors(self, followfirst=False):
    """Yield ancestor file contexts, highest (linkrev, filenode) key
    first.

    When ``followfirst`` is true, only the first parent of each visited
    context is explored.
    """
    limit = 1 if followfirst else None
    pending = {}
    current = self
    while True:
        for parent in current.parents()[:limit]:
            pending[(parent.linkrev(), parent.filenode())] = parent
        if not pending:
            # exhausted: nothing left to visit
            return
        current = pending.pop(max(pending))
        yield current
1194
1194
def decodeddata(self):
    """Return ``data()`` passed through the repository decoding filters.

    This is often equivalent to how the data would be expressed on disk.
    """
    repo = self._repo
    return repo.wwritedata(self.path(), self.data())
1201
1201
1202
1202
class filectx(basefilectx):
    """Convenient access to the data attached to one particular file
    revision."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), (
            b"bad args: changeid=%r, fileid=%r, changectx=%r"
            % (changeid, fileid, changectx,)
        )

        if filelog is not None:
            self._filelog = filelog
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # A linkrev may point to any revision in the repository. When
            # the repository is filtered this can make `filectx` try to
            # build a `changectx` for a filtered revision, so fall back to
            # the unfiltered version of the repository. This should not be
            # an issue because `changectx` objects obtained from `filectx`
            # are not used in complex operations that care about filtering.
            #
            # This fallback is a cheap and dirty fix that prevents several
            # crashes. It does not guarantee correct behavior, but the
            # behavior was not correct before filtering either, and
            # "incorrect behavior" is seen as better than "crash".
            #
            # Linkrevs have several serious troubles with filtering that
            # are complicated to solve. Proper handling of the issue here
            # should be considered when solving the linkrev issues.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the stored size of this file revision."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        path = self.path()
        filenode = self._filenode
        for parent in self._changectx.parents():
            try:
                if filenode == parent.filenode(path):
                    return None
            except error.LookupError:
                pass
        return renamed

    def children(self):
        # hard for renames
        nodes = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=n, filelog=self._filelog)
            for n in nodes
        ]
1326
1326
1327
1327
class committablectx(basectx):
    """Common functionality for contexts that want the ability to commit,
    e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # fall back to full repository status when no status was supplied
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of files touched by this context."""
        st = self._status
        return sorted(st.modified + st.added + st.removed)

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        marks = []
        for parent in self.parents():
            marks.extend(parent.bookmarks())
        return marks

    def phase(self):
        # a commit can never be in an earlier phase than its parents
        result = phases.newcommitphase(self._repo.ui)
        for parent in self.parents():
            result = max(result, parent.phase())
        return result

    def hidden(self):
        return False

    def children(self):
        return []

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        for parent in self._parents:
            yield parent
        for rev in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[rev]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1480
1480
1481
1481
class workingctx(committablectx):
    """A workingctx object makes access to data related to
    the current working directory convenient.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    changes - a list of file lists as returned by localrepo.status()
        or None to use the repository status.
    """

    def __init__(
        self, repo, text=b"", user=None, date=None, extra=None, changes=None
    ):
        branch = None
        # Only consult the dirstate when the caller did not name a branch.
        needbranch = not extra or b'branch' not in extra
        if needbranch:
            try:
                branch = repo.dirstate.branch()
            except UnicodeDecodeError:
                raise error.Abort(_(b'branch name not in UTF-8!'))
        super(workingctx, self).__init__(
            repo, text, user, date, extra, changes, branch=branch
        )
1504
1504
def __iter__(self):
    """Iterate over tracked files (every dirstate entry not marked
    removed)."""
    dirstate = self._repo.dirstate
    for f in dirstate:
        if dirstate[f] != b'r':
            yield f

def __contains__(self, key):
    """True when ``key`` is tracked: neither unknown (?) nor removed (r)."""
    return self._repo.dirstate[key] not in b"?r"

def hex(self):
    """Return the hex id of the working-directory pseudo node."""
    return wdirhex
1516
1516
@propertycache
def _parents(self):
    """Parent changectxs of the working directory (one or two)."""
    nodes = self._repo.dirstate.parents()
    if nodes[1] == nullid:
        # drop the unset second parent
        nodes = nodes[:-1]
    # use unfiltered repo to delay/avoid loading obsmarkers
    unfi = self._repo.unfiltered()
    return [
        changectx(self._repo, unfi.changelog.rev(n), n, maybe_filtered=False)
        for n in nodes
    ]
1530
1530
def _fileinfo(self, path):
    """Look up ``path`` in the manifest, forcing manifest construction."""
    # populate __dict__['_manifest'] as workingctx has no _manifestdelta
    self._manifest
    return super(workingctx, self)._fileinfo(path)
1535
1535
def _buildflagfunc(self):
    """Build a fallback function for getting file flags when the
    filesystem does not support them."""
    copysource = self._repo.dirstate.copies().get
    parents = self.parents()
    if len(parents) < 2:
        # when we have one parent, it's easy: copy from parent
        manifest = parents[0].manifest()

        def flagof(f):
            return manifest.flags(copysource(f, f))

    else:
        # merges are tricky: we try to reconstruct the unstored
        # result from the merge (issue1802)
        p1, p2 = parents
        pa = p1.ancestor(p2)
        m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()

        def flagof(f):
            f = copysource(f, f)  # may be wrong for merges with copies
            fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
            if fl1 == fl2:
                return fl1
            if fl1 == fla:
                return fl2
            if fl2 == fla:
                return fl1
            return b''  # punt for conflicts

    return flagof
1569
1569
@propertycache
def _flagfunc(self):
    """Flag-lookup function, preferring native dirstate support."""
    return self._repo.dirstate.flagfunc(self._buildflagfunc)
1573
1573
def flags(self, path):
    """Return the flags (b'l', b'x' or b'') for ``path``."""
    if '_manifest' in self.__dict__:
        # a cached manifest is authoritative when present
        try:
            return self._manifest.flags(path)
        except KeyError:
            return b''

    try:
        return self._flagfunc(path)
    except OSError:
        return b''
1585
1585
def filectx(self, path, filelog=None):
    """get a file context from the working directory"""
    return workingfilectx(self._repo, path, workingctx=self, filelog=filelog)
1591
1591
def dirty(self, missing=False, merge=True, branch=True):
    """check whether a working directory is modified"""
    # subrepositories are checked first
    for subpath in sorted(self.substate):
        if self.sub(subpath).dirty(missing=missing):
            return True
    # then the working directory itself
    return (
        (merge and self.p2())
        or (branch and self.branch() != self.p1().branch())
        or self.modified()
        or self.added()
        or self.removed()
        or (missing and self.deleted())
    )
1607
1607
def add(self, list, prefix=b""):
    """Schedule the given files for addition in the dirstate.

    Returns the subset of ``list`` that was rejected: files that do not
    exist or are neither regular files nor symlinks. Warns (without
    rejecting) about very large files and already-tracked files.
    """
    with self._repo.wlock():
        ui, ds = self._repo.ui, self._repo.dirstate
        uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
        rejected = []
        lstat = self._repo.wvfs.lstat
        # The large-file warning threshold is loop-invariant: read the
        # config once instead of once per file.
        limit = ui.configbytes(b'ui', b'large-file-limit')
        for f in list:
            # ds.pathto() returns an absolute file when this is invoked from
            # the keyword extension. That gets flagged as non-portable on
            # Windows, since it contains the drive letter and colon.
            scmutil.checkportable(ui, os.path.join(prefix, f))
            try:
                st = lstat(f)
            except OSError:
                ui.warn(_(b"%s does not exist!\n") % uipath(f))
                rejected.append(f)
                continue
            if limit != 0 and st.st_size > limit:
                ui.warn(
                    _(
                        b"%s: up to %d MB of RAM may be required "
                        b"to manage this file\n"
                        b"(use 'hg revert %s' to cancel the "
                        b"pending addition)\n"
                    )
                    % (f, 3 * st.st_size // 1000000, uipath(f))
                )
            if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                ui.warn(
                    _(
                        b"%s not added: only files and symlinks "
                        b"supported currently\n"
                    )
                    % uipath(f)
                )
                rejected.append(f)
            elif ds[f] in b'amn':
                ui.warn(_(b"%s already tracked!\n") % uipath(f))
            elif ds[f] == b'r':
                ds.normallookup(f)
            else:
                ds.add(f)
        return rejected
1652
1652
def forget(self, files, prefix=b""):
    """Stop tracking ``files`` without removing them from disk.

    Returns the files that could not be forgotten (not tracked).
    """
    with self._repo.wlock():
        ds = self._repo.dirstate
        uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
        failed = []
        for f in files:
            if f not in ds:
                self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
                failed.append(f)
            elif ds[f] != b'a':
                # tracked in a parent: mark for removal
                ds.remove(f)
            else:
                # freshly added: simply drop the record
                ds.drop(f)
        return failed
1667
1667
def copy(self, source, dest):
    """Record in the dirstate that ``dest`` was copied from ``source``."""
    repo = self._repo
    try:
        st = repo.wvfs.lstat(dest)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
        repo.ui.warn(
            _(b"%s does not exist!\n") % repo.dirstate.pathto(dest)
        )
        return
    if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
        repo.ui.warn(
            _(b"copy failed: %s is not a file or a symbolic link\n")
            % repo.dirstate.pathto(dest)
        )
        return
    with repo.wlock():
        ds = repo.dirstate
        if ds[dest] in b'?':
            ds.add(dest)
        elif ds[dest] in b'r':
            ds.normallookup(dest)
        ds.copy(source, dest)
1691
1691
1692 def match(
1692 def match(
1693 self,
1693 self,
1694 pats=None,
1694 pats=None,
1695 include=None,
1695 include=None,
1696 exclude=None,
1696 exclude=None,
1697 default=b'glob',
1697 default=b'glob',
1698 listsubrepos=False,
1698 listsubrepos=False,
1699 badfn=None,
1699 badfn=None,
1700 cwd=None,
1700 cwd=None,
1701 ):
1701 ):
1702 r = self._repo
1702 r = self._repo
1703 if not cwd:
1703 if not cwd:
1704 cwd = r.getcwd()
1704 cwd = r.getcwd()
1705
1705
1706 # Only a case insensitive filesystem needs magic to translate user input
1706 # Only a case insensitive filesystem needs magic to translate user input
1707 # to actual case in the filesystem.
1707 # to actual case in the filesystem.
1708 icasefs = not util.fscasesensitive(r.root)
1708 icasefs = not util.fscasesensitive(r.root)
1709 return matchmod.match(
1709 return matchmod.match(
1710 r.root,
1710 r.root,
1711 cwd,
1711 cwd,
1712 pats,
1712 pats,
1713 include,
1713 include,
1714 exclude,
1714 exclude,
1715 default,
1715 default,
1716 auditor=r.auditor,
1716 auditor=r.auditor,
1717 ctx=self,
1717 ctx=self,
1718 listsubrepos=listsubrepos,
1718 listsubrepos=listsubrepos,
1719 badfn=badfn,
1719 badfn=badfn,
1720 icasefs=icasefs,
1720 icasefs=icasefs,
1721 )
1721 )
1722
1722
1723 def _filtersuspectsymlink(self, files):
1723 def _filtersuspectsymlink(self, files):
1724 if not files or self._repo.dirstate._checklink:
1724 if not files or self._repo.dirstate._checklink:
1725 return files
1725 return files
1726
1726
1727 # Symlink placeholders may get non-symlink-like contents
1727 # Symlink placeholders may get non-symlink-like contents
1728 # via user error or dereferencing by NFS or Samba servers,
1728 # via user error or dereferencing by NFS or Samba servers,
1729 # so we filter out any placeholders that don't look like a
1729 # so we filter out any placeholders that don't look like a
1730 # symlink
1730 # symlink
1731 sane = []
1731 sane = []
1732 for f in files:
1732 for f in files:
1733 if self.flags(f) == b'l':
1733 if self.flags(f) == b'l':
1734 d = self[f].data()
1734 d = self[f].data()
1735 if (
1735 if (
1736 d == b''
1736 d == b''
1737 or len(d) >= 1024
1737 or len(d) >= 1024
1738 or b'\n' in d
1738 or b'\n' in d
1739 or stringutil.binary(d)
1739 or stringutil.binary(d)
1740 ):
1740 ):
1741 self._repo.ui.debug(
1741 self._repo.ui.debug(
1742 b'ignoring suspect symlink placeholder "%s"\n' % f
1742 b'ignoring suspect symlink placeholder "%s"\n' % f
1743 )
1743 )
1744 continue
1744 continue
1745 sane.append(f)
1745 sane.append(f)
1746 return sane
1746 return sane
1747
1747
1748 def _checklookup(self, files):
1748 def _checklookup(self, files):
1749 # check for any possibly clean files
1749 # check for any possibly clean files
1750 if not files:
1750 if not files:
1751 return [], [], []
1751 return [], [], []
1752
1752
1753 modified = []
1753 modified = []
1754 deleted = []
1754 deleted = []
1755 fixup = []
1755 fixup = []
1756 pctx = self._parents[0]
1756 pctx = self._parents[0]
1757 # do a full compare of any files that might have changed
1757 # do a full compare of any files that might have changed
1758 for f in sorted(files):
1758 for f in sorted(files):
1759 try:
1759 try:
1760 # This will return True for a file that got replaced by a
1760 # This will return True for a file that got replaced by a
1761 # directory in the interim, but fixing that is pretty hard.
1761 # directory in the interim, but fixing that is pretty hard.
1762 if (
1762 if (
1763 f not in pctx
1763 f not in pctx
1764 or self.flags(f) != pctx.flags(f)
1764 or self.flags(f) != pctx.flags(f)
1765 or pctx[f].cmp(self[f])
1765 or pctx[f].cmp(self[f])
1766 ):
1766 ):
1767 modified.append(f)
1767 modified.append(f)
1768 else:
1768 else:
1769 fixup.append(f)
1769 fixup.append(f)
1770 except (IOError, OSError):
1770 except (IOError, OSError):
1771 # A file become inaccessible in between? Mark it as deleted,
1771 # A file become inaccessible in between? Mark it as deleted,
1772 # matching dirstate behavior (issue5584).
1772 # matching dirstate behavior (issue5584).
1773 # The dirstate has more complex behavior around whether a
1773 # The dirstate has more complex behavior around whether a
1774 # missing file matches a directory, etc, but we don't need to
1774 # missing file matches a directory, etc, but we don't need to
1775 # bother with that: if f has made it to this point, we're sure
1775 # bother with that: if f has made it to this point, we're sure
1776 # it's in the dirstate.
1776 # it's in the dirstate.
1777 deleted.append(f)
1777 deleted.append(f)
1778
1778
1779 return modified, deleted, fixup
1779 return modified, deleted, fixup
1780
1780
1781 def _poststatusfixup(self, status, fixup):
1781 def _poststatusfixup(self, status, fixup):
1782 """update dirstate for files that are actually clean"""
1782 """update dirstate for files that are actually clean"""
1783 poststatus = self._repo.postdsstatus()
1783 poststatus = self._repo.postdsstatus()
1784 if fixup or poststatus:
1784 if fixup or poststatus:
1785 try:
1785 try:
1786 oldid = self._repo.dirstate.identity()
1786 oldid = self._repo.dirstate.identity()
1787
1787
1788 # updating the dirstate is optional
1788 # updating the dirstate is optional
1789 # so we don't wait on the lock
1789 # so we don't wait on the lock
1790 # wlock can invalidate the dirstate, so cache normal _after_
1790 # wlock can invalidate the dirstate, so cache normal _after_
1791 # taking the lock
1791 # taking the lock
1792 with self._repo.wlock(False):
1792 with self._repo.wlock(False):
1793 if self._repo.dirstate.identity() == oldid:
1793 if self._repo.dirstate.identity() == oldid:
1794 if fixup:
1794 if fixup:
1795 normal = self._repo.dirstate.normal
1795 normal = self._repo.dirstate.normal
1796 for f in fixup:
1796 for f in fixup:
1797 normal(f)
1797 normal(f)
1798 # write changes out explicitly, because nesting
1798 # write changes out explicitly, because nesting
1799 # wlock at runtime may prevent 'wlock.release()'
1799 # wlock at runtime may prevent 'wlock.release()'
1800 # after this block from doing so for subsequent
1800 # after this block from doing so for subsequent
1801 # changing files
1801 # changing files
1802 tr = self._repo.currenttransaction()
1802 tr = self._repo.currenttransaction()
1803 self._repo.dirstate.write(tr)
1803 self._repo.dirstate.write(tr)
1804
1804
1805 if poststatus:
1805 if poststatus:
1806 for ps in poststatus:
1806 for ps in poststatus:
1807 ps(self, status)
1807 ps(self, status)
1808 else:
1808 else:
1809 # in this case, writing changes out breaks
1809 # in this case, writing changes out breaks
1810 # consistency, because .hg/dirstate was
1810 # consistency, because .hg/dirstate was
1811 # already changed simultaneously after last
1811 # already changed simultaneously after last
1812 # caching (see also issue5584 for detail)
1812 # caching (see also issue5584 for detail)
1813 self._repo.ui.debug(
1813 self._repo.ui.debug(
1814 b'skip updating dirstate: identity mismatch\n'
1814 b'skip updating dirstate: identity mismatch\n'
1815 )
1815 )
1816 except error.LockError:
1816 except error.LockError:
1817 pass
1817 pass
1818 finally:
1818 finally:
1819 # Even if the wlock couldn't be grabbed, clear out the list.
1819 # Even if the wlock couldn't be grabbed, clear out the list.
1820 self._repo.clearpostdsstatus()
1820 self._repo.clearpostdsstatus()
1821
1821
1822 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1822 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1823 '''Gets the status from the dirstate -- internal use only.'''
1823 '''Gets the status from the dirstate -- internal use only.'''
1824 subrepos = []
1824 subrepos = []
1825 if b'.hgsub' in self:
1825 if b'.hgsub' in self:
1826 subrepos = sorted(self.substate)
1826 subrepos = sorted(self.substate)
1827 cmp, s = self._repo.dirstate.status(
1827 cmp, s = self._repo.dirstate.status(
1828 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1828 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1829 )
1829 )
1830
1830
1831 # check for any possibly clean files
1831 # check for any possibly clean files
1832 fixup = []
1832 fixup = []
1833 if cmp:
1833 if cmp:
1834 modified2, deleted2, fixup = self._checklookup(cmp)
1834 modified2, deleted2, fixup = self._checklookup(cmp)
1835 s.modified.extend(modified2)
1835 s.modified.extend(modified2)
1836 s.deleted.extend(deleted2)
1836 s.deleted.extend(deleted2)
1837
1837
1838 if fixup and clean:
1838 if fixup and clean:
1839 s.clean.extend(fixup)
1839 s.clean.extend(fixup)
1840
1840
1841 self._poststatusfixup(s, fixup)
1841 self._poststatusfixup(s, fixup)
1842
1842
1843 if match.always():
1843 if match.always():
1844 # cache for performance
1844 # cache for performance
1845 if s.unknown or s.ignored or s.clean:
1845 if s.unknown or s.ignored or s.clean:
1846 # "_status" is cached with list*=False in the normal route
1846 # "_status" is cached with list*=False in the normal route
1847 self._status = scmutil.status(
1847 self._status = scmutil.status(
1848 s.modified, s.added, s.removed, s.deleted, [], [], []
1848 s.modified, s.added, s.removed, s.deleted, [], [], []
1849 )
1849 )
1850 else:
1850 else:
1851 self._status = s
1851 self._status = s
1852
1852
1853 return s
1853 return s
1854
1854
1855 @propertycache
1855 @propertycache
1856 def _copies(self):
1856 def _copies(self):
1857 p1copies = {}
1857 p1copies = {}
1858 p2copies = {}
1858 p2copies = {}
1859 parents = self._repo.dirstate.parents()
1859 parents = self._repo.dirstate.parents()
1860 p1manifest = self._repo[parents[0]].manifest()
1860 p1manifest = self._repo[parents[0]].manifest()
1861 p2manifest = self._repo[parents[1]].manifest()
1861 p2manifest = self._repo[parents[1]].manifest()
1862 changedset = set(self.added()) | set(self.modified())
1862 changedset = set(self.added()) | set(self.modified())
1863 narrowmatch = self._repo.narrowmatch()
1863 narrowmatch = self._repo.narrowmatch()
1864 for dst, src in self._repo.dirstate.copies().items():
1864 for dst, src in self._repo.dirstate.copies().items():
1865 if dst not in changedset or not narrowmatch(dst):
1865 if dst not in changedset or not narrowmatch(dst):
1866 continue
1866 continue
1867 if src in p1manifest:
1867 if src in p1manifest:
1868 p1copies[dst] = src
1868 p1copies[dst] = src
1869 elif src in p2manifest:
1869 elif src in p2manifest:
1870 p2copies[dst] = src
1870 p2copies[dst] = src
1871 return p1copies, p2copies
1871 return p1copies, p2copies
1872
1872
1873 @propertycache
1873 @propertycache
1874 def _manifest(self):
1874 def _manifest(self):
1875 """generate a manifest corresponding to the values in self._status
1875 """generate a manifest corresponding to the values in self._status
1876
1876
1877 This reuse the file nodeid from parent, but we use special node
1877 This reuse the file nodeid from parent, but we use special node
1878 identifiers for added and modified files. This is used by manifests
1878 identifiers for added and modified files. This is used by manifests
1879 merge to see that files are different and by update logic to avoid
1879 merge to see that files are different and by update logic to avoid
1880 deleting newly added files.
1880 deleting newly added files.
1881 """
1881 """
1882 return self._buildstatusmanifest(self._status)
1882 return self._buildstatusmanifest(self._status)
1883
1883
1884 def _buildstatusmanifest(self, status):
1884 def _buildstatusmanifest(self, status):
1885 """Builds a manifest that includes the given status results."""
1885 """Builds a manifest that includes the given status results."""
1886 parents = self.parents()
1886 parents = self.parents()
1887
1887
1888 man = parents[0].manifest().copy()
1888 man = parents[0].manifest().copy()
1889
1889
1890 ff = self._flagfunc
1890 ff = self._flagfunc
1891 for i, l in (
1891 for i, l in (
1892 (addednodeid, status.added),
1892 (addednodeid, status.added),
1893 (modifiednodeid, status.modified),
1893 (modifiednodeid, status.modified),
1894 ):
1894 ):
1895 for f in l:
1895 for f in l:
1896 man[f] = i
1896 man[f] = i
1897 try:
1897 try:
1898 man.setflag(f, ff(f))
1898 man.setflag(f, ff(f))
1899 except OSError:
1899 except OSError:
1900 pass
1900 pass
1901
1901
1902 for f in status.deleted + status.removed:
1902 for f in status.deleted + status.removed:
1903 if f in man:
1903 if f in man:
1904 del man[f]
1904 del man[f]
1905
1905
1906 return man
1906 return man
1907
1907
1908 def _buildstatus(
1908 def _buildstatus(
1909 self, other, s, match, listignored, listclean, listunknown
1909 self, other, s, match, listignored, listclean, listunknown
1910 ):
1910 ):
1911 """build a status with respect to another context
1911 """build a status with respect to another context
1912
1912
1913 This includes logic for maintaining the fast path of status when
1913 This includes logic for maintaining the fast path of status when
1914 comparing the working directory against its parent, which is to skip
1914 comparing the working directory against its parent, which is to skip
1915 building a new manifest if self (working directory) is not comparing
1915 building a new manifest if self (working directory) is not comparing
1916 against its parent (repo['.']).
1916 against its parent (repo['.']).
1917 """
1917 """
1918 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1918 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1919 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1919 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1920 # might have accidentally ended up with the entire contents of the file
1920 # might have accidentally ended up with the entire contents of the file
1921 # they are supposed to be linking to.
1921 # they are supposed to be linking to.
1922 s.modified[:] = self._filtersuspectsymlink(s.modified)
1922 s.modified[:] = self._filtersuspectsymlink(s.modified)
1923 if other != self._repo[b'.']:
1923 if other != self._repo[b'.']:
1924 s = super(workingctx, self)._buildstatus(
1924 s = super(workingctx, self)._buildstatus(
1925 other, s, match, listignored, listclean, listunknown
1925 other, s, match, listignored, listclean, listunknown
1926 )
1926 )
1927 return s
1927 return s
1928
1928
1929 def _matchstatus(self, other, match):
1929 def _matchstatus(self, other, match):
1930 """override the match method with a filter for directory patterns
1930 """override the match method with a filter for directory patterns
1931
1931
1932 We use inheritance to customize the match.bad method only in cases of
1932 We use inheritance to customize the match.bad method only in cases of
1933 workingctx since it belongs only to the working directory when
1933 workingctx since it belongs only to the working directory when
1934 comparing against the parent changeset.
1934 comparing against the parent changeset.
1935
1935
1936 If we aren't comparing against the working directory's parent, then we
1936 If we aren't comparing against the working directory's parent, then we
1937 just use the default match object sent to us.
1937 just use the default match object sent to us.
1938 """
1938 """
1939 if other != self._repo[b'.']:
1939 if other != self._repo[b'.']:
1940
1940
1941 def bad(f, msg):
1941 def bad(f, msg):
1942 # 'f' may be a directory pattern from 'match.files()',
1942 # 'f' may be a directory pattern from 'match.files()',
1943 # so 'f not in ctx1' is not enough
1943 # so 'f not in ctx1' is not enough
1944 if f not in other and not other.hasdir(f):
1944 if f not in other and not other.hasdir(f):
1945 self._repo.ui.warn(
1945 self._repo.ui.warn(
1946 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1946 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1947 )
1947 )
1948
1948
1949 match.bad = bad
1949 match.bad = bad
1950 return match
1950 return match
1951
1951
1952 def walk(self, match):
1952 def walk(self, match):
1953 '''Generates matching file names.'''
1953 '''Generates matching file names.'''
1954 return sorted(
1954 return sorted(
1955 self._repo.dirstate.walk(
1955 self._repo.dirstate.walk(
1956 self._repo.narrowmatch(match),
1956 self._repo.narrowmatch(match),
1957 subrepos=sorted(self.substate),
1957 subrepos=sorted(self.substate),
1958 unknown=True,
1958 unknown=True,
1959 ignored=False,
1959 ignored=False,
1960 )
1960 )
1961 )
1961 )
1962
1962
1963 def matches(self, match):
1963 def matches(self, match):
1964 match = self._repo.narrowmatch(match)
1964 match = self._repo.narrowmatch(match)
1965 ds = self._repo.dirstate
1965 ds = self._repo.dirstate
1966 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1966 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1967
1967
1968 def markcommitted(self, node):
1968 def markcommitted(self, node):
1969 with self._repo.dirstate.parentchange():
1969 with self._repo.dirstate.parentchange():
1970 for f in self.modified() + self.added():
1970 for f in self.modified() + self.added():
1971 self._repo.dirstate.normal(f)
1971 self._repo.dirstate.normal(f)
1972 for f in self.removed():
1972 for f in self.removed():
1973 self._repo.dirstate.drop(f)
1973 self._repo.dirstate.drop(f)
1974 self._repo.dirstate.setparents(node)
1974 self._repo.dirstate.setparents(node)
1975
1975
1976 # write changes out explicitly, because nesting wlock at
1976 # write changes out explicitly, because nesting wlock at
1977 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1977 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1978 # from immediately doing so for subsequent changing files
1978 # from immediately doing so for subsequent changing files
1979 self._repo.dirstate.write(self._repo.currenttransaction())
1979 self._repo.dirstate.write(self._repo.currenttransaction())
1980
1980
1981 sparse.aftercommit(self._repo, node)
1981 sparse.aftercommit(self._repo, node)
1982
1982
1983
1983
1984 class committablefilectx(basefilectx):
1984 class committablefilectx(basefilectx):
1985 """A committablefilectx provides common functionality for a file context
1985 """A committablefilectx provides common functionality for a file context
1986 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1986 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1987
1987
1988 def __init__(self, repo, path, filelog=None, ctx=None):
1988 def __init__(self, repo, path, filelog=None, ctx=None):
1989 self._repo = repo
1989 self._repo = repo
1990 self._path = path
1990 self._path = path
1991 self._changeid = None
1991 self._changeid = None
1992 self._filerev = self._filenode = None
1992 self._filerev = self._filenode = None
1993
1993
1994 if filelog is not None:
1994 if filelog is not None:
1995 self._filelog = filelog
1995 self._filelog = filelog
1996 if ctx:
1996 if ctx:
1997 self._changectx = ctx
1997 self._changectx = ctx
1998
1998
1999 def __nonzero__(self):
1999 def __nonzero__(self):
2000 return True
2000 return True
2001
2001
2002 __bool__ = __nonzero__
2002 __bool__ = __nonzero__
2003
2003
2004 def linkrev(self):
2004 def linkrev(self):
2005 # linked to self._changectx no matter if file is modified or not
2005 # linked to self._changectx no matter if file is modified or not
2006 return self.rev()
2006 return self.rev()
2007
2007
2008 def renamed(self):
2008 def renamed(self):
2009 path = self.copysource()
2009 path = self.copysource()
2010 if not path:
2010 if not path:
2011 return None
2011 return None
2012 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2012 return path, self._changectx._parents[0]._manifest.get(path, nullid)
2013
2013
2014 def parents(self):
2014 def parents(self):
2015 '''return parent filectxs, following copies if necessary'''
2015 '''return parent filectxs, following copies if necessary'''
2016
2016
2017 def filenode(ctx, path):
2017 def filenode(ctx, path):
2018 return ctx._manifest.get(path, nullid)
2018 return ctx._manifest.get(path, nullid)
2019
2019
2020 path = self._path
2020 path = self._path
2021 fl = self._filelog
2021 fl = self._filelog
2022 pcl = self._changectx._parents
2022 pcl = self._changectx._parents
2023 renamed = self.renamed()
2023 renamed = self.renamed()
2024
2024
2025 if renamed:
2025 if renamed:
2026 pl = [renamed + (None,)]
2026 pl = [renamed + (None,)]
2027 else:
2027 else:
2028 pl = [(path, filenode(pcl[0], path), fl)]
2028 pl = [(path, filenode(pcl[0], path), fl)]
2029
2029
2030 for pc in pcl[1:]:
2030 for pc in pcl[1:]:
2031 pl.append((path, filenode(pc, path), fl))
2031 pl.append((path, filenode(pc, path), fl))
2032
2032
2033 return [
2033 return [
2034 self._parentfilectx(p, fileid=n, filelog=l)
2034 self._parentfilectx(p, fileid=n, filelog=l)
2035 for p, n, l in pl
2035 for p, n, l in pl
2036 if n != nullid
2036 if n != nullid
2037 ]
2037 ]
2038
2038
2039 def children(self):
2039 def children(self):
2040 return []
2040 return []
2041
2041
2042
2042
2043 class workingfilectx(committablefilectx):
2043 class workingfilectx(committablefilectx):
2044 """A workingfilectx object makes access to data related to a particular
2044 """A workingfilectx object makes access to data related to a particular
2045 file in the working directory convenient."""
2045 file in the working directory convenient."""
2046
2046
2047 def __init__(self, repo, path, filelog=None, workingctx=None):
2047 def __init__(self, repo, path, filelog=None, workingctx=None):
2048 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2048 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2049
2049
2050 @propertycache
2050 @propertycache
2051 def _changectx(self):
2051 def _changectx(self):
2052 return workingctx(self._repo)
2052 return workingctx(self._repo)
2053
2053
2054 def data(self):
2054 def data(self):
2055 return self._repo.wread(self._path)
2055 return self._repo.wread(self._path)
2056
2056
2057 def copysource(self):
2057 def copysource(self):
2058 return self._repo.dirstate.copied(self._path)
2058 return self._repo.dirstate.copied(self._path)
2059
2059
2060 def size(self):
2060 def size(self):
2061 return self._repo.wvfs.lstat(self._path).st_size
2061 return self._repo.wvfs.lstat(self._path).st_size
2062
2062
2063 def lstat(self):
2063 def lstat(self):
2064 return self._repo.wvfs.lstat(self._path)
2064 return self._repo.wvfs.lstat(self._path)
2065
2065
2066 def date(self):
2066 def date(self):
2067 t, tz = self._changectx.date()
2067 t, tz = self._changectx.date()
2068 try:
2068 try:
2069 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2069 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2070 except OSError as err:
2070 except OSError as err:
2071 if err.errno != errno.ENOENT:
2071 if err.errno != errno.ENOENT:
2072 raise
2072 raise
2073 return (t, tz)
2073 return (t, tz)
2074
2074
2075 def exists(self):
2075 def exists(self):
2076 return self._repo.wvfs.exists(self._path)
2076 return self._repo.wvfs.exists(self._path)
2077
2077
2078 def lexists(self):
2078 def lexists(self):
2079 return self._repo.wvfs.lexists(self._path)
2079 return self._repo.wvfs.lexists(self._path)
2080
2080
2081 def audit(self):
2081 def audit(self):
2082 return self._repo.wvfs.audit(self._path)
2082 return self._repo.wvfs.audit(self._path)
2083
2083
2084 def cmp(self, fctx):
2084 def cmp(self, fctx):
2085 """compare with other file context
2085 """compare with other file context
2086
2086
2087 returns True if different than fctx.
2087 returns True if different than fctx.
2088 """
2088 """
2089 # fctx should be a filectx (not a workingfilectx)
2089 # fctx should be a filectx (not a workingfilectx)
2090 # invert comparison to reuse the same code path
2090 # invert comparison to reuse the same code path
2091 return fctx.cmp(self)
2091 return fctx.cmp(self)
2092
2092
2093 def remove(self, ignoremissing=False):
2093 def remove(self, ignoremissing=False):
2094 """wraps unlink for a repo's working directory"""
2094 """wraps unlink for a repo's working directory"""
2095 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2095 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2096 self._repo.wvfs.unlinkpath(
2096 self._repo.wvfs.unlinkpath(
2097 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2097 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2098 )
2098 )
2099
2099
2100 def write(self, data, flags, backgroundclose=False, **kwargs):
2100 def write(self, data, flags, backgroundclose=False, **kwargs):
2101 """wraps repo.wwrite"""
2101 """wraps repo.wwrite"""
2102 return self._repo.wwrite(
2102 return self._repo.wwrite(
2103 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2103 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2104 )
2104 )
2105
2105
2106 def markcopied(self, src):
2106 def markcopied(self, src):
2107 """marks this file a copy of `src`"""
2107 """marks this file a copy of `src`"""
2108 self._repo.dirstate.copy(src, self._path)
2108 self._repo.dirstate.copy(src, self._path)
2109
2109
2110 def clearunknown(self):
2110 def clearunknown(self):
2111 """Removes conflicting items in the working directory so that
2111 """Removes conflicting items in the working directory so that
2112 ``write()`` can be called successfully.
2112 ``write()`` can be called successfully.
2113 """
2113 """
2114 wvfs = self._repo.wvfs
2114 wvfs = self._repo.wvfs
2115 f = self._path
2115 f = self._path
2116 wvfs.audit(f)
2116 wvfs.audit(f)
2117 if self._repo.ui.configbool(
2117 if self._repo.ui.configbool(
2118 b'experimental', b'merge.checkpathconflicts'
2118 b'experimental', b'merge.checkpathconflicts'
2119 ):
2119 ):
2120 # remove files under the directory as they should already be
2120 # remove files under the directory as they should already be
2121 # warned and backed up
2121 # warned and backed up
2122 if wvfs.isdir(f) and not wvfs.islink(f):
2122 if wvfs.isdir(f) and not wvfs.islink(f):
2123 wvfs.rmtree(f, forcibly=True)
2123 wvfs.rmtree(f, forcibly=True)
2124 for p in reversed(list(pathutil.finddirs(f))):
2124 for p in reversed(list(pathutil.finddirs(f))):
2125 if wvfs.isfileorlink(p):
2125 if wvfs.isfileorlink(p):
2126 wvfs.unlink(p)
2126 wvfs.unlink(p)
2127 break
2127 break
2128 else:
2128 else:
2129 # don't remove files if path conflicts are not processed
2129 # don't remove files if path conflicts are not processed
2130 if wvfs.isdir(f) and not wvfs.islink(f):
2130 if wvfs.isdir(f) and not wvfs.islink(f):
2131 wvfs.removedirs(f)
2131 wvfs.removedirs(f)
2132
2132
2133 def setflags(self, l, x):
2133 def setflags(self, l, x):
2134 self._repo.wvfs.setflags(self._path, l, x)
2134 self._repo.wvfs.setflags(self._path, l, x)
2135
2135
2136
2136
2137 class overlayworkingctx(committablectx):
2137 class overlayworkingctx(committablectx):
2138 """Wraps another mutable context with a write-back cache that can be
2138 """Wraps another mutable context with a write-back cache that can be
2139 converted into a commit context.
2139 converted into a commit context.
2140
2140
2141 self._cache[path] maps to a dict with keys: {
2141 self._cache[path] maps to a dict with keys: {
2142 'exists': bool?
2142 'exists': bool?
2143 'date': date?
2143 'date': date?
2144 'data': str?
2144 'data': str?
2145 'flags': str?
2145 'flags': str?
2146 'copied': str? (path or None)
2146 'copied': str? (path or None)
2147 }
2147 }
2148 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2148 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2149 is `False`, the file was deleted.
2149 is `False`, the file was deleted.
2150 """
2150 """
2151
2151
2152 def __init__(self, repo):
2152 def __init__(self, repo):
2153 super(overlayworkingctx, self).__init__(repo)
2153 super(overlayworkingctx, self).__init__(repo)
2154 self.clean()
2154 self.clean()
2155
2155
2156 def setbase(self, wrappedctx):
2156 def setbase(self, wrappedctx):
2157 self._wrappedctx = wrappedctx
2157 self._wrappedctx = wrappedctx
2158 self._parents = [wrappedctx]
2158 self._parents = [wrappedctx]
2159 # Drop old manifest cache as it is now out of date.
2159 # Drop old manifest cache as it is now out of date.
2160 # This is necessary when, e.g., rebasing several nodes with one
2160 # This is necessary when, e.g., rebasing several nodes with one
2161 # ``overlayworkingctx`` (e.g. with --collapse).
2161 # ``overlayworkingctx`` (e.g. with --collapse).
2162 util.clearcachedproperty(self, b'_manifest')
2162 util.clearcachedproperty(self, b'_manifest')
2163
2163
2164 def data(self, path):
2164 def data(self, path):
2165 if self.isdirty(path):
2165 if self.isdirty(path):
2166 if self._cache[path][b'exists']:
2166 if self._cache[path][b'exists']:
2167 if self._cache[path][b'data'] is not None:
2167 if self._cache[path][b'data'] is not None:
2168 return self._cache[path][b'data']
2168 return self._cache[path][b'data']
2169 else:
2169 else:
2170 # Must fallback here, too, because we only set flags.
2170 # Must fallback here, too, because we only set flags.
2171 return self._wrappedctx[path].data()
2171 return self._wrappedctx[path].data()
2172 else:
2172 else:
2173 raise error.ProgrammingError(
2173 raise error.ProgrammingError(
2174 b"No such file or directory: %s" % path
2174 b"No such file or directory: %s" % path
2175 )
2175 )
2176 else:
2176 else:
2177 return self._wrappedctx[path].data()
2177 return self._wrappedctx[path].data()
2178
2178
2179 @propertycache
2179 @propertycache
2180 def _manifest(self):
2180 def _manifest(self):
2181 parents = self.parents()
2181 parents = self.parents()
2182 man = parents[0].manifest().copy()
2182 man = parents[0].manifest().copy()
2183
2183
2184 flag = self._flagfunc
2184 flag = self._flagfunc
2185 for path in self.added():
2185 for path in self.added():
2186 man[path] = addednodeid
2186 man[path] = addednodeid
2187 man.setflag(path, flag(path))
2187 man.setflag(path, flag(path))
2188 for path in self.modified():
2188 for path in self.modified():
2189 man[path] = modifiednodeid
2189 man[path] = modifiednodeid
2190 man.setflag(path, flag(path))
2190 man.setflag(path, flag(path))
2191 for path in self.removed():
2191 for path in self.removed():
2192 del man[path]
2192 del man[path]
2193 return man
2193 return man
2194
2194
2195 @propertycache
2195 @propertycache
2196 def _flagfunc(self):
2196 def _flagfunc(self):
2197 def f(path):
2197 def f(path):
2198 return self._cache[path][b'flags']
2198 return self._cache[path][b'flags']
2199
2199
2200 return f
2200 return f
2201
2201
2202 def files(self):
2202 def files(self):
2203 return sorted(self.added() + self.modified() + self.removed())
2203 return sorted(self.added() + self.modified() + self.removed())
2204
2204
2205 def modified(self):
2205 def modified(self):
2206 return [
2206 return [
2207 f
2207 f
2208 for f in self._cache.keys()
2208 for f in self._cache.keys()
2209 if self._cache[f][b'exists'] and self._existsinparent(f)
2209 if self._cache[f][b'exists'] and self._existsinparent(f)
2210 ]
2210 ]
2211
2211
2212 def added(self):
2212 def added(self):
2213 return [
2213 return [
2214 f
2214 f
2215 for f in self._cache.keys()
2215 for f in self._cache.keys()
2216 if self._cache[f][b'exists'] and not self._existsinparent(f)
2216 if self._cache[f][b'exists'] and not self._existsinparent(f)
2217 ]
2217 ]
2218
2218
2219 def removed(self):
2219 def removed(self):
2220 return [
2220 return [
2221 f
2221 f
2222 for f in self._cache.keys()
2222 for f in self._cache.keys()
2223 if not self._cache[f][b'exists'] and self._existsinparent(f)
2223 if not self._cache[f][b'exists'] and self._existsinparent(f)
2224 ]
2224 ]
2225
2225
2226 def p1copies(self):
2226 def p1copies(self):
2227 copies = self._repo._wrappedctx.p1copies().copy()
2227 copies = {}
2228 narrowmatch = self._repo.narrowmatch()
2228 narrowmatch = self._repo.narrowmatch()
2229 for f in self._cache.keys():
2229 for f in self._cache.keys():
2230 if not narrowmatch(f):
2230 if not narrowmatch(f):
2231 continue
2231 continue
2232 copies.pop(f, None) # delete if it exists
2232 copies.pop(f, None) # delete if it exists
2233 source = self._cache[f][b'copied']
2233 source = self._cache[f][b'copied']
2234 if source:
2234 if source:
2235 copies[f] = source
2235 copies[f] = source
2236 return copies
2236 return copies
2237
2237
2238 def p2copies(self):
2238 def p2copies(self):
2239 copies = self._repo._wrappedctx.p2copies().copy()
2239 copies = {}
2240 narrowmatch = self._repo.narrowmatch()
2240 narrowmatch = self._repo.narrowmatch()
2241 for f in self._cache.keys():
2241 for f in self._cache.keys():
2242 if not narrowmatch(f):
2242 if not narrowmatch(f):
2243 continue
2243 continue
2244 copies.pop(f, None) # delete if it exists
2244 copies.pop(f, None) # delete if it exists
2245 source = self._cache[f][b'copied']
2245 source = self._cache[f][b'copied']
2246 if source:
2246 if source:
2247 copies[f] = source
2247 copies[f] = source
2248 return copies
2248 return copies
2249
2249
2250 def isinmemory(self):
2250 def isinmemory(self):
2251 return True
2251 return True
2252
2252
2253 def filedate(self, path):
2253 def filedate(self, path):
2254 if self.isdirty(path):
2254 if self.isdirty(path):
2255 return self._cache[path][b'date']
2255 return self._cache[path][b'date']
2256 else:
2256 else:
2257 return self._wrappedctx[path].date()
2257 return self._wrappedctx[path].date()
2258
2258
2259 def markcopied(self, path, origin):
2259 def markcopied(self, path, origin):
2260 self._markdirty(
2260 self._markdirty(
2261 path,
2261 path,
2262 exists=True,
2262 exists=True,
2263 date=self.filedate(path),
2263 date=self.filedate(path),
2264 flags=self.flags(path),
2264 flags=self.flags(path),
2265 copied=origin,
2265 copied=origin,
2266 )
2266 )
2267
2267
2268 def copydata(self, path):
2268 def copydata(self, path):
2269 if self.isdirty(path):
2269 if self.isdirty(path):
2270 return self._cache[path][b'copied']
2270 return self._cache[path][b'copied']
2271 else:
2271 else:
2272 return None
2272 return None
2273
2273
2274 def flags(self, path):
2274 def flags(self, path):
2275 if self.isdirty(path):
2275 if self.isdirty(path):
2276 if self._cache[path][b'exists']:
2276 if self._cache[path][b'exists']:
2277 return self._cache[path][b'flags']
2277 return self._cache[path][b'flags']
2278 else:
2278 else:
2279 raise error.ProgrammingError(
2279 raise error.ProgrammingError(
2280 b"No such file or directory: %s" % self._path
2280 b"No such file or directory: %s" % self._path
2281 )
2281 )
2282 else:
2282 else:
2283 return self._wrappedctx[path].flags()
2283 return self._wrappedctx[path].flags()
2284
2284
2285 def __contains__(self, key):
2285 def __contains__(self, key):
2286 if key in self._cache:
2286 if key in self._cache:
2287 return self._cache[key][b'exists']
2287 return self._cache[key][b'exists']
2288 return key in self.p1()
2288 return key in self.p1()
2289
2289
2290 def _existsinparent(self, path):
2290 def _existsinparent(self, path):
2291 try:
2291 try:
2292 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2292 # ``commitctx` raises a ``ManifestLookupError`` if a path does not
2293 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2293 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2294 # with an ``exists()`` function.
2294 # with an ``exists()`` function.
2295 self._wrappedctx[path]
2295 self._wrappedctx[path]
2296 return True
2296 return True
2297 except error.ManifestLookupError:
2297 except error.ManifestLookupError:
2298 return False
2298 return False
2299
2299
2300 def _auditconflicts(self, path):
2300 def _auditconflicts(self, path):
2301 """Replicates conflict checks done by wvfs.write().
2301 """Replicates conflict checks done by wvfs.write().
2302
2302
2303 Since we never write to the filesystem and never call `applyupdates` in
2303 Since we never write to the filesystem and never call `applyupdates` in
2304 IMM, we'll never check that a path is actually writable -- e.g., because
2304 IMM, we'll never check that a path is actually writable -- e.g., because
2305 it adds `a/foo`, but `a` is actually a file in the other commit.
2305 it adds `a/foo`, but `a` is actually a file in the other commit.
2306 """
2306 """
2307
2307
2308 def fail(path, component):
2308 def fail(path, component):
2309 # p1() is the base and we're receiving "writes" for p2()'s
2309 # p1() is the base and we're receiving "writes" for p2()'s
2310 # files.
2310 # files.
2311 if b'l' in self.p1()[component].flags():
2311 if b'l' in self.p1()[component].flags():
2312 raise error.Abort(
2312 raise error.Abort(
2313 b"error: %s conflicts with symlink %s "
2313 b"error: %s conflicts with symlink %s "
2314 b"in %d." % (path, component, self.p1().rev())
2314 b"in %d." % (path, component, self.p1().rev())
2315 )
2315 )
2316 else:
2316 else:
2317 raise error.Abort(
2317 raise error.Abort(
2318 b"error: '%s' conflicts with file '%s' in "
2318 b"error: '%s' conflicts with file '%s' in "
2319 b"%d." % (path, component, self.p1().rev())
2319 b"%d." % (path, component, self.p1().rev())
2320 )
2320 )
2321
2321
2322 # Test that each new directory to be created to write this path from p2
2322 # Test that each new directory to be created to write this path from p2
2323 # is not a file in p1.
2323 # is not a file in p1.
2324 components = path.split(b'/')
2324 components = path.split(b'/')
2325 for i in pycompat.xrange(len(components)):
2325 for i in pycompat.xrange(len(components)):
2326 component = b"/".join(components[0:i])
2326 component = b"/".join(components[0:i])
2327 if component in self:
2327 if component in self:
2328 fail(path, component)
2328 fail(path, component)
2329
2329
2330 # Test the other direction -- that this path from p2 isn't a directory
2330 # Test the other direction -- that this path from p2 isn't a directory
2331 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2331 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2332 match = self.match([path], default=b'path')
2332 match = self.match([path], default=b'path')
2333 matches = self.p1().manifest().matches(match)
2333 matches = self.p1().manifest().matches(match)
2334 mfiles = matches.keys()
2334 mfiles = matches.keys()
2335 if len(mfiles) > 0:
2335 if len(mfiles) > 0:
2336 if len(mfiles) == 1 and mfiles[0] == path:
2336 if len(mfiles) == 1 and mfiles[0] == path:
2337 return
2337 return
2338 # omit the files which are deleted in current IMM wctx
2338 # omit the files which are deleted in current IMM wctx
2339 mfiles = [m for m in mfiles if m in self]
2339 mfiles = [m for m in mfiles if m in self]
2340 if not mfiles:
2340 if not mfiles:
2341 return
2341 return
2342 raise error.Abort(
2342 raise error.Abort(
2343 b"error: file '%s' cannot be written because "
2343 b"error: file '%s' cannot be written because "
2344 b" '%s/' is a directory in %s (containing %d "
2344 b" '%s/' is a directory in %s (containing %d "
2345 b"entries: %s)"
2345 b"entries: %s)"
2346 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2346 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2347 )
2347 )
2348
2348
2349 def write(self, path, data, flags=b'', **kwargs):
2349 def write(self, path, data, flags=b'', **kwargs):
2350 if data is None:
2350 if data is None:
2351 raise error.ProgrammingError(b"data must be non-None")
2351 raise error.ProgrammingError(b"data must be non-None")
2352 self._auditconflicts(path)
2352 self._auditconflicts(path)
2353 self._markdirty(
2353 self._markdirty(
2354 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2354 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2355 )
2355 )
2356
2356
2357 def setflags(self, path, l, x):
2357 def setflags(self, path, l, x):
2358 flag = b''
2358 flag = b''
2359 if l:
2359 if l:
2360 flag = b'l'
2360 flag = b'l'
2361 elif x:
2361 elif x:
2362 flag = b'x'
2362 flag = b'x'
2363 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2363 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2364
2364
2365 def remove(self, path):
2365 def remove(self, path):
2366 self._markdirty(path, exists=False)
2366 self._markdirty(path, exists=False)
2367
2367
2368 def exists(self, path):
2368 def exists(self, path):
2369 """exists behaves like `lexists`, but needs to follow symlinks and
2369 """exists behaves like `lexists`, but needs to follow symlinks and
2370 return False if they are broken.
2370 return False if they are broken.
2371 """
2371 """
2372 if self.isdirty(path):
2372 if self.isdirty(path):
2373 # If this path exists and is a symlink, "follow" it by calling
2373 # If this path exists and is a symlink, "follow" it by calling
2374 # exists on the destination path.
2374 # exists on the destination path.
2375 if (
2375 if (
2376 self._cache[path][b'exists']
2376 self._cache[path][b'exists']
2377 and b'l' in self._cache[path][b'flags']
2377 and b'l' in self._cache[path][b'flags']
2378 ):
2378 ):
2379 return self.exists(self._cache[path][b'data'].strip())
2379 return self.exists(self._cache[path][b'data'].strip())
2380 else:
2380 else:
2381 return self._cache[path][b'exists']
2381 return self._cache[path][b'exists']
2382
2382
2383 return self._existsinparent(path)
2383 return self._existsinparent(path)
2384
2384
2385 def lexists(self, path):
2385 def lexists(self, path):
2386 """lexists returns True if the path exists"""
2386 """lexists returns True if the path exists"""
2387 if self.isdirty(path):
2387 if self.isdirty(path):
2388 return self._cache[path][b'exists']
2388 return self._cache[path][b'exists']
2389
2389
2390 return self._existsinparent(path)
2390 return self._existsinparent(path)
2391
2391
2392 def size(self, path):
2392 def size(self, path):
2393 if self.isdirty(path):
2393 if self.isdirty(path):
2394 if self._cache[path][b'exists']:
2394 if self._cache[path][b'exists']:
2395 return len(self._cache[path][b'data'])
2395 return len(self._cache[path][b'data'])
2396 else:
2396 else:
2397 raise error.ProgrammingError(
2397 raise error.ProgrammingError(
2398 b"No such file or directory: %s" % self._path
2398 b"No such file or directory: %s" % self._path
2399 )
2399 )
2400 return self._wrappedctx[path].size()
2400 return self._wrappedctx[path].size()
2401
2401
2402 def tomemctx(
2402 def tomemctx(
2403 self,
2403 self,
2404 text,
2404 text,
2405 branch=None,
2405 branch=None,
2406 extra=None,
2406 extra=None,
2407 date=None,
2407 date=None,
2408 parents=None,
2408 parents=None,
2409 user=None,
2409 user=None,
2410 editor=None,
2410 editor=None,
2411 ):
2411 ):
2412 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2412 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2413 committed.
2413 committed.
2414
2414
2415 ``text`` is the commit message.
2415 ``text`` is the commit message.
2416 ``parents`` (optional) are rev numbers.
2416 ``parents`` (optional) are rev numbers.
2417 """
2417 """
2418 # Default parents to the wrapped contexts' if not passed.
2418 # Default parents to the wrapped contexts' if not passed.
2419 if parents is None:
2419 if parents is None:
2420 parents = self._wrappedctx.parents()
2420 parents = self._wrappedctx.parents()
2421 if len(parents) == 1:
2421 if len(parents) == 1:
2422 parents = (parents[0], None)
2422 parents = (parents[0], None)
2423
2423
2424 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2424 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2425 if parents[1] is None:
2425 if parents[1] is None:
2426 parents = (self._repo[parents[0]], None)
2426 parents = (self._repo[parents[0]], None)
2427 else:
2427 else:
2428 parents = (self._repo[parents[0]], self._repo[parents[1]])
2428 parents = (self._repo[parents[0]], self._repo[parents[1]])
2429
2429
2430 files = self.files()
2430 files = self.files()
2431
2431
2432 def getfile(repo, memctx, path):
2432 def getfile(repo, memctx, path):
2433 if self._cache[path][b'exists']:
2433 if self._cache[path][b'exists']:
2434 return memfilectx(
2434 return memfilectx(
2435 repo,
2435 repo,
2436 memctx,
2436 memctx,
2437 path,
2437 path,
2438 self._cache[path][b'data'],
2438 self._cache[path][b'data'],
2439 b'l' in self._cache[path][b'flags'],
2439 b'l' in self._cache[path][b'flags'],
2440 b'x' in self._cache[path][b'flags'],
2440 b'x' in self._cache[path][b'flags'],
2441 self._cache[path][b'copied'],
2441 self._cache[path][b'copied'],
2442 )
2442 )
2443 else:
2443 else:
2444 # Returning None, but including the path in `files`, is
2444 # Returning None, but including the path in `files`, is
2445 # necessary for memctx to register a deletion.
2445 # necessary for memctx to register a deletion.
2446 return None
2446 return None
2447
2447
2448 return memctx(
2448 return memctx(
2449 self._repo,
2449 self._repo,
2450 parents,
2450 parents,
2451 text,
2451 text,
2452 files,
2452 files,
2453 getfile,
2453 getfile,
2454 date=date,
2454 date=date,
2455 extra=extra,
2455 extra=extra,
2456 user=user,
2456 user=user,
2457 branch=branch,
2457 branch=branch,
2458 editor=editor,
2458 editor=editor,
2459 )
2459 )
2460
2460
2461 def isdirty(self, path):
2461 def isdirty(self, path):
2462 return path in self._cache
2462 return path in self._cache
2463
2463
2464 def isempty(self):
2464 def isempty(self):
2465 # We need to discard any keys that are actually clean before the empty
2465 # We need to discard any keys that are actually clean before the empty
2466 # commit check.
2466 # commit check.
2467 self._compact()
2467 self._compact()
2468 return len(self._cache) == 0
2468 return len(self._cache) == 0
2469
2469
2470 def clean(self):
2470 def clean(self):
2471 self._cache = {}
2471 self._cache = {}
2472
2472
2473 def _compact(self):
2473 def _compact(self):
2474 """Removes keys from the cache that are actually clean, by comparing
2474 """Removes keys from the cache that are actually clean, by comparing
2475 them with the underlying context.
2475 them with the underlying context.
2476
2476
2477 This can occur during the merge process, e.g. by passing --tool :local
2477 This can occur during the merge process, e.g. by passing --tool :local
2478 to resolve a conflict.
2478 to resolve a conflict.
2479 """
2479 """
2480 keys = []
2480 keys = []
2481 # This won't be perfect, but can help performance significantly when
2481 # This won't be perfect, but can help performance significantly when
2482 # using things like remotefilelog.
2482 # using things like remotefilelog.
2483 scmutil.prefetchfiles(
2483 scmutil.prefetchfiles(
2484 self.repo(),
2484 self.repo(),
2485 [self.p1().rev()],
2485 [self.p1().rev()],
2486 scmutil.matchfiles(self.repo(), self._cache.keys()),
2486 scmutil.matchfiles(self.repo(), self._cache.keys()),
2487 )
2487 )
2488
2488
2489 for path in self._cache.keys():
2489 for path in self._cache.keys():
2490 cache = self._cache[path]
2490 cache = self._cache[path]
2491 try:
2491 try:
2492 underlying = self._wrappedctx[path]
2492 underlying = self._wrappedctx[path]
2493 if (
2493 if (
2494 underlying.data() == cache[b'data']
2494 underlying.data() == cache[b'data']
2495 and underlying.flags() == cache[b'flags']
2495 and underlying.flags() == cache[b'flags']
2496 ):
2496 ):
2497 keys.append(path)
2497 keys.append(path)
2498 except error.ManifestLookupError:
2498 except error.ManifestLookupError:
2499 # Path not in the underlying manifest (created).
2499 # Path not in the underlying manifest (created).
2500 continue
2500 continue
2501
2501
2502 for path in keys:
2502 for path in keys:
2503 del self._cache[path]
2503 del self._cache[path]
2504 return keys
2504 return keys
2505
2505
2506 def _markdirty(
2506 def _markdirty(
2507 self, path, exists, data=None, date=None, flags=b'', copied=None
2507 self, path, exists, data=None, date=None, flags=b'', copied=None
2508 ):
2508 ):
2509 # data not provided, let's see if we already have some; if not, let's
2509 # data not provided, let's see if we already have some; if not, let's
2510 # grab it from our underlying context, so that we always have data if
2510 # grab it from our underlying context, so that we always have data if
2511 # the file is marked as existing.
2511 # the file is marked as existing.
2512 if exists and data is None:
2512 if exists and data is None:
2513 oldentry = self._cache.get(path) or {}
2513 oldentry = self._cache.get(path) or {}
2514 data = oldentry.get(b'data')
2514 data = oldentry.get(b'data')
2515 if data is None:
2515 if data is None:
2516 data = self._wrappedctx[path].data()
2516 data = self._wrappedctx[path].data()
2517
2517
2518 self._cache[path] = {
2518 self._cache[path] = {
2519 b'exists': exists,
2519 b'exists': exists,
2520 b'data': data,
2520 b'data': data,
2521 b'date': date,
2521 b'date': date,
2522 b'flags': flags,
2522 b'flags': flags,
2523 b'copied': copied,
2523 b'copied': copied,
2524 }
2524 }
2525
2525
2526 def filectx(self, path, filelog=None):
2526 def filectx(self, path, filelog=None):
2527 return overlayworkingfilectx(
2527 return overlayworkingfilectx(
2528 self._repo, path, parent=self, filelog=filelog
2528 self._repo, path, parent=self, filelog=filelog
2529 )
2529 )
2530
2530
2531
2531
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``.

    Nearly every operation delegates to the owning overlayworkingctx
    (``self._parent``), keyed by this file's path.
    """

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        """True if this file's contents differ from ``fctx``'s."""
        return self.data() != fctx.data()

    def changectx(self):
        """Return the owning overlay context."""
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # Nothing on disk to audit for an in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # No working directory, hence no unknown files to clear.
        pass
2586
2586
2587
2587
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            cleanfiles = [
                f for f in self._manifest if f not in self._changedset
            ]
        else:
            cleanfiles = []
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        return scmutil.status(
            modified, added, removed, [], [], [], cleanfiles
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context
        """
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2631
2631
2632
2632
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a miss is the rare case after warm-up.
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2648
2648
2649
2649
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=fctx.copysource(),
        )

    return getfilectx
2671
2671
2672
2672
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # Deleted file: memctx expects None for removals.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2695
2695
2696
2696
2697 class memctx(committablectx):
2697 class memctx(committablectx):
2698 """Use memctx to perform in-memory commits via localrepo.commitctx().
2698 """Use memctx to perform in-memory commits via localrepo.commitctx().
2699
2699
2700 Revision information is supplied at initialization time while
2700 Revision information is supplied at initialization time while
2701 related files data and is made available through a callback
2701 related files data and is made available through a callback
2702 mechanism. 'repo' is the current localrepo, 'parents' is a
2702 mechanism. 'repo' is the current localrepo, 'parents' is a
2703 sequence of two parent revisions identifiers (pass None for every
2703 sequence of two parent revisions identifiers (pass None for every
2704 missing parent), 'text' is the commit message and 'files' lists
2704 missing parent), 'text' is the commit message and 'files' lists
2705 names of files touched by the revision (normalized and relative to
2705 names of files touched by the revision (normalized and relative to
2706 repository root).
2706 repository root).
2707
2707
2708 filectxfn(repo, memctx, path) is a callable receiving the
2708 filectxfn(repo, memctx, path) is a callable receiving the
2709 repository, the current memctx object and the normalized path of
2709 repository, the current memctx object and the normalized path of
2710 requested file, relative to repository root. It is fired by the
2710 requested file, relative to repository root. It is fired by the
2711 commit function for every file in 'files', but calls order is
2711 commit function for every file in 'files', but calls order is
2712 undefined. If the file is available in the revision being
2712 undefined. If the file is available in the revision being
2713 committed (updated or added), filectxfn returns a memfilectx
2713 committed (updated or added), filectxfn returns a memfilectx
2714 object. If the file was removed, filectxfn return None for recent
2714 object. If the file was removed, filectxfn return None for recent
2715 Mercurial. Moved files are represented by marking the source file
2715 Mercurial. Moved files are represented by marking the source file
2716 removed and the new file added with copy information (see
2716 removed and the new file added with copy information (see
2717 memfilectx).
2717 memfilectx).
2718
2718
2719 user receives the committer name and defaults to current
2719 user receives the committer name and defaults to current
2720 repository username, date is the commit date in any format
2720 repository username, date is the commit date in any format
2721 supported by dateutil.parsedate() and defaults to current date, extra
2721 supported by dateutil.parsedate() and defaults to current date, extra
2722 is a dictionary of metadata or is left empty.
2722 is a dictionary of metadata or is left empty.
2723 """
2723 """
2724
2724
2725 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2725 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2726 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2726 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2727 # this field to determine what to do in filectxfn.
2727 # this field to determine what to do in filectxfn.
2728 _returnnoneformissingfiles = True
2728 _returnnoneformissingfiles = True
2729
2729
2730 def __init__(
2730 def __init__(
2731 self,
2731 self,
2732 repo,
2732 repo,
2733 parents,
2733 parents,
2734 text,
2734 text,
2735 files,
2735 files,
2736 filectxfn,
2736 filectxfn,
2737 user=None,
2737 user=None,
2738 date=None,
2738 date=None,
2739 extra=None,
2739 extra=None,
2740 branch=None,
2740 branch=None,
2741 editor=None,
2741 editor=None,
2742 ):
2742 ):
2743 super(memctx, self).__init__(
2743 super(memctx, self).__init__(
2744 repo, text, user, date, extra, branch=branch
2744 repo, text, user, date, extra, branch=branch
2745 )
2745 )
2746 self._rev = None
2746 self._rev = None
2747 self._node = None
2747 self._node = None
2748 parents = [(p or nullid) for p in parents]
2748 parents = [(p or nullid) for p in parents]
2749 p1, p2 = parents
2749 p1, p2 = parents
2750 self._parents = [self._repo[p] for p in (p1, p2)]
2750 self._parents = [self._repo[p] for p in (p1, p2)]
2751 files = sorted(set(files))
2751 files = sorted(set(files))
2752 self._files = files
2752 self._files = files
2753 self.substate = {}
2753 self.substate = {}
2754
2754
2755 if isinstance(filectxfn, patch.filestore):
2755 if isinstance(filectxfn, patch.filestore):
2756 filectxfn = memfilefrompatch(filectxfn)
2756 filectxfn = memfilefrompatch(filectxfn)
2757 elif not callable(filectxfn):
2757 elif not callable(filectxfn):
2758 # if store is not callable, wrap it in a function
2758 # if store is not callable, wrap it in a function
2759 filectxfn = memfilefromctx(filectxfn)
2759 filectxfn = memfilefromctx(filectxfn)
2760
2760
2761 # memoizing increases performance for e.g. vcs convert scenarios.
2761 # memoizing increases performance for e.g. vcs convert scenarios.
2762 self._filectxfn = makecachingfilectxfn(filectxfn)
2762 self._filectxfn = makecachingfilectxfn(filectxfn)
2763
2763
2764 if editor:
2764 if editor:
2765 self._text = editor(self._repo, self, [])
2765 self._text = editor(self._repo, self, [])
2766 self._repo.savecommitmessage(self._text)
2766 self._repo.savecommitmessage(self._text)
2767
2767
2768 def filectx(self, path, filelog=None):
2768 def filectx(self, path, filelog=None):
2769 """get a file context from the working directory
2769 """get a file context from the working directory
2770
2770
2771 Returns None if file doesn't exist and should be removed."""
2771 Returns None if file doesn't exist and should be removed."""
2772 return self._filectxfn(self._repo, self, path)
2772 return self._filectxfn(self._repo, self, path)
2773
2773
def commit(self):
    """Persist this in-memory context by handing it to the repository."""
    repo = self._repo
    return repo.commitctx(self)
2777
2777
@propertycache
def _manifest(self):
    """Synthesize a manifest from p1's manifest and this context's status.

    Modified and added files get placeholder node ids; removed files are
    dropped.  Only the first parent is considered for now.
    """
    status = self._status
    # start from a private copy of the first parent's manifest
    manifest = self._parents[0].manifest().copy()

    for path in status.modified:
        manifest[path] = modifiednodeid

    for path in status.added:
        manifest[path] = addednodeid

    for path in status.removed:
        if path in manifest:
            del manifest[path]

    return manifest
2797
2797
@propertycache
def _status(self):
    """Derive an exact status from the ``files`` list given at construction."""
    manifests = [self.p1().manifest()]
    p2 = self._parents[1]
    # "memctx._parents" always has length 2, so test the second parent's
    # node instead of the list length to see whether it really exists.
    if p2.node() != nullid:
        manifests.append(p2.manifest())

    def managing(path):
        return any(path in m for m in manifests)

    modified, added, removed = [], [], []
    for path in self._files:
        if not managing(path):
            added.append(path)
        elif self[path]:
            # filectxfn returned a file context: the file exists -> modified
            modified.append(path)
        else:
            # filectxfn returned None: the file should be removed
            removed.append(path)

    return scmutil.status(modified, added, removed, [], [], [], [])
2823
2823
2824
2824
class memfilectx(committablefilectx):
    """An in-memory file scheduled for commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None.
        """
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # encode the file type as a manifest flag: link beats exec
        self._flags = b'l' if islink else (b'x' if isexec else b'')
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path recorded at construction, or None."""
        return self._copysource

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        return not (self.data() == fctx.data())

    def data(self):
        """Return the raw in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # semantics for in-memory removal are still undecided; for now just
        # drop the entry from the owning context
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2876
2876
class metadataonlyctx(committablectx):
    """Like memctx but reusing the manifest of a different commit.

    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'originalctx' is the original revision whose manifest
    we're reusing, 'parents' is a sequence of two parent revision identifiers
    (pass None for every missing parent), 'text' is the commit message.

    'user' receives the committer name and defaults to the current repository
    username, 'date' is the commit date in any format supported by
    dateutil.parsedate() and defaults to the current date, 'extra' is a
    dictionary of metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            # no message supplied: reuse the original commit message
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # not committed yet, so no revision number or node id
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # copy, then pad with null contexts so there are always exactly two
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullid])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        # NOTE(review): p1/p2 are context objects while nullid is bytes, so
        # the ``!= nullid`` comparisons below rely on the contexts' equality
        # semantics -- confirm they behave as a null-revision test.
        if p1 != nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            # let the caller-provided editor rewrite the commit message and
            # keep a copy for possible recovery
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        # node id of the manifest being reused
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        # delegate file lookups to the context whose manifest we reuse
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        # the whole point of this class: the manifest is the original one
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.node() != nullid:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                # not in either parent manifest -> added
                added.append(f)
            elif f in self:
                # still present in this context -> modified
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
2985
2985
2986
2986
class arbitraryfilectx(object):
    """Expose filectx-like operations for a file at an arbitrary on-disk
    location, which need not live inside the working directory.
    """

    def __init__(self, path, repo=None):
        # repo may be None: contrib/simplemerge instantiates this class
        # without a repository.
        self._path = path
        self._repo = repo

    def cmp(self, fctx):
        """Return True if this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas `cmp` should not, so only take
        # the fast path when neither side is a symlink.
        either_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if (
            not either_symlink
            and isinstance(fctx, workingfilectx)
            and self._repo
        ):
            # Both sides are disk-backed: let filecmp compare them.  Note
            # that filecmp returns True when the files are identical, which
            # is the opposite of our cmp convention (True means different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path given at construction."""
        return self._path

    def flags(self):
        """Arbitrary on-disk files carry no manifest flags."""
        return b''

    def data(self):
        """Read and return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content, read in binary mode."""
        with open(self._path, b"rb") as fp:
            return fp.read()

    def remove(self):
        """Delete the backing file from disk."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Replace the backing file's content with ``data``; flags are not
        supported for arbitrary on-disk files."""
        assert not flags
        with open(self._path, b"wb") as fp:
            fp.write(data)
General Comments 0
You need to be logged in to leave comments. Login now