##// END OF EJS Templates
status: gather fixup info at comparison time...
marmoute -
r49204:41f40f35 default
parent child Browse files
Show More
@@ -1,3123 +1,3140 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import filecmp
11 import filecmp
12 import os
12 import os
13 import stat
13 import stat
14
14
15 from .i18n import _
15 from .i18n import _
16 from .node import (
16 from .node import (
17 hex,
17 hex,
18 nullrev,
18 nullrev,
19 short,
19 short,
20 )
20 )
21 from .pycompat import (
21 from .pycompat import (
22 getattr,
22 getattr,
23 open,
23 open,
24 )
24 )
25 from . import (
25 from . import (
26 dagop,
26 dagop,
27 encoding,
27 encoding,
28 error,
28 error,
29 fileset,
29 fileset,
30 match as matchmod,
30 match as matchmod,
31 mergestate as mergestatemod,
31 mergestate as mergestatemod,
32 metadata,
32 metadata,
33 obsolete as obsmod,
33 obsolete as obsmod,
34 patch,
34 patch,
35 pathutil,
35 pathutil,
36 phases,
36 phases,
37 pycompat,
37 pycompat,
38 repoview,
38 repoview,
39 scmutil,
39 scmutil,
40 sparse,
40 sparse,
41 subrepo,
41 subrepo,
42 subrepoutil,
42 subrepoutil,
43 util,
43 util,
44 )
44 )
45 from .utils import (
45 from .utils import (
46 dateutil,
46 dateutil,
47 stringutil,
47 stringutil,
48 )
48 )
49 from .dirstateutils import (
50 timestamp,
51 )
49
52
# Module-level shorthand: cache the result of a computed attribute in the
# instance __dict__ on first access (see mercurial.util.propertycache).
propertycache = util.propertycache
51
54
52
55
class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        # the repository this context belongs to; subclasses are expected
        # to set _rev/_node/_manifest themselves
        self._repo = repo

    def __bytes__(self):
        # short (12-hex-digit) form of the node hash
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts compare equal only when they are the same concrete type
        # and point at the same revision; anything without a ``_rev``
        # attribute is never equal.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        # membership means "this file exists in the context's manifest"
        return key in self._manifest

    def __getitem__(self, key):
        # ctx[path] -> file context for that path
        return self.filectx(key)

    def __iter__(self):
        # iterate over the file names tracked in this context
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context

        ``s`` carries pre-computed deleted/unknown/ignored lists; the
        modified/added/removed/clean sets are derived here from a manifest
        diff between the two contexts.
        """
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            # files already known to be deleted must not be re-reported
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # parsed subrepository state for this context (path -> state tuple)
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        # revision recorded for the given subrepo path
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        # human-readable phase name (e.g. b'public', b'draft')
        return phases.phasenames[self.phase()]

    def mutable(self):
        # anything above the public phase may still be rewritten
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        # build a matcher from a fileset expression evaluated in this context
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        # first parent; always present (may be the null revision context)
        return self._parents[0]

    def p2(self):
        # second parent, or the null-revision context for non-merges
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        # Return (filenode, flags) for ``path``, preferring whichever manifest
        # data is already cached: full manifest, then manifest delta, then a
        # fresh lookup through the manifest log.
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        # b'' for files without special flags or not present in the manifest
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from changeset metadata
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a file matcher for this context from patterns/includes/excludes."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        # default to diffing against the first parent
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

        (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

        s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is to
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        # A commit is "empty" when it would record nothing new: single
        # parent, same branch as p1, not closing a branch, and no file
        # changes.
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
496
499
497
500
498 class changectx(basectx):
501 class changectx(basectx):
499 """A changecontext object makes access to data related to a particular
502 """A changecontext object makes access to data related to a particular
500 changeset convenient. It represents a read-only context already present in
503 changeset convenient. It represents a read-only context already present in
501 the repo."""
504 the repo."""
502
505
    def __init__(self, repo, rev, node, maybe_filtered=True):
        """Initialize a read-only context for revision ``rev`` / node ``node``."""
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node
        # When maybe_filtered is True, the revision might be affected by
        # changelog filtering and operation through the filtered changelog must be used.
        #
        # When maybe_filtered is False, the revision has already been checked
        # against filtering and is not filtered. Operation through the
        # unfiltered changelog might be used in some case.
        self._maybe_filtered = maybe_filtered
514
517
    def __hash__(self):
        # Hash by revision number so equal contexts hash equally; fall back
        # to object identity when _rev is not (yet) set.
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)
520
523
    def __nonzero__(self):
        # Only the null revision is falsy.
        return self._rev != nullrev

    __bool__ = __nonzero__
525
528
    @propertycache
    def _changeset(self):
        # Use the filtered changelog only when filtering may still apply;
        # otherwise the unfiltered one is safe (and cheaper).
        if self._maybe_filtered:
            repo = self._repo
        else:
            repo = self._repo.unfiltered()
        return repo.changelog.changelogrevision(self.rev())
533
536
    @propertycache
    def _manifest(self):
        # full manifest for this changeset, read lazily and cached
        return self._manifestctx.read()
537
540
    @property
    def _manifestctx(self):
        # manifest context looked up fresh each time (not cached) so it
        # tracks the manifest log's own caching
        return self._repo.manifestlog[self._changeset.manifest]
541
544
    @propertycache
    def _manifestdelta(self):
        # manifest delta against the parent; cheaper than a full read when
        # only a few entries are needed
        return self._manifestctx.readdelta()
545
548
    @propertycache
    def _parents(self):
        # Parent revisions come straight from the changelog, so they are
        # known to exist: construct them with maybe_filtered=False.
        repo = self._repo
        if self._maybe_filtered:
            cl = repo.changelog
        else:
            cl = repo.unfiltered().changelog

        p1, p2 = cl.parentrevs(self._rev)
        if p2 == nullrev:
            # non-merge: single-element parent list
            return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
        return [
            changectx(repo, p1, cl.node(p1), maybe_filtered=False),
            changectx(repo, p2, cl.node(p2), maybe_filtered=False),
        ]
561
564
    def changeset(self):
        """Return the raw changeset fields as a tuple:
        (manifest, user, date, files, description, extra)."""
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )
572
575
    def manifestnode(self):
        """Return the node id of this changeset's manifest."""
        return self._changeset.manifest
575
578
    def user(self):
        """Return the committer recorded in the changelog entry."""
        return self._changeset.user
578
581
    def date(self):
        """Return the commit date recorded in the changelog entry."""
        return self._changeset.date
581
584
    def files(self):
        """Return the list of files touched by this changeset."""
        return self._changeset.files
584
587
585 def filesmodified(self):
588 def filesmodified(self):
586 modified = set(self.files())
589 modified = set(self.files())
587 modified.difference_update(self.filesadded())
590 modified.difference_update(self.filesadded())
588 modified.difference_update(self.filesremoved())
591 modified.difference_update(self.filesremoved())
589 return sorted(modified)
592 return sorted(modified)
590
593
    def filesadded(self):
        """Return the list of files added in this changeset.

        The answer comes from changelog-recorded data when the repository
        configuration trusts it; otherwise it may be recomputed from
        filelogs, or reported as empty.
        """
        filesadded = self._changeset.filesadded
        # compute_on_none: whether a missing changelog record should be
        # recomputed (True) or treated as "no added files" (False)
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = metadata.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded
609
612
def filesremoved(self):
    """Return the list of files removed by this changeset.

    Mirrors filesadded(): the source of truth depends on the copy
    metadata mode and ``experimental.copies.read-from``.
    """
    removed = self._changeset.filesremoved
    if self._repo.filecopiesmode == b'changeset-sidedata':
        # sidedata storage is authoritative: never recompute
        fallback = False
    else:
        source = self._repo.ui.config(b'experimental', b'copies.read-from')
        if source == b'changeset-only':
            fallback = False
        elif source == b'compatibility':
            # use the changelog value when present, recompute otherwise
            fallback = True
        else:
            # filelog mode: the changelog content is ignored entirely
            removed = None
            fallback = True
    if removed is None:
        removed = (
            metadata.computechangesetfilesremoved(self) if fallback else []
        )
    return removed
628
631
@propertycache
def _copies(self):
    # Return a (p1copies, p2copies) pair of {dst: src} dicts describing
    # copies relative to each parent, honouring the configured source of
    # copy metadata.
    p1copies = self._changeset.p1copies
    p2copies = self._changeset.p2copies
    compute_on_none = True
    if self._repo.filecopiesmode == b'changeset-sidedata':
        # sidedata storage is authoritative: never fall back to filelogs
        compute_on_none = False
    else:
        source = self._repo.ui.config(b'experimental', b'copies.read-from')
        # If config says to get copy metadata only from changeset, then
        # return that, defaulting to {} if there was no copy metadata. In
        # compatibility mode, we return copy data from the changeset if it
        # was recorded there, and otherwise we fall back to getting it from
        # the filelogs (below).
        #
        # If we are in compatibility mode and there is no data in the
        # changeset, we get the copy metadata from the filelogs.
        #
        # otherwise, when config said to read only from filelog, we get the
        # copy metadata from the filelogs.
        if source == b'changeset-only':
            compute_on_none = False
        elif source != b'compatibility':
            # filelog mode, ignore any changelog content
            p1copies = p2copies = None
    if p1copies is None:
        if compute_on_none:
            # basectx._copies walks the filelogs to recover the data
            p1copies, p2copies = super(changectx, self)._copies
        else:
            if p1copies is None:
                p1copies = {}
            if p2copies is None:
                p2copies = {}
    return p1copies, p2copies
663
666
def description(self):
    """Return the commit message of this changeset."""
    cs = self._changeset
    return cs.description

def branch(self):
    """Return the branch name, converted to the local encoding."""
    extra = self._changeset.extra
    return encoding.tolocal(extra.get(b"branch"))

def closesbranch(self):
    """True if this changeset closes its branch."""
    return b'close' in self._changeset.extra

def extra(self):
    """Return a dict of extra information."""
    cs = self._changeset
    return cs.extra
676
679
def tags(self):
    """Return a list of byte tag names"""
    repo = self._repo
    return repo.nodetags(self._node)

def bookmarks(self):
    """Return a list of byte bookmark names."""
    repo = self._repo
    return repo.nodebookmarks(self._node)

def phase(self):
    """Return the phase number of this changeset."""
    repo = self._repo
    return repo._phasecache.phase(repo, self._rev)
687
690
def hidden(self):
    """True if this revision is filtered out of the 'visible' repoview."""
    filtered = repoview.filterrevs(self._repo, b'visible')
    return self._rev in filtered

def isinmemory(self):
    """A committed changeset is never in-memory-only."""
    return False
693
696
def children(self):
    """return list of changectx contexts for each child changeset.

    This returns only the immediate child changesets. Use descendants() to
    recursively walk children.
    """
    repo = self._repo
    return [repo[node] for node in repo.changelog.children(self._node)]
702
705
def ancestors(self):
    """Lazily yield a changectx for every ancestor of this changeset."""
    repo = self._repo
    for rev in repo.changelog.ancestors([self._rev]):
        yield repo[rev]

def descendants(self):
    """Recursively yield all children of the changeset.

    For just the immediate children, use children()
    """
    repo = self._repo
    for rev in repo.changelog.descendants([self._rev]):
        yield repo[rev]
714
717
def filectx(self, path, fileid=None, filelog=None):
    """get a file context from this changeset"""
    # default the file id to the one recorded in this changeset
    fid = self.filenode(path) if fileid is None else fileid
    return filectx(
        self._repo, path, fileid=fid, changectx=self, filelog=filelog
    )
722
725
def ancestor(self, c2, warn=False):
    """return the "best" ancestor context of self and c2

    If there are multiple candidates, it will show a message and check
    merge.preferancestor configuration before falling back to the
    revlog ancestor.
    """
    # deal with workingctxs: a workingctx has no node, use its p1 instead
    n2 = c2._node
    if n2 is None:
        n2 = c2._parents[0]._node
    cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
    if not cahs:
        # no common ancestor at all
        anc = self._repo.nodeconstants.nullid
    elif len(cahs) == 1:
        anc = cahs[0]
    else:
        # several candidate ancestors: let configuration pick one
        # experimental config: merge.preferancestor
        for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
            try:
                ctx = scmutil.revsymbol(self._repo, r)
            except error.RepoLookupError:
                continue
            anc = ctx.node()
            if anc in cahs:
                break
        else:
            # no configured preference matched any candidate: fall back
            # to the revlog's deterministic choice
            anc = self._repo.changelog.ancestor(self._node, n2)
        if warn:
            self._repo.ui.status(
                (
                    _(b"note: using %s as ancestor of %s and %s\n")
                    % (short(anc), short(self._node), short(n2))
                )
                + b''.join(
                    _(
                        b" alternatively, use --config "
                        b"merge.preferancestor=%s\n"
                    )
                    % short(n)
                    for n in sorted(cahs)
                    if n != anc
                )
            )
    return self._repo[anc]
767
770
def isancestorof(self, other):
    """True if this changeset is an ancestor of other"""
    cl = self._repo.changelog
    return cl.isancestorrev(self._rev, other._rev)
771
774
def walk(self, match):
    '''Generates matching file names.'''

    # Wrap match.bad method to have message with nodeid
    def bad(fn, msg):
        # The manifest doesn't know about subrepos, so don't complain about
        # paths into valid subrepos.
        if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
            return
        match.bad(fn, _(b'no such file in rev %s') % self)

    # narrow the matcher to this repo's narrowspec before walking
    m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
    return self._manifest.walk(m)

def matches(self, match):
    # matching against a changectx is walking its manifest
    return self.walk(match)
788
791
789
792
790 class basefilectx(object):
793 class basefilectx(object):
791 """A filecontext object represents the common logic for its children:
794 """A filecontext object represents the common logic for its children:
792 filectx: read-only access to a filerevision that is already present
795 filectx: read-only access to a filerevision that is already present
793 in the repo,
796 in the repo,
794 workingfilectx: a filecontext that represents files from the working
797 workingfilectx: a filecontext that represents files from the working
795 directory,
798 directory,
796 memfilectx: a filecontext that represents files in-memory,
799 memfilectx: a filecontext that represents files in-memory,
797 """
800 """
798
801
@propertycache
def _filelog(self):
    """The filelog for this file's path, opened lazily."""
    return self._repo.file(self._path)

@propertycache
def _changeid(self):
    """Changelog revision associated with this file revision."""
    attrs = self.__dict__
    if '_changectx' in attrs:
        # created from an explicit changeset: use its revision
        return self._changectx.rev()
    if '_descendantrev' in attrs:
        # this file context was created from a revision with a known
        # descendant, we can (lazily) correct for linkrev aliases
        return self._adjustlinkrev(self._descendantrev)
    # last resort: the raw linkrev stored in the filelog
    return self._filelog.linkrev(self._filerev)

@propertycache
def _filenode(self):
    """Node id of this file revision."""
    if '_fileid' in self.__dict__:
        return self._filelog.lookup(self._fileid)
    return self._changectx.filenode(self._path)

@propertycache
def _filerev(self):
    """Filelog revision number of this file revision."""
    return self._filelog.rev(self._filenode)

@propertycache
def _repopath(self):
    """Repository-relative path of this file."""
    return self._path
828
831
def __nonzero__(self):
    """A filectx is truthy when its file revision can be resolved."""
    try:
        self._filenode
    except error.LookupError:
        # file is missing from this changeset
        return False
    return True

__bool__ = __nonzero__

def __bytes__(self):
    """``path@changeset``, or ``path@???`` when the changeset is unknown."""
    try:
        return b"%s@%s" % (self.path(), self._changectx)
    except error.LookupError:
        return b"%s@???" % self.path()

__str__ = encoding.strmethod(__bytes__)
846
849
847 def __repr__(self):
850 def __repr__(self):
848 return "<%s %s>" % (type(self).__name__, str(self))
851 return "<%s %s>" % (type(self).__name__, str(self))
849
852
850 def __hash__(self):
853 def __hash__(self):
851 try:
854 try:
852 return hash((self._path, self._filenode))
855 return hash((self._path, self._filenode))
853 except AttributeError:
856 except AttributeError:
854 return id(self)
857 return id(self)
855
858
856 def __eq__(self, other):
859 def __eq__(self, other):
857 try:
860 try:
858 return (
861 return (
859 type(self) == type(other)
862 type(self) == type(other)
860 and self._path == other._path
863 and self._path == other._path
861 and self._filenode == other._filenode
864 and self._filenode == other._filenode
862 )
865 )
863 except AttributeError:
866 except AttributeError:
864 return False
867 return False
865
868
866 def __ne__(self, other):
869 def __ne__(self, other):
867 return not (self == other)
870 return not (self == other)
868
871
def filerev(self):
    """Return the filelog revision number."""
    return self._filerev

def filenode(self):
    """Return the filelog node id."""
    return self._filenode

@propertycache
def _flags(self):
    # manifest flags ('l', 'x' or '') recorded for this path
    return self._changectx.flags(self._path)

def flags(self):
    """Return the manifest flags for this file."""
    return self._flags

def filelog(self):
    """Return the filelog object."""
    return self._filelog

def rev(self):
    """Return the changelog revision this file context belongs to."""
    return self._changeid

def linkrev(self):
    """Return the raw linkrev stored in the filelog."""
    fl = self._filelog
    return fl.linkrev(self._filerev)
890
893
def node(self):
    """Return the node of the associated changeset."""
    ctx = self._changectx
    return ctx.node()

def hex(self):
    """Return the hex node of the associated changeset."""
    ctx = self._changectx
    return ctx.hex()

def user(self):
    """Return the committer of the associated changeset."""
    ctx = self._changectx
    return ctx.user()

def date(self):
    """Return the commit date of the associated changeset."""
    ctx = self._changectx
    return ctx.date()

def files(self):
    """Return the files touched by the associated changeset."""
    ctx = self._changectx
    return ctx.files()

def description(self):
    """Return the commit message of the associated changeset."""
    ctx = self._changectx
    return ctx.description()

def branch(self):
    """Return the branch of the associated changeset."""
    ctx = self._changectx
    return ctx.branch()

def extra(self):
    """Return the extras dict of the associated changeset."""
    ctx = self._changectx
    return ctx.extra()
914
917
def phase(self):
    """Return the phase of the associated changeset."""
    ctx = self._changectx
    return ctx.phase()

def phasestr(self):
    """Return the phase name of the associated changeset."""
    ctx = self._changectx
    return ctx.phasestr()

def obsolete(self):
    """True if the associated changeset is obsolete."""
    ctx = self._changectx
    return ctx.obsolete()

def instabilities(self):
    """Return the instabilities of the associated changeset."""
    ctx = self._changectx
    return ctx.instabilities()

def manifest(self):
    """Return the manifest of the associated changeset."""
    ctx = self._changectx
    return ctx.manifest()

def changectx(self):
    """Return the associated change context."""
    return self._changectx

def renamed(self):
    """Return the (source path, source node) copy record, if any."""
    return self._copied

def copysource(self):
    """Return the copy source path, or a falsy value when not copied."""
    copied = self._copied
    if not copied:
        return copied
    return copied[0]

def repo(self):
    """Return the repository this context belongs to."""
    return self._repo

def size(self):
    """Return the length in bytes of the file data."""
    return len(self.data())
941
944
def path(self):
    """Return the repository-relative path of this file."""
    return self._path

def isbinary(self):
    """True when the file data looks binary; missing data reads as text."""
    try:
        return stringutil.binary(self.data())
    except IOError:
        return False

def isexec(self):
    """True if the manifest marks this file as executable."""
    flags = self.flags()
    return b'x' in flags

def islink(self):
    """True if the manifest marks this file as a symlink."""
    flags = self.flags()
    return b'l' in flags
959
962
def isabsent(self):
    """whether this filectx represents a file not in self._changectx

    This is mainly for merge code to detect change/delete conflicts.
    Here it is always False; presumably absent-file subclasses override
    it to return True (the original docstring said "expected to be True
    for all subclasses of basectx" — confirm against those subclasses).
    """
    return False

# set True by contexts that implement their own comparison; checked by
# cmp() below so the custom side wins
_customcmp = False
968
971
def cmp(self, fctx):
    """compare with other file context

    returns True if different than fctx.
    """
    if fctx._customcmp:
        # the other side implements its own comparison: defer to it
        return fctx.cmp(self)

    if self._filenode is None:
        raise error.ProgrammingError(
            b'filectx.cmp() must be reimplemented if not backed by revlog'
        )

    if fctx._filenode is None:
        # fctx has no stored node (e.g. working directory file): compare
        # by size heuristics first, content only when sizes are ambiguous
        if self._repo._encodefilterpats:
            # can't rely on size() because wdir content may be decoded
            return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() - 4 == fctx.size():
            # size() can match:
            # if file data starts with '\1\n', empty metadata block is
            # prepended, which adds 4 bytes to filelog.size().
            return self._filelog.cmp(self._filenode, fctx.data())
        if self.size() == fctx.size() or self.flags() == b'l':
            # size() matches: need to compare content
            # issue6456: Always compare symlinks because size can represent
            # encrypted string for EXT-4 encryption(fscrypt).
            return self._filelog.cmp(self._filenode, fctx.data())

    # size() differs
    return True
999
1002
def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
    """return the first ancestor of <srcrev> introducing <fnode>

    If the linkrev of the file revision does not point to an ancestor of
    srcrev, we'll walk down the ancestors until we find one introducing
    this file revision.

    :srcrev: the changeset revision we search ancestors from
    :inclusive: if true, the src revision will also be checked
    :stoprev: an optional revision to stop the walk at. If no introduction
              of this file content could be found before this floor
              revision, the function will returns "None" and stops its
              iteration.
    """
    repo = self._repo
    # use the unfiltered changelog: linkrevs may point at hidden revisions
    cl = repo.unfiltered().changelog
    mfl = repo.manifestlog
    # fetch the linkrev
    lkr = self.linkrev()
    if srcrev == lkr:
        # fast path: the linkrev is the revision we are looking from
        return lkr
    # hack to reuse ancestor computation when searching for renames
    memberanc = getattr(self, '_ancestrycontext', None)
    iteranc = None
    if srcrev is None:
        # wctx case, used by workingfilectx during mergecopy
        revs = [p.rev() for p in self._repo[None].parents()]
        inclusive = True  # we skipped the real (revless) source
    else:
        revs = [srcrev]
    if memberanc is None:
        memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
    # check if this linkrev is an ancestor of srcrev
    if lkr not in memberanc:
        if iteranc is None:
            iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        fnode = self._filenode
        path = self._path
        # walk the ancestors (newest first) looking for the changeset that
        # actually introduced this file node
        for a in iteranc:
            if stoprev is not None and a < stoprev:
                return None
            ac = cl.read(a)  # get changeset data (we avoid object creation)
            if path in ac[3]:  # checking the 'files' field.
                # The file has been touched, check if the content is
                # similar to the one we search for.
                if fnode == mfl[ac[0]].readfast().get(path):
                    return a
        # In theory, we should never get out of that loop without a result.
        # But if the manifest uses a buggy file revision (not a child of
        # the one it replaces) we could. Such a buggy situation will
        # likely result in a crash somewhere else at some point.
    return lkr
1052
1055
def isintroducedafter(self, changelogrev):
    """True if a filectx has been introduced after a given floor revision"""
    if self.linkrev() >= changelogrev:
        # cheap check: the stored linkrev already answers the question
        return True
    introrev = self._introrev(stoprev=changelogrev)
    return introrev is not None and introrev >= changelogrev
1061
1064
def introrev(self):
    """return the rev of the changeset which introduced this file revision

    This method is different from linkrev because it takes into account
    the changeset the filectx was created from, ensuring the returned
    revision is one of its ancestors.  This prevents bugs from
    'linkrev-shadowing' when a file revision is used by multiple
    changesets.
    """
    return self._introrev()
1072
1075
def _introrev(self, stoprev=None):
    """
    Same as `introrev` but, with an extra argument to limit changelog
    iteration range in some internal usecase.

    If `stoprev` is set, the `introrev` will not be searched past that
    `stoprev` revision and "None" might be returned. This is useful to
    limit the iteration range.
    """
    toprev = None
    attrs = vars(self)
    if '_changeid' in attrs:
        # We have a cached value already
        toprev = self._changeid
    elif '_changectx' in attrs:
        # We know which changelog entry we are coming from
        toprev = self._changectx.rev()

    if toprev is not None:
        return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
    elif '_descendantrev' in attrs:
        introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
        # be nice and cache the result of the computation
        if introrev is not None:
            self._changeid = introrev
        return introrev
    else:
        # no changeset association at all: the raw linkrev is the only
        # answer available
        return self.linkrev()
1101
1104
def introfilectx(self):
    """Return filectx having identical contents, but pointing to the
    changeset revision where this filectx was introduced"""
    introrev = self.introrev()
    if introrev == self.rev():
        # already pointing at the introducing changeset
        return self
    return self.filectx(self.filenode(), changeid=introrev)
1109
1112
def _parentfilectx(self, path, fileid, filelog):
    """create parent filectx keeping ancestry info for _adjustlinkrev()

    The new context inherits either our changeset association (as a
    descendant revision) or our own _descendantrev, so later linkrev
    adjustment can stay correct and reuse the ancestry cache.
    """
    fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
    if '_changeid' in vars(self) or '_changectx' in vars(self):
        # If self is associated with a changeset (probably explicitly
        # fed), ensure the created filectx is associated with a
        # changeset that is an ancestor of self.changectx.
        # This lets us later use _adjustlinkrev to get a correct link.
        fctx._descendantrev = self.rev()
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    elif '_descendantrev' in vars(self):
        # Otherwise propagate _descendantrev if we have one associated.
        fctx._descendantrev = self._descendantrev
        fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
    return fctx
1125
1128
1126 def parents(self):
1129 def parents(self):
1127 _path = self._path
1130 _path = self._path
1128 fl = self._filelog
1131 fl = self._filelog
1129 parents = self._filelog.parents(self._filenode)
1132 parents = self._filelog.parents(self._filenode)
1130 pl = [
1133 pl = [
1131 (_path, node, fl)
1134 (_path, node, fl)
1132 for node in parents
1135 for node in parents
1133 if node != self._repo.nodeconstants.nullid
1136 if node != self._repo.nodeconstants.nullid
1134 ]
1137 ]
1135
1138
1136 r = fl.renamed(self._filenode)
1139 r = fl.renamed(self._filenode)
1137 if r:
1140 if r:
1138 # - In the simple rename case, both parent are nullid, pl is empty.
1141 # - In the simple rename case, both parent are nullid, pl is empty.
1139 # - In case of merge, only one of the parent is null id and should
1142 # - In case of merge, only one of the parent is null id and should
1140 # be replaced with the rename information. This parent is -always-
1143 # be replaced with the rename information. This parent is -always-
1141 # the first one.
1144 # the first one.
1142 #
1145 #
1143 # As null id have always been filtered out in the previous list
1146 # As null id have always been filtered out in the previous list
1144 # comprehension, inserting to 0 will always result in "replacing
1147 # comprehension, inserting to 0 will always result in "replacing
1145 # first nullid parent with rename information.
1148 # first nullid parent with rename information.
1146 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1149 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1147
1150
1148 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1151 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1149
1152
1150 def p1(self):
1153 def p1(self):
1151 return self.parents()[0]
1154 return self.parents()[0]
1152
1155
1153 def p2(self):
1156 def p2(self):
1154 p = self.parents()
1157 p = self.parents()
1155 if len(p) == 2:
1158 if len(p) == 2:
1156 return p[1]
1159 return p[1]
1157 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1160 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1158
1161
1159 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1162 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1160 """Returns a list of annotateline objects for each line in the file
1163 """Returns a list of annotateline objects for each line in the file
1161
1164
1162 - line.fctx is the filectx of the node where that line was last changed
1165 - line.fctx is the filectx of the node where that line was last changed
1163 - line.lineno is the line number at the first appearance in the managed
1166 - line.lineno is the line number at the first appearance in the managed
1164 file
1167 file
1165 - line.text is the data on that line (including newline character)
1168 - line.text is the data on that line (including newline character)
1166 """
1169 """
1167 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1170 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1168
1171
1169 def parents(f):
1172 def parents(f):
1170 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1173 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1171 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1174 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1172 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1175 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1173 # isn't an ancestor of the srcrev.
1176 # isn't an ancestor of the srcrev.
1174 f._changeid
1177 f._changeid
1175 pl = f.parents()
1178 pl = f.parents()
1176
1179
1177 # Don't return renamed parents if we aren't following.
1180 # Don't return renamed parents if we aren't following.
1178 if not follow:
1181 if not follow:
1179 pl = [p for p in pl if p.path() == f.path()]
1182 pl = [p for p in pl if p.path() == f.path()]
1180
1183
1181 # renamed filectx won't have a filelog yet, so set it
1184 # renamed filectx won't have a filelog yet, so set it
1182 # from the cache to save time
1185 # from the cache to save time
1183 for p in pl:
1186 for p in pl:
1184 if not '_filelog' in p.__dict__:
1187 if not '_filelog' in p.__dict__:
1185 p._filelog = getlog(p.path())
1188 p._filelog = getlog(p.path())
1186
1189
1187 return pl
1190 return pl
1188
1191
1189 # use linkrev to find the first changeset where self appeared
1192 # use linkrev to find the first changeset where self appeared
1190 base = self.introfilectx()
1193 base = self.introfilectx()
1191 if getattr(base, '_ancestrycontext', None) is None:
1194 if getattr(base, '_ancestrycontext', None) is None:
1192 # it is safe to use an unfiltered repository here because we are
1195 # it is safe to use an unfiltered repository here because we are
1193 # walking ancestors only.
1196 # walking ancestors only.
1194 cl = self._repo.unfiltered().changelog
1197 cl = self._repo.unfiltered().changelog
1195 if base.rev() is None:
1198 if base.rev() is None:
1196 # wctx is not inclusive, but works because _ancestrycontext
1199 # wctx is not inclusive, but works because _ancestrycontext
1197 # is used to test filelog revisions
1200 # is used to test filelog revisions
1198 ac = cl.ancestors(
1201 ac = cl.ancestors(
1199 [p.rev() for p in base.parents()], inclusive=True
1202 [p.rev() for p in base.parents()], inclusive=True
1200 )
1203 )
1201 else:
1204 else:
1202 ac = cl.ancestors([base.rev()], inclusive=True)
1205 ac = cl.ancestors([base.rev()], inclusive=True)
1203 base._ancestrycontext = ac
1206 base._ancestrycontext = ac
1204
1207
1205 return dagop.annotate(
1208 return dagop.annotate(
1206 base, parents, skiprevs=skiprevs, diffopts=diffopts
1209 base, parents, skiprevs=skiprevs, diffopts=diffopts
1207 )
1210 )
1208
1211
1209 def ancestors(self, followfirst=False):
1212 def ancestors(self, followfirst=False):
1210 visit = {}
1213 visit = {}
1211 c = self
1214 c = self
1212 if followfirst:
1215 if followfirst:
1213 cut = 1
1216 cut = 1
1214 else:
1217 else:
1215 cut = None
1218 cut = None
1216
1219
1217 while True:
1220 while True:
1218 for parent in c.parents()[:cut]:
1221 for parent in c.parents()[:cut]:
1219 visit[(parent.linkrev(), parent.filenode())] = parent
1222 visit[(parent.linkrev(), parent.filenode())] = parent
1220 if not visit:
1223 if not visit:
1221 break
1224 break
1222 c = visit.pop(max(visit))
1225 c = visit.pop(max(visit))
1223 yield c
1226 yield c
1224
1227
1225 def decodeddata(self):
1228 def decodeddata(self):
1226 """Returns `data()` after running repository decoding filters.
1229 """Returns `data()` after running repository decoding filters.
1227
1230
1228 This is often equivalent to how the data would be expressed on disk.
1231 This is often equivalent to how the data would be expressed on disk.
1229 """
1232 """
1230 return self._repo.wwritedata(self.path(), self.data())
1233 return self._repo.wwritedata(self.path(), self.data())
1231
1234
1232
1235
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one anchor is required to locate the file revision.
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        # Only seed the caches we were explicitly given; everything else
        # is filled in lazily by the propertycaches on basefilectx.
        if filelog is not None:
            self._filelog = filelog
        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # A linkrev may point at any revision, including ones hidden by
            # the current repoview; fall back to the unfiltered repository
            # in that case.
            #
            # This is a cheap and dirty fix that prevents several crashes;
            # it does not guarantee correct behavior. But linkrev handling
            # was not correct before filtering either, and "incorrect" is
            # better than "crash". `changectx` objects obtained from a
            # `filectx` are not used in complex operations that care about
            # filtering, so the damage is contained. Proper handling
            # belongs with a real fix for the general linkrev-vs-filtering
            # problem.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """Open an arbitrary revision of this file, reusing our filelog."""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Revision data exactly as stored (revlog flags not processed)."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the configured censor policy."""
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Size in bytes of this file revision."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """
        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            # The file revision was introduced right here: report the copy.
            return renamed

        # Otherwise only report the copy when neither changeset parent
        # already carries this exact file revision.
        name = self.path()
        fnode = self._filenode
        for parent in self._changectx.parents():
            try:
                if fnode == parent.filenode(name):
                    return None
            except error.LookupError:
                # the file does not exist in this parent
                pass
        return renamed

    def children(self):
        """Child file contexts. Renames are not followed (hard problem)."""
        childnodes = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=n, filelog=self._filelog)
            for n in childnodes
        ]
1357
1360
1358
1361
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        # Only pre-seed the lazily computed attributes we were given;
        # missing ones fall back to the propertycaches below.
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # There is no real node yet; report the virtual "working directory"
        # hex id, as workingctx.hex() does. (Previously this statement was
        # missing `return` and silently produced None.)
        return self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # Fallback when no explicit `changes` was passed to __init__.
        return self._repo.status()

    @propertycache
    def _user(self):
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # An uncommitted context has no subrepo revision yet.
        return None

    def manifestnode(self):
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Sorted list of files touched (modified + added + removed)."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # For an uncommitted context the commit-time file lists are simply
    # the current status lists.
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # A pending commit inherits the bookmarks of all its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # Never commit in a lower phase than any of the parents.
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # Yield the parents themselves first, then their ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        # Subclasses with real pending changes override this.
        return False
1526
1529
1527
1530
1528 class workingctx(committablectx):
1531 class workingctx(committablectx):
1529 """A workingctx object makes access to data related to
1532 """A workingctx object makes access to data related to
1530 the current working directory convenient.
1533 the current working directory convenient.
1531 date - any valid date string or (unixtime, offset), or None.
1534 date - any valid date string or (unixtime, offset), or None.
1532 user - username string, or None.
1535 user - username string, or None.
1533 extra - a dictionary of extra values, or None.
1536 extra - a dictionary of extra values, or None.
1534 changes - a list of file lists as returned by localrepo.status()
1537 changes - a list of file lists as returned by localrepo.status()
1535 or None to use the repository status.
1538 or None to use the repository status.
1536 """
1539 """
1537
1540
1538 def __init__(
1541 def __init__(
1539 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1542 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1540 ):
1543 ):
1541 branch = None
1544 branch = None
1542 if not extra or b'branch' not in extra:
1545 if not extra or b'branch' not in extra:
1543 try:
1546 try:
1544 branch = repo.dirstate.branch()
1547 branch = repo.dirstate.branch()
1545 except UnicodeDecodeError:
1548 except UnicodeDecodeError:
1546 raise error.Abort(_(b'branch name not in UTF-8!'))
1549 raise error.Abort(_(b'branch name not in UTF-8!'))
1547 super(workingctx, self).__init__(
1550 super(workingctx, self).__init__(
1548 repo, text, user, date, extra, changes, branch=branch
1551 repo, text, user, date, extra, changes, branch=branch
1549 )
1552 )
1550
1553
1551 def __iter__(self):
1554 def __iter__(self):
1552 d = self._repo.dirstate
1555 d = self._repo.dirstate
1553 for f in d:
1556 for f in d:
1554 if d.get_entry(f).tracked:
1557 if d.get_entry(f).tracked:
1555 yield f
1558 yield f
1556
1559
1557 def __contains__(self, key):
1560 def __contains__(self, key):
1558 return self._repo.dirstate.get_entry(key).tracked
1561 return self._repo.dirstate.get_entry(key).tracked
1559
1562
1560 def hex(self):
1563 def hex(self):
1561 return self._repo.nodeconstants.wdirhex
1564 return self._repo.nodeconstants.wdirhex
1562
1565
1563 @propertycache
1566 @propertycache
1564 def _parents(self):
1567 def _parents(self):
1565 p = self._repo.dirstate.parents()
1568 p = self._repo.dirstate.parents()
1566 if p[1] == self._repo.nodeconstants.nullid:
1569 if p[1] == self._repo.nodeconstants.nullid:
1567 p = p[:-1]
1570 p = p[:-1]
1568 # use unfiltered repo to delay/avoid loading obsmarkers
1571 # use unfiltered repo to delay/avoid loading obsmarkers
1569 unfi = self._repo.unfiltered()
1572 unfi = self._repo.unfiltered()
1570 return [
1573 return [
1571 changectx(
1574 changectx(
1572 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1575 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1573 )
1576 )
1574 for n in p
1577 for n in p
1575 ]
1578 ]
1576
1579
1577 def setparents(self, p1node, p2node=None):
1580 def setparents(self, p1node, p2node=None):
1578 if p2node is None:
1581 if p2node is None:
1579 p2node = self._repo.nodeconstants.nullid
1582 p2node = self._repo.nodeconstants.nullid
1580 dirstate = self._repo.dirstate
1583 dirstate = self._repo.dirstate
1581 with dirstate.parentchange():
1584 with dirstate.parentchange():
1582 copies = dirstate.setparents(p1node, p2node)
1585 copies = dirstate.setparents(p1node, p2node)
1583 pctx = self._repo[p1node]
1586 pctx = self._repo[p1node]
1584 if copies:
1587 if copies:
1585 # Adjust copy records, the dirstate cannot do it, it
1588 # Adjust copy records, the dirstate cannot do it, it
1586 # requires access to parents manifests. Preserve them
1589 # requires access to parents manifests. Preserve them
1587 # only for entries added to first parent.
1590 # only for entries added to first parent.
1588 for f in copies:
1591 for f in copies:
1589 if f not in pctx and copies[f] in pctx:
1592 if f not in pctx and copies[f] in pctx:
1590 dirstate.copy(copies[f], f)
1593 dirstate.copy(copies[f], f)
1591 if p2node == self._repo.nodeconstants.nullid:
1594 if p2node == self._repo.nodeconstants.nullid:
1592 for f, s in sorted(dirstate.copies().items()):
1595 for f, s in sorted(dirstate.copies().items()):
1593 if f not in pctx and s not in pctx:
1596 if f not in pctx and s not in pctx:
1594 dirstate.copy(None, f)
1597 dirstate.copy(None, f)
1595
1598
    def _fileinfo(self, path):
        # populate __dict__['_manifest'] as workingctx has no _manifestdelta;
        # the superclass implementation would otherwise look for one. The
        # bare attribute access is intentional: it forces the propertycache.
        self._manifest
        return super(workingctx, self)._fileinfo(path)
1600
1603
1601 def _buildflagfunc(self):
1604 def _buildflagfunc(self):
1602 # Create a fallback function for getting file flags when the
1605 # Create a fallback function for getting file flags when the
1603 # filesystem doesn't support them
1606 # filesystem doesn't support them
1604
1607
1605 copiesget = self._repo.dirstate.copies().get
1608 copiesget = self._repo.dirstate.copies().get
1606 parents = self.parents()
1609 parents = self.parents()
1607 if len(parents) < 2:
1610 if len(parents) < 2:
1608 # when we have one parent, it's easy: copy from parent
1611 # when we have one parent, it's easy: copy from parent
1609 man = parents[0].manifest()
1612 man = parents[0].manifest()
1610
1613
1611 def func(f):
1614 def func(f):
1612 f = copiesget(f, f)
1615 f = copiesget(f, f)
1613 return man.flags(f)
1616 return man.flags(f)
1614
1617
1615 else:
1618 else:
1616 # merges are tricky: we try to reconstruct the unstored
1619 # merges are tricky: we try to reconstruct the unstored
1617 # result from the merge (issue1802)
1620 # result from the merge (issue1802)
1618 p1, p2 = parents
1621 p1, p2 = parents
1619 pa = p1.ancestor(p2)
1622 pa = p1.ancestor(p2)
1620 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1623 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1621
1624
1622 def func(f):
1625 def func(f):
1623 f = copiesget(f, f) # may be wrong for merges with copies
1626 f = copiesget(f, f) # may be wrong for merges with copies
1624 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1627 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1625 if fl1 == fl2:
1628 if fl1 == fl2:
1626 return fl1
1629 return fl1
1627 if fl1 == fla:
1630 if fl1 == fla:
1628 return fl2
1631 return fl2
1629 if fl2 == fla:
1632 if fl2 == fla:
1630 return fl1
1633 return fl1
1631 return b'' # punt for conflicts
1634 return b'' # punt for conflicts
1632
1635
1633 return func
1636 return func
1634
1637
    @propertycache
    def _flagfunc(self):
        # Ask the dirstate for a flags function, handing it our
        # manifest-based fallback for filesystems that cannot report
        # exec/symlink flags themselves.
        return self._repo.dirstate.flagfunc(self._buildflagfunc)
1638
1641
1639 def flags(self, path):
1642 def flags(self, path):
1640 try:
1643 try:
1641 return self._flagfunc(path)
1644 return self._flagfunc(path)
1642 except OSError:
1645 except OSError:
1643 return b''
1646 return b''
1644
1647
    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns a workingfilectx bound to this workingctx; `filelog` may be
        pre-supplied to avoid opening it again."""
        return workingfilectx(
            self._repo, path, workingctx=self, filelog=filelog
        )
1650
1653
1651 def dirty(self, missing=False, merge=True, branch=True):
1654 def dirty(self, missing=False, merge=True, branch=True):
1652 """check whether a working directory is modified"""
1655 """check whether a working directory is modified"""
1653 # check subrepos first
1656 # check subrepos first
1654 for s in sorted(self.substate):
1657 for s in sorted(self.substate):
1655 if self.sub(s).dirty(missing=missing):
1658 if self.sub(s).dirty(missing=missing):
1656 return True
1659 return True
1657 # check current working dir
1660 # check current working dir
1658 return (
1661 return (
1659 (merge and self.p2())
1662 (merge and self.p2())
1660 or (branch and self.branch() != self.p1().branch())
1663 or (branch and self.branch() != self.p1().branch())
1661 or self.modified()
1664 or self.modified()
1662 or self.added()
1665 or self.added()
1663 or self.removed()
1666 or self.removed()
1664 or (missing and self.deleted())
1667 or (missing and self.deleted())
1665 )
1668 )
1666
1669
1667 def add(self, list, prefix=b""):
1670 def add(self, list, prefix=b""):
1668 with self._repo.wlock():
1671 with self._repo.wlock():
1669 ui, ds = self._repo.ui, self._repo.dirstate
1672 ui, ds = self._repo.ui, self._repo.dirstate
1670 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1673 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1671 rejected = []
1674 rejected = []
1672 lstat = self._repo.wvfs.lstat
1675 lstat = self._repo.wvfs.lstat
1673 for f in list:
1676 for f in list:
1674 # ds.pathto() returns an absolute file when this is invoked from
1677 # ds.pathto() returns an absolute file when this is invoked from
1675 # the keyword extension. That gets flagged as non-portable on
1678 # the keyword extension. That gets flagged as non-portable on
1676 # Windows, since it contains the drive letter and colon.
1679 # Windows, since it contains the drive letter and colon.
1677 scmutil.checkportable(ui, os.path.join(prefix, f))
1680 scmutil.checkportable(ui, os.path.join(prefix, f))
1678 try:
1681 try:
1679 st = lstat(f)
1682 st = lstat(f)
1680 except OSError:
1683 except OSError:
1681 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1684 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1682 rejected.append(f)
1685 rejected.append(f)
1683 continue
1686 continue
1684 limit = ui.configbytes(b'ui', b'large-file-limit')
1687 limit = ui.configbytes(b'ui', b'large-file-limit')
1685 if limit != 0 and st.st_size > limit:
1688 if limit != 0 and st.st_size > limit:
1686 ui.warn(
1689 ui.warn(
1687 _(
1690 _(
1688 b"%s: up to %d MB of RAM may be required "
1691 b"%s: up to %d MB of RAM may be required "
1689 b"to manage this file\n"
1692 b"to manage this file\n"
1690 b"(use 'hg revert %s' to cancel the "
1693 b"(use 'hg revert %s' to cancel the "
1691 b"pending addition)\n"
1694 b"pending addition)\n"
1692 )
1695 )
1693 % (f, 3 * st.st_size // 1000000, uipath(f))
1696 % (f, 3 * st.st_size // 1000000, uipath(f))
1694 )
1697 )
1695 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1698 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1696 ui.warn(
1699 ui.warn(
1697 _(
1700 _(
1698 b"%s not added: only files and symlinks "
1701 b"%s not added: only files and symlinks "
1699 b"supported currently\n"
1702 b"supported currently\n"
1700 )
1703 )
1701 % uipath(f)
1704 % uipath(f)
1702 )
1705 )
1703 rejected.append(f)
1706 rejected.append(f)
1704 elif not ds.set_tracked(f):
1707 elif not ds.set_tracked(f):
1705 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1708 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1706 return rejected
1709 return rejected
1707
1710
1708 def forget(self, files, prefix=b""):
1711 def forget(self, files, prefix=b""):
1709 with self._repo.wlock():
1712 with self._repo.wlock():
1710 ds = self._repo.dirstate
1713 ds = self._repo.dirstate
1711 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1714 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1712 rejected = []
1715 rejected = []
1713 for f in files:
1716 for f in files:
1714 if not ds.set_untracked(f):
1717 if not ds.set_untracked(f):
1715 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1718 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1716 rejected.append(f)
1719 rejected.append(f)
1717 return rejected
1720 return rejected
1718
1721
1719 def copy(self, source, dest):
1722 def copy(self, source, dest):
1720 try:
1723 try:
1721 st = self._repo.wvfs.lstat(dest)
1724 st = self._repo.wvfs.lstat(dest)
1722 except OSError as err:
1725 except OSError as err:
1723 if err.errno != errno.ENOENT:
1726 if err.errno != errno.ENOENT:
1724 raise
1727 raise
1725 self._repo.ui.warn(
1728 self._repo.ui.warn(
1726 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1729 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1727 )
1730 )
1728 return
1731 return
1729 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1732 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1730 self._repo.ui.warn(
1733 self._repo.ui.warn(
1731 _(b"copy failed: %s is not a file or a symbolic link\n")
1734 _(b"copy failed: %s is not a file or a symbolic link\n")
1732 % self._repo.dirstate.pathto(dest)
1735 % self._repo.dirstate.pathto(dest)
1733 )
1736 )
1734 else:
1737 else:
1735 with self._repo.wlock():
1738 with self._repo.wlock():
1736 ds = self._repo.dirstate
1739 ds = self._repo.dirstate
1737 ds.set_tracked(dest)
1740 ds.set_tracked(dest)
1738 ds.copy(source, dest)
1741 ds.copy(source, dest)
1739
1742
1740 def match(
1743 def match(
1741 self,
1744 self,
1742 pats=None,
1745 pats=None,
1743 include=None,
1746 include=None,
1744 exclude=None,
1747 exclude=None,
1745 default=b'glob',
1748 default=b'glob',
1746 listsubrepos=False,
1749 listsubrepos=False,
1747 badfn=None,
1750 badfn=None,
1748 cwd=None,
1751 cwd=None,
1749 ):
1752 ):
1750 r = self._repo
1753 r = self._repo
1751 if not cwd:
1754 if not cwd:
1752 cwd = r.getcwd()
1755 cwd = r.getcwd()
1753
1756
1754 # Only a case insensitive filesystem needs magic to translate user input
1757 # Only a case insensitive filesystem needs magic to translate user input
1755 # to actual case in the filesystem.
1758 # to actual case in the filesystem.
1756 icasefs = not util.fscasesensitive(r.root)
1759 icasefs = not util.fscasesensitive(r.root)
1757 return matchmod.match(
1760 return matchmod.match(
1758 r.root,
1761 r.root,
1759 cwd,
1762 cwd,
1760 pats,
1763 pats,
1761 include,
1764 include,
1762 exclude,
1765 exclude,
1763 default,
1766 default,
1764 auditor=r.auditor,
1767 auditor=r.auditor,
1765 ctx=self,
1768 ctx=self,
1766 listsubrepos=listsubrepos,
1769 listsubrepos=listsubrepos,
1767 badfn=badfn,
1770 badfn=badfn,
1768 icasefs=icasefs,
1771 icasefs=icasefs,
1769 )
1772 )
1770
1773
1771 def _filtersuspectsymlink(self, files):
1774 def _filtersuspectsymlink(self, files):
1772 if not files or self._repo.dirstate._checklink:
1775 if not files or self._repo.dirstate._checklink:
1773 return files
1776 return files
1774
1777
1775 # Symlink placeholders may get non-symlink-like contents
1778 # Symlink placeholders may get non-symlink-like contents
1776 # via user error or dereferencing by NFS or Samba servers,
1779 # via user error or dereferencing by NFS or Samba servers,
1777 # so we filter out any placeholders that don't look like a
1780 # so we filter out any placeholders that don't look like a
1778 # symlink
1781 # symlink
1779 sane = []
1782 sane = []
1780 for f in files:
1783 for f in files:
1781 if self.flags(f) == b'l':
1784 if self.flags(f) == b'l':
1782 d = self[f].data()
1785 d = self[f].data()
1783 if (
1786 if (
1784 d == b''
1787 d == b''
1785 or len(d) >= 1024
1788 or len(d) >= 1024
1786 or b'\n' in d
1789 or b'\n' in d
1787 or stringutil.binary(d)
1790 or stringutil.binary(d)
1788 ):
1791 ):
1789 self._repo.ui.debug(
1792 self._repo.ui.debug(
1790 b'ignoring suspect symlink placeholder "%s"\n' % f
1793 b'ignoring suspect symlink placeholder "%s"\n' % f
1791 )
1794 )
1792 continue
1795 continue
1793 sane.append(f)
1796 sane.append(f)
1794 return sane
1797 return sane
1795
1798
1796 def _checklookup(self, files):
1799 def _checklookup(self, files):
1797 # check for any possibly clean files
1800 # check for any possibly clean files
1798 if not files:
1801 if not files:
1799 return [], [], []
1802 return [], [], []
1800
1803
1801 modified = []
1804 modified = []
1802 deleted = []
1805 deleted = []
1803 fixup = []
1806 fixup = []
1804 pctx = self._parents[0]
1807 pctx = self._parents[0]
1805 # do a full compare of any files that might have changed
1808 # do a full compare of any files that might have changed
1806 for f in sorted(files):
1809 for f in sorted(files):
1807 try:
1810 try:
1808 # This will return True for a file that got replaced by a
1811 # This will return True for a file that got replaced by a
1809 # directory in the interim, but fixing that is pretty hard.
1812 # directory in the interim, but fixing that is pretty hard.
1810 if (
1813 if (
1811 f not in pctx
1814 f not in pctx
1812 or self.flags(f) != pctx.flags(f)
1815 or self.flags(f) != pctx.flags(f)
1813 or pctx[f].cmp(self[f])
1816 or pctx[f].cmp(self[f])
1814 ):
1817 ):
1815 modified.append(f)
1818 modified.append(f)
1816 else:
1819 else:
1817 fixup.append(f)
1820 # XXX note that we have a race windows here since we gather
1821 # the stats after we compared so the file might have
1822 # changed.
1823 #
1824 # However this have always been the case and the
1825 # refactoring moving the code here is improving the
1826 # situation by narrowing the race and moving the two steps
1827 # (comparison + stat) in the same location.
1828 #
1829 # Making this code "correct" is now possible.
1830 s = self[f].lstat()
1831 mode = s.st_mode
1832 size = s.st_size
1833 mtime = timestamp.mtime_of(s)
1834 fixup.append((f, (mode, size, mtime)))
1818 except (IOError, OSError):
1835 except (IOError, OSError):
1819 # A file become inaccessible in between? Mark it as deleted,
1836 # A file become inaccessible in between? Mark it as deleted,
1820 # matching dirstate behavior (issue5584).
1837 # matching dirstate behavior (issue5584).
1821 # The dirstate has more complex behavior around whether a
1838 # The dirstate has more complex behavior around whether a
1822 # missing file matches a directory, etc, but we don't need to
1839 # missing file matches a directory, etc, but we don't need to
1823 # bother with that: if f has made it to this point, we're sure
1840 # bother with that: if f has made it to this point, we're sure
1824 # it's in the dirstate.
1841 # it's in the dirstate.
1825 deleted.append(f)
1842 deleted.append(f)
1826
1843
1827 return modified, deleted, fixup
1844 return modified, deleted, fixup
1828
1845
1829 def _poststatusfixup(self, status, fixup):
1846 def _poststatusfixup(self, status, fixup):
1830 """update dirstate for files that are actually clean"""
1847 """update dirstate for files that are actually clean"""
1831 poststatus = self._repo.postdsstatus()
1848 poststatus = self._repo.postdsstatus()
1832 if fixup or poststatus or self._repo.dirstate._dirty:
1849 if fixup or poststatus or self._repo.dirstate._dirty:
1833 try:
1850 try:
1834 oldid = self._repo.dirstate.identity()
1851 oldid = self._repo.dirstate.identity()
1835
1852
1836 # updating the dirstate is optional
1853 # updating the dirstate is optional
1837 # so we don't wait on the lock
1854 # so we don't wait on the lock
1838 # wlock can invalidate the dirstate, so cache normal _after_
1855 # wlock can invalidate the dirstate, so cache normal _after_
1839 # taking the lock
1856 # taking the lock
1840 with self._repo.wlock(False):
1857 with self._repo.wlock(False):
1841 dirstate = self._repo.dirstate
1858 dirstate = self._repo.dirstate
1842 if dirstate.identity() == oldid:
1859 if dirstate.identity() == oldid:
1843 if fixup:
1860 if fixup:
1844 if dirstate.pendingparentchange():
1861 if dirstate.pendingparentchange():
1845 normal = lambda f: dirstate.update_file(
1862 normal = lambda f, pfd: dirstate.update_file(
1846 f, p1_tracked=True, wc_tracked=True
1863 f, p1_tracked=True, wc_tracked=True
1847 )
1864 )
1848 else:
1865 else:
1849 normal = dirstate.set_clean
1866 normal = dirstate.set_clean
1850 for f in fixup:
1867 for f, pdf in fixup:
1851 normal(f)
1868 normal(f, pdf)
1852 # write changes out explicitly, because nesting
1869 # write changes out explicitly, because nesting
1853 # wlock at runtime may prevent 'wlock.release()'
1870 # wlock at runtime may prevent 'wlock.release()'
1854 # after this block from doing so for subsequent
1871 # after this block from doing so for subsequent
1855 # changing files
1872 # changing files
1856 tr = self._repo.currenttransaction()
1873 tr = self._repo.currenttransaction()
1857 self._repo.dirstate.write(tr)
1874 self._repo.dirstate.write(tr)
1858
1875
1859 if poststatus:
1876 if poststatus:
1860 for ps in poststatus:
1877 for ps in poststatus:
1861 ps(self, status)
1878 ps(self, status)
1862 else:
1879 else:
1863 # in this case, writing changes out breaks
1880 # in this case, writing changes out breaks
1864 # consistency, because .hg/dirstate was
1881 # consistency, because .hg/dirstate was
1865 # already changed simultaneously after last
1882 # already changed simultaneously after last
1866 # caching (see also issue5584 for detail)
1883 # caching (see also issue5584 for detail)
1867 self._repo.ui.debug(
1884 self._repo.ui.debug(
1868 b'skip updating dirstate: identity mismatch\n'
1885 b'skip updating dirstate: identity mismatch\n'
1869 )
1886 )
1870 except error.LockError:
1887 except error.LockError:
1871 pass
1888 pass
1872 finally:
1889 finally:
1873 # Even if the wlock couldn't be grabbed, clear out the list.
1890 # Even if the wlock couldn't be grabbed, clear out the list.
1874 self._repo.clearpostdsstatus()
1891 self._repo.clearpostdsstatus()
1875
1892
1876 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1893 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1877 '''Gets the status from the dirstate -- internal use only.'''
1894 '''Gets the status from the dirstate -- internal use only.'''
1878 subrepos = []
1895 subrepos = []
1879 if b'.hgsub' in self:
1896 if b'.hgsub' in self:
1880 subrepos = sorted(self.substate)
1897 subrepos = sorted(self.substate)
1881 cmp, s = self._repo.dirstate.status(
1898 cmp, s = self._repo.dirstate.status(
1882 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1899 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1883 )
1900 )
1884
1901
1885 # check for any possibly clean files
1902 # check for any possibly clean files
1886 fixup = []
1903 fixup = []
1887 if cmp:
1904 if cmp:
1888 modified2, deleted2, fixup = self._checklookup(cmp)
1905 modified2, deleted2, fixup = self._checklookup(cmp)
1889 s.modified.extend(modified2)
1906 s.modified.extend(modified2)
1890 s.deleted.extend(deleted2)
1907 s.deleted.extend(deleted2)
1891
1908
1892 if fixup and clean:
1909 if fixup and clean:
1893 s.clean.extend(fixup)
1910 s.clean.extend((f for f, _ in fixup))
1894
1911
1895 self._poststatusfixup(s, fixup)
1912 self._poststatusfixup(s, fixup)
1896
1913
1897 if match.always():
1914 if match.always():
1898 # cache for performance
1915 # cache for performance
1899 if s.unknown or s.ignored or s.clean:
1916 if s.unknown or s.ignored or s.clean:
1900 # "_status" is cached with list*=False in the normal route
1917 # "_status" is cached with list*=False in the normal route
1901 self._status = scmutil.status(
1918 self._status = scmutil.status(
1902 s.modified, s.added, s.removed, s.deleted, [], [], []
1919 s.modified, s.added, s.removed, s.deleted, [], [], []
1903 )
1920 )
1904 else:
1921 else:
1905 self._status = s
1922 self._status = s
1906
1923
1907 return s
1924 return s
1908
1925
1909 @propertycache
1926 @propertycache
1910 def _copies(self):
1927 def _copies(self):
1911 p1copies = {}
1928 p1copies = {}
1912 p2copies = {}
1929 p2copies = {}
1913 parents = self._repo.dirstate.parents()
1930 parents = self._repo.dirstate.parents()
1914 p1manifest = self._repo[parents[0]].manifest()
1931 p1manifest = self._repo[parents[0]].manifest()
1915 p2manifest = self._repo[parents[1]].manifest()
1932 p2manifest = self._repo[parents[1]].manifest()
1916 changedset = set(self.added()) | set(self.modified())
1933 changedset = set(self.added()) | set(self.modified())
1917 narrowmatch = self._repo.narrowmatch()
1934 narrowmatch = self._repo.narrowmatch()
1918 for dst, src in self._repo.dirstate.copies().items():
1935 for dst, src in self._repo.dirstate.copies().items():
1919 if dst not in changedset or not narrowmatch(dst):
1936 if dst not in changedset or not narrowmatch(dst):
1920 continue
1937 continue
1921 if src in p1manifest:
1938 if src in p1manifest:
1922 p1copies[dst] = src
1939 p1copies[dst] = src
1923 elif src in p2manifest:
1940 elif src in p2manifest:
1924 p2copies[dst] = src
1941 p2copies[dst] = src
1925 return p1copies, p2copies
1942 return p1copies, p2copies
1926
1943
1927 @propertycache
1944 @propertycache
1928 def _manifest(self):
1945 def _manifest(self):
1929 """generate a manifest corresponding to the values in self._status
1946 """generate a manifest corresponding to the values in self._status
1930
1947
1931 This reuse the file nodeid from parent, but we use special node
1948 This reuse the file nodeid from parent, but we use special node
1932 identifiers for added and modified files. This is used by manifests
1949 identifiers for added and modified files. This is used by manifests
1933 merge to see that files are different and by update logic to avoid
1950 merge to see that files are different and by update logic to avoid
1934 deleting newly added files.
1951 deleting newly added files.
1935 """
1952 """
1936 return self._buildstatusmanifest(self._status)
1953 return self._buildstatusmanifest(self._status)
1937
1954
1938 def _buildstatusmanifest(self, status):
1955 def _buildstatusmanifest(self, status):
1939 """Builds a manifest that includes the given status results."""
1956 """Builds a manifest that includes the given status results."""
1940 parents = self.parents()
1957 parents = self.parents()
1941
1958
1942 man = parents[0].manifest().copy()
1959 man = parents[0].manifest().copy()
1943
1960
1944 ff = self._flagfunc
1961 ff = self._flagfunc
1945 for i, l in (
1962 for i, l in (
1946 (self._repo.nodeconstants.addednodeid, status.added),
1963 (self._repo.nodeconstants.addednodeid, status.added),
1947 (self._repo.nodeconstants.modifiednodeid, status.modified),
1964 (self._repo.nodeconstants.modifiednodeid, status.modified),
1948 ):
1965 ):
1949 for f in l:
1966 for f in l:
1950 man[f] = i
1967 man[f] = i
1951 try:
1968 try:
1952 man.setflag(f, ff(f))
1969 man.setflag(f, ff(f))
1953 except OSError:
1970 except OSError:
1954 pass
1971 pass
1955
1972
1956 for f in status.deleted + status.removed:
1973 for f in status.deleted + status.removed:
1957 if f in man:
1974 if f in man:
1958 del man[f]
1975 del man[f]
1959
1976
1960 return man
1977 return man
1961
1978
1962 def _buildstatus(
1979 def _buildstatus(
1963 self, other, s, match, listignored, listclean, listunknown
1980 self, other, s, match, listignored, listclean, listunknown
1964 ):
1981 ):
1965 """build a status with respect to another context
1982 """build a status with respect to another context
1966
1983
1967 This includes logic for maintaining the fast path of status when
1984 This includes logic for maintaining the fast path of status when
1968 comparing the working directory against its parent, which is to skip
1985 comparing the working directory against its parent, which is to skip
1969 building a new manifest if self (working directory) is not comparing
1986 building a new manifest if self (working directory) is not comparing
1970 against its parent (repo['.']).
1987 against its parent (repo['.']).
1971 """
1988 """
1972 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1989 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1973 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1990 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1974 # might have accidentally ended up with the entire contents of the file
1991 # might have accidentally ended up with the entire contents of the file
1975 # they are supposed to be linking to.
1992 # they are supposed to be linking to.
1976 s.modified[:] = self._filtersuspectsymlink(s.modified)
1993 s.modified[:] = self._filtersuspectsymlink(s.modified)
1977 if other != self._repo[b'.']:
1994 if other != self._repo[b'.']:
1978 s = super(workingctx, self)._buildstatus(
1995 s = super(workingctx, self)._buildstatus(
1979 other, s, match, listignored, listclean, listunknown
1996 other, s, match, listignored, listclean, listunknown
1980 )
1997 )
1981 return s
1998 return s
1982
1999
1983 def _matchstatus(self, other, match):
2000 def _matchstatus(self, other, match):
1984 """override the match method with a filter for directory patterns
2001 """override the match method with a filter for directory patterns
1985
2002
1986 We use inheritance to customize the match.bad method only in cases of
2003 We use inheritance to customize the match.bad method only in cases of
1987 workingctx since it belongs only to the working directory when
2004 workingctx since it belongs only to the working directory when
1988 comparing against the parent changeset.
2005 comparing against the parent changeset.
1989
2006
1990 If we aren't comparing against the working directory's parent, then we
2007 If we aren't comparing against the working directory's parent, then we
1991 just use the default match object sent to us.
2008 just use the default match object sent to us.
1992 """
2009 """
1993 if other != self._repo[b'.']:
2010 if other != self._repo[b'.']:
1994
2011
1995 def bad(f, msg):
2012 def bad(f, msg):
1996 # 'f' may be a directory pattern from 'match.files()',
2013 # 'f' may be a directory pattern from 'match.files()',
1997 # so 'f not in ctx1' is not enough
2014 # so 'f not in ctx1' is not enough
1998 if f not in other and not other.hasdir(f):
2015 if f not in other and not other.hasdir(f):
1999 self._repo.ui.warn(
2016 self._repo.ui.warn(
2000 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2017 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2001 )
2018 )
2002
2019
2003 match.bad = bad
2020 match.bad = bad
2004 return match
2021 return match
2005
2022
2006 def walk(self, match):
2023 def walk(self, match):
2007 '''Generates matching file names.'''
2024 '''Generates matching file names.'''
2008 return sorted(
2025 return sorted(
2009 self._repo.dirstate.walk(
2026 self._repo.dirstate.walk(
2010 self._repo.narrowmatch(match),
2027 self._repo.narrowmatch(match),
2011 subrepos=sorted(self.substate),
2028 subrepos=sorted(self.substate),
2012 unknown=True,
2029 unknown=True,
2013 ignored=False,
2030 ignored=False,
2014 )
2031 )
2015 )
2032 )
2016
2033
2017 def matches(self, match):
2034 def matches(self, match):
2018 match = self._repo.narrowmatch(match)
2035 match = self._repo.narrowmatch(match)
2019 ds = self._repo.dirstate
2036 ds = self._repo.dirstate
2020 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2037 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2021
2038
2022 def markcommitted(self, node):
2039 def markcommitted(self, node):
2023 with self._repo.dirstate.parentchange():
2040 with self._repo.dirstate.parentchange():
2024 for f in self.modified() + self.added():
2041 for f in self.modified() + self.added():
2025 self._repo.dirstate.update_file(
2042 self._repo.dirstate.update_file(
2026 f, p1_tracked=True, wc_tracked=True
2043 f, p1_tracked=True, wc_tracked=True
2027 )
2044 )
2028 for f in self.removed():
2045 for f in self.removed():
2029 self._repo.dirstate.update_file(
2046 self._repo.dirstate.update_file(
2030 f, p1_tracked=False, wc_tracked=False
2047 f, p1_tracked=False, wc_tracked=False
2031 )
2048 )
2032 self._repo.dirstate.setparents(node)
2049 self._repo.dirstate.setparents(node)
2033 self._repo._quick_access_changeid_invalidate()
2050 self._repo._quick_access_changeid_invalidate()
2034
2051
2035 sparse.aftercommit(self._repo, node)
2052 sparse.aftercommit(self._repo, node)
2036
2053
2037 # write changes out explicitly, because nesting wlock at
2054 # write changes out explicitly, because nesting wlock at
2038 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2055 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2039 # from immediately doing so for subsequent changing files
2056 # from immediately doing so for subsequent changing files
2040 self._repo.dirstate.write(self._repo.currenttransaction())
2057 self._repo.dirstate.write(self._repo.currenttransaction())
2041
2058
2042 def mergestate(self, clean=False):
2059 def mergestate(self, clean=False):
2043 if clean:
2060 if clean:
2044 return mergestatemod.mergestate.clean(self._repo)
2061 return mergestatemod.mergestate.clean(self._repo)
2045 return mergestatemod.mergestate.read(self._repo)
2062 return mergestatemod.mergestate.read(self._repo)
2046
2063
2047
2064
2048 class committablefilectx(basefilectx):
2065 class committablefilectx(basefilectx):
2049 """A committablefilectx provides common functionality for a file context
2066 """A committablefilectx provides common functionality for a file context
2050 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2067 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
2051
2068
2052 def __init__(self, repo, path, filelog=None, ctx=None):
2069 def __init__(self, repo, path, filelog=None, ctx=None):
2053 self._repo = repo
2070 self._repo = repo
2054 self._path = path
2071 self._path = path
2055 self._changeid = None
2072 self._changeid = None
2056 self._filerev = self._filenode = None
2073 self._filerev = self._filenode = None
2057
2074
2058 if filelog is not None:
2075 if filelog is not None:
2059 self._filelog = filelog
2076 self._filelog = filelog
2060 if ctx:
2077 if ctx:
2061 self._changectx = ctx
2078 self._changectx = ctx
2062
2079
2063 def __nonzero__(self):
2080 def __nonzero__(self):
2064 return True
2081 return True
2065
2082
2066 __bool__ = __nonzero__
2083 __bool__ = __nonzero__
2067
2084
2068 def linkrev(self):
2085 def linkrev(self):
2069 # linked to self._changectx no matter if file is modified or not
2086 # linked to self._changectx no matter if file is modified or not
2070 return self.rev()
2087 return self.rev()
2071
2088
2072 def renamed(self):
2089 def renamed(self):
2073 path = self.copysource()
2090 path = self.copysource()
2074 if not path:
2091 if not path:
2075 return None
2092 return None
2076 return (
2093 return (
2077 path,
2094 path,
2078 self._changectx._parents[0]._manifest.get(
2095 self._changectx._parents[0]._manifest.get(
2079 path, self._repo.nodeconstants.nullid
2096 path, self._repo.nodeconstants.nullid
2080 ),
2097 ),
2081 )
2098 )
2082
2099
2083 def parents(self):
2100 def parents(self):
2084 '''return parent filectxs, following copies if necessary'''
2101 '''return parent filectxs, following copies if necessary'''
2085
2102
2086 def filenode(ctx, path):
2103 def filenode(ctx, path):
2087 return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
2104 return ctx._manifest.get(path, self._repo.nodeconstants.nullid)
2088
2105
2089 path = self._path
2106 path = self._path
2090 fl = self._filelog
2107 fl = self._filelog
2091 pcl = self._changectx._parents
2108 pcl = self._changectx._parents
2092 renamed = self.renamed()
2109 renamed = self.renamed()
2093
2110
2094 if renamed:
2111 if renamed:
2095 pl = [renamed + (None,)]
2112 pl = [renamed + (None,)]
2096 else:
2113 else:
2097 pl = [(path, filenode(pcl[0], path), fl)]
2114 pl = [(path, filenode(pcl[0], path), fl)]
2098
2115
2099 for pc in pcl[1:]:
2116 for pc in pcl[1:]:
2100 pl.append((path, filenode(pc, path), fl))
2117 pl.append((path, filenode(pc, path), fl))
2101
2118
2102 return [
2119 return [
2103 self._parentfilectx(p, fileid=n, filelog=l)
2120 self._parentfilectx(p, fileid=n, filelog=l)
2104 for p, n, l in pl
2121 for p, n, l in pl
2105 if n != self._repo.nodeconstants.nullid
2122 if n != self._repo.nodeconstants.nullid
2106 ]
2123 ]
2107
2124
2108 def children(self):
2125 def children(self):
2109 return []
2126 return []
2110
2127
2111
2128
2112 class workingfilectx(committablefilectx):
2129 class workingfilectx(committablefilectx):
2113 """A workingfilectx object makes access to data related to a particular
2130 """A workingfilectx object makes access to data related to a particular
2114 file in the working directory convenient."""
2131 file in the working directory convenient."""
2115
2132
2116 def __init__(self, repo, path, filelog=None, workingctx=None):
2133 def __init__(self, repo, path, filelog=None, workingctx=None):
2117 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2134 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2118
2135
2119 @propertycache
2136 @propertycache
2120 def _changectx(self):
2137 def _changectx(self):
2121 return workingctx(self._repo)
2138 return workingctx(self._repo)
2122
2139
2123 def data(self):
2140 def data(self):
2124 return self._repo.wread(self._path)
2141 return self._repo.wread(self._path)
2125
2142
2126 def copysource(self):
2143 def copysource(self):
2127 return self._repo.dirstate.copied(self._path)
2144 return self._repo.dirstate.copied(self._path)
2128
2145
2129 def size(self):
2146 def size(self):
2130 return self._repo.wvfs.lstat(self._path).st_size
2147 return self._repo.wvfs.lstat(self._path).st_size
2131
2148
2132 def lstat(self):
2149 def lstat(self):
2133 return self._repo.wvfs.lstat(self._path)
2150 return self._repo.wvfs.lstat(self._path)
2134
2151
2135 def date(self):
2152 def date(self):
2136 t, tz = self._changectx.date()
2153 t, tz = self._changectx.date()
2137 try:
2154 try:
2138 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2155 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2139 except OSError as err:
2156 except OSError as err:
2140 if err.errno != errno.ENOENT:
2157 if err.errno != errno.ENOENT:
2141 raise
2158 raise
2142 return (t, tz)
2159 return (t, tz)
2143
2160
2144 def exists(self):
2161 def exists(self):
2145 return self._repo.wvfs.exists(self._path)
2162 return self._repo.wvfs.exists(self._path)
2146
2163
2147 def lexists(self):
2164 def lexists(self):
2148 return self._repo.wvfs.lexists(self._path)
2165 return self._repo.wvfs.lexists(self._path)
2149
2166
2150 def audit(self):
2167 def audit(self):
2151 return self._repo.wvfs.audit(self._path)
2168 return self._repo.wvfs.audit(self._path)
2152
2169
2153 def cmp(self, fctx):
2170 def cmp(self, fctx):
2154 """compare with other file context
2171 """compare with other file context
2155
2172
2156 returns True if different than fctx.
2173 returns True if different than fctx.
2157 """
2174 """
2158 # fctx should be a filectx (not a workingfilectx)
2175 # fctx should be a filectx (not a workingfilectx)
2159 # invert comparison to reuse the same code path
2176 # invert comparison to reuse the same code path
2160 return fctx.cmp(self)
2177 return fctx.cmp(self)
2161
2178
2162 def remove(self, ignoremissing=False):
2179 def remove(self, ignoremissing=False):
2163 """wraps unlink for a repo's working directory"""
2180 """wraps unlink for a repo's working directory"""
2164 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2181 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2165 self._repo.wvfs.unlinkpath(
2182 self._repo.wvfs.unlinkpath(
2166 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2183 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2167 )
2184 )
2168
2185
2169 def write(self, data, flags, backgroundclose=False, **kwargs):
2186 def write(self, data, flags, backgroundclose=False, **kwargs):
2170 """wraps repo.wwrite"""
2187 """wraps repo.wwrite"""
2171 return self._repo.wwrite(
2188 return self._repo.wwrite(
2172 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2189 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2173 )
2190 )
2174
2191
2175 def markcopied(self, src):
2192 def markcopied(self, src):
2176 """marks this file a copy of `src`"""
2193 """marks this file a copy of `src`"""
2177 self._repo.dirstate.copy(src, self._path)
2194 self._repo.dirstate.copy(src, self._path)
2178
2195
2179 def clearunknown(self):
2196 def clearunknown(self):
2180 """Removes conflicting items in the working directory so that
2197 """Removes conflicting items in the working directory so that
2181 ``write()`` can be called successfully.
2198 ``write()`` can be called successfully.
2182 """
2199 """
2183 wvfs = self._repo.wvfs
2200 wvfs = self._repo.wvfs
2184 f = self._path
2201 f = self._path
2185 wvfs.audit(f)
2202 wvfs.audit(f)
2186 if self._repo.ui.configbool(
2203 if self._repo.ui.configbool(
2187 b'experimental', b'merge.checkpathconflicts'
2204 b'experimental', b'merge.checkpathconflicts'
2188 ):
2205 ):
2189 # remove files under the directory as they should already be
2206 # remove files under the directory as they should already be
2190 # warned and backed up
2207 # warned and backed up
2191 if wvfs.isdir(f) and not wvfs.islink(f):
2208 if wvfs.isdir(f) and not wvfs.islink(f):
2192 wvfs.rmtree(f, forcibly=True)
2209 wvfs.rmtree(f, forcibly=True)
2193 for p in reversed(list(pathutil.finddirs(f))):
2210 for p in reversed(list(pathutil.finddirs(f))):
2194 if wvfs.isfileorlink(p):
2211 if wvfs.isfileorlink(p):
2195 wvfs.unlink(p)
2212 wvfs.unlink(p)
2196 break
2213 break
2197 else:
2214 else:
2198 # don't remove files if path conflicts are not processed
2215 # don't remove files if path conflicts are not processed
2199 if wvfs.isdir(f) and not wvfs.islink(f):
2216 if wvfs.isdir(f) and not wvfs.islink(f):
2200 wvfs.removedirs(f)
2217 wvfs.removedirs(f)
2201
2218
2202 def setflags(self, l, x):
2219 def setflags(self, l, x):
2203 self._repo.wvfs.setflags(self._path, l, x)
2220 self._repo.wvfs.setflags(self._path, l, x)
2204
2221
2205
2222
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # Start with an empty write-back cache and no merge state.
        self.clean()

    def setbase(self, wrappedctx):
        """Point this overlay at a new underlying (base) context."""
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        """Set parents; p1 must match the wrapped context's node."""
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return file data for ``path`` from the overlay cache, falling
        back to the wrapped context when the overlay holds no data."""
        if not self.isdirty(path):
            return self._wrappedctx[path].data()
        entry = self._cache[path]
        if not entry[b'exists']:
            raise error.ProgrammingError(
                b"No such file or directory: %s" % path
            )
        if entry[b'data'] is not None:
            return entry[b'data']
        # Must fallback here, too, because we only set flags.
        return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Start from p1's manifest and overlay this context's changes.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # Flags of overlaid files come straight from the cache entries.
        def lookup(path):
            return self._cache[path][b'flags']

        return lookup

    def files(self):
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        # Cached files that also exist in the base context.
        return [
            f
            for f, entry in self._cache.items()
            if entry[b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        # Cached files absent from the base context.
        return [
            f
            for f, entry in self._cache.items()
            if entry[b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        # Cached deletions of files that exist in the base context.
        return [
            f
            for f, entry in self._cache.items()
            if not entry[b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        return True

    def filedate(self, path):
        if self.isdirty(path):
            return self._cache[path][b'date']
        return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        """Mark ``path`` as copied from ``origin``, preserving its current
        date and flags."""
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        if self.isdirty(path):
            return self._cache[path][b'copied']
        return None

    def flags(self, path):
        if not self.isdirty(path):
            return self._wrappedctx[path].flags()
        if self._cache[path][b'exists']:
            return self._cache[path][b'flags']
        raise error.ProgrammingError(
            b"No such file or directory: %s" % path
        )

    def __contains__(self, key):
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in pycompat.xrange(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # Link flag wins over exec flag, matching on-disk semantics.
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            entry = self._cache[path]
            if entry[b'exists'] and b'l' in entry[b'flags']:
                return self.exists(entry[b'data'].strip())
            return entry[b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        if not self.isdirty(path):
            return self._wrappedctx[path].size()
        if self._cache[path][b'exists']:
            return len(self._cache[path][b'data'])
        raise error.ProgrammingError(
            b"No such file or directory: %s" % path
        )

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            entry = self._cache[path]
            if entry[b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    entry[b'data'],
                    b'l' in entry[b'flags'],
                    b'x' in entry[b'flags'],
                    entry[b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor``, recording its hash in
        ``amend_source``."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        return path in self._cache

    def clean(self):
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        stale = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            entry = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == entry[b'data']
                    and underlying.flags() == entry[b'flags']
                ):
                    stale.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        for path in stale:
            del self._cache[path]
        return stale

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # The manifest overlay is derived from the cache; invalidate it.
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2623
2640
2624
2641
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # True when contents differ from ``fctx``.
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # No on-disk path to audit for an in-memory file.
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is meaningless for the in-memory cache.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        return self._parent.remove(self._path)

    def clearunknown(self):
        # Nothing on disk can conflict with an in-memory write.
        pass
2679
2696
2680
2697
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.
        """
        if clean:
            # Everything in the manifest that this commit does not touch.
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        modified = [f for f in self._status.modified if match(f)]
        added = [f for f in self._status.added if match(f)]
        removed = [f for f in self._status.removed if match(f)]
        # deleted/unknown/ignored are always empty for a commit context.
        return scmutil.status(modified, added, removed, [], [], [], clean)

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2723
2740
2724
2741
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a miss raises KeyError, at which point we compute and store.
        try:
            return cache[path]
        except KeyError:
            fctx = func(repo, memctx, path)
            cache[path] = fctx
            return fctx

    return getfilectx
2740
2757
2741
2758
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        fctx = ctx[path]
        source = fctx.copysource()
        return memfilectx(
            repo,
            memctx,
            path,
            fctx.data(),
            islink=fctx.islink(),
            isexec=fctx.isexec(),
            copysource=source,
        )

    return getfilectx
2763
2780
2764
2781
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        data, mode, copysource = patchstore.getfile(path)
        if data is None:
            # A deleted file: memctx records the deletion via a None return.
            return None
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2787
2804
2788
2805
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time while
    related files data and is made available through a callback
    mechanism.  'repo' is the current localrepo, 'parents' is a
    sequence of two parent revisions identifiers (pass None for every
    missing parent), 'text' is the commit message and 'files' lists
    names of files touched by the revision (normalized and relative to
    repository root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn return None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        # Not yet committed, so no revision number or node id exist yet.
        self._rev = None
        self._node = None
        # Substitute the null id for any missing (None) parent so we always
        # hold exactly two parent contexts.
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        # Deduplicate and sort the touched-file list for deterministic order.
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        # Accept several filectxfn flavors: a patch store, a plain context
        # acting as a file store, or an already-callable filectxfn.
        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        # Modified and added entries get sentinel node ids from
        # nodeconstants; removed entries are dropped from the copy.
        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # A file unknown to both parents is an addition; a managed file
        # whose filectx is None (see filectx()) is a removal; otherwise it
        # is a modification.
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # Hide a null second parent so single-parent commits report one
        # parent, mirroring regular changectx behavior.
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2919
2936
2920
2937
class memfilectx(committablefilectx):
    """An in-memory file pending commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copysource is the source file path if the current file was copied
        in the revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # Encode the symlink/exec status as a single manifest flag byte.
        if islink:
            flag = b'l'
        elif isexec:
            flag = b'x'
        else:
            flag = b''
        self._flags = flag
        self._copysource = copysource

    def copysource(self):
        """Return the copy source path, or None when this is not a copy."""
        return self._copysource

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        return self.data() != fctx.data()

    def data(self):
        """Return the in-memory file content."""
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        # Only the in-memory payload changes; flags are ignored here.
        self._data = data
2971
2988
2972
2989
class metadataonlyctx(committablectx):
    """Like memctx but it's reusing the manifest of different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is original revision which manifest we're reusing
    'parents' is a sequence of two parent revisions identifiers (pass None for
    every missing parent), 'text' is the commit.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        # Default the commit message to the original revision's description.
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        # Not yet committed, so no revision number or node id exist yet.
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        # Reuse the manifest node of the original revision verbatim.
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        # Copy before padding so a caller-supplied list is not mutated;
        # pad with the null revision up to exactly two parents.
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        """Return the reused manifest node id."""
        return self._manifestnode

    @property
    def _manifestctx(self):
        # Resolve the reused manifest node through the repo's manifest log.
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        """Delegate file context lookup to the original revision."""
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        # The manifest is the original revision's, unchanged.
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        # A file unknown to both parents is an addition; a managed file
        # absent from this context is a removal; otherwise a modification.
        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3081
3098
3082
3099
class arbitraryfilectx(object):
    """Filectx-like wrapper for a file at an arbitrary disk location,
    which need not live inside any working directory.
    """

    def __init__(self, path, repo=None):
        # repo may be None: contrib/simplemerge instantiates this class
        # without one.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``'s."""
        # filecmp follows symlinks whereas cmp() must not, so the on-disk
        # fast path only applies when neither side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_symlink and isinstance(fctx, workingfilectx) and self._repo:
            # Both sides are disk-backed: compare files directly. Note that
            # filecmp.cmp() returns True for identical files — the opposite
            # of our convention (True means different) — hence the negation.
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        """Return the on-disk path this context wraps."""
        return self._path

    def flags(self):
        # Arbitrary disk files carry no link/exec manifest flags.
        return b''

    def data(self):
        """Return the raw file content."""
        return util.readfile(self._path)

    def decodeddata(self):
        """Return the file content read directly from disk."""
        with open(self._path, b"rb") as fh:
            return fh.read()

    def remove(self):
        """Delete the underlying file."""
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        """Replace the file content on disk; flags are unsupported."""
        assert not flags
        with open(self._path, b"wb") as fh:
            fh.write(data)
General Comments 0
You need to be logged in to leave comments. Login now