##// END OF EJS Templates
context: some gratuitous documentation improvement...
marmoute -
r52655:dcbe7fda default
parent child Browse files
Show More
@@ -1,3145 +1,3148 b''
1 # context.py - changeset and file context objects for mercurial
1 # context.py - changeset and file context objects for mercurial
2 #
2 #
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2006, 2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8
8
9 import filecmp
9 import filecmp
10 import os
10 import os
11 import stat
11 import stat
12
12
13 from .i18n import _
13 from .i18n import _
14 from .node import (
14 from .node import (
15 hex,
15 hex,
16 nullrev,
16 nullrev,
17 short,
17 short,
18 )
18 )
19 from . import (
19 from . import (
20 dagop,
20 dagop,
21 encoding,
21 encoding,
22 error,
22 error,
23 fileset,
23 fileset,
24 match as matchmod,
24 match as matchmod,
25 mergestate as mergestatemod,
25 mergestate as mergestatemod,
26 metadata,
26 metadata,
27 obsolete as obsmod,
27 obsolete as obsmod,
28 patch,
28 patch,
29 pathutil,
29 pathutil,
30 phases,
30 phases,
31 repoview,
31 repoview,
32 scmutil,
32 scmutil,
33 sparse,
33 sparse,
34 subrepo,
34 subrepo,
35 subrepoutil,
35 subrepoutil,
36 testing,
36 testing,
37 util,
37 util,
38 )
38 )
39 from .utils import (
39 from .utils import (
40 dateutil,
40 dateutil,
41 stringutil,
41 stringutil,
42 )
42 )
43 from .dirstateutils import (
43 from .dirstateutils import (
44 timestamp,
44 timestamp,
45 )
45 )
46
46
47 propertycache = util.propertycache
47 propertycache = util.propertycache
48
48
49
49
class basectx:
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
    be committed,
    memctx: a context that represents changes in-memory and can also
    be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        # Contexts of different concrete types never compare equal, even
        # when they designate the same revision.
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in d.items():
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in self._repo.nodeconstants.wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )

    @propertycache
    def substate(self):
        # Parsed subrepo state for this context (lazy, cached).
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        """Return the recorded revision of the subrepo at *subpath*."""
        return self.substate[subpath][1]

    def rev(self):
        """Return the numeric revision of this context."""
        return self._rev

    def node(self):
        """Return the binary node id of this context."""
        return self._node

    def hex(self):
        """Return the full hexadecimal node id of this context."""
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        """Return the phase of this changeset as a byte string name."""
        return phases.phasenames[self.phase()]

    def mutable(self):
        """True if the changeset is in a non-public phase."""
        return self.phase() > phases.public

    def matchfileset(self, cwd, expr, badfn=None):
        return fileset.match(self, cwd, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        """Return the first parent context."""
        return self._parents[0]

    def p2(self):
        """Return the second parent context, or the null context if there
        is only one parent."""
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        """Return ``(filenode, flags)`` for *path* in this context.

        Raises error.ManifestLookupError if the path is not present.
        """
        if '_manifest' in self.__dict__:
            # The full manifest is already loaded; use it directly.
            try:
                return self._manifest.find(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node or b'None', path, _(b'not found in manifest')
                )
        # Try to find the file in the manifest delta that can be faster to read
        # than a full manifest. If we fail to find the file, it might still
        # exist in the full manifest, so lets look for it there.
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node or b'None', path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        """Return the flags for *path*, or ``b''`` if it cannot be found."""
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        # (p1copies, p2copies) computed from changeset metadata (cached).
        return metadata.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        """return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        """
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
        cwd=None,
    ):
        """Build a matcher for this context from the given patterns."""
        r = self._repo
        if not cwd:
            cwd = r.getcwd()
        return matchmod.match(
            r.root,
            cwd,
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        ctx1.status(ctx2) returns the status of change from ctx1 to ctx2

        Returns a mercurial.scmutils.status object.

        Data can be accessed using either tuple notation:

          (modified, added, removed, deleted, unknown, ignored, clean)

        or direct attribute access:

          s.modified, s.added, ...
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for k in (
                    'modified',
                    'added',
                    'removed',
                    'deleted',
                    'unknown',
                    'ignored',
                    'clean',
                ):
                    rfiles, sfiles = getattr(r, k), getattr(s, k)
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        r.modified.sort()
        r.added.sort()
        r.removed.sort()
        r.deleted.sort()
        r.unknown.sort()
        r.ignored.sort()
        r.clean.sort()

        return r

    def mergestate(self, clean=False):
        """Get a mergestate object for this context."""
        raise NotImplementedError(
            '%s does not implement mergestate()' % self.__class__
        )

    def isempty(self):
        """True if this changeset carries no change of its own: a single
        parent, same branch as p1, not closing a branch, and no files."""
        return not (
            len(self.parents()) > 1
            or self.branch() != self.p1().branch()
            or self.closesbranch()
            or self.files()
        )
493
496
494
497
495 class changectx(basectx):
498 class changectx(basectx):
496 """A changecontext object makes access to data related to a particular
499 """A changecontext object makes access to data related to a particular
497 changeset convenient. It represents a read-only context already present in
500 changeset convenient. It represents a read-only context already present in
498 the repo."""
501 the repo."""
499
502
500 def __init__(self, repo, rev, node, maybe_filtered=True):
503 def __init__(self, repo, rev, node, maybe_filtered=True):
501 super(changectx, self).__init__(repo)
504 super(changectx, self).__init__(repo)
502 self._rev = rev
505 self._rev = rev
503 self._node = node
506 self._node = node
504 # When maybe_filtered is True, the revision might be affected by
507 # When maybe_filtered is True, the revision might be affected by
505 # changelog filtering and operation through the filtered changelog must be used.
508 # changelog filtering and operation through the filtered changelog must be used.
506 #
509 #
507 # When maybe_filtered is False, the revision has already been checked
510 # When maybe_filtered is False, the revision has already been checked
508 # against filtering and is not filtered. Operation through the
511 # against filtering and is not filtered. Operation through the
509 # unfiltered changelog might be used in some case.
512 # unfiltered changelog might be used in some case.
510 self._maybe_filtered = maybe_filtered
513 self._maybe_filtered = maybe_filtered
511
514
512 def __hash__(self):
515 def __hash__(self):
513 try:
516 try:
514 return hash(self._rev)
517 return hash(self._rev)
515 except AttributeError:
518 except AttributeError:
516 return id(self)
519 return id(self)
517
520
518 def __nonzero__(self):
521 def __nonzero__(self):
519 return self._rev != nullrev
522 return self._rev != nullrev
520
523
521 __bool__ = __nonzero__
524 __bool__ = __nonzero__
522
525
523 @propertycache
526 @propertycache
524 def _changeset(self):
527 def _changeset(self):
525 if self._maybe_filtered:
528 if self._maybe_filtered:
526 repo = self._repo
529 repo = self._repo
527 else:
530 else:
528 repo = self._repo.unfiltered()
531 repo = self._repo.unfiltered()
529 return repo.changelog.changelogrevision(self.rev())
532 return repo.changelog.changelogrevision(self.rev())
530
533
531 @propertycache
534 @propertycache
532 def _manifest(self):
535 def _manifest(self):
533 return self._manifestctx.read()
536 return self._manifestctx.read()
534
537
535 @property
538 @property
536 def _manifestctx(self):
539 def _manifestctx(self):
537 return self._repo.manifestlog[self._changeset.manifest]
540 return self._repo.manifestlog[self._changeset.manifest]
538
541
539 @propertycache
542 @propertycache
540 def _manifestdelta(self):
543 def _manifestdelta(self):
541 return self._manifestctx.readdelta()
544 return self._manifestctx.readdelta()
542
545
543 @propertycache
546 @propertycache
544 def _parents(self):
547 def _parents(self):
545 repo = self._repo
548 repo = self._repo
546 if self._maybe_filtered:
549 if self._maybe_filtered:
547 cl = repo.changelog
550 cl = repo.changelog
548 else:
551 else:
549 cl = repo.unfiltered().changelog
552 cl = repo.unfiltered().changelog
550
553
551 p1, p2 = cl.parentrevs(self._rev)
554 p1, p2 = cl.parentrevs(self._rev)
552 if p2 == nullrev:
555 if p2 == nullrev:
553 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
556 return [changectx(repo, p1, cl.node(p1), maybe_filtered=False)]
554 return [
557 return [
555 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
558 changectx(repo, p1, cl.node(p1), maybe_filtered=False),
556 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
559 changectx(repo, p2, cl.node(p2), maybe_filtered=False),
557 ]
560 ]
558
561
559 def changeset(self):
562 def changeset(self):
560 c = self._changeset
563 c = self._changeset
561 return (
564 return (
562 c.manifest,
565 c.manifest,
563 c.user,
566 c.user,
564 c.date,
567 c.date,
565 c.files,
568 c.files,
566 c.description,
569 c.description,
567 c.extra,
570 c.extra,
568 )
571 )
569
572
570 def manifestnode(self):
573 def manifestnode(self):
571 return self._changeset.manifest
574 return self._changeset.manifest
572
575
573 def user(self):
576 def user(self):
574 return self._changeset.user
577 return self._changeset.user
575
578
576 def date(self):
579 def date(self):
577 return self._changeset.date
580 return self._changeset.date
578
581
579 def files(self):
582 def files(self):
580 return self._changeset.files
583 return self._changeset.files
581
584
582 def filesmodified(self):
585 def filesmodified(self):
583 modified = set(self.files())
586 modified = set(self.files())
584 modified.difference_update(self.filesadded())
587 modified.difference_update(self.filesadded())
585 modified.difference_update(self.filesremoved())
588 modified.difference_update(self.filesremoved())
586 return sorted(modified)
589 return sorted(modified)
587
590
588 def filesadded(self):
591 def filesadded(self):
589 filesadded = self._changeset.filesadded
592 filesadded = self._changeset.filesadded
590 compute_on_none = True
593 compute_on_none = True
591 if self._repo.filecopiesmode == b'changeset-sidedata':
594 if self._repo.filecopiesmode == b'changeset-sidedata':
592 compute_on_none = False
595 compute_on_none = False
593 else:
596 else:
594 source = self._repo.ui.config(b'experimental', b'copies.read-from')
597 source = self._repo.ui.config(b'experimental', b'copies.read-from')
595 if source == b'changeset-only':
598 if source == b'changeset-only':
596 compute_on_none = False
599 compute_on_none = False
597 elif source != b'compatibility':
600 elif source != b'compatibility':
598 # filelog mode, ignore any changelog content
601 # filelog mode, ignore any changelog content
599 filesadded = None
602 filesadded = None
600 if filesadded is None:
603 if filesadded is None:
601 if compute_on_none:
604 if compute_on_none:
602 filesadded = metadata.computechangesetfilesadded(self)
605 filesadded = metadata.computechangesetfilesadded(self)
603 else:
606 else:
604 filesadded = []
607 filesadded = []
605 return filesadded
608 return filesadded
606
609
607 def filesremoved(self):
610 def filesremoved(self):
608 filesremoved = self._changeset.filesremoved
611 filesremoved = self._changeset.filesremoved
609 compute_on_none = True
612 compute_on_none = True
610 if self._repo.filecopiesmode == b'changeset-sidedata':
613 if self._repo.filecopiesmode == b'changeset-sidedata':
611 compute_on_none = False
614 compute_on_none = False
612 else:
615 else:
613 source = self._repo.ui.config(b'experimental', b'copies.read-from')
616 source = self._repo.ui.config(b'experimental', b'copies.read-from')
614 if source == b'changeset-only':
617 if source == b'changeset-only':
615 compute_on_none = False
618 compute_on_none = False
616 elif source != b'compatibility':
619 elif source != b'compatibility':
617 # filelog mode, ignore any changelog content
620 # filelog mode, ignore any changelog content
618 filesremoved = None
621 filesremoved = None
619 if filesremoved is None:
622 if filesremoved is None:
620 if compute_on_none:
623 if compute_on_none:
621 filesremoved = metadata.computechangesetfilesremoved(self)
624 filesremoved = metadata.computechangesetfilesremoved(self)
622 else:
625 else:
623 filesremoved = []
626 filesremoved = []
624 return filesremoved
627 return filesremoved
625
628
    @propertycache
    def _copies(self):
        """Return the ``(p1copies, p2copies)`` copy-tracing dicts.

        Which source is consulted depends on the repository's file-copies
        mode and the ``experimental.copies.read-from`` config knob.
        """
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # otherwise, when config said to read only from filelog, we get the
            # copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
660
663
    def description(self):
        """Return the commit message of this changeset."""
        return self._changeset.description

    def branch(self):
        """Return the branch name, converted to the local encoding."""
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        """True if this changeset closes its branch."""
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)
681
684
682 def fast_rank(self):
685 def fast_rank(self):
683 repo = self._repo
686 repo = self._repo
684 if self._maybe_filtered:
687 if self._maybe_filtered:
685 cl = repo.changelog
688 cl = repo.changelog
686 else:
689 else:
687 cl = repo.unfiltered().changelog
690 cl = repo.unfiltered().changelog
688 return cl.fast_rank(self._rev)
691 return cl.fast_rank(self._rev)
689
692
    def phase(self):
        """Return the phase of this changeset (queried from the phase cache)."""
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        """True if this revision is filtered out of the 'visible' view."""
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        # committed changesets are never in-memory-only
        return False
698
701
    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        """Yield a changectx for every ancestor revision of this changeset."""
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]
711
714
    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        # default to the file node recorded in this changeset's manifest
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
727
730
    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = self._repo.nodeconstants.nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                # no configured preference matched a candidate; fall back to
                # the plain revlog ancestor
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                # tell the user which candidate was picked and how to pick
                # another one via --config merge.preferancestor
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
772
775
    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        """Alias for walk(): generate file names matching *match*."""
        return self.walk(match)
793
796
794
797
795 class basefilectx:
798 class basefilectx:
796 """A filecontext object represents the common logic for its children:
799 """A filecontext object represents the common logic for its children:
797 filectx: read-only access to a filerevision that is already present
800 filectx: read-only access to a filerevision that is already present
798 in the repo,
801 in the repo,
799 workingfilectx: a filecontext that represents files from the working
802 workingfilectx: a filecontext that represents files from the working
800 directory,
803 directory,
801 memfilectx: a filecontext that represents files in-memory,
804 memfilectx: a filecontext that represents files in-memory,
802 """
805 """
803
806
    @propertycache
    def _filelog(self):
        # lazily open (and cache) the filelog for this file's path
        return self._repo.file(self._path)
807
810
    @propertycache
    def _changeid(self):
        # changelog revision this file context is associated with
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            # fall back to the (possibly aliased) linkrev stored in the filelog
            return self._filelog.linkrev(self._filerev)
818
821
    @propertycache
    def _filenode(self):
        # resolve the file node either from an explicit fileid or from the
        # associated changeset's manifest
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        # filelog revision number for this file node
        return self._filelog.rev(self._filenode)
829
832
    @propertycache
    def _repopath(self):
        # path of this file relative to the repository root
        return self._path
833
836
    def __nonzero__(self):
        """True if the file exists in the associated revision."""
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__
843
846
    def __bytes__(self):
        """Return b"path@changeset", or b"path@???" when lookup fails."""
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)
851
854
    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        # hash on (path, filenode) to stay consistent with __eq__; fall back
        # to object identity when the filenode cannot be resolved
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)
860
863
    def __eq__(self, other):
        """Equal when class, path and file node all match."""
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            # other lacks the compared attributes (not a filectx-like object)
            return False

    def __ne__(self, other):
        return not (self == other)
873
876
    def filerev(self):
        """Return the filelog revision number of this file revision."""
        return self._filerev

    def filenode(self):
        """Return the filelog node id of this file revision."""
        return self._filenode

    @propertycache
    def _flags(self):
        # flags ('l'/'x'/'') as recorded in the changeset's manifest
        return self._changectx.flags(self._path)

    def flags(self):
        """Return the manifest flags of this file ('l', 'x' or '')."""
        return self._flags

    def filelog(self):
        """Return the filelog backing this file context."""
        return self._filelog

    def rev(self):
        """Return the changelog revision this file context belongs to."""
        return self._changeid

    def linkrev(self):
        """Return the raw linkrev stored in the filelog (may be aliased;
        see introrev() for the adjusted value)."""
        return self._filelog.linkrev(self._filerev)
895
898
    # The methods below simply delegate to the associated changectx.

    def node(self):
        """Return the node id of the associated changeset."""
        return self._changectx.node()

    def hex(self):
        """Return the hex node id of the associated changeset."""
        return self._changectx.hex()

    def user(self):
        """Return the user of the associated changeset."""
        return self._changectx.user()

    def date(self):
        """Return the date of the associated changeset."""
        return self._changectx.date()

    def files(self):
        """Return the files touched by the associated changeset."""
        return self._changectx.files()

    def description(self):
        """Return the description of the associated changeset."""
        return self._changectx.description()

    def branch(self):
        """Return the branch of the associated changeset."""
        return self._changectx.branch()

    def extra(self):
        """Return the extra dict of the associated changeset."""
        return self._changectx.extra()

    def phase(self):
        """Return the phase of the associated changeset."""
        return self._changectx.phase()

    def phasestr(self):
        """Return the phase name of the associated changeset."""
        return self._changectx.phasestr()

    def obsolete(self):
        """True if the associated changeset is obsolete."""
        return self._changectx.obsolete()

    def instabilities(self):
        """Return the instabilities of the associated changeset."""
        return self._changectx.instabilities()

    def manifest(self):
        """Return the manifest of the associated changeset."""
        return self._changectx.manifest()
934
937
    def changectx(self):
        """Return the changectx this file context is associated with."""
        return self._changectx

    def renamed(self):
        """Return the (source path, source node) copy record, or a false
        value when this file revision is not a copy/rename."""
        return self._copied

    def copysource(self):
        """Return the copy source path, or a false value if not copied."""
        return self._copied and self._copied[0]

    def repo(self):
        """Return the repository this file context belongs to."""
        return self._repo

    def size(self):
        """Return the size of the file content in bytes."""
        return len(self.data())

    def path(self):
        """Return the repository-relative path of this file."""
        return self._path
952
955
    def isbinary(self):
        """True if the file content looks binary; False when unreadable."""
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        """True if the file carries the executable flag."""
        return b'x' in self.flags()

    def islink(self):
        """True if the file is a symbolic link."""
        return b'l' in self.flags()
964
967
    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    # subclasses with a custom comparison set this to True so that cmp()
    # defers to them (see cmp() below)
    _customcmp = False
973
976
    def cmp(self, fctx):
        """compare with other file context

        returns True if different than fctx.
        """
        # let contexts with a custom comparison drive the comparison
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            # filelog.size() has two special cases:
            # - censored metadata
            # - copy/rename tracking
            # The first is detected by peeking into the delta,
            # the second is detected by abusing parent order
            # in the revlog index as flag bit. This leaves files using
            # the dummy encoding and non-standard meta attributes.
            # The following check is a special case for the empty
            # metadata block used if the raw file content starts with '\1\n'.
            # Cases of arbitrary metadata flags are currently mishandled.
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size() or self.flags() == b'l':
                # size() matches: need to compare content
                # issue6456: Always compare symlinks because size can represent
                # encrypted string for EXT-4 encryption(fscrypt).
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
1014
1017
    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function will return "None" and stop its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            # fast path: the source revision is the linkrev itself
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if manifest uses a buggy file revision (not children of the
            # one it replaces) we could. Such a buggy situation will likely
            # result in a crash somewhere else at some point.
        return lkr
1067
1070
    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision"""
        # cheap check first: the raw linkrev is a lower bound on introrev
        if self.linkrev() >= changelogrev:
            return True
        # _introrev may return None when the walk is cut off at the floor,
        # meaning the introduction is strictly before changelogrev
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev
1076
1079
    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()
1087
1090
    def _introrev(self, stoprev=None):
        """
        Same as `introrev` but, with an extra argument to limit changelog
        iteration range in some internal usecase.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            # no ancestry information available: trust the raw linkrev
            return self.linkrev()
1116
1119
    def introfilectx(self):
        """Return filectx having identical contents, but pointing to the
        changeset revision where this filectx was introduced"""
        introrev = self.introrev()
        if self.rev() == introrev:
            # already pointing at the introducing changeset
            return self
        return self.filectx(self.filenode(), changeid=introrev)
1124
1127
    def _parentfilectx(self, path, fileid, filelog):
        """create parent filectx keeping ancestry info for _adjustlinkrev()"""
        fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
        if '_changeid' in vars(self) or '_changectx' in vars(self):
            # If self is associated with a changeset (probably explicitly
            # fed), ensure the created filectx is associated with a
            # changeset that is an ancestor of self.changectx.
            # This lets us later use _adjustlinkrev to get a correct link.
            fctx._descendantrev = self.rev()
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        elif '_descendantrev' in vars(self):
            # Otherwise propagate _descendantrev if we have one associated.
            fctx._descendantrev = self._descendantrev
            fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
        return fctx
1140
1143
1141 def parents(self):
1144 def parents(self):
1142 _path = self._path
1145 _path = self._path
1143 fl = self._filelog
1146 fl = self._filelog
1144 parents = self._filelog.parents(self._filenode)
1147 parents = self._filelog.parents(self._filenode)
1145 pl = [
1148 pl = [
1146 (_path, node, fl)
1149 (_path, node, fl)
1147 for node in parents
1150 for node in parents
1148 if node != self._repo.nodeconstants.nullid
1151 if node != self._repo.nodeconstants.nullid
1149 ]
1152 ]
1150
1153
1151 r = fl.renamed(self._filenode)
1154 r = fl.renamed(self._filenode)
1152 if r:
1155 if r:
1153 # - In the simple rename case, both parent are nullid, pl is empty.
1156 # - In the simple rename case, both parent are nullid, pl is empty.
1154 # - In case of merge, only one of the parent is null id and should
1157 # - In case of merge, only one of the parent is null id and should
1155 # be replaced with the rename information. This parent is -always-
1158 # be replaced with the rename information. This parent is -always-
1156 # the first one.
1159 # the first one.
1157 #
1160 #
1158 # As null id have always been filtered out in the previous list
1161 # As null id have always been filtered out in the previous list
1159 # comprehension, inserting to 0 will always result in "replacing
1162 # comprehension, inserting to 0 will always result in "replacing
1160 # first nullid parent with rename information.
1163 # first nullid parent with rename information.
1161 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1164 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1162
1165
1163 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1166 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1164
1167
1165 def p1(self):
1168 def p1(self):
1166 return self.parents()[0]
1169 return self.parents()[0]
1167
1170
1168 def p2(self):
1171 def p2(self):
1169 p = self.parents()
1172 p = self.parents()
1170 if len(p) == 2:
1173 if len(p) == 2:
1171 return p[1]
1174 return p[1]
1172 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1175 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1173
1176
1174 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1177 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1175 """Returns a list of annotateline objects for each line in the file
1178 """Returns a list of annotateline objects for each line in the file
1176
1179
1177 - line.fctx is the filectx of the node where that line was last changed
1180 - line.fctx is the filectx of the node where that line was last changed
1178 - line.lineno is the line number at the first appearance in the managed
1181 - line.lineno is the line number at the first appearance in the managed
1179 file
1182 file
1180 - line.text is the data on that line (including newline character)
1183 - line.text is the data on that line (including newline character)
1181 """
1184 """
1182 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1185 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1183
1186
1184 def parents(f):
1187 def parents(f):
1185 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1188 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1186 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1189 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1187 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1190 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1188 # isn't an ancestor of the srcrev.
1191 # isn't an ancestor of the srcrev.
1189 f._changeid
1192 f._changeid
1190 pl = f.parents()
1193 pl = f.parents()
1191
1194
1192 # Don't return renamed parents if we aren't following.
1195 # Don't return renamed parents if we aren't following.
1193 if not follow:
1196 if not follow:
1194 pl = [p for p in pl if p.path() == f.path()]
1197 pl = [p for p in pl if p.path() == f.path()]
1195
1198
1196 # renamed filectx won't have a filelog yet, so set it
1199 # renamed filectx won't have a filelog yet, so set it
1197 # from the cache to save time
1200 # from the cache to save time
1198 for p in pl:
1201 for p in pl:
1199 if not '_filelog' in p.__dict__:
1202 if not '_filelog' in p.__dict__:
1200 p._filelog = getlog(p.path())
1203 p._filelog = getlog(p.path())
1201
1204
1202 return pl
1205 return pl
1203
1206
1204 # use linkrev to find the first changeset where self appeared
1207 # use linkrev to find the first changeset where self appeared
1205 base = self.introfilectx()
1208 base = self.introfilectx()
1206 if getattr(base, '_ancestrycontext', None) is None:
1209 if getattr(base, '_ancestrycontext', None) is None:
1207 # it is safe to use an unfiltered repository here because we are
1210 # it is safe to use an unfiltered repository here because we are
1208 # walking ancestors only.
1211 # walking ancestors only.
1209 cl = self._repo.unfiltered().changelog
1212 cl = self._repo.unfiltered().changelog
1210 if base.rev() is None:
1213 if base.rev() is None:
1211 # wctx is not inclusive, but works because _ancestrycontext
1214 # wctx is not inclusive, but works because _ancestrycontext
1212 # is used to test filelog revisions
1215 # is used to test filelog revisions
1213 ac = cl.ancestors(
1216 ac = cl.ancestors(
1214 [p.rev() for p in base.parents()], inclusive=True
1217 [p.rev() for p in base.parents()], inclusive=True
1215 )
1218 )
1216 else:
1219 else:
1217 ac = cl.ancestors([base.rev()], inclusive=True)
1220 ac = cl.ancestors([base.rev()], inclusive=True)
1218 base._ancestrycontext = ac
1221 base._ancestrycontext = ac
1219
1222
1220 return dagop.annotate(
1223 return dagop.annotate(
1221 base, parents, skiprevs=skiprevs, diffopts=diffopts
1224 base, parents, skiprevs=skiprevs, diffopts=diffopts
1222 )
1225 )
1223
1226
1224 def ancestors(self, followfirst=False):
1227 def ancestors(self, followfirst=False):
1225 visit = {}
1228 visit = {}
1226 c = self
1229 c = self
1227 if followfirst:
1230 if followfirst:
1228 cut = 1
1231 cut = 1
1229 else:
1232 else:
1230 cut = None
1233 cut = None
1231
1234
1232 while True:
1235 while True:
1233 for parent in c.parents()[:cut]:
1236 for parent in c.parents()[:cut]:
1234 visit[(parent.linkrev(), parent.filenode())] = parent
1237 visit[(parent.linkrev(), parent.filenode())] = parent
1235 if not visit:
1238 if not visit:
1236 break
1239 break
1237 c = visit.pop(max(visit))
1240 c = visit.pop(max(visit))
1238 yield c
1241 yield c
1239
1242
1240 def decodeddata(self):
1243 def decodeddata(self):
1241 """Returns `data()` after running repository decoding filters.
1244 """Returns `data()` after running repository decoding filters.
1242
1245
1243 This is often equivalent to how the data would be expressed on disk.
1246 This is often equivalent to how the data would be expressed on disk.
1244 """
1247 """
1245 return self._repo.wwritedata(self.path(), self.data())
1248 return self._repo.wwritedata(self.path(), self.data())
1246
1249
1247
1250
class filectx(basefilectx):
    """A filecontext object makes access to data related to a particular
    filerevision convenient."""

    def __init__(
        self,
        repo,
        path,
        changeid=None,
        fileid=None,
        filelog=None,
        changectx=None,
    ):
        """changeid must be a revision number, if specified.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path

        # At least one way of locating the file revision must be given.
        assert (
            changeid is not None or fileid is not None or changectx is not None
        ), b"bad args: changeid=%r, fileid=%r, changectx=%r" % (
            changeid,
            fileid,
            changectx,
        )

        if filelog is not None:
            self._filelog = filelog

        if changeid is not None:
            self._changeid = changeid
        if changectx is not None:
            self._changectx = changectx
        if fileid is not None:
            self._fileid = fileid

    @propertycache
    def _changectx(self):
        try:
            return self._repo[self._changeid]
        except error.FilteredRepoLookupError:
            # Linkrev may point to any revision in the repository. When the
            # repository is filtered this may lead to `filectx` trying to build
            # `changectx` for filtered revision. In such case we fallback to
            # creating `changectx` on the unfiltered version of the reposition.
            # This fallback should not be an issue because `changectx` from
            # `filectx` are not used in complex operations that care about
            # filtering.
            #
            # This fallback is a cheap and dirty fix that prevent several
            # crashes. It does not ensure the behavior is correct. However the
            # behavior was not correct before filtering either and "incorrect
            # behavior" is seen as better as "crash"
            #
            # Linkrevs have several serious troubles with filtering that are
            # complicated to solve. Proper handling of the issue here should be
            # considered when solving linkrev issue are on the table.
            return self._repo.unfiltered()[self._changeid]

    def filectx(self, fileid, changeid=None):
        """opens an arbitrary revision of the file without
        opening a new filelog"""
        return filectx(
            self._repo,
            self._path,
            fileid=fileid,
            filelog=self._filelog,
            changeid=changeid,
        )

    def rawdata(self):
        """Return the raw (undecoded) revlog data for this file revision."""
        return self._filelog.rawdata(self._filenode)

    def rawflags(self):
        """low-level revlog flags"""
        return self._filelog.flags(self._filerev)

    def data(self):
        """Return the file content, honoring the censor policy.

        Raises Abort for censored nodes unless censor.policy is b"ignore",
        in which case empty content is returned.
        """
        try:
            return self._filelog.read(self._filenode)
        except error.CensoredNodeError:
            if self._repo.ui.config(b"censor", b"policy") == b"ignore":
                return b""
            raise error.Abort(
                _(b"censored node: %s") % short(self._filenode),
                hint=_(b"set censor.policy to ignore errors"),
            )

    def size(self):
        """Return the size of this file revision as recorded by the filelog."""
        return self._filelog.size(self._filerev)

    @propertycache
    def _copied(self):
        """check if file was actually renamed in this changeset revision

        If rename logged in file revision, we report copy for changeset only
        if file revisions linkrev points back to the changeset in question
        or both changeset parents contain different file revisions.
        """

        renamed = self._filelog.renamed(self._filenode)
        if not renamed:
            return None

        if self.rev() == self.linkrev():
            return renamed

        name = self.path()
        fnode = self._filenode
        for p in self._changectx.parents():
            try:
                if fnode == p.filenode(name):
                    return None
            except error.LookupError:
                # file not present in this parent; keep checking
                pass
        return renamed

    def children(self):
        """Return filectxs for the children of this file revision.

        Note: this follows filelog children only, so it is hard for renames.
        """
        c = self._filelog.children(self._filenode)
        return [
            filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
            for x in c
        ]
1372
1375
1373
1376
class committablectx(basectx):
    """A committablectx object provides common functionality for a context that
    wants the ability to commit, e.g. workingctx or memctx."""

    def __init__(
        self,
        repo,
        text=b"",
        user=None,
        date=None,
        extra=None,
        changes=None,
        branch=None,
    ):
        """Initialize a not-yet-committed context.

        date - any valid date string or (unixtime, offset), or None.
        user - username string, or None.
        extra - a dictionary of extra values, or None.
        changes - a status object, or None to use the repository status.
        branch - branch name (local encoding), or None.
        """
        super(committablectx, self).__init__(repo)
        self._rev = None
        self._node = None
        self._text = text
        if date:
            self._date = dateutil.parsedate(date)
        if user:
            self._user = user
        if changes:
            self._status = changes

        self._extra = {}
        if extra:
            self._extra = extra.copy()
        if branch is not None:
            self._extra[b'branch'] = encoding.fromlocal(branch)
        if not self._extra.get(b'branch'):
            self._extra[b'branch'] = b'default'

    def __bytes__(self):
        return bytes(self._parents[0]) + b"+"

    def hex(self):
        # NOTE(review): this is a bare expression statement, so hex() always
        # returns None; it looks like a `return` is missing (compare
        # workingctx.hex which returns wdirhex) -- confirm before changing.
        self._repo.nodeconstants.wdirhex

    __str__ = encoding.strmethod(__bytes__)

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    @propertycache
    def _status(self):
        # Lazily computed when `changes` was not supplied to __init__.
        return self._repo.status()

    @propertycache
    def _user(self):
        # Lazily computed when `user` was not supplied to __init__.
        return self._repo.ui.username()

    @propertycache
    def _date(self):
        # Lazily computed when `date` was not supplied to __init__; honors
        # the devel.default-date override used by the test suite.
        ui = self._repo.ui
        date = ui.configdate(b'devel', b'default-date')
        if date is None:
            date = dateutil.makedate()
        return date

    def subrev(self, subpath):
        # An uncommitted context has no recorded subrepo revision.
        return None

    def manifestnode(self):
        # No manifest has been written yet.
        return None

    def user(self):
        return self._user or self._repo.ui.username()

    def date(self):
        return self._date

    def description(self):
        return self._text

    def files(self):
        """Return the sorted list of modified, added and removed files."""
        return sorted(
            self._status.modified + self._status.added + self._status.removed
        )

    def modified(self):
        return self._status.modified

    def added(self):
        return self._status.added

    def removed(self):
        return self._status.removed

    def deleted(self):
        return self._status.deleted

    # Aliases matching the committed-context API.
    filesmodified = modified
    filesadded = added
    filesremoved = removed

    def branch(self):
        return encoding.tolocal(self._extra[b'branch'])

    def closesbranch(self):
        return b'close' in self._extra

    def extra(self):
        return self._extra

    def isinmemory(self):
        return False

    def tags(self):
        return []

    def bookmarks(self):
        # A pending commit carries the bookmarks of all its parents.
        b = []
        for p in self.parents():
            b.extend(p.bookmarks())
        return b

    def phase(self):
        # The new commit phase is at least the configured default, and never
        # lower than any parent's phase.
        phase = phases.newcommitphase(self._repo.ui)
        for p in self.parents():
            phase = max(phase, p.phase())
        return phase

    def hidden(self):
        return False

    def children(self):
        return []

    def flags(self, path):
        """Return the flags (b'l', b'x', ...) for ``path``, or b''."""
        if '_manifest' in self.__dict__:
            try:
                return self._manifest.flags(path)
            except KeyError:
                return b''

        try:
            return self._flagfunc(path)
        except OSError:
            return b''

    def ancestor(self, c2):
        """return the "best" ancestor context of self and c2"""
        return self._parents[0].ancestor(c2)  # punt on two parents for now

    def ancestors(self):
        # Yield direct parents first, then all their ancestors.
        for p in self._parents:
            yield p
        for a in self._repo.changelog.ancestors(
            [p.rev() for p in self._parents]
        ):
            yield self._repo[a]

    def markcommitted(self, node):
        """Perform post-commit cleanup necessary after committing this ctx

        Specifically, this updates backing stores this working context
        wraps to reflect the fact that the changes reflected by this
        workingctx have been committed. For example, it marks
        modified and added files as normal in the dirstate.

        """

    def dirty(self, missing=False, merge=True, branch=True):
        return False
1541
1544
1542
1545
1543 class workingctx(committablectx):
1546 class workingctx(committablectx):
1544 """A workingctx object makes access to data related to
1547 """A workingctx object makes access to data related to
1545 the current working directory convenient.
1548 the current working directory convenient.
1546 date - any valid date string or (unixtime, offset), or None.
1549 date - any valid date string or (unixtime, offset), or None.
1547 user - username string, or None.
1550 user - username string, or None.
1548 extra - a dictionary of extra values, or None.
1551 extra - a dictionary of extra values, or None.
1549 changes - a list of file lists as returned by localrepo.status()
1552 changes - a list of file lists as returned by localrepo.status()
1550 or None to use the repository status.
1553 or None to use the repository status.
1551 """
1554 """
1552
1555
1553 def __init__(
1556 def __init__(
1554 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1557 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1555 ):
1558 ):
1556 branch = None
1559 branch = None
1557 if not extra or b'branch' not in extra:
1560 if not extra or b'branch' not in extra:
1558 try:
1561 try:
1559 branch = repo.dirstate.branch()
1562 branch = repo.dirstate.branch()
1560 except UnicodeDecodeError:
1563 except UnicodeDecodeError:
1561 raise error.Abort(_(b'branch name not in UTF-8!'))
1564 raise error.Abort(_(b'branch name not in UTF-8!'))
1562 super(workingctx, self).__init__(
1565 super(workingctx, self).__init__(
1563 repo, text, user, date, extra, changes, branch=branch
1566 repo, text, user, date, extra, changes, branch=branch
1564 )
1567 )
1565
1568
1566 def __iter__(self):
1569 def __iter__(self):
1567 d = self._repo.dirstate
1570 d = self._repo.dirstate
1568 for f in d:
1571 for f in d:
1569 if d.get_entry(f).tracked:
1572 if d.get_entry(f).tracked:
1570 yield f
1573 yield f
1571
1574
1572 def __contains__(self, key):
1575 def __contains__(self, key):
1573 return self._repo.dirstate.get_entry(key).tracked
1576 return self._repo.dirstate.get_entry(key).tracked
1574
1577
1575 def hex(self):
1578 def hex(self):
1576 return self._repo.nodeconstants.wdirhex
1579 return self._repo.nodeconstants.wdirhex
1577
1580
1578 @propertycache
1581 @propertycache
1579 def _parents(self):
1582 def _parents(self):
1580 p = self._repo.dirstate.parents()
1583 p = self._repo.dirstate.parents()
1581 if p[1] == self._repo.nodeconstants.nullid:
1584 if p[1] == self._repo.nodeconstants.nullid:
1582 p = p[:-1]
1585 p = p[:-1]
1583 # use unfiltered repo to delay/avoid loading obsmarkers
1586 # use unfiltered repo to delay/avoid loading obsmarkers
1584 unfi = self._repo.unfiltered()
1587 unfi = self._repo.unfiltered()
1585 return [
1588 return [
1586 changectx(
1589 changectx(
1587 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1590 self._repo, unfi.changelog.rev(n), n, maybe_filtered=False
1588 )
1591 )
1589 for n in p
1592 for n in p
1590 ]
1593 ]
1591
1594
1592 def setparents(self, p1node, p2node=None):
1595 def setparents(self, p1node, p2node=None):
1593 if p2node is None:
1596 if p2node is None:
1594 p2node = self._repo.nodeconstants.nullid
1597 p2node = self._repo.nodeconstants.nullid
1595 dirstate = self._repo.dirstate
1598 dirstate = self._repo.dirstate
1596 with dirstate.changing_parents(self._repo):
1599 with dirstate.changing_parents(self._repo):
1597 copies = dirstate.setparents(p1node, p2node)
1600 copies = dirstate.setparents(p1node, p2node)
1598 pctx = self._repo[p1node]
1601 pctx = self._repo[p1node]
1599 if copies:
1602 if copies:
1600 # Adjust copy records, the dirstate cannot do it, it
1603 # Adjust copy records, the dirstate cannot do it, it
1601 # requires access to parents manifests. Preserve them
1604 # requires access to parents manifests. Preserve them
1602 # only for entries added to first parent.
1605 # only for entries added to first parent.
1603 for f in copies:
1606 for f in copies:
1604 if f not in pctx and copies[f] in pctx:
1607 if f not in pctx and copies[f] in pctx:
1605 dirstate.copy(copies[f], f)
1608 dirstate.copy(copies[f], f)
1606 if p2node == self._repo.nodeconstants.nullid:
1609 if p2node == self._repo.nodeconstants.nullid:
1607 for f, s in sorted(dirstate.copies().items()):
1610 for f, s in sorted(dirstate.copies().items()):
1608 if f not in pctx and s not in pctx:
1611 if f not in pctx and s not in pctx:
1609 dirstate.copy(None, f)
1612 dirstate.copy(None, f)
1610
1613
1611 def _fileinfo(self, path):
1614 def _fileinfo(self, path):
1612 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1615 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1613 self._manifest
1616 self._manifest
1614 return super(workingctx, self)._fileinfo(path)
1617 return super(workingctx, self)._fileinfo(path)
1615
1618
1616 def _buildflagfunc(self):
1619 def _buildflagfunc(self):
1617 # Create a fallback function for getting file flags when the
1620 # Create a fallback function for getting file flags when the
1618 # filesystem doesn't support them
1621 # filesystem doesn't support them
1619
1622
1620 copiesget = self._repo.dirstate.copies().get
1623 copiesget = self._repo.dirstate.copies().get
1621 parents = self.parents()
1624 parents = self.parents()
1622 if len(parents) < 2:
1625 if len(parents) < 2:
1623 # when we have one parent, it's easy: copy from parent
1626 # when we have one parent, it's easy: copy from parent
1624 man = parents[0].manifest()
1627 man = parents[0].manifest()
1625
1628
1626 def func(f):
1629 def func(f):
1627 f = copiesget(f, f)
1630 f = copiesget(f, f)
1628 return man.flags(f)
1631 return man.flags(f)
1629
1632
1630 else:
1633 else:
1631 # merges are tricky: we try to reconstruct the unstored
1634 # merges are tricky: we try to reconstruct the unstored
1632 # result from the merge (issue1802)
1635 # result from the merge (issue1802)
1633 p1, p2 = parents
1636 p1, p2 = parents
1634 pa = p1.ancestor(p2)
1637 pa = p1.ancestor(p2)
1635 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1638 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1636
1639
1637 def func(f):
1640 def func(f):
1638 f = copiesget(f, f) # may be wrong for merges with copies
1641 f = copiesget(f, f) # may be wrong for merges with copies
1639 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1642 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1640 if fl1 == fl2:
1643 if fl1 == fl2:
1641 return fl1
1644 return fl1
1642 if fl1 == fla:
1645 if fl1 == fla:
1643 return fl2
1646 return fl2
1644 if fl2 == fla:
1647 if fl2 == fla:
1645 return fl1
1648 return fl1
1646 return b'' # punt for conflicts
1649 return b'' # punt for conflicts
1647
1650
1648 return func
1651 return func
1649
1652
1650 @propertycache
1653 @propertycache
1651 def _flagfunc(self):
1654 def _flagfunc(self):
1652 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1655 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1653
1656
1654 def flags(self, path):
1657 def flags(self, path):
1655 try:
1658 try:
1656 return self._flagfunc(path)
1659 return self._flagfunc(path)
1657 except OSError:
1660 except OSError:
1658 return b''
1661 return b''
1659
1662
1660 def filectx(self, path, filelog=None):
1663 def filectx(self, path, filelog=None):
1661 """get a file context from the working directory"""
1664 """get a file context from the working directory"""
1662 return workingfilectx(
1665 return workingfilectx(
1663 self._repo, path, workingctx=self, filelog=filelog
1666 self._repo, path, workingctx=self, filelog=filelog
1664 )
1667 )
1665
1668
1666 def dirty(self, missing=False, merge=True, branch=True):
1669 def dirty(self, missing=False, merge=True, branch=True):
1667 """check whether a working directory is modified"""
1670 """check whether a working directory is modified"""
1668 # check subrepos first
1671 # check subrepos first
1669 for s in sorted(self.substate):
1672 for s in sorted(self.substate):
1670 if self.sub(s).dirty(missing=missing):
1673 if self.sub(s).dirty(missing=missing):
1671 return True
1674 return True
1672 # check current working dir
1675 # check current working dir
1673 return (
1676 return (
1674 (merge and self.p2())
1677 (merge and self.p2())
1675 or (branch and self.branch() != self.p1().branch())
1678 or (branch and self.branch() != self.p1().branch())
1676 or self.modified()
1679 or self.modified()
1677 or self.added()
1680 or self.added()
1678 or self.removed()
1681 or self.removed()
1679 or (missing and self.deleted())
1682 or (missing and self.deleted())
1680 )
1683 )
1681
1684
1682 def add(self, list, prefix=b""):
1685 def add(self, list, prefix=b""):
1683 with self._repo.wlock():
1686 with self._repo.wlock():
1684 ui, ds = self._repo.ui, self._repo.dirstate
1687 ui, ds = self._repo.ui, self._repo.dirstate
1685 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1688 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1686 rejected = []
1689 rejected = []
1687 lstat = self._repo.wvfs.lstat
1690 lstat = self._repo.wvfs.lstat
1688 for f in list:
1691 for f in list:
1689 # ds.pathto() returns an absolute file when this is invoked from
1692 # ds.pathto() returns an absolute file when this is invoked from
1690 # the keyword extension. That gets flagged as non-portable on
1693 # the keyword extension. That gets flagged as non-portable on
1691 # Windows, since it contains the drive letter and colon.
1694 # Windows, since it contains the drive letter and colon.
1692 scmutil.checkportable(ui, os.path.join(prefix, f))
1695 scmutil.checkportable(ui, os.path.join(prefix, f))
1693 try:
1696 try:
1694 st = lstat(f)
1697 st = lstat(f)
1695 except OSError:
1698 except OSError:
1696 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1699 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1697 rejected.append(f)
1700 rejected.append(f)
1698 continue
1701 continue
1699 limit = ui.configbytes(b'ui', b'large-file-limit')
1702 limit = ui.configbytes(b'ui', b'large-file-limit')
1700 if limit != 0 and st.st_size > limit:
1703 if limit != 0 and st.st_size > limit:
1701 ui.warn(
1704 ui.warn(
1702 _(
1705 _(
1703 b"%s: up to %d MB of RAM may be required "
1706 b"%s: up to %d MB of RAM may be required "
1704 b"to manage this file\n"
1707 b"to manage this file\n"
1705 b"(use 'hg revert %s' to cancel the "
1708 b"(use 'hg revert %s' to cancel the "
1706 b"pending addition)\n"
1709 b"pending addition)\n"
1707 )
1710 )
1708 % (f, 3 * st.st_size // 1000000, uipath(f))
1711 % (f, 3 * st.st_size // 1000000, uipath(f))
1709 )
1712 )
1710 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1713 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1711 ui.warn(
1714 ui.warn(
1712 _(
1715 _(
1713 b"%s not added: only files and symlinks "
1716 b"%s not added: only files and symlinks "
1714 b"supported currently\n"
1717 b"supported currently\n"
1715 )
1718 )
1716 % uipath(f)
1719 % uipath(f)
1717 )
1720 )
1718 rejected.append(f)
1721 rejected.append(f)
1719 elif not ds.set_tracked(f):
1722 elif not ds.set_tracked(f):
1720 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1723 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1721 return rejected
1724 return rejected
1722
1725
1723 def forget(self, files, prefix=b""):
1726 def forget(self, files, prefix=b""):
1724 with self._repo.wlock():
1727 with self._repo.wlock():
1725 ds = self._repo.dirstate
1728 ds = self._repo.dirstate
1726 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1729 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1727 rejected = []
1730 rejected = []
1728 for f in files:
1731 for f in files:
1729 if not ds.set_untracked(f):
1732 if not ds.set_untracked(f):
1730 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1733 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1731 rejected.append(f)
1734 rejected.append(f)
1732 return rejected
1735 return rejected
1733
1736
1734 def copy(self, source, dest):
1737 def copy(self, source, dest):
1735 try:
1738 try:
1736 st = self._repo.wvfs.lstat(dest)
1739 st = self._repo.wvfs.lstat(dest)
1737 except FileNotFoundError:
1740 except FileNotFoundError:
1738 self._repo.ui.warn(
1741 self._repo.ui.warn(
1739 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1742 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1740 )
1743 )
1741 return
1744 return
1742 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1745 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1743 self._repo.ui.warn(
1746 self._repo.ui.warn(
1744 _(b"copy failed: %s is not a file or a symbolic link\n")
1747 _(b"copy failed: %s is not a file or a symbolic link\n")
1745 % self._repo.dirstate.pathto(dest)
1748 % self._repo.dirstate.pathto(dest)
1746 )
1749 )
1747 else:
1750 else:
1748 with self._repo.wlock():
1751 with self._repo.wlock():
1749 ds = self._repo.dirstate
1752 ds = self._repo.dirstate
1750 ds.set_tracked(dest)
1753 ds.set_tracked(dest)
1751 ds.copy(source, dest)
1754 ds.copy(source, dest)
1752
1755
1753 def match(
1756 def match(
1754 self,
1757 self,
1755 pats=None,
1758 pats=None,
1756 include=None,
1759 include=None,
1757 exclude=None,
1760 exclude=None,
1758 default=b'glob',
1761 default=b'glob',
1759 listsubrepos=False,
1762 listsubrepos=False,
1760 badfn=None,
1763 badfn=None,
1761 cwd=None,
1764 cwd=None,
1762 ):
1765 ):
1763 r = self._repo
1766 r = self._repo
1764 if not cwd:
1767 if not cwd:
1765 cwd = r.getcwd()
1768 cwd = r.getcwd()
1766
1769
1767 # Only a case insensitive filesystem needs magic to translate user input
1770 # Only a case insensitive filesystem needs magic to translate user input
1768 # to actual case in the filesystem.
1771 # to actual case in the filesystem.
1769 icasefs = not util.fscasesensitive(r.root)
1772 icasefs = not util.fscasesensitive(r.root)
1770 return matchmod.match(
1773 return matchmod.match(
1771 r.root,
1774 r.root,
1772 cwd,
1775 cwd,
1773 pats,
1776 pats,
1774 include,
1777 include,
1775 exclude,
1778 exclude,
1776 default,
1779 default,
1777 auditor=r.auditor,
1780 auditor=r.auditor,
1778 ctx=self,
1781 ctx=self,
1779 listsubrepos=listsubrepos,
1782 listsubrepos=listsubrepos,
1780 badfn=badfn,
1783 badfn=badfn,
1781 icasefs=icasefs,
1784 icasefs=icasefs,
1782 )
1785 )
1783
1786
1784 def _filtersuspectsymlink(self, files):
1787 def _filtersuspectsymlink(self, files):
1785 if not files or self._repo.dirstate._checklink:
1788 if not files or self._repo.dirstate._checklink:
1786 return files
1789 return files
1787
1790
1788 # Symlink placeholders may get non-symlink-like contents
1791 # Symlink placeholders may get non-symlink-like contents
1789 # via user error or dereferencing by NFS or Samba servers,
1792 # via user error or dereferencing by NFS or Samba servers,
1790 # so we filter out any placeholders that don't look like a
1793 # so we filter out any placeholders that don't look like a
1791 # symlink
1794 # symlink
1792 sane = []
1795 sane = []
1793 for f in files:
1796 for f in files:
1794 if self.flags(f) == b'l':
1797 if self.flags(f) == b'l':
1795 d = self[f].data()
1798 d = self[f].data()
1796 if (
1799 if (
1797 d == b''
1800 d == b''
1798 or len(d) >= 1024
1801 or len(d) >= 1024
1799 or b'\n' in d
1802 or b'\n' in d
1800 or stringutil.binary(d)
1803 or stringutil.binary(d)
1801 ):
1804 ):
1802 self._repo.ui.debug(
1805 self._repo.ui.debug(
1803 b'ignoring suspect symlink placeholder "%s"\n' % f
1806 b'ignoring suspect symlink placeholder "%s"\n' % f
1804 )
1807 )
1805 continue
1808 continue
1806 sane.append(f)
1809 sane.append(f)
1807 return sane
1810 return sane
1808
1811
1809 def _checklookup(self, files, mtime_boundary):
1812 def _checklookup(self, files, mtime_boundary):
1810 # check for any possibly clean files
1813 # check for any possibly clean files
1811 if not files:
1814 if not files:
1812 return [], [], [], []
1815 return [], [], [], []
1813
1816
1814 modified = []
1817 modified = []
1815 deleted = []
1818 deleted = []
1816 clean = []
1819 clean = []
1817 fixup = []
1820 fixup = []
1818 pctx = self._parents[0]
1821 pctx = self._parents[0]
1819 # do a full compare of any files that might have changed
1822 # do a full compare of any files that might have changed
1820 for f in sorted(files):
1823 for f in sorted(files):
1821 try:
1824 try:
1822 # This will return True for a file that got replaced by a
1825 # This will return True for a file that got replaced by a
1823 # directory in the interim, but fixing that is pretty hard.
1826 # directory in the interim, but fixing that is pretty hard.
1824 if (
1827 if (
1825 f not in pctx
1828 f not in pctx
1826 or self.flags(f) != pctx.flags(f)
1829 or self.flags(f) != pctx.flags(f)
1827 or pctx[f].cmp(self[f])
1830 or pctx[f].cmp(self[f])
1828 ):
1831 ):
1829 modified.append(f)
1832 modified.append(f)
1830 elif mtime_boundary is None:
1833 elif mtime_boundary is None:
1831 clean.append(f)
1834 clean.append(f)
1832 else:
1835 else:
1833 s = self[f].lstat()
1836 s = self[f].lstat()
1834 mode = s.st_mode
1837 mode = s.st_mode
1835 size = s.st_size
1838 size = s.st_size
1836 file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
1839 file_mtime = timestamp.reliable_mtime_of(s, mtime_boundary)
1837 if file_mtime is not None:
1840 if file_mtime is not None:
1838 cache_info = (mode, size, file_mtime)
1841 cache_info = (mode, size, file_mtime)
1839 fixup.append((f, cache_info))
1842 fixup.append((f, cache_info))
1840 else:
1843 else:
1841 clean.append(f)
1844 clean.append(f)
1842 except (IOError, OSError):
1845 except (IOError, OSError):
1843 # A file become inaccessible in between? Mark it as deleted,
1846 # A file become inaccessible in between? Mark it as deleted,
1844 # matching dirstate behavior (issue5584).
1847 # matching dirstate behavior (issue5584).
1845 # The dirstate has more complex behavior around whether a
1848 # The dirstate has more complex behavior around whether a
1846 # missing file matches a directory, etc, but we don't need to
1849 # missing file matches a directory, etc, but we don't need to
1847 # bother with that: if f has made it to this point, we're sure
1850 # bother with that: if f has made it to this point, we're sure
1848 # it's in the dirstate.
1851 # it's in the dirstate.
1849 deleted.append(f)
1852 deleted.append(f)
1850
1853
1851 return modified, deleted, clean, fixup
1854 return modified, deleted, clean, fixup
1852
1855
1853 def _poststatusfixup(self, status, fixup):
1856 def _poststatusfixup(self, status, fixup):
1854 """update dirstate for files that are actually clean"""
1857 """update dirstate for files that are actually clean"""
1855 testing.wait_on_cfg(self._repo.ui, b'status.pre-dirstate-write-file')
1858 testing.wait_on_cfg(self._repo.ui, b'status.pre-dirstate-write-file')
1856 dirstate = self._repo.dirstate
1859 dirstate = self._repo.dirstate
1857 poststatus = self._repo.postdsstatus()
1860 poststatus = self._repo.postdsstatus()
1858 if fixup:
1861 if fixup:
1859 if dirstate.is_changing_parents:
1862 if dirstate.is_changing_parents:
1860 normal = lambda f, pfd: dirstate.update_file(
1863 normal = lambda f, pfd: dirstate.update_file(
1861 f,
1864 f,
1862 p1_tracked=True,
1865 p1_tracked=True,
1863 wc_tracked=True,
1866 wc_tracked=True,
1864 )
1867 )
1865 else:
1868 else:
1866 normal = dirstate.set_clean
1869 normal = dirstate.set_clean
1867 for f, pdf in fixup:
1870 for f, pdf in fixup:
1868 normal(f, pdf)
1871 normal(f, pdf)
1869 if poststatus or self._repo.dirstate._dirty:
1872 if poststatus or self._repo.dirstate._dirty:
1870 try:
1873 try:
1871 # updating the dirstate is optional
1874 # updating the dirstate is optional
1872 # so we don't wait on the lock
1875 # so we don't wait on the lock
1873 # wlock can invalidate the dirstate, so cache normal _after_
1876 # wlock can invalidate the dirstate, so cache normal _after_
1874 # taking the lock
1877 # taking the lock
1875 pre_dirty = dirstate._dirty
1878 pre_dirty = dirstate._dirty
1876 with self._repo.wlock(False):
1879 with self._repo.wlock(False):
1877 assert self._repo.dirstate is dirstate
1880 assert self._repo.dirstate is dirstate
1878 post_dirty = dirstate._dirty
1881 post_dirty = dirstate._dirty
1879 if post_dirty:
1882 if post_dirty:
1880 tr = self._repo.currenttransaction()
1883 tr = self._repo.currenttransaction()
1881 dirstate.write(tr)
1884 dirstate.write(tr)
1882 elif pre_dirty:
1885 elif pre_dirty:
1883 # the wlock grabbing detected that dirtate changes
1886 # the wlock grabbing detected that dirtate changes
1884 # needed to be dropped
1887 # needed to be dropped
1885 m = b'skip updating dirstate: identity mismatch\n'
1888 m = b'skip updating dirstate: identity mismatch\n'
1886 self._repo.ui.debug(m)
1889 self._repo.ui.debug(m)
1887 if poststatus:
1890 if poststatus:
1888 for ps in poststatus:
1891 for ps in poststatus:
1889 ps(self, status)
1892 ps(self, status)
1890 except error.LockError:
1893 except error.LockError:
1891 dirstate.invalidate()
1894 dirstate.invalidate()
1892 finally:
1895 finally:
1893 # Even if the wlock couldn't be grabbed, clear out the list.
1896 # Even if the wlock couldn't be grabbed, clear out the list.
1894 self._repo.clearpostdsstatus()
1897 self._repo.clearpostdsstatus()
1895
1898
1896 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1899 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1897 '''Gets the status from the dirstate -- internal use only.'''
1900 '''Gets the status from the dirstate -- internal use only.'''
1898 subrepos = []
1901 subrepos = []
1899 if b'.hgsub' in self:
1902 if b'.hgsub' in self:
1900 subrepos = sorted(self.substate)
1903 subrepos = sorted(self.substate)
1901 dirstate = self._repo.dirstate
1904 dirstate = self._repo.dirstate
1902 with dirstate.running_status(self._repo):
1905 with dirstate.running_status(self._repo):
1903 cmp, s, mtime_boundary = dirstate.status(
1906 cmp, s, mtime_boundary = dirstate.status(
1904 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1907 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1905 )
1908 )
1906
1909
1907 # check for any possibly clean files
1910 # check for any possibly clean files
1908 fixup = []
1911 fixup = []
1909 if cmp:
1912 if cmp:
1910 modified2, deleted2, clean_set, fixup = self._checklookup(
1913 modified2, deleted2, clean_set, fixup = self._checklookup(
1911 cmp, mtime_boundary
1914 cmp, mtime_boundary
1912 )
1915 )
1913 s.modified.extend(modified2)
1916 s.modified.extend(modified2)
1914 s.deleted.extend(deleted2)
1917 s.deleted.extend(deleted2)
1915
1918
1916 if clean_set and clean:
1919 if clean_set and clean:
1917 s.clean.extend(clean_set)
1920 s.clean.extend(clean_set)
1918 if fixup and clean:
1921 if fixup and clean:
1919 s.clean.extend((f for f, _ in fixup))
1922 s.clean.extend((f for f, _ in fixup))
1920
1923
1921 self._poststatusfixup(s, fixup)
1924 self._poststatusfixup(s, fixup)
1922
1925
1923 if match.always():
1926 if match.always():
1924 # cache for performance
1927 # cache for performance
1925 if s.unknown or s.ignored or s.clean:
1928 if s.unknown or s.ignored or s.clean:
1926 # "_status" is cached with list*=False in the normal route
1929 # "_status" is cached with list*=False in the normal route
1927 self._status = scmutil.status(
1930 self._status = scmutil.status(
1928 s.modified, s.added, s.removed, s.deleted, [], [], []
1931 s.modified, s.added, s.removed, s.deleted, [], [], []
1929 )
1932 )
1930 else:
1933 else:
1931 self._status = s
1934 self._status = s
1932
1935
1933 return s
1936 return s
1934
1937
1935 @propertycache
1938 @propertycache
1936 def _copies(self):
1939 def _copies(self):
1937 p1copies = {}
1940 p1copies = {}
1938 p2copies = {}
1941 p2copies = {}
1939 parents = self._repo.dirstate.parents()
1942 parents = self._repo.dirstate.parents()
1940 p1manifest = self._repo[parents[0]].manifest()
1943 p1manifest = self._repo[parents[0]].manifest()
1941 p2manifest = self._repo[parents[1]].manifest()
1944 p2manifest = self._repo[parents[1]].manifest()
1942 changedset = set(self.added()) | set(self.modified())
1945 changedset = set(self.added()) | set(self.modified())
1943 narrowmatch = self._repo.narrowmatch()
1946 narrowmatch = self._repo.narrowmatch()
1944 for dst, src in self._repo.dirstate.copies().items():
1947 for dst, src in self._repo.dirstate.copies().items():
1945 if dst not in changedset or not narrowmatch(dst):
1948 if dst not in changedset or not narrowmatch(dst):
1946 continue
1949 continue
1947 if src in p1manifest:
1950 if src in p1manifest:
1948 p1copies[dst] = src
1951 p1copies[dst] = src
1949 elif src in p2manifest:
1952 elif src in p2manifest:
1950 p2copies[dst] = src
1953 p2copies[dst] = src
1951 return p1copies, p2copies
1954 return p1copies, p2copies
1952
1955
1953 @propertycache
1956 @propertycache
1954 def _manifest(self):
1957 def _manifest(self):
1955 """generate a manifest corresponding to the values in self._status
1958 """generate a manifest corresponding to the values in self._status
1956
1959
1957 This reuse the file nodeid from parent, but we use special node
1960 This reuse the file nodeid from parent, but we use special node
1958 identifiers for added and modified files. This is used by manifests
1961 identifiers for added and modified files. This is used by manifests
1959 merge to see that files are different and by update logic to avoid
1962 merge to see that files are different and by update logic to avoid
1960 deleting newly added files.
1963 deleting newly added files.
1961 """
1964 """
1962 return self._buildstatusmanifest(self._status)
1965 return self._buildstatusmanifest(self._status)
1963
1966
1964 def _buildstatusmanifest(self, status):
1967 def _buildstatusmanifest(self, status):
1965 """Builds a manifest that includes the given status results."""
1968 """Builds a manifest that includes the given status results."""
1966 parents = self.parents()
1969 parents = self.parents()
1967
1970
1968 man = parents[0].manifest().copy()
1971 man = parents[0].manifest().copy()
1969
1972
1970 ff = self._flagfunc
1973 ff = self._flagfunc
1971 for i, l in (
1974 for i, l in (
1972 (self._repo.nodeconstants.addednodeid, status.added),
1975 (self._repo.nodeconstants.addednodeid, status.added),
1973 (self._repo.nodeconstants.modifiednodeid, status.modified),
1976 (self._repo.nodeconstants.modifiednodeid, status.modified),
1974 ):
1977 ):
1975 for f in l:
1978 for f in l:
1976 man[f] = i
1979 man[f] = i
1977 try:
1980 try:
1978 man.setflag(f, ff(f))
1981 man.setflag(f, ff(f))
1979 except OSError:
1982 except OSError:
1980 pass
1983 pass
1981
1984
1982 for f in status.deleted + status.removed:
1985 for f in status.deleted + status.removed:
1983 if f in man:
1986 if f in man:
1984 del man[f]
1987 del man[f]
1985
1988
1986 return man
1989 return man
1987
1990
1988 def _buildstatus(
1991 def _buildstatus(
1989 self, other, s, match, listignored, listclean, listunknown
1992 self, other, s, match, listignored, listclean, listunknown
1990 ):
1993 ):
1991 """build a status with respect to another context
1994 """build a status with respect to another context
1992
1995
1993 This includes logic for maintaining the fast path of status when
1996 This includes logic for maintaining the fast path of status when
1994 comparing the working directory against its parent, which is to skip
1997 comparing the working directory against its parent, which is to skip
1995 building a new manifest if self (working directory) is not comparing
1998 building a new manifest if self (working directory) is not comparing
1996 against its parent (repo['.']).
1999 against its parent (repo['.']).
1997 """
2000 """
1998 s = self._dirstatestatus(match, listignored, listclean, listunknown)
2001 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1999 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
2002 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
2000 # might have accidentally ended up with the entire contents of the file
2003 # might have accidentally ended up with the entire contents of the file
2001 # they are supposed to be linking to.
2004 # they are supposed to be linking to.
2002 s.modified[:] = self._filtersuspectsymlink(s.modified)
2005 s.modified[:] = self._filtersuspectsymlink(s.modified)
2003 if other != self._repo[b'.']:
2006 if other != self._repo[b'.']:
2004 s = super(workingctx, self)._buildstatus(
2007 s = super(workingctx, self)._buildstatus(
2005 other, s, match, listignored, listclean, listunknown
2008 other, s, match, listignored, listclean, listunknown
2006 )
2009 )
2007 return s
2010 return s
2008
2011
2009 def _matchstatus(self, other, match):
2012 def _matchstatus(self, other, match):
2010 """override the match method with a filter for directory patterns
2013 """override the match method with a filter for directory patterns
2011
2014
2012 We use inheritance to customize the match.bad method only in cases of
2015 We use inheritance to customize the match.bad method only in cases of
2013 workingctx since it belongs only to the working directory when
2016 workingctx since it belongs only to the working directory when
2014 comparing against the parent changeset.
2017 comparing against the parent changeset.
2015
2018
2016 If we aren't comparing against the working directory's parent, then we
2019 If we aren't comparing against the working directory's parent, then we
2017 just use the default match object sent to us.
2020 just use the default match object sent to us.
2018 """
2021 """
2019 if other != self._repo[b'.']:
2022 if other != self._repo[b'.']:
2020
2023
2021 def bad(f, msg):
2024 def bad(f, msg):
2022 # 'f' may be a directory pattern from 'match.files()',
2025 # 'f' may be a directory pattern from 'match.files()',
2023 # so 'f not in ctx1' is not enough
2026 # so 'f not in ctx1' is not enough
2024 if f not in other and not other.hasdir(f):
2027 if f not in other and not other.hasdir(f):
2025 self._repo.ui.warn(
2028 self._repo.ui.warn(
2026 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2029 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
2027 )
2030 )
2028
2031
2029 match.bad = bad
2032 match.bad = bad
2030 return match
2033 return match
2031
2034
2032 def walk(self, match):
2035 def walk(self, match):
2033 '''Generates matching file names.'''
2036 '''Generates matching file names.'''
2034 return sorted(
2037 return sorted(
2035 self._repo.dirstate.walk(
2038 self._repo.dirstate.walk(
2036 self._repo.narrowmatch(match),
2039 self._repo.narrowmatch(match),
2037 subrepos=sorted(self.substate),
2040 subrepos=sorted(self.substate),
2038 unknown=True,
2041 unknown=True,
2039 ignored=False,
2042 ignored=False,
2040 )
2043 )
2041 )
2044 )
2042
2045
2043 def matches(self, match):
2046 def matches(self, match):
2044 match = self._repo.narrowmatch(match)
2047 match = self._repo.narrowmatch(match)
2045 ds = self._repo.dirstate
2048 ds = self._repo.dirstate
2046 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2049 return sorted(f for f in ds.matches(match) if ds.get_entry(f).tracked)
2047
2050
2048 def markcommitted(self, node):
2051 def markcommitted(self, node):
2049 with self._repo.dirstate.changing_parents(self._repo):
2052 with self._repo.dirstate.changing_parents(self._repo):
2050 for f in self.modified() + self.added():
2053 for f in self.modified() + self.added():
2051 self._repo.dirstate.update_file(
2054 self._repo.dirstate.update_file(
2052 f, p1_tracked=True, wc_tracked=True
2055 f, p1_tracked=True, wc_tracked=True
2053 )
2056 )
2054 for f in self.removed():
2057 for f in self.removed():
2055 self._repo.dirstate.update_file(
2058 self._repo.dirstate.update_file(
2056 f, p1_tracked=False, wc_tracked=False
2059 f, p1_tracked=False, wc_tracked=False
2057 )
2060 )
2058 self._repo.dirstate.setparents(node)
2061 self._repo.dirstate.setparents(node)
2059 self._repo._quick_access_changeid_invalidate()
2062 self._repo._quick_access_changeid_invalidate()
2060
2063
2061 sparse.aftercommit(self._repo, node)
2064 sparse.aftercommit(self._repo, node)
2062
2065
2063 # write changes out explicitly, because nesting wlock at
2066 # write changes out explicitly, because nesting wlock at
2064 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2067 # runtime may prevent 'wlock.release()' in 'repo.commit()'
2065 # from immediately doing so for subsequent changing files
2068 # from immediately doing so for subsequent changing files
2066 self._repo.dirstate.write(self._repo.currenttransaction())
2069 self._repo.dirstate.write(self._repo.currenttransaction())
2067
2070
2068 def mergestate(self, clean=False):
2071 def mergestate(self, clean=False):
2069 if clean:
2072 if clean:
2070 return mergestatemod.mergestate.clean(self._repo)
2073 return mergestatemod.mergestate.clean(self._repo)
2071 return mergestatemod.mergestate.read(self._repo)
2074 return mergestatemod.mergestate.read(self._repo)
2072
2075
2073
2076
class committablefilectx(basefilectx):
    """A committablefilectx provides common functionality for a file context
    that wants the ability to commit, e.g. workingfilectx or memfilectx."""

    def __init__(self, repo, path, filelog=None, ctx=None):
        self._repo = repo
        self._path = path
        self._changeid = None
        self._filerev = self._filenode = None

        # both attributes are optional and may instead be provided
        # lazily by subclasses via propertycache
        if filelog is not None:
            self._filelog = filelog
        if ctx:
            self._changectx = ctx

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def linkrev(self):
        # linked to self._changectx no matter if file is modified or not
        return self.rev()

    def renamed(self):
        """Return ``(source path, source filenode)`` or None if not a copy."""
        source = self.copysource()
        if not source:
            return None
        srcnode = self._changectx._parents[0]._manifest.get(
            source, self._repo.nodeconstants.nullid
        )
        return (source, srcnode)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        nullid = self._repo.nodeconstants.nullid

        def filenode(ctx, path):
            return ctx._manifest.get(path, nullid)

        path = self._path
        filelog = self._filelog
        parentctxs = self._changectx._parents
        renamed = self.renamed()

        if renamed:
            # copy source: filelog is unknown at this point
            candidates = [renamed + (None,)]
        else:
            candidates = [(path, filenode(parentctxs[0], path), filelog)]

        candidates.extend(
            (path, filenode(pctx, path), filelog) for pctx in parentctxs[1:]
        )

        return [
            self._parentfilectx(p, fileid=n, filelog=l)
            for p, n, l in candidates
            if n != nullid
        ]

    def children(self):
        # a not-yet-committed file has no children
        return []
2136
2139
2137
2140
2138 class workingfilectx(committablefilectx):
2141 class workingfilectx(committablefilectx):
2139 """A workingfilectx object makes access to data related to a particular
2142 """A workingfilectx object makes access to data related to a particular
2140 file in the working directory convenient."""
2143 file in the working directory convenient."""
2141
2144
2142 def __init__(self, repo, path, filelog=None, workingctx=None):
2145 def __init__(self, repo, path, filelog=None, workingctx=None):
2143 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2146 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2144
2147
2145 @propertycache
2148 @propertycache
2146 def _changectx(self):
2149 def _changectx(self):
2147 return workingctx(self._repo)
2150 return workingctx(self._repo)
2148
2151
2149 def data(self):
2152 def data(self):
2150 return self._repo.wread(self._path)
2153 return self._repo.wread(self._path)
2151
2154
2152 def copysource(self):
2155 def copysource(self):
2153 return self._repo.dirstate.copied(self._path)
2156 return self._repo.dirstate.copied(self._path)
2154
2157
2155 def size(self):
2158 def size(self):
2156 return self._repo.wvfs.lstat(self._path).st_size
2159 return self._repo.wvfs.lstat(self._path).st_size
2157
2160
2158 def lstat(self):
2161 def lstat(self):
2159 return self._repo.wvfs.lstat(self._path)
2162 return self._repo.wvfs.lstat(self._path)
2160
2163
2161 def date(self):
2164 def date(self):
2162 t, tz = self._changectx.date()
2165 t, tz = self._changectx.date()
2163 try:
2166 try:
2164 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2167 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2165 except FileNotFoundError:
2168 except FileNotFoundError:
2166 return (t, tz)
2169 return (t, tz)
2167
2170
2168 def exists(self):
2171 def exists(self):
2169 return self._repo.wvfs.exists(self._path)
2172 return self._repo.wvfs.exists(self._path)
2170
2173
2171 def lexists(self):
2174 def lexists(self):
2172 return self._repo.wvfs.lexists(self._path)
2175 return self._repo.wvfs.lexists(self._path)
2173
2176
2174 def audit(self):
2177 def audit(self):
2175 return self._repo.wvfs.audit(self._path)
2178 return self._repo.wvfs.audit(self._path)
2176
2179
2177 def cmp(self, fctx):
2180 def cmp(self, fctx):
2178 """compare with other file context
2181 """compare with other file context
2179
2182
2180 returns True if different than fctx.
2183 returns True if different than fctx.
2181 """
2184 """
2182 # fctx should be a filectx (not a workingfilectx)
2185 # fctx should be a filectx (not a workingfilectx)
2183 # invert comparison to reuse the same code path
2186 # invert comparison to reuse the same code path
2184 return fctx.cmp(self)
2187 return fctx.cmp(self)
2185
2188
2186 def remove(self, ignoremissing=False):
2189 def remove(self, ignoremissing=False):
2187 """wraps unlink for a repo's working directory"""
2190 """wraps unlink for a repo's working directory"""
2188 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2191 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2189 self._repo.wvfs.unlinkpath(
2192 self._repo.wvfs.unlinkpath(
2190 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2193 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2191 )
2194 )
2192
2195
2193 def write(self, data, flags, backgroundclose=False, **kwargs):
2196 def write(self, data, flags, backgroundclose=False, **kwargs):
2194 """wraps repo.wwrite"""
2197 """wraps repo.wwrite"""
2195 return self._repo.wwrite(
2198 return self._repo.wwrite(
2196 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2199 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2197 )
2200 )
2198
2201
2199 def markcopied(self, src):
2202 def markcopied(self, src):
2200 """marks this file a copy of `src`"""
2203 """marks this file a copy of `src`"""
2201 self._repo.dirstate.copy(src, self._path)
2204 self._repo.dirstate.copy(src, self._path)
2202
2205
    def clearunknown(self):
        """Removes conflicting items in the working directory so that
        ``write()`` can be called successfully.
        """
        wvfs = self._repo.wvfs
        f = self._path
        wvfs.audit(f)
        if self._repo.ui.configbool(
            b'experimental', b'merge.checkpathconflicts'
        ):
            # remove files under the directory as they should already be
            # warned and backed up
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.rmtree(f, forcibly=True)
            # Walk ancestor paths from the deepest upward: a regular file or
            # symlink sitting at any ancestor blocks creating this path, so
            # unlink the first one found and stop.
            for p in reversed(list(pathutil.finddirs(f))):
                if wvfs.isfileorlink(p):
                    wvfs.unlink(p)
                    break
        else:
            # don't remove files if path conflicts are not processed
            if wvfs.isdir(f) and not wvfs.islink(f):
                wvfs.removedirs(f)
2225
2228
2226 def setflags(self, l, x):
2229 def setflags(self, l, x):
2227 self._repo.wvfs.setflags(self._path, l, x)
2230 self._repo.wvfs.setflags(self._path, l, x)
2228
2231
2229
2232
class overlayworkingctx(committablectx):
    """Wraps another mutable context with a write-back cache that can be
    converted into a commit context.

    self._cache[path] maps to a dict with keys: {
        'exists': bool?
        'date': date?
        'data': str?
        'flags': str?
        'copied': str? (path or None)
    }
    If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
    is `False`, the file was deleted.
    """

    def __init__(self, repo):
        super(overlayworkingctx, self).__init__(repo)
        # start with an empty overlay (also resets any merge state)
        self.clean()

    def setbase(self, wrappedctx):
        # ``wrappedctx`` is the read-through base: all cache misses fall
        # back to it, and it becomes the (sole) parent until setparents().
        self._wrappedctx = wrappedctx
        self._parents = [wrappedctx]
        # Drop old manifest cache as it is now out of date.
        # This is necessary when, e.g., rebasing several nodes with one
        # ``overlayworkingctx`` (e.g. with --collapse).
        util.clearcachedproperty(self, b'_manifest')

    def setparents(self, p1node, p2node=None):
        """Set both parents; p1 must match the wrapped (base) context."""
        if p2node is None:
            p2node = self._repo.nodeconstants.nullid
        assert p1node == self._wrappedctx.node()
        self._parents = [self._wrappedctx, self._repo.unfiltered()[p2node]]

    def data(self, path):
        """Return file content for ``path``, from the cache when dirty,
        otherwise from the wrapped context; raises for cached deletions."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                if self._cache[path][b'data'] is not None:
                    return self._cache[path][b'data']
                else:
                    # Must fallback here, too, because we only set flags.
                    return self._wrappedctx[path].data()
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].data()

    @propertycache
    def _manifest(self):
        # Derive a manifest from p1's, applying this overlay's
        # additions, modifications and removals on top of a copy.
        parents = self.parents()
        man = parents[0].manifest().copy()

        flag = self._flagfunc
        for path in self.added():
            man[path] = self._repo.nodeconstants.addednodeid
            man.setflag(path, flag(path))
        for path in self.modified():
            man[path] = self._repo.nodeconstants.modifiednodeid
            man.setflag(path, flag(path))
        for path in self.removed():
            del man[path]
        return man

    @propertycache
    def _flagfunc(self):
        # Flag lookups only consult the cache; used by _manifest, which
        # only queries cached (added/modified) paths.
        def f(path):
            return self._cache[path][b'flags']

        return f

    def files(self):
        """All paths touched by this overlay, sorted."""
        return sorted(self.added() + self.modified() + self.removed())

    def modified(self):
        """Cached paths that exist here and also exist in the parent."""
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def added(self):
        """Cached paths that exist here but not in the parent."""
        return [
            f
            for f in self._cache.keys()
            if self._cache[f][b'exists'] and not self._existsinparent(f)
        ]

    def removed(self):
        """Cached paths deleted here that still exist in the parent."""
        return [
            f
            for f in self._cache.keys()
            if not self._cache[f][b'exists'] and self._existsinparent(f)
        ]

    def p1copies(self):
        """Map of dest -> copy source, limited to the narrowmatch."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def p2copies(self):
        """Map of dest -> copy source, limited to the narrowmatch."""
        copies = {}
        narrowmatch = self._repo.narrowmatch()
        for f in self._cache.keys():
            if not narrowmatch(f):
                continue
            copies.pop(f, None)  # delete if it exists
            source = self._cache[f][b'copied']
            if source:
                copies[f] = source
        return copies

    def isinmemory(self):
        # distinguishes this context from on-disk working contexts
        return True

    def filedate(self, path):
        """Date for ``path``: cached value when dirty, else the parent's."""
        if self.isdirty(path):
            return self._cache[path][b'date']
        else:
            return self._wrappedctx[path].date()

    def markcopied(self, path, origin):
        # Re-mark the path dirty, preserving current date/flags but
        # attaching the copy source.
        self._markdirty(
            path,
            exists=True,
            date=self.filedate(path),
            flags=self.flags(path),
            copied=origin,
        )

    def copydata(self, path):
        """Return the recorded copy source for ``path``, or None."""
        if self.isdirty(path):
            return self._cache[path][b'copied']
        else:
            return None

    def flags(self, path):
        """Flags for ``path``: cached when dirty, else from the parent;
        raises for cached deletions."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return self._cache[path][b'flags']
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        else:
            return self._wrappedctx[path].flags()

    def __contains__(self, key):
        # cache entries win over p1; a cached deletion hides the file
        if key in self._cache:
            return self._cache[key][b'exists']
        return key in self.p1()

    def _existsinparent(self, path):
        try:
            # ``commitctx` raises a ``ManifestLookupError`` if a path does not
            # exist, unlike ``workingctx``, which returns a ``workingfilectx``
            # with an ``exists()`` function.
            self._wrappedctx[path]
            return True
        except error.ManifestLookupError:
            return False

    def _auditconflicts(self, path):
        """Replicates conflict checks done by wvfs.write().

        Since we never write to the filesystem and never call `applyupdates` in
        IMM, we'll never check that a path is actually writable -- e.g., because
        it adds `a/foo`, but `a` is actually a file in the other commit.
        """

        def fail(path, component):
            # p1() is the base and we're receiving "writes" for p2()'s
            # files.
            if b'l' in self.p1()[component].flags():
                raise error.Abort(
                    b"error: %s conflicts with symlink %s "
                    b"in %d." % (path, component, self.p1().rev())
                )
            else:
                raise error.Abort(
                    b"error: '%s' conflicts with file '%s' in "
                    b"%d." % (path, component, self.p1().rev())
                )

        # Test that each new directory to be created to write this path from p2
        # is not a file in p1.
        components = path.split(b'/')
        for i in range(len(components)):
            component = b"/".join(components[0:i])
            if component in self:
                fail(path, component)

        # Test the other direction -- that this path from p2 isn't a directory
        # in p1 (test that p1 doesn't have any paths matching `path/*`).
        match = self.match([path], default=b'path')
        mfiles = list(self.p1().manifest().walk(match))
        if len(mfiles) > 0:
            if len(mfiles) == 1 and mfiles[0] == path:
                return
            # omit the files which are deleted in current IMM wctx
            mfiles = [m for m in mfiles if m in self]
            if not mfiles:
                return
            raise error.Abort(
                b"error: file '%s' cannot be written because "
                b" '%s/' is a directory in %s (containing %d "
                b"entries: %s)"
                % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
            )

    def write(self, path, data, flags=b'', **kwargs):
        """Record ``data``/``flags`` for ``path`` in the overlay cache,
        after checking for path conflicts against p1."""
        if data is None:
            raise error.ProgrammingError(b"data must be non-None")
        self._auditconflicts(path)
        self._markdirty(
            path, exists=True, data=data, date=dateutil.makedate(), flags=flags
        )

    def setflags(self, path, l, x):
        # symlink flag takes precedence over executable, mirroring
        # one-character manifest flags (b'l' / b'x' / b'')
        flag = b''
        if l:
            flag = b'l'
        elif x:
            flag = b'x'
        self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)

    def remove(self, path):
        """Record the deletion of ``path`` in the overlay cache."""
        self._markdirty(path, exists=False)

    def exists(self, path):
        """exists behaves like `lexists`, but needs to follow symlinks and
        return False if they are broken.
        """
        if self.isdirty(path):
            # If this path exists and is a symlink, "follow" it by calling
            # exists on the destination path.
            if (
                self._cache[path][b'exists']
                and b'l' in self._cache[path][b'flags']
            ):
                return self.exists(self._cache[path][b'data'].strip())
            else:
                return self._cache[path][b'exists']

        return self._existsinparent(path)

    def lexists(self, path):
        """lexists returns True if the path exists"""
        if self.isdirty(path):
            return self._cache[path][b'exists']

        return self._existsinparent(path)

    def size(self, path):
        """Size of ``path``: cached data length when dirty, else the
        parent's size; raises for cached deletions."""
        if self.isdirty(path):
            if self._cache[path][b'exists']:
                return len(self._cache[path][b'data'])
            else:
                raise error.ProgrammingError(
                    b"No such file or directory: %s" % path
                )
        return self._wrappedctx[path].size()

    def tomemctx(
        self,
        text,
        branch=None,
        extra=None,
        date=None,
        parents=None,
        user=None,
        editor=None,
    ):
        """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
        committed.

        ``text`` is the commit message.
        ``parents`` (optional) are rev numbers.
        """
        # Default parents to the wrapped context if not passed.
        if parents is None:
            parents = self.parents()
            if len(parents) == 1:
                parents = (parents[0], None)

        # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
        if parents[1] is None:
            parents = (self._repo[parents[0]], None)
        else:
            parents = (self._repo[parents[0]], self._repo[parents[1]])

        files = self.files()

        def getfile(repo, memctx, path):
            if self._cache[path][b'exists']:
                return memfilectx(
                    repo,
                    memctx,
                    path,
                    self._cache[path][b'data'],
                    b'l' in self._cache[path][b'flags'],
                    b'x' in self._cache[path][b'flags'],
                    self._cache[path][b'copied'],
                )
            else:
                # Returning None, but including the path in `files`, is
                # necessary for memctx to register a deletion.
                return None

        if branch is None:
            branch = self._wrappedctx.branch()

        return memctx(
            self._repo,
            parents,
            text,
            files,
            getfile,
            date=date,
            extra=extra,
            user=user,
            branch=branch,
            editor=editor,
        )

    def tomemctx_for_amend(self, precursor):
        """Build a memctx that amends ``precursor``: same message, branch,
        date and user, with ``amend_source`` recorded in extra."""
        extra = precursor.extra().copy()
        extra[b'amend_source'] = precursor.hex()
        return self.tomemctx(
            text=precursor.description(),
            branch=precursor.branch(),
            extra=extra,
            date=precursor.date(),
            user=precursor.user(),
        )

    def isdirty(self, path):
        """True if ``path`` has a (write or delete) entry in the cache."""
        return path in self._cache

    def clean(self):
        """Discard all cached changes and any in-memory merge state."""
        self._mergestate = None
        self._cache = {}

    def _compact(self):
        """Removes keys from the cache that are actually clean, by comparing
        them with the underlying context.

        This can occur during the merge process, e.g. by passing --tool :local
        to resolve a conflict.
        """
        keys = []
        # This won't be perfect, but can help performance significantly when
        # using things like remotefilelog.
        scmutil.prefetchfiles(
            self.repo(),
            [
                (
                    self.p1().rev(),
                    scmutil.matchfiles(self.repo(), self._cache.keys()),
                )
            ],
        )

        for path in self._cache.keys():
            cache = self._cache[path]
            try:
                underlying = self._wrappedctx[path]
                if (
                    underlying.data() == cache[b'data']
                    and underlying.flags() == cache[b'flags']
                ):
                    keys.append(path)
            except error.ManifestLookupError:
                # Path not in the underlying manifest (created).
                continue

        # Deletion is deferred so the dict is not mutated while iterating.
        for path in keys:
            del self._cache[path]
        return keys

    def _markdirty(
        self, path, exists, data=None, date=None, flags=b'', copied=None
    ):
        # data not provided, let's see if we already have some; if not, let's
        # grab it from our underlying context, so that we always have data if
        # the file is marked as existing.
        if exists and data is None:
            oldentry = self._cache.get(path) or {}
            data = oldentry.get(b'data')
            if data is None:
                data = self._wrappedctx[path].data()

        self._cache[path] = {
            b'exists': exists,
            b'data': data,
            b'date': date,
            b'flags': flags,
            b'copied': copied,
        }
        # the cached manifest no longer reflects the overlay
        util.clearcachedproperty(self, b'_manifest')

    def filectx(self, path, filelog=None):
        """Return an overlayworkingfilectx bound to this context."""
        return overlayworkingfilectx(
            self._repo, path, parent=self, filelog=filelog
        )

    def mergestate(self, clean=False):
        """Return the in-memory merge state, creating it on first use or
        when ``clean`` is requested."""
        if clean or self._mergestate is None:
            self._mergestate = mergestatemod.memmergestate(self._repo)
        return self._mergestate
2647
2650
2648
2651
class overlayworkingfilectx(committablefilectx):
    """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory
    cache, which can be flushed through later by calling ``flush()``."""

    # Nearly every method below delegates to the owning overlayworkingctx
    # (``self._parent``), keyed by this file's path.

    def __init__(self, repo, path, filelog=None, parent=None):
        super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
        self._repo = repo
        self._parent = parent
        self._path = path

    def cmp(self, fctx):
        # content-only comparison with another file context
        return self.data() != fctx.data()

    def changectx(self):
        return self._parent

    def data(self):
        return self._parent.data(self._path)

    def date(self):
        return self._parent.filedate(self._path)

    def exists(self):
        # in-memory files have no broken-symlink distinction here;
        # same answer as lexists()
        return self.lexists()

    def lexists(self):
        return self._parent.exists(self._path)

    def copysource(self):
        return self._parent.copydata(self._path)

    def size(self):
        return self._parent.size(self._path)

    def markcopied(self, origin):
        self._parent.markcopied(self._path, origin)

    def audit(self):
        # nothing to audit for purely in-memory files
        pass

    def flags(self):
        return self._parent.flags(self._path)

    def setflags(self, islink, isexec):
        return self._parent.setflags(self._path, islink, isexec)

    def write(self, data, flags, backgroundclose=False, **kwargs):
        # ``backgroundclose`` is accepted for interface compatibility but
        # not forwarded: there is no file handle to close in memory.
        return self._parent.write(self._path, data, flags, **kwargs)

    def remove(self, ignoremissing=False):
        # ``ignoremissing`` is accepted for interface compatibility but
        # not forwarded to the in-memory removal.
        return self._parent.remove(self._path)

    def clearunknown(self):
        # no on-disk conflicts can exist for in-memory files
        pass
2703
2706
2704
2707
class workingcommitctx(workingctx):
    """A workingcommitctx object makes access to data related to
    the revision being committed convenient.

    This hides changes in the working directory, if they aren't
    committed in this context.
    """

    def __init__(
        self, repo, changes, text=b"", user=None, date=None, extra=None
    ):
        # ``changes`` is passed through as the status of the commit
        super(workingcommitctx, self).__init__(
            repo, text, user, date, extra, changes
        )

    def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
        """Return matched files only in ``self._status``

        Uncommitted files appear "clean" via this context, even if
        they aren't actually so in the working directory.

        ``ignored`` and ``unknown`` are accepted for interface
        compatibility; the corresponding status lists are always empty.
        """
        if clean:
            # everything in the manifest not being committed is "clean"
            clean = [f for f in self._manifest if f not in self._changedset]
        else:
            clean = []
        return scmutil.status(
            [f for f in self._status.modified if match(f)],
            [f for f in self._status.added if match(f)],
            [f for f in self._status.removed if match(f)],
            [],
            [],
            [],
            clean,
        )

    @propertycache
    def _changedset(self):
        """Return the set of files changed in this context"""
        changed = set(self._status.modified)
        changed.update(self._status.added)
        changed.update(self._status.removed)
        return changed
2747
2750
2748
2751
def makecachingfilectxfn(func):
    """Create a filectxfn that caches based on the path.

    We can't use util.cachefunc because it uses all arguments as the cache
    key and this creates a cycle since the arguments include the repo and
    memctx.
    """
    cache = {}

    def getfilectx(repo, memctx, path):
        # EAFP: a hit is the common case, so try the cache first and only
        # invoke ``func`` on a miss.
        try:
            return cache[path]
        except KeyError:
            result = cache[path] = func(repo, memctx, path)
            return result

    return getfilectx
2764
2767
2765
2768
def memfilefromctx(ctx):
    """Given a context return a memfilectx for ctx[path]

    This is a convenience method for building a memctx based on another
    context.
    """

    def getfilectx(repo, memctx, path):
        # Pull the file from the source context and mirror its content,
        # flags and copy metadata into an in-memory file context.
        source = ctx[path]
        return memfilectx(
            repo,
            memctx,
            path,
            source.data(),
            islink=source.islink(),
            isexec=source.isexec(),
            copysource=source.copysource(),
        )

    return getfilectx
2787
2790
2788
2791
def memfilefrompatch(patchstore):
    """Given a patch (e.g. patchstore object) return a memfilectx

    This is a convenience method for building a memctx based on a patchstore.
    """

    def getfilectx(repo, memctx, path):
        file_data, mode, copysource = patchstore.getfile(path)
        if file_data is None:
            # the patch removed this file
            return None
        # mode is only unpacked after the removal check: it may not be a
        # valid (islink, isexec) pair for removed files.
        islink, isexec = mode
        return memfilectx(
            repo,
            memctx,
            path,
            file_data,
            islink=islink,
            isexec=isexec,
            copysource=copysource,
        )

    return getfilectx
2811
2814
2812
2815
class memctx(committablectx):
    """Use memctx to perform in-memory commits via localrepo.commitctx().

    Revision information is supplied at initialization time, while the
    related file data is made available through a callback mechanism.
    'repo' is the current localrepo, 'parents' is a sequence of two
    parent revisions identifiers (pass None for every missing parent),
    'text' is the commit message and 'files' lists names of files
    touched by the revision (normalized and relative to repository
    root).

    filectxfn(repo, memctx, path) is a callable receiving the
    repository, the current memctx object and the normalized path of
    requested file, relative to repository root. It is fired by the
    commit function for every file in 'files', but calls order is
    undefined. If the file is available in the revision being
    committed (updated or added), filectxfn returns a memfilectx
    object. If the file was removed, filectxfn returns None for recent
    Mercurial. Moved files are represented by marking the source file
    removed and the new file added with copy information (see
    memfilectx).

    user receives the committer name and defaults to current
    repository username, date is the commit date in any format
    supported by dateutil.parsedate() and defaults to current date, extra
    is a dictionary of metadata or is left empty.
    """

    # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
    # Extensions that need to retain compatibility across Mercurial 3.1 can use
    # this field to determine what to do in filectxfn.
    _returnnoneformissingfiles = True

    def __init__(
        self,
        repo,
        parents,
        text,
        files,
        filectxfn,
        user=None,
        date=None,
        extra=None,
        branch=None,
        editor=None,
    ):
        super(memctx, self).__init__(
            repo, text, user, date, extra, branch=branch
        )
        self._rev = None
        self._node = None
        # a None parent means the null revision
        parents = [(p or self._repo.nodeconstants.nullid) for p in parents]
        p1, p2 = parents
        self._parents = [self._repo[p] for p in (p1, p2)]
        files = sorted(set(files))
        self._files = files
        self.substate = {}

        if isinstance(filectxfn, patch.filestore):
            filectxfn = memfilefrompatch(filectxfn)
        elif not callable(filectxfn):
            # if store is not callable, wrap it in a function
            filectxfn = memfilefromctx(filectxfn)

        # memoizing increases performance for e.g. vcs convert scenarios.
        self._filectxfn = makecachingfilectxfn(filectxfn)

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def filectx(self, path, filelog=None):
        """get a file context from the working directory

        Returns None if file doesn't exist and should be removed."""
        return self._filectxfn(self._repo, self, path)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @propertycache
    def _manifest(self):
        """generate a manifest based on the return values of filectxfn"""

        # keep this simple for now; just worry about p1
        pctx = self._parents[0]
        man = pctx.manifest().copy()

        for f in self._status.modified:
            man[f] = self._repo.nodeconstants.modifiednodeid

        for f in self._status.added:
            man[f] = self._repo.nodeconstants.addednodeid

        for f in self._status.removed:
            if f in man:
                del man[f]

        return man

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified at construction"""
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "memctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif self[f]:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])

    def parents(self):
        # hide the synthetic null second parent from callers
        if self._parents[1].rev() == nullrev:
            return [self._parents[0]]
        return self._parents
2943
2946
2944
2947
class memfilectx(committablefilectx):
    """memfilectx represents an in-memory file to commit.

    See memctx and committablefilectx for more details.
    """

    def __init__(
        self,
        repo,
        changectx,
        path,
        data,
        islink=False,
        isexec=False,
        copysource=None,
    ):
        """
        path is the normalized file path relative to repository root.
        data is the file content as a string.
        islink is True if the file is a symbolic link.
        isexec is True if the file is executable.
        copied is the source file path if current file was copied in the
        revision being committed, or None."""
        super(memfilectx, self).__init__(repo, path, None, changectx)
        self._data = data
        # symlink takes precedence over the executable bit
        if islink:
            flags = b'l'
        elif isexec:
            flags = b'x'
        else:
            flags = b''
        self._flags = flags
        self._copysource = copysource

    def copysource(self):
        return self._copysource

    def cmp(self, fctx):
        # True when contents differ
        return self.data() != fctx.data()

    def data(self):
        return self._data

    def remove(self, ignoremissing=False):
        """wraps unlink for a repo's working directory"""
        # need to figure out what to do here
        del self._changectx[self._path]

    def write(self, data, flags, **kwargs):
        """wraps repo.wwrite"""
        self._data = data
2995
2998
2996
2999
class metadataonlyctx(committablectx):
    """Like memctx but reusing the manifest of a different commit.
    Intended to be used by lightweight operations that are creating
    metadata-only changes.

    Revision information is supplied at initialization time. 'repo' is the
    current localrepo, 'ctx' is the original revision whose manifest we're
    reusing, 'parents' is a sequence of two parent revisions identifiers
    (pass None for every missing parent), 'text' is the commit message.

    user receives the committer name and defaults to current repository
    username, date is the commit date in any format supported by
    dateutil.parsedate() and defaults to current date, extra is a dictionary of
    metadata or is left empty.
    """

    def __init__(
        self,
        repo,
        originalctx,
        parents=None,
        text=None,
        user=None,
        date=None,
        extra=None,
        editor=None,
    ):
        if text is None:
            text = originalctx.description()
        super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
        self._rev = None
        self._node = None
        self._originalctx = originalctx
        self._manifestnode = originalctx.manifestnode()
        if parents is None:
            parents = originalctx.parents()
        else:
            parents = [repo[p] for p in parents if p is not None]
        parents = parents[:]
        while len(parents) < 2:
            parents.append(repo[nullrev])
        p1, p2 = self._parents = parents

        # sanity check to ensure that the reused manifest parents are
        # manifests of our commit parents
        mp1, mp2 = self.manifestctx().parents
        if p1 != self._repo.nodeconstants.nullid and p1.manifestnode() != mp1:
            raise RuntimeError(
                r"can't reuse the manifest: its p1 "
                r"doesn't match the new ctx p1"
            )
        if p2 != self._repo.nodeconstants.nullid and p2.manifestnode() != mp2:
            raise RuntimeError(
                r"can't reuse the manifest: "
                r"its p2 doesn't match the new ctx p2"
            )

        self._files = originalctx.files()
        self.substate = {}

        if editor:
            self._text = editor(self._repo, self, [])
            self._repo.savecommitmessage(self._text)

    def manifestnode(self):
        return self._manifestnode

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._manifestnode]

    def filectx(self, path, filelog=None):
        return self._originalctx.filectx(path, filelog=filelog)

    def commit(self):
        """commit context to the repo"""
        return self._repo.commitctx(self)

    @property
    def _manifest(self):
        return self._originalctx.manifest()

    @propertycache
    def _status(self):
        """Calculate exact status from ``files`` specified in the ``origctx``
        and parents manifests.
        """
        man1 = self.p1().manifest()
        p2 = self._parents[1]
        # "1 < len(self._parents)" can't be used for checking
        # existence of the 2nd parent, because "metadataonlyctx._parents" is
        # explicitly initialized by the list, of which length is 2.
        if p2.rev() != nullrev:
            man2 = p2.manifest()
            managing = lambda f: f in man1 or f in man2
        else:
            managing = lambda f: f in man1

        modified, added, removed = [], [], []
        for f in self._files:
            if not managing(f):
                added.append(f)
            elif f in self:
                modified.append(f)
            else:
                removed.append(f)

        return scmutil.status(modified, added, removed, [], [], [], [])
3105
3108
3106
3109
class arbitraryfilectx:
    """Allows you to use filectx-like functions on a file in an arbitrary
    location on disk, possibly not in the working directory.
    """

    def __init__(self, path, repo=None):
        # Repo is optional because contrib/simplemerge uses this class.
        self._repo = repo
        self._path = path

    def cmp(self, fctx):
        """Return True when this file's content differs from ``fctx``."""
        # filecmp follows symlinks whereas `cmp` should not, so skip the fast
        # path if either side is a symlink.
        has_symlink = b'l' in self.flags() or b'l' in fctx.flags()
        if not has_symlink and self._repo and isinstance(fctx, workingfilectx):
            # Add a fast-path for merge if both sides are disk-backed.
            # Note that filecmp uses the opposite return values (True if same)
            # from our cmp functions (True if different).
            return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
        return self.data() != fctx.data()

    def path(self):
        return self._path

    def flags(self):
        # arbitrary on-disk files carry no link/exec flags
        return b''

    def data(self):
        return util.readfile(self._path)

    def decodeddata(self):
        return util.readfile(self._path)

    def remove(self):
        util.unlink(self._path)

    def write(self, data, flags, **kwargs):
        assert not flags
        util.writefile(self._path, data)
General Comments 0
You need to be logged in to leave comments. Login now