@@ -1,2681 +1,2612 @@
 # context.py - changeset and file context objects for mercurial
 #
 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 import errno
 import filecmp
 import os
 import re
 import stat

 from .i18n import _
 from .node import (
     addednodeid,
     bin,
     hex,
     modifiednodeid,
     nullid,
     nullrev,
     short,
     wdirid,
     wdirnodes,
     wdirrev,
 )
 from . import (
     dagop,
     encoding,
     error,
     fileset,
     match as matchmod,
-    mdiff,
     obsolete as obsmod,
37 | obsutil, |
|
36 | obsutil, | |
38 | patch, |
|
37 | patch, | |
39 | pathutil, |
|
38 | pathutil, | |
40 | phases, |
|
39 | phases, | |
41 | pycompat, |
|
40 | pycompat, | |
42 | repoview, |
|
41 | repoview, | |
43 | revlog, |
|
42 | revlog, | |
44 | scmutil, |
|
43 | scmutil, | |
45 | sparse, |
|
44 | sparse, | |
46 | subrepo, |
|
45 | subrepo, | |
47 | subrepoutil, |
|
46 | subrepoutil, | |
48 | util, |
|
47 | util, | |
49 | ) |
|
48 | ) | |
50 | from .utils import dateutil |
|
49 | from .utils import dateutil | |
51 |
|
50 | |||
52 | propertycache = util.propertycache |
|
51 | propertycache = util.propertycache | |
53 |
|
52 | |||
54 | nonascii = re.compile(br'[^\x21-\x7f]').search |
|
53 | nonascii = re.compile(br'[^\x21-\x7f]').search | |
55 |
|
54 | |||
56 | class basectx(object): |
|
55 | class basectx(object): | |
57 | """A basectx object represents the common logic for its children: |
|
56 | """A basectx object represents the common logic for its children: | |
58 | changectx: read-only context that is already present in the repo, |
|
57 | changectx: read-only context that is already present in the repo, | |
59 | workingctx: a context that represents the working directory and can |
|
58 | workingctx: a context that represents the working directory and can | |
60 | be committed, |
|
59 | be committed, | |
61 | memctx: a context that represents changes in-memory and can also |
|
60 | memctx: a context that represents changes in-memory and can also | |
62 | be committed.""" |
|
61 | be committed.""" | |
63 | def __new__(cls, repo, changeid='', *args, **kwargs): |
|
62 | def __new__(cls, repo, changeid='', *args, **kwargs): | |
64 | if isinstance(changeid, basectx): |
|
63 | if isinstance(changeid, basectx): | |
65 | return changeid |
|
64 | return changeid | |
66 |
|
65 | |||
67 | o = super(basectx, cls).__new__(cls) |
|
66 | o = super(basectx, cls).__new__(cls) | |
68 |
|
67 | |||
69 | o._repo = repo |
|
68 | o._repo = repo | |
70 | o._rev = nullrev |
|
69 | o._rev = nullrev | |
71 | o._node = nullid |
|
70 | o._node = nullid | |
72 |
|
71 | |||
73 | return o |
|
72 | return o | |
74 |
|
73 | |||
75 | def __bytes__(self): |
|
74 | def __bytes__(self): | |
76 | return short(self.node()) |
|
75 | return short(self.node()) | |
77 |
|
76 | |||
78 | __str__ = encoding.strmethod(__bytes__) |
|
77 | __str__ = encoding.strmethod(__bytes__) | |
79 |
|
78 | |||
80 | def __repr__(self): |
|
79 | def __repr__(self): | |
81 | return r"<%s %s>" % (type(self).__name__, str(self)) |
|
80 | return r"<%s %s>" % (type(self).__name__, str(self)) | |
82 |
|
81 | |||
83 | def __eq__(self, other): |
|
82 | def __eq__(self, other): | |
84 | try: |
|
83 | try: | |
85 | return type(self) == type(other) and self._rev == other._rev |
|
84 | return type(self) == type(other) and self._rev == other._rev | |
86 | except AttributeError: |
|
85 | except AttributeError: | |
87 | return False |
|
86 | return False | |
88 |
|
87 | |||
89 | def __ne__(self, other): |
|
88 | def __ne__(self, other): | |
90 | return not (self == other) |
|
89 | return not (self == other) | |
91 |
|
90 | |||
92 | def __contains__(self, key): |
|
91 | def __contains__(self, key): | |
93 | return key in self._manifest |
|
92 | return key in self._manifest | |
94 |
|
93 | |||
95 | def __getitem__(self, key): |
|
94 | def __getitem__(self, key): | |
96 | return self.filectx(key) |
|
95 | return self.filectx(key) | |
97 |
|
96 | |||
98 | def __iter__(self): |
|
97 | def __iter__(self): | |
99 | return iter(self._manifest) |
|
98 | return iter(self._manifest) | |
100 |
|
99 | |||
101 | def _buildstatusmanifest(self, status): |
|
100 | def _buildstatusmanifest(self, status): | |
102 | """Builds a manifest that includes the given status results, if this is |
|
101 | """Builds a manifest that includes the given status results, if this is | |
103 | a working copy context. For non-working copy contexts, it just returns |
|
102 | a working copy context. For non-working copy contexts, it just returns | |
104 | the normal manifest.""" |
|
103 | the normal manifest.""" | |
105 | return self.manifest() |
|
104 | return self.manifest() | |
106 |
|
105 | |||
107 | def _matchstatus(self, other, match): |
|
106 | def _matchstatus(self, other, match): | |
108 | """This internal method provides a way for child objects to override the |
|
107 | """This internal method provides a way for child objects to override the | |
109 | match operator. |
|
108 | match operator. | |
110 | """ |
|
109 | """ | |
111 | return match |
|
110 | return match | |
112 |
|
111 | |||
113 | def _buildstatus(self, other, s, match, listignored, listclean, |
|
112 | def _buildstatus(self, other, s, match, listignored, listclean, | |
114 | listunknown): |
|
113 | listunknown): | |
115 | """build a status with respect to another context""" |
|
114 | """build a status with respect to another context""" | |
116 | # Load earliest manifest first for caching reasons. More specifically, |
|
115 | # Load earliest manifest first for caching reasons. More specifically, | |
117 | # if you have revisions 1000 and 1001, 1001 is probably stored as a |
|
116 | # if you have revisions 1000 and 1001, 1001 is probably stored as a | |
118 | # delta against 1000. Thus, if you read 1000 first, we'll reconstruct |
|
117 | # delta against 1000. Thus, if you read 1000 first, we'll reconstruct | |
119 | # 1000 and cache it so that when you read 1001, we just need to apply a |
|
118 | # 1000 and cache it so that when you read 1001, we just need to apply a | |
120 | # delta to what's in the cache. So that's one full reconstruction + one |
|
119 | # delta to what's in the cache. So that's one full reconstruction + one | |
121 | # delta application. |
|
120 | # delta application. | |
122 | mf2 = None |
|
121 | mf2 = None | |
123 | if self.rev() is not None and self.rev() < other.rev(): |
|
122 | if self.rev() is not None and self.rev() < other.rev(): | |
124 | mf2 = self._buildstatusmanifest(s) |
|
123 | mf2 = self._buildstatusmanifest(s) | |
125 | mf1 = other._buildstatusmanifest(s) |
|
124 | mf1 = other._buildstatusmanifest(s) | |
126 | if mf2 is None: |
|
125 | if mf2 is None: | |
127 | mf2 = self._buildstatusmanifest(s) |
|
126 | mf2 = self._buildstatusmanifest(s) | |
128 |
|
127 | |||
129 | modified, added = [], [] |
|
128 | modified, added = [], [] | |
130 | removed = [] |
|
129 | removed = [] | |
131 | clean = [] |
|
130 | clean = [] | |
132 | deleted, unknown, ignored = s.deleted, s.unknown, s.ignored |
|
131 | deleted, unknown, ignored = s.deleted, s.unknown, s.ignored | |
133 | deletedset = set(deleted) |
|
132 | deletedset = set(deleted) | |
134 | d = mf1.diff(mf2, match=match, clean=listclean) |
|
133 | d = mf1.diff(mf2, match=match, clean=listclean) | |
135 | for fn, value in d.iteritems(): |
|
134 | for fn, value in d.iteritems(): | |
136 | if fn in deletedset: |
|
135 | if fn in deletedset: | |
137 | continue |
|
136 | continue | |
138 | if value is None: |
|
137 | if value is None: | |
139 | clean.append(fn) |
|
138 | clean.append(fn) | |
140 | continue |
|
139 | continue | |
141 | (node1, flag1), (node2, flag2) = value |
|
140 | (node1, flag1), (node2, flag2) = value | |
142 | if node1 is None: |
|
141 | if node1 is None: | |
143 | added.append(fn) |
|
142 | added.append(fn) | |
144 | elif node2 is None: |
|
143 | elif node2 is None: | |
145 | removed.append(fn) |
|
144 | removed.append(fn) | |
146 | elif flag1 != flag2: |
|
145 | elif flag1 != flag2: | |
147 | modified.append(fn) |
|
146 | modified.append(fn) | |
148 | elif node2 not in wdirnodes: |
|
147 | elif node2 not in wdirnodes: | |
149 | # When comparing files between two commits, we save time by |
|
148 | # When comparing files between two commits, we save time by | |
150 | # not comparing the file contents when the nodeids differ. |
|
149 | # not comparing the file contents when the nodeids differ. | |
151 | # Note that this means we incorrectly report a reverted change |
|
150 | # Note that this means we incorrectly report a reverted change | |
152 | # to a file as a modification. |
|
151 | # to a file as a modification. | |
153 | modified.append(fn) |
|
152 | modified.append(fn) | |
154 | elif self[fn].cmp(other[fn]): |
|
153 | elif self[fn].cmp(other[fn]): | |
155 | modified.append(fn) |
|
154 | modified.append(fn) | |
156 | else: |
|
155 | else: | |
157 | clean.append(fn) |
|
156 | clean.append(fn) | |
158 |
|
157 | |||
159 | if removed: |
|
158 | if removed: | |
160 | # need to filter files if they are already reported as removed |
|
159 | # need to filter files if they are already reported as removed | |
161 | unknown = [fn for fn in unknown if fn not in mf1 and |
|
160 | unknown = [fn for fn in unknown if fn not in mf1 and | |
162 | (not match or match(fn))] |
|
161 | (not match or match(fn))] | |
163 | ignored = [fn for fn in ignored if fn not in mf1 and |
|
162 | ignored = [fn for fn in ignored if fn not in mf1 and | |
164 | (not match or match(fn))] |
|
163 | (not match or match(fn))] | |
165 | # if they're deleted, don't report them as removed |
|
164 | # if they're deleted, don't report them as removed | |
166 | removed = [fn for fn in removed if fn not in deletedset] |
|
165 | removed = [fn for fn in removed if fn not in deletedset] | |
167 |
|
166 | |||
168 | return scmutil.status(modified, added, removed, deleted, unknown, |
|
167 | return scmutil.status(modified, added, removed, deleted, unknown, | |
169 | ignored, clean) |
|
168 | ignored, clean) | |
170 |
|
169 | |||
171 | @propertycache |
|
170 | @propertycache | |
172 | def substate(self): |
|
171 | def substate(self): | |
173 | return subrepoutil.state(self, self._repo.ui) |
|
172 | return subrepoutil.state(self, self._repo.ui) | |
174 |
|
173 | |||
175 | def subrev(self, subpath): |
|
174 | def subrev(self, subpath): | |
176 | return self.substate[subpath][1] |
|
175 | return self.substate[subpath][1] | |
177 |
|
176 | |||
178 | def rev(self): |
|
177 | def rev(self): | |
179 | return self._rev |
|
178 | return self._rev | |
180 | def node(self): |
|
179 | def node(self): | |
181 | return self._node |
|
180 | return self._node | |
182 | def hex(self): |
|
181 | def hex(self): | |
183 | return hex(self.node()) |
|
182 | return hex(self.node()) | |
184 | def manifest(self): |
|
183 | def manifest(self): | |
185 | return self._manifest |
|
184 | return self._manifest | |
186 | def manifestctx(self): |
|
185 | def manifestctx(self): | |
187 | return self._manifestctx |
|
186 | return self._manifestctx | |
188 | def repo(self): |
|
187 | def repo(self): | |
189 | return self._repo |
|
188 | return self._repo | |
190 | def phasestr(self): |
|
189 | def phasestr(self): | |
191 | return phases.phasenames[self.phase()] |
|
190 | return phases.phasenames[self.phase()] | |
192 | def mutable(self): |
|
191 | def mutable(self): | |
193 | return self.phase() > phases.public |
|
192 | return self.phase() > phases.public | |
194 |
|
193 | |||
195 | def getfileset(self, expr): |
|
194 | def getfileset(self, expr): | |
196 | return fileset.getfileset(self, expr) |
|
195 | return fileset.getfileset(self, expr) | |
197 |
|
196 | |||
198 | def obsolete(self): |
|
197 | def obsolete(self): | |
199 | """True if the changeset is obsolete""" |
|
198 | """True if the changeset is obsolete""" | |
200 | return self.rev() in obsmod.getrevs(self._repo, 'obsolete') |
|
199 | return self.rev() in obsmod.getrevs(self._repo, 'obsolete') | |
201 |
|
200 | |||
202 | def extinct(self): |
|
201 | def extinct(self): | |
203 | """True if the changeset is extinct""" |
|
202 | """True if the changeset is extinct""" | |
204 | return self.rev() in obsmod.getrevs(self._repo, 'extinct') |
|
203 | return self.rev() in obsmod.getrevs(self._repo, 'extinct') | |
205 |
|
204 | |||
206 | def orphan(self): |
|
205 | def orphan(self): | |
207 | """True if the changeset is not obsolete but it's ancestor are""" |
|
206 | """True if the changeset is not obsolete but it's ancestor are""" | |
208 | return self.rev() in obsmod.getrevs(self._repo, 'orphan') |
|
207 | return self.rev() in obsmod.getrevs(self._repo, 'orphan') | |
209 |
|
208 | |||
210 | def phasedivergent(self): |
|
209 | def phasedivergent(self): | |
211 | """True if the changeset try to be a successor of a public changeset |
|
210 | """True if the changeset try to be a successor of a public changeset | |
212 |
|
211 | |||
213 | Only non-public and non-obsolete changesets may be bumped. |
|
212 | Only non-public and non-obsolete changesets may be bumped. | |
214 | """ |
|
213 | """ | |
215 | return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent') |
|
214 | return self.rev() in obsmod.getrevs(self._repo, 'phasedivergent') | |
216 |
|
215 | |||
217 | def contentdivergent(self): |
|
216 | def contentdivergent(self): | |
218 | """Is a successors of a changeset with multiple possible successors set |
|
217 | """Is a successors of a changeset with multiple possible successors set | |
219 |
|
218 | |||
220 | Only non-public and non-obsolete changesets may be divergent. |
|
219 | Only non-public and non-obsolete changesets may be divergent. | |
221 | """ |
|
220 | """ | |
222 | return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent') |
|
221 | return self.rev() in obsmod.getrevs(self._repo, 'contentdivergent') | |
223 |
|
222 | |||
224 | def isunstable(self): |
|
223 | def isunstable(self): | |
225 | """True if the changeset is either unstable, bumped or divergent""" |
|
224 | """True if the changeset is either unstable, bumped or divergent""" | |
226 | return self.orphan() or self.phasedivergent() or self.contentdivergent() |
|
225 | return self.orphan() or self.phasedivergent() or self.contentdivergent() | |
227 |
|
226 | |||
228 | def instabilities(self): |
|
227 | def instabilities(self): | |
229 | """return the list of instabilities affecting this changeset. |
|
228 | """return the list of instabilities affecting this changeset. | |
230 |
|
229 | |||
231 | Instabilities are returned as strings. possible values are: |
|
230 | Instabilities are returned as strings. possible values are: | |
232 | - orphan, |
|
231 | - orphan, | |
233 | - phase-divergent, |
|
232 | - phase-divergent, | |
234 | - content-divergent. |
|
233 | - content-divergent. | |
235 | """ |
|
234 | """ | |
236 | instabilities = [] |
|
235 | instabilities = [] | |
237 | if self.orphan(): |
|
236 | if self.orphan(): | |
238 | instabilities.append('orphan') |
|
237 | instabilities.append('orphan') | |
239 | if self.phasedivergent(): |
|
238 | if self.phasedivergent(): | |
240 | instabilities.append('phase-divergent') |
|
239 | instabilities.append('phase-divergent') | |
241 | if self.contentdivergent(): |
|
240 | if self.contentdivergent(): | |
242 | instabilities.append('content-divergent') |
|
241 | instabilities.append('content-divergent') | |
243 | return instabilities |
|
242 | return instabilities | |
244 |
|
243 | |||
245 | def parents(self): |
|
244 | def parents(self): | |
246 | """return contexts for each parent changeset""" |
|
245 | """return contexts for each parent changeset""" | |
247 | return self._parents |
|
246 | return self._parents | |
248 |
|
247 | |||
249 | def p1(self): |
|
248 | def p1(self): | |
250 | return self._parents[0] |
|
249 | return self._parents[0] | |
251 |
|
250 | |||
252 | def p2(self): |
|
251 | def p2(self): | |
253 | parents = self._parents |
|
252 | parents = self._parents | |
254 | if len(parents) == 2: |
|
253 | if len(parents) == 2: | |
255 | return parents[1] |
|
254 | return parents[1] | |
256 | return changectx(self._repo, nullrev) |
|
255 | return changectx(self._repo, nullrev) | |
257 |
|
256 | |||
258 | def _fileinfo(self, path): |
|
257 | def _fileinfo(self, path): | |
259 | if r'_manifest' in self.__dict__: |
|
258 | if r'_manifest' in self.__dict__: | |
260 | try: |
|
259 | try: | |
261 | return self._manifest[path], self._manifest.flags(path) |
|
260 | return self._manifest[path], self._manifest.flags(path) | |
262 | except KeyError: |
|
261 | except KeyError: | |
263 | raise error.ManifestLookupError(self._node, path, |
|
262 | raise error.ManifestLookupError(self._node, path, | |
264 | _('not found in manifest')) |
|
263 | _('not found in manifest')) | |
265 | if r'_manifestdelta' in self.__dict__ or path in self.files(): |
|
264 | if r'_manifestdelta' in self.__dict__ or path in self.files(): | |
266 | if path in self._manifestdelta: |
|
265 | if path in self._manifestdelta: | |
267 | return (self._manifestdelta[path], |
|
266 | return (self._manifestdelta[path], | |
268 | self._manifestdelta.flags(path)) |
|
267 | self._manifestdelta.flags(path)) | |
269 | mfl = self._repo.manifestlog |
|
268 | mfl = self._repo.manifestlog | |
270 | try: |
|
269 | try: | |
271 | node, flag = mfl[self._changeset.manifest].find(path) |
|
270 | node, flag = mfl[self._changeset.manifest].find(path) | |
272 | except KeyError: |
|
271 | except KeyError: | |
273 | raise error.ManifestLookupError(self._node, path, |
|
272 | raise error.ManifestLookupError(self._node, path, | |
274 | _('not found in manifest')) |
|
273 | _('not found in manifest')) | |
275 |
|
274 | |||
276 | return node, flag |
|
275 | return node, flag | |
277 |
|
276 | |||
278 | def filenode(self, path): |
|
277 | def filenode(self, path): | |
279 | return self._fileinfo(path)[0] |
|
278 | return self._fileinfo(path)[0] | |
280 |
|
279 | |||
281 | def flags(self, path): |
|
280 | def flags(self, path): | |
282 | try: |
|
281 | try: | |
283 | return self._fileinfo(path)[1] |
|
282 | return self._fileinfo(path)[1] | |
284 | except error.LookupError: |
|
283 | except error.LookupError: | |
285 | return '' |
|
284 | return '' | |
286 |
|
285 | |||
287 | def sub(self, path, allowcreate=True): |
|
286 | def sub(self, path, allowcreate=True): | |
288 | '''return a subrepo for the stored revision of path, never wdir()''' |
|
287 | '''return a subrepo for the stored revision of path, never wdir()''' | |
289 | return subrepo.subrepo(self, path, allowcreate=allowcreate) |
|
288 | return subrepo.subrepo(self, path, allowcreate=allowcreate) | |
290 |
|
289 | |||
291 | def nullsub(self, path, pctx): |
|
290 | def nullsub(self, path, pctx): | |
292 | return subrepo.nullsubrepo(self, path, pctx) |
|
291 | return subrepo.nullsubrepo(self, path, pctx) | |
293 |
|
292 | |||
294 | def workingsub(self, path): |
|
293 | def workingsub(self, path): | |
295 | '''return a subrepo for the stored revision, or wdir if this is a wdir |
|
294 | '''return a subrepo for the stored revision, or wdir if this is a wdir | |
296 | context. |
|
295 | context. | |
297 | ''' |
|
296 | ''' | |
298 | return subrepo.subrepo(self, path, allowwdir=True) |
|
297 | return subrepo.subrepo(self, path, allowwdir=True) | |
299 |
|
298 | |||
300 | def match(self, pats=None, include=None, exclude=None, default='glob', |
|
299 | def match(self, pats=None, include=None, exclude=None, default='glob', | |
301 | listsubrepos=False, badfn=None): |
|
300 | listsubrepos=False, badfn=None): | |
302 | r = self._repo |
|
301 | r = self._repo | |
303 | return matchmod.match(r.root, r.getcwd(), pats, |
|
302 | return matchmod.match(r.root, r.getcwd(), pats, | |
304 | include, exclude, default, |
|
303 | include, exclude, default, | |
305 | auditor=r.nofsauditor, ctx=self, |
|
304 | auditor=r.nofsauditor, ctx=self, | |
306 | listsubrepos=listsubrepos, badfn=badfn) |
|
305 | listsubrepos=listsubrepos, badfn=badfn) | |
307 |
|
306 | |||
308 | def diff(self, ctx2=None, match=None, **opts): |
|
307 | def diff(self, ctx2=None, match=None, **opts): | |
309 | """Returns a diff generator for the given contexts and matcher""" |
|
308 | """Returns a diff generator for the given contexts and matcher""" | |
310 | if ctx2 is None: |
|
309 | if ctx2 is None: | |
311 | ctx2 = self.p1() |
|
310 | ctx2 = self.p1() | |
312 | if ctx2 is not None: |
|
311 | if ctx2 is not None: | |
313 | ctx2 = self._repo[ctx2] |
|
312 | ctx2 = self._repo[ctx2] | |
314 | diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts)) |
|
313 | diffopts = patch.diffopts(self._repo.ui, pycompat.byteskwargs(opts)) | |
315 | return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts) |
|
314 | return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts) | |
316 |
|
315 | |||
317 | def dirs(self): |
|
316 | def dirs(self): | |
318 | return self._manifest.dirs() |
|
317 | return self._manifest.dirs() | |
319 |
|
318 | |||
320 | def hasdir(self, dir): |
|
319 | def hasdir(self, dir): | |
321 | return self._manifest.hasdir(dir) |
|
320 | return self._manifest.hasdir(dir) | |
322 |
|
321 | |||
323 | def status(self, other=None, match=None, listignored=False, |
|
322 | def status(self, other=None, match=None, listignored=False, | |
324 | listclean=False, listunknown=False, listsubrepos=False): |
|
323 | listclean=False, listunknown=False, listsubrepos=False): | |
325 | """return status of files between two nodes or node and working |
|
324 | """return status of files between two nodes or node and working | |
326 | directory. |
|
325 | directory. | |
327 |
|
326 | |||
328 | If other is None, compare this node with working directory. |
|
327 | If other is None, compare this node with working directory. | |
329 |
|
328 | |||
330 | returns (modified, added, removed, deleted, unknown, ignored, clean) |
|
329 | returns (modified, added, removed, deleted, unknown, ignored, clean) | |
331 | """ |
|
330 | """ | |
332 |
|
331 | |||
333 | ctx1 = self |
|
332 | ctx1 = self | |
334 | ctx2 = self._repo[other] |
|
333 | ctx2 = self._repo[other] | |
335 |
|
334 | |||
336 | # This next code block is, admittedly, fragile logic that tests for |
|
335 | # This next code block is, admittedly, fragile logic that tests for | |
337 | # reversing the contexts and wouldn't need to exist if it weren't for |
|
336 | # reversing the contexts and wouldn't need to exist if it weren't for | |
338 | # the fast (and common) code path of comparing the working directory |
|
337 | # the fast (and common) code path of comparing the working directory | |
339 | # with its first parent. |
|
338 | # with its first parent. | |
340 | # |
|
339 | # | |
341 | # What we're aiming for here is the ability to call: |
|
340 | # What we're aiming for here is the ability to call: | |
342 | # |
|
341 | # | |
343 | # workingctx.status(parentctx) |
|
342 | # workingctx.status(parentctx) | |
344 | # |
|
343 | # | |
345 | # If we always built the manifest for each context and compared those, |
|
344 | # If we always built the manifest for each context and compared those, | |
346 | # then we'd be done. But the special case of the above call means we |
|
345 | # then we'd be done. But the special case of the above call means we | |
347 | # just copy the manifest of the parent. |
|
346 | # just copy the manifest of the parent. | |
348 | reversed = False |
|
347 | reversed = False | |
349 | if (not isinstance(ctx1, changectx) |
|
348 | if (not isinstance(ctx1, changectx) | |
350 | and isinstance(ctx2, changectx)): |
|
349 | and isinstance(ctx2, changectx)): | |
351 | reversed = True |
|
350 | reversed = True | |
352 | ctx1, ctx2 = ctx2, ctx1 |
|
351 | ctx1, ctx2 = ctx2, ctx1 | |
353 |
|
352 | |||
354 | match = match or matchmod.always(self._repo.root, self._repo.getcwd()) |
|
353 | match = match or matchmod.always(self._repo.root, self._repo.getcwd()) | |
355 | match = ctx2._matchstatus(ctx1, match) |
|
354 | match = ctx2._matchstatus(ctx1, match) | |
356 | r = scmutil.status([], [], [], [], [], [], []) |
|
355 | r = scmutil.status([], [], [], [], [], [], []) | |
357 | r = ctx2._buildstatus(ctx1, r, match, listignored, listclean, |
|
356 | r = ctx2._buildstatus(ctx1, r, match, listignored, listclean, | |
358 | listunknown) |
|
357 | listunknown) | |
359 |
|
358 | |||
360 | if reversed: |
|
359 | if reversed: | |
361 | # Reverse added and removed. Clear deleted, unknown and ignored as |
|
360 | # Reverse added and removed. Clear deleted, unknown and ignored as | |
362 | # these make no sense to reverse. |
|
361 | # these make no sense to reverse. | |
363 | r = scmutil.status(r.modified, r.removed, r.added, [], [], [], |
|
362 | r = scmutil.status(r.modified, r.removed, r.added, [], [], [], | |
364 | r.clean) |
|
363 | r.clean) | |
365 |
|
364 | |||
366 | if listsubrepos: |
|
365 | if listsubrepos: | |
367 | for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): |
|
366 | for subpath, sub in scmutil.itersubrepos(ctx1, ctx2): | |
368 | try: |
|
367 | try: | |
369 | rev2 = ctx2.subrev(subpath) |
|
368 | rev2 = ctx2.subrev(subpath) | |
370 | except KeyError: |
|
369 | except KeyError: | |
371 | # A subrepo that existed in node1 was deleted between |
|
370 | # A subrepo that existed in node1 was deleted between | |
372 | # node1 and node2 (inclusive). Thus, ctx2's substate |
|
371 | # node1 and node2 (inclusive). Thus, ctx2's substate | |
373 | # won't contain that subpath. The best we can do ignore it. |
|
372 | # won't contain that subpath. The best we can do ignore it. | |
374 | rev2 = None |
|
373 | rev2 = None | |
375 | submatch = matchmod.subdirmatcher(subpath, match) |
|
374 | submatch = matchmod.subdirmatcher(subpath, match) | |
376 | s = sub.status(rev2, match=submatch, ignored=listignored, |
|
375 | s = sub.status(rev2, match=submatch, ignored=listignored, | |
377 | clean=listclean, unknown=listunknown, |
|
376 | clean=listclean, unknown=listunknown, | |
378 | listsubrepos=True) |
|
377 | listsubrepos=True) | |
379 | for rfiles, sfiles in zip(r, s): |
|
378 | for rfiles, sfiles in zip(r, s): | |
380 | rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) |
|
379 | rfiles.extend("%s/%s" % (subpath, f) for f in sfiles) | |
381 |
|
380 | |||
382 | for l in r: |
|
381 | for l in r: | |
383 | l.sort() |
|
382 | l.sort() | |
384 |
|
383 | |||
385 | return r |
|
384 | return r | |
386 |
|
385 | |||
387 | def _filterederror(repo, changeid): |
|
386 | def _filterederror(repo, changeid): | |
388 | """build an exception to be raised about a filtered changeid |
|
387 | """build an exception to be raised about a filtered changeid | |
389 |
|
388 | |||
390 | This is extracted in a function to help extensions (eg: evolve) to |
|
389 | This is extracted in a function to help extensions (eg: evolve) to | |
391 | experiment with various message variants.""" |
|
390 | experiment with various message variants.""" | |
392 | if repo.filtername.startswith('visible'): |
|
391 | if repo.filtername.startswith('visible'): | |
393 |
|
392 | |||
394 | # Check if the changeset is obsolete |
|
393 | # Check if the changeset is obsolete | |
395 | unfilteredrepo = repo.unfiltered() |
|
394 | unfilteredrepo = repo.unfiltered() | |
396 | ctx = unfilteredrepo[changeid] |
|
395 | ctx = unfilteredrepo[changeid] | |
397 |
|
396 | |||
398 | # If the changeset is obsolete, enrich the message with the reason |
|
397 | # If the changeset is obsolete, enrich the message with the reason | |
399 | # that made this changeset not visible |
|
398 | # that made this changeset not visible | |
400 | if ctx.obsolete(): |
|
399 | if ctx.obsolete(): | |
401 | msg = obsutil._getfilteredreason(repo, changeid, ctx) |
|
400 | msg = obsutil._getfilteredreason(repo, changeid, ctx) | |
402 | else: |
|
401 | else: | |
403 | msg = _("hidden revision '%s'") % changeid |
|
402 | msg = _("hidden revision '%s'") % changeid | |
404 |
|
403 | |||
405 | hint = _('use --hidden to access hidden revisions') |
|
404 | hint = _('use --hidden to access hidden revisions') | |
406 |
|
405 | |||
407 | return error.FilteredRepoLookupError(msg, hint=hint) |
|
406 | return error.FilteredRepoLookupError(msg, hint=hint) | |
408 | msg = _("filtered revision '%s' (not in '%s' subset)") |
|
407 | msg = _("filtered revision '%s' (not in '%s' subset)") | |
409 | msg %= (changeid, repo.filtername) |
|
408 | msg %= (changeid, repo.filtername) | |
410 | return error.FilteredRepoLookupError(msg) |
|
409 | return error.FilteredRepoLookupError(msg) | |
411 |
|
410 | |||
412 | class changectx(basectx): |
|
411 | class changectx(basectx): | |
413 | """A changecontext object makes access to data related to a particular |
|
412 | """A changecontext object makes access to data related to a particular | |
414 | changeset convenient. It represents a read-only context already present in |
|
413 | changeset convenient. It represents a read-only context already present in | |
415 | the repo.""" |
|
414 | the repo.""" | |
416 | def __init__(self, repo, changeid=''): |
|
415 | def __init__(self, repo, changeid=''): | |
417 | """changeid is a revision number, node, or tag""" |
|
416 | """changeid is a revision number, node, or tag""" | |
418 |
|
417 | |||
419 | # since basectx.__new__ already took care of copying the object, we |
|
418 | # since basectx.__new__ already took care of copying the object, we | |
420 | # don't need to do anything in __init__, so we just exit here |
|
419 | # don't need to do anything in __init__, so we just exit here | |
421 | if isinstance(changeid, basectx): |
|
420 | if isinstance(changeid, basectx): | |
422 | return |
|
421 | return | |
423 |
|
422 | |||
424 | if changeid == '': |
|
423 | if changeid == '': | |
425 | changeid = '.' |
|
424 | changeid = '.' | |
426 | self._repo = repo |
|
425 | self._repo = repo | |
427 |
|
426 | |||
428 | try: |
|
427 | try: | |
429 | if isinstance(changeid, int): |
|
428 | if isinstance(changeid, int): | |
430 | self._node = repo.changelog.node(changeid) |
|
429 | self._node = repo.changelog.node(changeid) | |
431 | self._rev = changeid |
|
430 | self._rev = changeid | |
432 | return |
|
431 | return | |
433 | if not pycompat.ispy3 and isinstance(changeid, long): |
|
432 | if not pycompat.ispy3 and isinstance(changeid, long): | |
434 | changeid = "%d" % changeid |
|
433 | changeid = "%d" % changeid | |
435 | if changeid == 'null': |
|
434 | if changeid == 'null': | |
436 | self._node = nullid |
|
435 | self._node = nullid | |
437 | self._rev = nullrev |
|
436 | self._rev = nullrev | |
438 | return |
|
437 | return | |
439 | if changeid == 'tip': |
|
438 | if changeid == 'tip': | |
440 | self._node = repo.changelog.tip() |
|
439 | self._node = repo.changelog.tip() | |
441 | self._rev = repo.changelog.rev(self._node) |
|
440 | self._rev = repo.changelog.rev(self._node) | |
442 | return |
|
441 | return | |
443 | if (changeid == '.' |
|
442 | if (changeid == '.' | |
444 | or repo.local() and changeid == repo.dirstate.p1()): |
|
443 | or repo.local() and changeid == repo.dirstate.p1()): | |
445 | # this is a hack to delay/avoid loading obsmarkers |
|
444 | # this is a hack to delay/avoid loading obsmarkers | |
446 | # when we know that '.' won't be hidden |
|
445 | # when we know that '.' won't be hidden | |
447 | self._node = repo.dirstate.p1() |
|
446 | self._node = repo.dirstate.p1() | |
448 | self._rev = repo.unfiltered().changelog.rev(self._node) |
|
447 | self._rev = repo.unfiltered().changelog.rev(self._node) | |
449 | return |
|
448 | return | |
450 | if len(changeid) == 20: |
|
449 | if len(changeid) == 20: | |
451 | try: |
|
450 | try: | |
452 | self._node = changeid |
|
451 | self._node = changeid | |
453 | self._rev = repo.changelog.rev(changeid) |
|
452 | self._rev = repo.changelog.rev(changeid) | |
454 | return |
|
453 | return | |
455 | except error.FilteredRepoLookupError: |
|
454 | except error.FilteredRepoLookupError: | |
456 | raise |
|
455 | raise | |
457 | except LookupError: |
|
456 | except LookupError: | |
458 | pass |
|
457 | pass | |
459 |
|
458 | |||
460 | try: |
|
459 | try: | |
461 | r = int(changeid) |
|
460 | r = int(changeid) | |
462 | if '%d' % r != changeid: |
|
461 | if '%d' % r != changeid: | |
463 | raise ValueError |
|
462 | raise ValueError | |
464 | l = len(repo.changelog) |
|
463 | l = len(repo.changelog) | |
465 | if r < 0: |
|
464 | if r < 0: | |
466 | r += l |
|
465 | r += l | |
467 | if r < 0 or r >= l and r != wdirrev: |
|
466 | if r < 0 or r >= l and r != wdirrev: | |
468 | raise ValueError |
|
467 | raise ValueError | |
469 | self._rev = r |
|
468 | self._rev = r | |
470 | self._node = repo.changelog.node(r) |
|
469 | self._node = repo.changelog.node(r) | |
471 | return |
|
470 | return | |
472 | except error.FilteredIndexError: |
|
471 | except error.FilteredIndexError: | |
473 | raise |
|
472 | raise | |
474 | except (ValueError, OverflowError, IndexError): |
|
473 | except (ValueError, OverflowError, IndexError): | |
475 | pass |
|
474 | pass | |
476 |
|
475 | |||
477 | if len(changeid) == 40: |
|
476 | if len(changeid) == 40: | |
478 | try: |
|
477 | try: | |
479 | self._node = bin(changeid) |
|
478 | self._node = bin(changeid) | |
480 | self._rev = repo.changelog.rev(self._node) |
|
479 | self._rev = repo.changelog.rev(self._node) | |
481 | return |
|
480 | return | |
482 | except error.FilteredLookupError: |
|
481 | except error.FilteredLookupError: | |
483 | raise |
|
482 | raise | |
484 | except (TypeError, LookupError): |
|
483 | except (TypeError, LookupError): | |
485 | pass |
|
484 | pass | |
486 |
|
485 | |||
487 | # lookup bookmarks through the name interface |
|
486 | # lookup bookmarks through the name interface | |
488 | try: |
|
487 | try: | |
489 | self._node = repo.names.singlenode(repo, changeid) |
|
488 | self._node = repo.names.singlenode(repo, changeid) | |
490 | self._rev = repo.changelog.rev(self._node) |
|
489 | self._rev = repo.changelog.rev(self._node) | |
491 | return |
|
490 | return | |
492 | except KeyError: |
|
491 | except KeyError: | |
493 | pass |
|
492 | pass | |
494 | except error.FilteredRepoLookupError: |
|
493 | except error.FilteredRepoLookupError: | |
495 | raise |
|
494 | raise | |
496 | except error.RepoLookupError: |
|
495 | except error.RepoLookupError: | |
497 | pass |
|
496 | pass | |
498 |
|
497 | |||
499 | self._node = repo.unfiltered().changelog._partialmatch(changeid) |
|
498 | self._node = repo.unfiltered().changelog._partialmatch(changeid) | |
500 | if self._node is not None: |
|
499 | if self._node is not None: | |
501 | self._rev = repo.changelog.rev(self._node) |
|
500 | self._rev = repo.changelog.rev(self._node) | |
502 | return |
|
501 | return | |
503 |
|
502 | |||
504 | # lookup failed |
|
503 | # lookup failed | |
505 | # check if it might have come from damaged dirstate |
|
504 | # check if it might have come from damaged dirstate | |
506 | # |
|
505 | # | |
507 | # XXX we could avoid the unfiltered if we had a recognizable |
|
506 | # XXX we could avoid the unfiltered if we had a recognizable | |
508 | # exception for filtered changeset access |
|
507 | # exception for filtered changeset access | |
509 | if (repo.local() |
|
508 | if (repo.local() | |
510 | and changeid in repo.unfiltered().dirstate.parents()): |
|
509 | and changeid in repo.unfiltered().dirstate.parents()): | |
511 | msg = _("working directory has unknown parent '%s'!") |
|
510 | msg = _("working directory has unknown parent '%s'!") | |
512 | raise error.Abort(msg % short(changeid)) |
|
511 | raise error.Abort(msg % short(changeid)) | |
513 | try: |
|
512 | try: | |
514 | if len(changeid) == 20 and nonascii(changeid): |
|
513 | if len(changeid) == 20 and nonascii(changeid): | |
515 | changeid = hex(changeid) |
|
514 | changeid = hex(changeid) | |
516 | except TypeError: |
|
515 | except TypeError: | |
517 | pass |
|
516 | pass | |
518 | except (error.FilteredIndexError, error.FilteredLookupError, |
|
517 | except (error.FilteredIndexError, error.FilteredLookupError, | |
519 | error.FilteredRepoLookupError): |
|
518 | error.FilteredRepoLookupError): | |
520 | raise _filterederror(repo, changeid) |
|
519 | raise _filterederror(repo, changeid) | |
521 | except IndexError: |
|
520 | except IndexError: | |
522 | pass |
|
521 | pass | |
523 | raise error.RepoLookupError( |
|
522 | raise error.RepoLookupError( | |
524 | _("unknown revision '%s'") % changeid) |
|
523 | _("unknown revision '%s'") % changeid) | |
525 |
|
524 | |||
526 | def __hash__(self): |
|
525 | def __hash__(self): | |
527 | try: |
|
526 | try: | |
528 | return hash(self._rev) |
|
527 | return hash(self._rev) | |
529 | except AttributeError: |
|
528 | except AttributeError: | |
530 | return id(self) |
|
529 | return id(self) | |
531 |
|
530 | |||
532 | def __nonzero__(self): |
|
531 | def __nonzero__(self): | |
533 | return self._rev != nullrev |
|
532 | return self._rev != nullrev | |
534 |
|
533 | |||
535 | __bool__ = __nonzero__ |
|
534 | __bool__ = __nonzero__ | |
536 |
|
535 | |||
537 | @propertycache |
|
536 | @propertycache | |
538 | def _changeset(self): |
|
537 | def _changeset(self): | |
539 | return self._repo.changelog.changelogrevision(self.rev()) |
|
538 | return self._repo.changelog.changelogrevision(self.rev()) | |
540 |
|
539 | |||
541 | @propertycache |
|
540 | @propertycache | |
542 | def _manifest(self): |
|
541 | def _manifest(self): | |
543 | return self._manifestctx.read() |
|
542 | return self._manifestctx.read() | |
544 |
|
543 | |||
545 | @property |
|
544 | @property | |
546 | def _manifestctx(self): |
|
545 | def _manifestctx(self): | |
547 | return self._repo.manifestlog[self._changeset.manifest] |
|
546 | return self._repo.manifestlog[self._changeset.manifest] | |
548 |
|
547 | |||
549 | @propertycache |
|
548 | @propertycache | |
550 | def _manifestdelta(self): |
|
549 | def _manifestdelta(self): | |
551 | return self._manifestctx.readdelta() |
|
550 | return self._manifestctx.readdelta() | |
552 |
|
551 | |||
553 | @propertycache |
|
552 | @propertycache | |
554 | def _parents(self): |
|
553 | def _parents(self): | |
555 | repo = self._repo |
|
554 | repo = self._repo | |
556 | p1, p2 = repo.changelog.parentrevs(self._rev) |
|
555 | p1, p2 = repo.changelog.parentrevs(self._rev) | |
557 | if p2 == nullrev: |
|
556 | if p2 == nullrev: | |
558 | return [changectx(repo, p1)] |
|
557 | return [changectx(repo, p1)] | |
559 | return [changectx(repo, p1), changectx(repo, p2)] |
|
558 | return [changectx(repo, p1), changectx(repo, p2)] | |
560 |
|
559 | |||
561 | def changeset(self): |
|
560 | def changeset(self): | |
562 | c = self._changeset |
|
561 | c = self._changeset | |
563 | return ( |
|
562 | return ( | |
564 | c.manifest, |
|
563 | c.manifest, | |
565 | c.user, |
|
564 | c.user, | |
566 | c.date, |
|
565 | c.date, | |
567 | c.files, |
|
566 | c.files, | |
568 | c.description, |
|
567 | c.description, | |
569 | c.extra, |
|
568 | c.extra, | |
570 | ) |
|
569 | ) | |
571 | def manifestnode(self): |
|
570 | def manifestnode(self): | |
572 | return self._changeset.manifest |
|
571 | return self._changeset.manifest | |
573 |
|
572 | |||
574 | def user(self): |
|
573 | def user(self): | |
575 | return self._changeset.user |
|
574 | return self._changeset.user | |
576 | def date(self): |
|
575 | def date(self): | |
577 | return self._changeset.date |
|
576 | return self._changeset.date | |
578 | def files(self): |
|
577 | def files(self): | |
579 | return self._changeset.files |
|
578 | return self._changeset.files | |
580 | def description(self): |
|
579 | def description(self): | |
581 | return self._changeset.description |
|
580 | return self._changeset.description | |
582 | def branch(self): |
|
581 | def branch(self): | |
583 | return encoding.tolocal(self._changeset.extra.get("branch")) |
|
582 | return encoding.tolocal(self._changeset.extra.get("branch")) | |
584 | def closesbranch(self): |
|
583 | def closesbranch(self): | |
585 | return 'close' in self._changeset.extra |
|
584 | return 'close' in self._changeset.extra | |
586 | def extra(self): |
|
585 | def extra(self): | |
587 | """Return a dict of extra information.""" |
|
586 | """Return a dict of extra information.""" | |
588 | return self._changeset.extra |
|
587 | return self._changeset.extra | |
589 | def tags(self): |
|
588 | def tags(self): | |
590 | """Return a list of byte tag names""" |
|
589 | """Return a list of byte tag names""" | |
591 | return self._repo.nodetags(self._node) |
|
590 | return self._repo.nodetags(self._node) | |
592 | def bookmarks(self): |
|
591 | def bookmarks(self): | |
593 | """Return a list of byte bookmark names.""" |
|
592 | """Return a list of byte bookmark names.""" | |
594 | return self._repo.nodebookmarks(self._node) |
|
593 | return self._repo.nodebookmarks(self._node) | |
595 | def phase(self): |
|
594 | def phase(self): | |
596 | return self._repo._phasecache.phase(self._repo, self._rev) |
|
595 | return self._repo._phasecache.phase(self._repo, self._rev) | |
597 | def hidden(self): |
|
596 | def hidden(self): | |
598 | return self._rev in repoview.filterrevs(self._repo, 'visible') |
|
597 | return self._rev in repoview.filterrevs(self._repo, 'visible') | |
599 |
|
598 | |||
600 | def isinmemory(self): |
|
599 | def isinmemory(self): | |
601 | return False |
|
600 | return False | |
602 |
|
601 | |||
603 | def children(self): |
|
602 | def children(self): | |
604 | """return list of changectx contexts for each child changeset. |
|
603 | """return list of changectx contexts for each child changeset. | |
605 |
|
604 | |||
606 | This returns only the immediate child changesets. Use descendants() to |
|
605 | This returns only the immediate child changesets. Use descendants() to | |
607 | recursively walk children. |
|
606 | recursively walk children. | |
608 | """ |
|
607 | """ | |
609 | c = self._repo.changelog.children(self._node) |
|
608 | c = self._repo.changelog.children(self._node) | |
610 | return [changectx(self._repo, x) for x in c] |
|
609 | return [changectx(self._repo, x) for x in c] | |
611 |
|
610 | |||
612 | def ancestors(self): |
|
611 | def ancestors(self): | |
613 | for a in self._repo.changelog.ancestors([self._rev]): |
|
612 | for a in self._repo.changelog.ancestors([self._rev]): | |
614 | yield changectx(self._repo, a) |
|
613 | yield changectx(self._repo, a) | |
615 |
|
614 | |||
616 | def descendants(self): |
|
615 | def descendants(self): | |
617 | """Recursively yield all children of the changeset. |
|
616 | """Recursively yield all children of the changeset. | |
618 |
|
617 | |||
619 | For just the immediate children, use children() |
|
618 | For just the immediate children, use children() | |
620 | """ |
|
619 | """ | |
621 | for d in self._repo.changelog.descendants([self._rev]): |
|
620 | for d in self._repo.changelog.descendants([self._rev]): | |
622 | yield changectx(self._repo, d) |
|
621 | yield changectx(self._repo, d) | |
623 |
|
622 | |||
624 | def filectx(self, path, fileid=None, filelog=None): |
|
623 | def filectx(self, path, fileid=None, filelog=None): | |
625 | """get a file context from this changeset""" |
|
624 | """get a file context from this changeset""" | |
626 | if fileid is None: |
|
625 | if fileid is None: | |
627 | fileid = self.filenode(path) |
|
626 | fileid = self.filenode(path) | |
628 | return filectx(self._repo, path, fileid=fileid, |
|
627 | return filectx(self._repo, path, fileid=fileid, | |
629 | changectx=self, filelog=filelog) |
|
628 | changectx=self, filelog=filelog) | |
630 |
|
629 | |||
631 | def ancestor(self, c2, warn=False): |
|
630 | def ancestor(self, c2, warn=False): | |
632 | """return the "best" ancestor context of self and c2 |
|
631 | """return the "best" ancestor context of self and c2 | |
633 |
|
632 | |||
634 | If there are multiple candidates, it will show a message and check |
|
633 | If there are multiple candidates, it will show a message and check | |
635 | merge.preferancestor configuration before falling back to the |
|
634 | merge.preferancestor configuration before falling back to the | |
636 | revlog ancestor.""" |
|
635 | revlog ancestor.""" | |
637 | # deal with workingctxs |
|
636 | # deal with workingctxs | |
638 | n2 = c2._node |
|
637 | n2 = c2._node | |
639 | if n2 is None: |
|
638 | if n2 is None: | |
640 | n2 = c2._parents[0]._node |
|
639 | n2 = c2._parents[0]._node | |
641 | cahs = self._repo.changelog.commonancestorsheads(self._node, n2) |
|
640 | cahs = self._repo.changelog.commonancestorsheads(self._node, n2) | |
642 | if not cahs: |
|
641 | if not cahs: | |
643 | anc = nullid |
|
642 | anc = nullid | |
644 | elif len(cahs) == 1: |
|
643 | elif len(cahs) == 1: | |
645 | anc = cahs[0] |
|
644 | anc = cahs[0] | |
646 | else: |
|
645 | else: | |
647 | # experimental config: merge.preferancestor |
|
646 | # experimental config: merge.preferancestor | |
648 | for r in self._repo.ui.configlist('merge', 'preferancestor'): |
|
647 | for r in self._repo.ui.configlist('merge', 'preferancestor'): | |
649 | try: |
|
648 | try: | |
650 | ctx = changectx(self._repo, r) |
|
649 | ctx = changectx(self._repo, r) | |
651 | except error.RepoLookupError: |
|
650 | except error.RepoLookupError: | |
652 | continue |
|
651 | continue | |
653 | anc = ctx.node() |
|
652 | anc = ctx.node() | |
654 | if anc in cahs: |
|
653 | if anc in cahs: | |
655 | break |
|
654 | break | |
656 | else: |
|
655 | else: | |
657 | anc = self._repo.changelog.ancestor(self._node, n2) |
|
656 | anc = self._repo.changelog.ancestor(self._node, n2) | |
658 | if warn: |
|
657 | if warn: | |
659 | self._repo.ui.status( |
|
658 | self._repo.ui.status( | |
660 | (_("note: using %s as ancestor of %s and %s\n") % |
|
659 | (_("note: using %s as ancestor of %s and %s\n") % | |
661 | (short(anc), short(self._node), short(n2))) + |
|
660 | (short(anc), short(self._node), short(n2))) + | |
662 | ''.join(_(" alternatively, use --config " |
|
661 | ''.join(_(" alternatively, use --config " | |
663 | "merge.preferancestor=%s\n") % |
|
662 | "merge.preferancestor=%s\n") % | |
664 | short(n) for n in sorted(cahs) if n != anc)) |
|
663 | short(n) for n in sorted(cahs) if n != anc)) | |
665 | return changectx(self._repo, anc) |
|
664 | return changectx(self._repo, anc) | |
666 |
|
665 | |||
667 | def descendant(self, other): |
|
666 | def descendant(self, other): | |
668 | """True if other is descendant of this changeset""" |
|
667 | """True if other is descendant of this changeset""" | |
669 | return self._repo.changelog.descendant(self._rev, other._rev) |
|
668 | return self._repo.changelog.descendant(self._rev, other._rev) | |
670 |
|
669 | |||
671 | def walk(self, match): |
|
670 | def walk(self, match): | |
672 | '''Generates matching file names.''' |
|
671 | '''Generates matching file names.''' | |
673 |
|
672 | |||
674 | # Wrap match.bad method to have message with nodeid |
|
673 | # Wrap match.bad method to have message with nodeid | |
675 | def bad(fn, msg): |
|
674 | def bad(fn, msg): | |
676 | # The manifest doesn't know about subrepos, so don't complain about |
|
675 | # The manifest doesn't know about subrepos, so don't complain about | |
677 | # paths into valid subrepos. |
|
676 | # paths into valid subrepos. | |
678 | if any(fn == s or fn.startswith(s + '/') |
|
677 | if any(fn == s or fn.startswith(s + '/') | |
679 | for s in self.substate): |
|
678 | for s in self.substate): | |
680 | return |
|
679 | return | |
681 | match.bad(fn, _('no such file in rev %s') % self) |
|
680 | match.bad(fn, _('no such file in rev %s') % self) | |
682 |
|
681 | |||
683 | m = matchmod.badmatch(match, bad) |
|
682 | m = matchmod.badmatch(match, bad) | |
684 | return self._manifest.walk(m) |
|
683 | return self._manifest.walk(m) | |
685 |
|
684 | |||
686 | def matches(self, match): |
|
685 | def matches(self, match): | |
687 | return self.walk(match) |
|
686 | return self.walk(match) | |
688 |
|
687 | |||
689 | class basefilectx(object): |
|
688 | class basefilectx(object): | |
690 | """A filecontext object represents the common logic for its children: |
|
689 | """A filecontext object represents the common logic for its children: | |
691 | filectx: read-only access to a filerevision that is already present |
|
690 | filectx: read-only access to a filerevision that is already present | |
692 | in the repo, |
|
691 | in the repo, | |
693 | workingfilectx: a filecontext that represents files from the working |
|
692 | workingfilectx: a filecontext that represents files from the working | |
694 | directory, |
|
693 | directory, | |
695 | memfilectx: a filecontext that represents files in-memory, |
|
694 | memfilectx: a filecontext that represents files in-memory, | |
696 | overlayfilectx: duplicate another filecontext with some fields overridden. |
|
695 | overlayfilectx: duplicate another filecontext with some fields overridden. | |
697 | """ |
|
696 | """ | |
698 | @propertycache |
|
697 | @propertycache | |
699 | def _filelog(self): |
|
698 | def _filelog(self): | |
700 | return self._repo.file(self._path) |
|
699 | return self._repo.file(self._path) | |
701 |
|
700 | |||
702 | @propertycache |
|
701 | @propertycache | |
703 | def _changeid(self): |
|
702 | def _changeid(self): | |
704 | if r'_changeid' in self.__dict__: |
|
703 | if r'_changeid' in self.__dict__: | |
705 | return self._changeid |
|
704 | return self._changeid | |
706 | elif r'_changectx' in self.__dict__: |
|
705 | elif r'_changectx' in self.__dict__: | |
707 | return self._changectx.rev() |
|
706 | return self._changectx.rev() | |
708 | elif r'_descendantrev' in self.__dict__: |
|
707 | elif r'_descendantrev' in self.__dict__: | |
709 | # this file context was created from a revision with a known |
|
708 | # this file context was created from a revision with a known | |
710 | # descendant, we can (lazily) correct for linkrev aliases |
|
709 | # descendant, we can (lazily) correct for linkrev aliases | |
711 | return self._adjustlinkrev(self._descendantrev) |
|
710 | return self._adjustlinkrev(self._descendantrev) | |
712 | else: |
|
711 | else: | |
713 | return self._filelog.linkrev(self._filerev) |
|
712 | return self._filelog.linkrev(self._filerev) | |
714 |
|
713 | |||
715 | @propertycache |
|
714 | @propertycache | |
716 | def _filenode(self): |
|
715 | def _filenode(self): | |
717 | if r'_fileid' in self.__dict__: |
|
716 | if r'_fileid' in self.__dict__: | |
718 | return self._filelog.lookup(self._fileid) |
|
717 | return self._filelog.lookup(self._fileid) | |
719 | else: |
|
718 | else: | |
720 | return self._changectx.filenode(self._path) |
|
719 | return self._changectx.filenode(self._path) | |
721 |
|
720 | |||
722 | @propertycache |
|
721 | @propertycache | |
723 | def _filerev(self): |
|
722 | def _filerev(self): | |
724 | return self._filelog.rev(self._filenode) |
|
723 | return self._filelog.rev(self._filenode) | |
725 |
|
724 | |||
726 | @propertycache |
|
725 | @propertycache | |
727 | def _repopath(self): |
|
726 | def _repopath(self): | |
728 | return self._path |
|
727 | return self._path | |
729 |
|
728 | |||
730 | def __nonzero__(self): |
|
729 | def __nonzero__(self): | |
731 | try: |
|
730 | try: | |
732 | self._filenode |
|
731 | self._filenode | |
733 | return True |
|
732 | return True | |
734 | except error.LookupError: |
|
733 | except error.LookupError: | |
735 | # file is missing |
|
734 | # file is missing | |
736 | return False |
|
735 | return False | |
737 |
|
736 | |||
738 | __bool__ = __nonzero__ |
|
737 | __bool__ = __nonzero__ | |
739 |
|
738 | |||
740 | def __bytes__(self): |
|
739 | def __bytes__(self): | |
741 | try: |
|
740 | try: | |
742 | return "%s@%s" % (self.path(), self._changectx) |
|
741 | return "%s@%s" % (self.path(), self._changectx) | |
743 | except error.LookupError: |
|
742 | except error.LookupError: | |
744 | return "%s@???" % self.path() |
|
743 | return "%s@???" % self.path() | |
745 |
|
744 | |||
746 | __str__ = encoding.strmethod(__bytes__) |
|
745 | __str__ = encoding.strmethod(__bytes__) | |
747 |
|
746 | |||
748 | def __repr__(self): |
|
747 | def __repr__(self): | |
749 | return r"<%s %s>" % (type(self).__name__, str(self)) |
|
748 | return r"<%s %s>" % (type(self).__name__, str(self)) | |
750 |
|
749 | |||
751 | def __hash__(self): |
|
750 | def __hash__(self): | |
752 | try: |
|
751 | try: | |
753 | return hash((self._path, self._filenode)) |
|
752 | return hash((self._path, self._filenode)) | |
754 | except AttributeError: |
|
753 | except AttributeError: | |
755 | return id(self) |
|
754 | return id(self) | |
756 |
|
755 | |||
757 | def __eq__(self, other): |
|
756 | def __eq__(self, other): | |
758 | try: |
|
757 | try: | |
759 | return (type(self) == type(other) and self._path == other._path |
|
758 | return (type(self) == type(other) and self._path == other._path | |
760 | and self._filenode == other._filenode) |
|
759 | and self._filenode == other._filenode) | |
761 | except AttributeError: |
|
760 | except AttributeError: | |
762 | return False |
|
761 | return False | |
763 |
|
762 | |||
764 | def __ne__(self, other): |
|
763 | def __ne__(self, other): | |
765 | return not (self == other) |
|
764 | return not (self == other) | |
766 |
|
765 | |||
767 | def filerev(self): |
|
766 | def filerev(self): | |
768 | return self._filerev |
|
767 | return self._filerev | |
769 | def filenode(self): |
|
768 | def filenode(self): | |
770 | return self._filenode |
|
769 | return self._filenode | |
771 | @propertycache |
|
770 | @propertycache | |
772 | def _flags(self): |
|
771 | def _flags(self): | |
773 | return self._changectx.flags(self._path) |
|
772 | return self._changectx.flags(self._path) | |
774 | def flags(self): |
|
773 | def flags(self): | |
775 | return self._flags |
|
774 | return self._flags | |
776 | def filelog(self): |
|
775 | def filelog(self): | |
777 | return self._filelog |
|
776 | return self._filelog | |
778 | def rev(self): |
|
777 | def rev(self): | |
779 | return self._changeid |
|
778 | return self._changeid | |
780 | def linkrev(self): |
|
779 | def linkrev(self): | |
781 | return self._filelog.linkrev(self._filerev) |
|
780 | return self._filelog.linkrev(self._filerev) | |
782 | def node(self): |
|
781 | def node(self): | |
783 | return self._changectx.node() |
|
782 | return self._changectx.node() | |
784 | def hex(self): |
|
783 | def hex(self): | |
785 | return self._changectx.hex() |
|
784 | return self._changectx.hex() | |
786 | def user(self): |
|
785 | def user(self): | |
787 | return self._changectx.user() |
|
786 | return self._changectx.user() | |
788 | def date(self): |
|
787 | def date(self): | |
789 | return self._changectx.date() |
|
788 | return self._changectx.date() | |
790 | def files(self): |
|
789 | def files(self): | |
791 | return self._changectx.files() |
|
790 | return self._changectx.files() | |
792 | def description(self): |
|
791 | def description(self): | |
793 | return self._changectx.description() |
|
792 | return self._changectx.description() | |
794 | def branch(self): |
|
793 | def branch(self): | |
795 | return self._changectx.branch() |
|
794 | return self._changectx.branch() | |
796 | def extra(self): |
|
795 | def extra(self): | |
797 | return self._changectx.extra() |
|
796 | return self._changectx.extra() | |
798 | def phase(self): |
|
797 | def phase(self): | |
799 | return self._changectx.phase() |
|
798 | return self._changectx.phase() | |
800 | def phasestr(self): |
|
799 | def phasestr(self): | |
801 | return self._changectx.phasestr() |
|
800 | return self._changectx.phasestr() | |
802 | def obsolete(self): |
|
801 | def obsolete(self): | |
803 | return self._changectx.obsolete() |
|
802 | return self._changectx.obsolete() | |
804 | def instabilities(self): |
|
803 | def instabilities(self): | |
805 | return self._changectx.instabilities() |
|
804 | return self._changectx.instabilities() | |
806 | def manifest(self): |
|
805 | def manifest(self): | |
807 | return self._changectx.manifest() |
|
806 | return self._changectx.manifest() | |
808 | def changectx(self): |
|
807 | def changectx(self): | |
809 | return self._changectx |
|
808 | return self._changectx | |
810 | def renamed(self): |
|
809 | def renamed(self): | |
811 | return self._copied |
|
810 | return self._copied | |
812 | def repo(self): |
|
811 | def repo(self): | |
813 | return self._repo |
|
812 | return self._repo | |
814 | def size(self): |
|
813 | def size(self): | |
815 | return len(self.data()) |
|
814 | return len(self.data()) | |
816 |
|
815 | |||
817 | def path(self): |
|
816 | def path(self): | |
818 | return self._path |
|
817 | return self._path | |
819 |
|
818 | |||
820 | def isbinary(self): |
|
819 | def isbinary(self): | |
821 | try: |
|
820 | try: | |
822 | return util.binary(self.data()) |
|
821 | return util.binary(self.data()) | |
823 | except IOError: |
|
822 | except IOError: | |
824 | return False |
|
823 | return False | |
825 | def isexec(self): |
|
824 | def isexec(self): | |
826 | return 'x' in self.flags() |
|
825 | return 'x' in self.flags() | |
827 | def islink(self): |
|
826 | def islink(self): | |
828 | return 'l' in self.flags() |
|
827 | return 'l' in self.flags() | |
829 |
|
828 | |||
830 | def isabsent(self): |
|
829 | def isabsent(self): | |
831 | """whether this filectx represents a file not in self._changectx |
|
830 | """whether this filectx represents a file not in self._changectx | |
832 |
|
831 | |||
833 | This is mainly for merge code to detect change/delete conflicts. This is |
|
832 | This is mainly for merge code to detect change/delete conflicts. This is | |
834 | expected to be True for all subclasses of basectx.""" |
|
833 | expected to be True for all subclasses of basectx.""" | |
835 | return False |
|
834 | return False | |
836 |
|
835 | |||
837 | _customcmp = False |
|
836 | _customcmp = False | |
838 | def cmp(self, fctx): |
|
837 | def cmp(self, fctx): | |
839 | """compare with other file context |
|
838 | """compare with other file context | |
840 |
|
839 | |||
841 | returns True if different from fctx. |
|
840 | returns True if different from fctx. | |
842 | """ |
|
841 | """ | |
843 | if fctx._customcmp: |
|
842 | if fctx._customcmp: | |
844 | return fctx.cmp(self) |
|
843 | return fctx.cmp(self) | |
845 |
|
844 | |||
846 | if (fctx._filenode is None |
|
845 | if (fctx._filenode is None | |
847 | and (self._repo._encodefilterpats |
|
846 | and (self._repo._encodefilterpats | |
848 | # if file data starts with '\1\n', empty metadata block is |
|
847 | # if file data starts with '\1\n', empty metadata block is | |
849 | # prepended, which adds 4 bytes to filelog.size(). |
|
848 | # prepended, which adds 4 bytes to filelog.size(). | |
850 | or self.size() - 4 == fctx.size()) |
|
849 | or self.size() - 4 == fctx.size()) | |
851 | or self.size() == fctx.size()): |
|
850 | or self.size() == fctx.size()): | |
852 | return self._filelog.cmp(self._filenode, fctx.data()) |
|
851 | return self._filelog.cmp(self._filenode, fctx.data()) | |
853 |
|
852 | |||
854 | return True |
|
853 | return True | |
855 |
|
854 | |||
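As a usage sketch of the cmp() API above (assumptions: a local repository at '.', a tracked file named 'foo.py'; neither comes from this change), the committed side is asked whether the working-copy side differs:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')       # assumed repo location
    wfctx = repo[None]['foo.py']   # file as it sits in the working directory
    pfctx = repo['.']['foo.py']    # same file in the working dir's parent
    if pfctx.cmp(wfctx):           # True means the contents differ
        print('foo.py differs from its parent revision')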
856 | def _adjustlinkrev(self, srcrev, inclusive=False): |
|
855 | def _adjustlinkrev(self, srcrev, inclusive=False): | |
857 | """return the first ancestor of <srcrev> introducing <fnode> |
|
856 | """return the first ancestor of <srcrev> introducing <fnode> | |
858 |
|
857 | |||
859 | If the linkrev of the file revision does not point to an ancestor of |
|
858 | If the linkrev of the file revision does not point to an ancestor of | |
860 | srcrev, we'll walk down the ancestors until we find one introducing |
|
859 | srcrev, we'll walk down the ancestors until we find one introducing | |
861 | this file revision. |
|
860 | this file revision. | |
862 |
|
861 | |||
863 | :srcrev: the changeset revision we search ancestors from |
|
862 | :srcrev: the changeset revision we search ancestors from | |
864 | :inclusive: if true, the src revision will also be checked |
|
863 | :inclusive: if true, the src revision will also be checked | |
865 | """ |
|
864 | """ | |
866 | repo = self._repo |
|
865 | repo = self._repo | |
867 | cl = repo.unfiltered().changelog |
|
866 | cl = repo.unfiltered().changelog | |
868 | mfl = repo.manifestlog |
|
867 | mfl = repo.manifestlog | |
869 | # fetch the linkrev |
|
868 | # fetch the linkrev | |
870 | lkr = self.linkrev() |
|
869 | lkr = self.linkrev() | |
871 | # hack to reuse ancestor computation when searching for renames |
|
870 | # hack to reuse ancestor computation when searching for renames | |
872 | memberanc = getattr(self, '_ancestrycontext', None) |
|
871 | memberanc = getattr(self, '_ancestrycontext', None) | |
873 | iteranc = None |
|
872 | iteranc = None | |
874 | if srcrev is None: |
|
873 | if srcrev is None: | |
875 | # wctx case, used by workingfilectx during mergecopy |
|
874 | # wctx case, used by workingfilectx during mergecopy | |
876 | revs = [p.rev() for p in self._repo[None].parents()] |
|
875 | revs = [p.rev() for p in self._repo[None].parents()] | |
877 | inclusive = True # we skipped the real (revless) source |
|
876 | inclusive = True # we skipped the real (revless) source | |
878 | else: |
|
877 | else: | |
879 | revs = [srcrev] |
|
878 | revs = [srcrev] | |
880 | if memberanc is None: |
|
879 | if memberanc is None: | |
881 | memberanc = iteranc = cl.ancestors(revs, lkr, |
|
880 | memberanc = iteranc = cl.ancestors(revs, lkr, | |
882 | inclusive=inclusive) |
|
881 | inclusive=inclusive) | |
883 | # check if this linkrev is an ancestor of srcrev |
|
882 | # check if this linkrev is an ancestor of srcrev | |
884 | if lkr not in memberanc: |
|
883 | if lkr not in memberanc: | |
885 | if iteranc is None: |
|
884 | if iteranc is None: | |
886 | iteranc = cl.ancestors(revs, lkr, inclusive=inclusive) |
|
885 | iteranc = cl.ancestors(revs, lkr, inclusive=inclusive) | |
887 | fnode = self._filenode |
|
886 | fnode = self._filenode | |
888 | path = self._path |
|
887 | path = self._path | |
889 | for a in iteranc: |
|
888 | for a in iteranc: | |
890 | ac = cl.read(a) # get changeset data (we avoid object creation) |
|
889 | ac = cl.read(a) # get changeset data (we avoid object creation) | |
891 | if path in ac[3]: # checking the 'files' field. |
|
890 | if path in ac[3]: # checking the 'files' field. | |
892 | # The file has been touched, check if the content is |
|
891 | # The file has been touched, check if the content is | |
893 | # similar to the one we search for. |
|
892 | # similar to the one we search for. | |
894 | if fnode == mfl[ac[0]].readfast().get(path): |
|
893 | if fnode == mfl[ac[0]].readfast().get(path): | |
895 | return a |
|
894 | return a | |
896 | # In theory, we should never get out of that loop without a result. |
|
895 | # In theory, we should never get out of that loop without a result. | |
897 | # But if the manifest uses a buggy file revision (not a child of the |
|
896 | # But if the manifest uses a buggy file revision (not a child of the | |
898 | # one it replaces) we could. Such a buggy situation will likely |
|
897 | # one it replaces) we could. Such a buggy situation will likely | |
899 | # result in a crash somewhere else at some point. |
|
898 | # result in a crash somewhere else at some point. | |
900 | return lkr |
|
899 | return lkr | |
901 |
|
900 | |||
902 | def introrev(self): |
|
901 | def introrev(self): | |
903 | """return the rev of the changeset which introduced this file revision |
|
902 | """return the rev of the changeset which introduced this file revision | |
904 |
|
903 | |||
905 | This method is different from linkrev because it takes into account the |
|
904 | This method is different from linkrev because it takes into account the | |
906 | changeset the filectx was created from. It ensures the returned |
|
905 | changeset the filectx was created from. It ensures the returned | |
907 | revision is one of its ancestors. This prevents bugs from |
|
906 | revision is one of its ancestors. This prevents bugs from | |
908 | 'linkrev-shadowing' when a file revision is used by multiple |
|
907 | 'linkrev-shadowing' when a file revision is used by multiple | |
909 | changesets. |
|
908 | changesets. | |
910 | """ |
|
909 | """ | |
911 | lkr = self.linkrev() |
|
910 | lkr = self.linkrev() | |
912 | attrs = vars(self) |
|
911 | attrs = vars(self) | |
913 | noctx = not (r'_changeid' in attrs or r'_changectx' in attrs) |
|
912 | noctx = not (r'_changeid' in attrs or r'_changectx' in attrs) | |
914 | if noctx or self.rev() == lkr: |
|
913 | if noctx or self.rev() == lkr: | |
915 | return self.linkrev() |
|
914 | return self.linkrev() | |
916 | return self._adjustlinkrev(self.rev(), inclusive=True) |
|
915 | return self._adjustlinkrev(self.rev(), inclusive=True) | |
917 |
|
916 | |||
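A minimal sketch of the linkrev()/introrev() distinction described in the docstring above, assuming a repository at '.' and a tracked 'setup.py' (both illustrative, not from this change):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    fctx = repo['tip']['setup.py']
    stored = fctx.linkrev()   # changelog rev recorded in the filelog; may be
                              # "shadowed" by an unrelated changeset
    actual = fctx.introrev()  # ancestor of 'tip' that actually introduced
                              # this file revision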
918 | def introfilectx(self): |
|
917 | def introfilectx(self): | |
919 | """Return filectx having identical contents, but pointing to the |
|
918 | """Return filectx having identical contents, but pointing to the | |
920 | changeset revision where this filectx was introduced""" |
|
919 | changeset revision where this filectx was introduced""" | |
921 | introrev = self.introrev() |
|
920 | introrev = self.introrev() | |
922 | if self.rev() == introrev: |
|
921 | if self.rev() == introrev: | |
923 | return self |
|
922 | return self | |
924 | return self.filectx(self.filenode(), changeid=introrev) |
|
923 | return self.filectx(self.filenode(), changeid=introrev) | |
925 |
|
924 | |||
926 | def _parentfilectx(self, path, fileid, filelog): |
|
925 | def _parentfilectx(self, path, fileid, filelog): | |
927 | """create parent filectx keeping ancestry info for _adjustlinkrev()""" |
|
926 | """create parent filectx keeping ancestry info for _adjustlinkrev()""" | |
928 | fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) |
|
927 | fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog) | |
929 | if r'_changeid' in vars(self) or r'_changectx' in vars(self): |
|
928 | if r'_changeid' in vars(self) or r'_changectx' in vars(self): | |
930 | # If self is associated with a changeset (probably explicitly |
|
929 | # If self is associated with a changeset (probably explicitly | |
931 | # fed), ensure the created filectx is associated with a |
|
930 | # fed), ensure the created filectx is associated with a | |
932 | # changeset that is an ancestor of self.changectx. |
|
931 | # changeset that is an ancestor of self.changectx. | |
933 | # This lets us later use _adjustlinkrev to get a correct link. |
|
932 | # This lets us later use _adjustlinkrev to get a correct link. | |
934 | fctx._descendantrev = self.rev() |
|
933 | fctx._descendantrev = self.rev() | |
935 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) |
|
934 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) | |
936 | elif r'_descendantrev' in vars(self): |
|
935 | elif r'_descendantrev' in vars(self): | |
937 | # Otherwise propagate _descendantrev if we have one associated. |
|
936 | # Otherwise propagate _descendantrev if we have one associated. | |
938 | fctx._descendantrev = self._descendantrev |
|
937 | fctx._descendantrev = self._descendantrev | |
939 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) |
|
938 | fctx._ancestrycontext = getattr(self, '_ancestrycontext', None) | |
940 | return fctx |
|
939 | return fctx | |
941 |
|
940 | |||
942 | def parents(self): |
|
941 | def parents(self): | |
943 | _path = self._path |
|
942 | _path = self._path | |
944 | fl = self._filelog |
|
943 | fl = self._filelog | |
945 | parents = self._filelog.parents(self._filenode) |
|
944 | parents = self._filelog.parents(self._filenode) | |
946 | pl = [(_path, node, fl) for node in parents if node != nullid] |
|
945 | pl = [(_path, node, fl) for node in parents if node != nullid] | |
947 |
|
946 | |||
948 | r = fl.renamed(self._filenode) |
|
947 | r = fl.renamed(self._filenode) | |
949 | if r: |
|
948 | if r: | |
950 | # - In the simple rename case, both parents are nullid, pl is empty. |
|
949 | # - In the simple rename case, both parents are nullid, pl is empty. | |
951 | # - In case of merge, only one of the parents is nullid and should |
|
950 | # - In case of merge, only one of the parents is nullid and should | |
952 | # be replaced with the rename information. This parent is -always- |
|
951 | # be replaced with the rename information. This parent is -always- | |
953 | # the first one. |
|
952 | # the first one. | |
954 | # |
|
953 | # | |
955 | # As nullid parents have always been filtered out in the previous list |
|
954 | # As nullid parents have always been filtered out in the previous list | |
956 | # comprehension, inserting at 0 will always result in replacing the |
|
955 | # comprehension, inserting at 0 will always result in replacing the | |
957 | # first nullid parent with the rename information. |
|
956 | # first nullid parent with the rename information. | |
958 | pl.insert(0, (r[0], r[1], self._repo.file(r[0]))) |
|
957 | pl.insert(0, (r[0], r[1], self._repo.file(r[0]))) | |
959 |
|
958 | |||
960 | return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl] |
|
959 | return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl] | |
961 |
|
960 | |||
962 | def p1(self): |
|
961 | def p1(self): | |
963 | return self.parents()[0] |
|
962 | return self.parents()[0] | |
964 |
|
963 | |||
965 | def p2(self): |
|
964 | def p2(self): | |
966 | p = self.parents() |
|
965 | p = self.parents() | |
967 | if len(p) == 2: |
|
966 | if len(p) == 2: | |
968 | return p[1] |
|
967 | return p[1] | |
969 | return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) |
|
968 | return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog) | |
970 |
|
969 | |||
971 | def annotate(self, follow=False, linenumber=False, skiprevs=None, |
|
970 | def annotate(self, follow=False, linenumber=False, skiprevs=None, | |
972 | diffopts=None): |
|
971 | diffopts=None): | |
973 | '''returns a list of tuples of ((ctx, number), line) for each line |
|
972 | '''returns a list of tuples of ((ctx, number), line) for each line | |
974 | in the file, where ctx is the filectx of the node where |
|
973 | in the file, where ctx is the filectx of the node where | |
975 | that line was last changed; if the linenumber parameter is true, number is |
|
974 | that line was last changed; if the linenumber parameter is true, number is | |
976 | the line number at the first appearance in the managed file, otherwise, |
|
975 | the line number at the first appearance in the managed file, otherwise, | |
977 | number has a fixed value of False. |
|
976 | number has a fixed value of False. | |
978 | ''' |
|
977 | ''' | |
979 | annotateline = dagop.annotateline |
|
|||
980 | _annotatepair = dagop._annotatepair |
|
|||
981 |
|
||||
982 | def lines(text): |
|
|||
983 | if text.endswith("\n"): |
|
|||
984 | return text.count("\n") |
|
|||
985 | return text.count("\n") + int(bool(text)) |
|
|||
986 |
|
||||
987 | if linenumber: |
|
|||
988 | def decorate(text, rev): |
|
|||
989 | return ([annotateline(fctx=rev, lineno=i) |
|
|||
990 | for i in xrange(1, lines(text) + 1)], text) |
|
|||
991 | else: |
|
|||
992 | def decorate(text, rev): |
|
|||
993 | return ([annotateline(fctx=rev)] * lines(text), text) |
|
|||
994 |
|
||||
995 | getlog = util.lrucachefunc(lambda x: self._repo.file(x)) |
|
978 | getlog = util.lrucachefunc(lambda x: self._repo.file(x)) | |
996 |
|
979 | |||
997 | def parents(f): |
|
980 | def parents(f): | |
998 | # Cut _descendantrev here to mitigate the penalty of lazy linkrev |
|
981 | # Cut _descendantrev here to mitigate the penalty of lazy linkrev | |
999 | # adjustment. Otherwise, p._adjustlinkrev() would walk changelog |
|
982 | # adjustment. Otherwise, p._adjustlinkrev() would walk changelog | |
1000 | # from the topmost introrev (= srcrev) down to p.linkrev() if it |
|
983 | # from the topmost introrev (= srcrev) down to p.linkrev() if it | |
1001 | # isn't an ancestor of the srcrev. |
|
984 | # isn't an ancestor of the srcrev. | |
1002 | f._changeid |
|
985 | f._changeid | |
1003 | pl = f.parents() |
|
986 | pl = f.parents() | |
1004 |
|
987 | |||
1005 | # Don't return renamed parents if we aren't following. |
|
988 | # Don't return renamed parents if we aren't following. | |
1006 | if not follow: |
|
989 | if not follow: | |
1007 | pl = [p for p in pl if p.path() == f.path()] |
|
990 | pl = [p for p in pl if p.path() == f.path()] | |
1008 |
|
991 | |||
1009 | # renamed filectx won't have a filelog yet, so set it |
|
992 | # renamed filectx won't have a filelog yet, so set it | |
1010 | # from the cache to save time |
|
993 | # from the cache to save time | |
1011 | for p in pl: |
|
994 | for p in pl: | |
1012 | if not r'_filelog' in p.__dict__: |
|
995 | if not r'_filelog' in p.__dict__: | |
1013 | p._filelog = getlog(p.path()) |
|
996 | p._filelog = getlog(p.path()) | |
1014 |
|
997 | |||
1015 | return pl |
|
998 | return pl | |
1016 |
|
999 | |||
1017 | # use linkrev to find the first changeset where self appeared |
|
1000 | # use linkrev to find the first changeset where self appeared | |
1018 | base = self.introfilectx() |
|
1001 | base = self.introfilectx() | |
1019 | if getattr(base, '_ancestrycontext', None) is None: |
|
1002 | if getattr(base, '_ancestrycontext', None) is None: | |
1020 | cl = self._repo.changelog |
|
1003 | cl = self._repo.changelog | |
1021 | if base.rev() is None: |
|
1004 | if base.rev() is None: | |
1022 | # wctx is not inclusive, but works because _ancestrycontext |
|
1005 | # wctx is not inclusive, but works because _ancestrycontext | |
1023 | # is used to test filelog revisions |
|
1006 | # is used to test filelog revisions | |
1024 | ac = cl.ancestors([p.rev() for p in base.parents()], |
|
1007 | ac = cl.ancestors([p.rev() for p in base.parents()], | |
1025 | inclusive=True) |
|
1008 | inclusive=True) | |
1026 | else: |
|
1009 | else: | |
1027 | ac = cl.ancestors([base.rev()], inclusive=True) |
|
1010 | ac = cl.ancestors([base.rev()], inclusive=True) | |
1028 | base._ancestrycontext = ac |
|
1011 | base._ancestrycontext = ac | |
1029 |
|
1012 | |||
1030 | # This algorithm would prefer to be recursive, but Python is a |
|
1013 | return dagop.annotate(base, parents, linenumber=linenumber, | |
1031 | # bit recursion-hostile. Instead we do an iterative |
|
1014 | skiprevs=skiprevs, diffopts=diffopts) | |
1032 | # depth-first search. |
|
|||
1033 |
|
||||
1034 | # 1st DFS pre-calculates pcache and needed |
|
|||
1035 | visit = [base] |
|
|||
1036 | pcache = {} |
|
|||
1037 | needed = {base: 1} |
|
|||
1038 | while visit: |
|
|||
1039 | f = visit.pop() |
|
|||
1040 | if f in pcache: |
|
|||
1041 | continue |
|
|||
1042 | pl = parents(f) |
|
|||
1043 | pcache[f] = pl |
|
|||
1044 | for p in pl: |
|
|||
1045 | needed[p] = needed.get(p, 0) + 1 |
|
|||
1046 | if p not in pcache: |
|
|||
1047 | visit.append(p) |
|
|||
1048 |
|
||||
1049 | # 2nd DFS does the actual annotate |
|
|||
1050 | visit[:] = [base] |
|
|||
1051 | hist = {} |
|
|||
1052 | while visit: |
|
|||
1053 | f = visit[-1] |
|
|||
1054 | if f in hist: |
|
|||
1055 | visit.pop() |
|
|||
1056 | continue |
|
|||
1057 |
|
||||
1058 | ready = True |
|
|||
1059 | pl = pcache[f] |
|
|||
1060 | for p in pl: |
|
|||
1061 | if p not in hist: |
|
|||
1062 | ready = False |
|
|||
1063 | visit.append(p) |
|
|||
1064 | if ready: |
|
|||
1065 | visit.pop() |
|
|||
1066 | curr = decorate(f.data(), f) |
|
|||
1067 | skipchild = False |
|
|||
1068 | if skiprevs is not None: |
|
|||
1069 | skipchild = f._changeid in skiprevs |
|
|||
1070 | curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild, |
|
|||
1071 | diffopts) |
|
|||
1072 | for p in pl: |
|
|||
1073 | if needed[p] == 1: |
|
|||
1074 | del hist[p] |
|
|||
1075 | del needed[p] |
|
|||
1076 | else: |
|
|||
1077 | needed[p] -= 1 |
|
|||
1078 |
|
||||
1079 | hist[f] = curr |
|
|||
1080 | del pcache[f] |
|
|||
1081 |
|
||||
1082 | lineattrs, text = hist[base] |
|
|||
1083 | return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text)) |
|
|||
1084 |
|
1015 | |||
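The hunk above replaces the inline two-pass depth-first annotate implementation with a single call into dagop.annotate(), keeping fctx.annotate()'s signature unchanged. A hedged usage sketch follows; the 'README' path and the repository location are assumptions, and the per-line attributes reflect the annotateline structure used by this code:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    fctx = repo['tip']['README']
    for aline, text in fctx.annotate(follow=True, linenumber=True):
        # aline.fctx is the filectx that last touched the line and
        # aline.lineno its line number at first appearance
        print('%d: %s' % (aline.fctx.rev(), text.rstrip('\n')))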
1085 | def ancestors(self, followfirst=False): |
|
1016 | def ancestors(self, followfirst=False): | |
1086 | visit = {} |
|
1017 | visit = {} | |
1087 | c = self |
|
1018 | c = self | |
1088 | if followfirst: |
|
1019 | if followfirst: | |
1089 | cut = 1 |
|
1020 | cut = 1 | |
1090 | else: |
|
1021 | else: | |
1091 | cut = None |
|
1022 | cut = None | |
1092 |
|
1023 | |||
1093 | while True: |
|
1024 | while True: | |
1094 | for parent in c.parents()[:cut]: |
|
1025 | for parent in c.parents()[:cut]: | |
1095 | visit[(parent.linkrev(), parent.filenode())] = parent |
|
1026 | visit[(parent.linkrev(), parent.filenode())] = parent | |
1096 | if not visit: |
|
1027 | if not visit: | |
1097 | break |
|
1028 | break | |
1098 | c = visit.pop(max(visit)) |
|
1029 | c = visit.pop(max(visit)) | |
1099 | yield c |
|
1030 | yield c | |
1100 |
|
1031 | |||
1101 | def decodeddata(self): |
|
1032 | def decodeddata(self): | |
1102 | """Returns `data()` after running repository decoding filters. |
|
1033 | """Returns `data()` after running repository decoding filters. | |
1103 |
|
1034 | |||
1104 | This is often equivalent to how the data would be expressed on disk. |
|
1035 | This is often equivalent to how the data would be expressed on disk. | |
1105 | """ |
|
1036 | """ | |
1106 | return self._repo.wwritedata(self.path(), self.data()) |
|
1037 | return self._repo.wwritedata(self.path(), self.data()) | |
1107 |
|
1038 | |||
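A short sketch of the data()/decodeddata() distinction documented above ('foo.txt' and the repo path are assumed): data() yields the content as stored in the filelog, while decodeddata() additionally runs the repository's decode filters (for example EOL conversion), approximating the on-disk form.

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    fctx = repo['tip']['foo.txt']
    stored = fctx.data()         # filelog content, before decode filters
    ondisk = fctx.decodeddata()  # after wwritedata(), i.e. decode filters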
1108 | class filectx(basefilectx): |
|
1039 | class filectx(basefilectx): | |
1109 | """A filecontext object makes access to data related to a particular |
|
1040 | """A filecontext object makes access to data related to a particular | |
1110 | filerevision convenient.""" |
|
1041 | filerevision convenient.""" | |
1111 | def __init__(self, repo, path, changeid=None, fileid=None, |
|
1042 | def __init__(self, repo, path, changeid=None, fileid=None, | |
1112 | filelog=None, changectx=None): |
|
1043 | filelog=None, changectx=None): | |
1113 | """changeid can be a changeset revision, node, or tag. |
|
1044 | """changeid can be a changeset revision, node, or tag. | |
1114 | fileid can be a file revision or node.""" |
|
1045 | fileid can be a file revision or node.""" | |
1115 | self._repo = repo |
|
1046 | self._repo = repo | |
1116 | self._path = path |
|
1047 | self._path = path | |
1117 |
|
1048 | |||
1118 | assert (changeid is not None |
|
1049 | assert (changeid is not None | |
1119 | or fileid is not None |
|
1050 | or fileid is not None | |
1120 | or changectx is not None), \ |
|
1051 | or changectx is not None), \ | |
1121 | ("bad args: changeid=%r, fileid=%r, changectx=%r" |
|
1052 | ("bad args: changeid=%r, fileid=%r, changectx=%r" | |
1122 | % (changeid, fileid, changectx)) |
|
1053 | % (changeid, fileid, changectx)) | |
1123 |
|
1054 | |||
1124 | if filelog is not None: |
|
1055 | if filelog is not None: | |
1125 | self._filelog = filelog |
|
1056 | self._filelog = filelog | |
1126 |
|
1057 | |||
1127 | if changeid is not None: |
|
1058 | if changeid is not None: | |
1128 | self._changeid = changeid |
|
1059 | self._changeid = changeid | |
1129 | if changectx is not None: |
|
1060 | if changectx is not None: | |
1130 | self._changectx = changectx |
|
1061 | self._changectx = changectx | |
1131 | if fileid is not None: |
|
1062 | if fileid is not None: | |
1132 | self._fileid = fileid |
|
1063 | self._fileid = fileid | |
1133 |
|
1064 | |||
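A hedged sketch of the constructor forms the docstring above allows; the repository handle and the 'setup.py' path are illustrative assumptions:

    from mercurial import context, hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    by_tag = context.filectx(repo, 'setup.py', changeid='tip')  # tag/rev/node
    by_rev = context.filectx(repo, 'setup.py', changeid=0)      # changelog rev
    by_fid = context.filectx(repo, 'setup.py', fileid=0)        # filelog rev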
1134 | @propertycache |
|
1065 | @propertycache | |
1135 | def _changectx(self): |
|
1066 | def _changectx(self): | |
1136 | try: |
|
1067 | try: | |
1137 | return changectx(self._repo, self._changeid) |
|
1068 | return changectx(self._repo, self._changeid) | |
1138 | except error.FilteredRepoLookupError: |
|
1069 | except error.FilteredRepoLookupError: | |
1139 | # Linkrev may point to any revision in the repository. When the |
|
1070 | # Linkrev may point to any revision in the repository. When the | |
1140 | # repository is filtered this may lead to `filectx` trying to build |
|
1071 | # repository is filtered this may lead to `filectx` trying to build | |
1141 | # `changectx` for a filtered revision. In such a case we fall back to |
|
1072 | # `changectx` for a filtered revision. In such a case we fall back to | |
1142 | # creating `changectx` on the unfiltered version of the repository. |
|
1073 | # creating `changectx` on the unfiltered version of the repository. | |
1143 | # This fallback should not be an issue because `changectx` from |
|
1074 | # This fallback should not be an issue because `changectx` from | |
1144 | # `filectx` are not used in complex operations that care about |
|
1075 | # `filectx` are not used in complex operations that care about | |
1145 | # filtering. |
|
1076 | # filtering. | |
1146 | # |
|
1077 | # | |
1147 | # This fallback is a cheap and dirty fix that prevents several |
|
1078 | # This fallback is a cheap and dirty fix that prevents several | |
1148 | # crashes. It does not ensure the behavior is correct. However the |
|
1079 | # crashes. It does not ensure the behavior is correct. However the | |
1149 | # behavior was not correct before filtering either and "incorrect |
|
1080 | # behavior was not correct before filtering either and "incorrect | |
1150 | # behavior" is seen as better as "crash" |
|
1081 | # behavior" is seen as better as "crash" | |
1151 | # |
|
1082 | # | |
1152 | # Linkrevs have several serious troubles with filtering that are |
|
1083 | # Linkrevs have several serious troubles with filtering that are | |
1153 | # complicated to solve. Proper handling of the issue here should be |
|
1084 | # complicated to solve. Proper handling of the issue here should be | |
1154 | # considered when solving the linkrev issues is on the table. |
|
1085 | # considered when solving the linkrev issues is on the table. | |
1155 | return changectx(self._repo.unfiltered(), self._changeid) |
|
1086 | return changectx(self._repo.unfiltered(), self._changeid) | |
1156 |
|
1087 | |||
1157 | def filectx(self, fileid, changeid=None): |
|
1088 | def filectx(self, fileid, changeid=None): | |
1158 | '''opens an arbitrary revision of the file without |
|
1089 | '''opens an arbitrary revision of the file without | |
1159 | opening a new filelog''' |
|
1090 | opening a new filelog''' | |
1160 | return filectx(self._repo, self._path, fileid=fileid, |
|
1091 | return filectx(self._repo, self._path, fileid=fileid, | |
1161 | filelog=self._filelog, changeid=changeid) |
|
1092 | filelog=self._filelog, changeid=changeid) | |
1162 |
|
1093 | |||
1163 | def rawdata(self): |
|
1094 | def rawdata(self): | |
1164 | return self._filelog.revision(self._filenode, raw=True) |
|
1095 | return self._filelog.revision(self._filenode, raw=True) | |
1165 |
|
1096 | |||
1166 | def rawflags(self): |
|
1097 | def rawflags(self): | |
1167 | """low-level revlog flags""" |
|
1098 | """low-level revlog flags""" | |
1168 | return self._filelog.flags(self._filerev) |
|
1099 | return self._filelog.flags(self._filerev) | |
1169 |
|
1100 | |||
1170 | def data(self): |
|
1101 | def data(self): | |
1171 | try: |
|
1102 | try: | |
1172 | return self._filelog.read(self._filenode) |
|
1103 | return self._filelog.read(self._filenode) | |
1173 | except error.CensoredNodeError: |
|
1104 | except error.CensoredNodeError: | |
1174 | if self._repo.ui.config("censor", "policy") == "ignore": |
|
1105 | if self._repo.ui.config("censor", "policy") == "ignore": | |
1175 | return "" |
|
1106 | return "" | |
1176 | raise error.Abort(_("censored node: %s") % short(self._filenode), |
|
1107 | raise error.Abort(_("censored node: %s") % short(self._filenode), | |
1177 | hint=_("set censor.policy to ignore errors")) |
|
1108 | hint=_("set censor.policy to ignore errors")) | |
1178 |
|
1109 | |||
1179 | def size(self): |
|
1110 | def size(self): | |
1180 | return self._filelog.size(self._filerev) |
|
1111 | return self._filelog.size(self._filerev) | |
1181 |
|
1112 | |||
1182 | @propertycache |
|
1113 | @propertycache | |
1183 | def _copied(self): |
|
1114 | def _copied(self): | |
1184 | """check if file was actually renamed in this changeset revision |
|
1115 | """check if file was actually renamed in this changeset revision | |
1185 |
|
1116 | |||
1186 | If a rename is logged in the file revision, we report the copy for the |
|
1117 | If a rename is logged in the file revision, we report the copy for the | |
1187 | changeset only if the file revision's linkrev points back to the changeset |
|
1118 | changeset only if the file revision's linkrev points back to the changeset | |
1188 | in question or both changeset parents contain different file revisions. |
|
1119 | in question or both changeset parents contain different file revisions. | |
1189 | """ |
|
1120 | """ | |
1190 |
|
1121 | |||
1191 | renamed = self._filelog.renamed(self._filenode) |
|
1122 | renamed = self._filelog.renamed(self._filenode) | |
1192 | if not renamed: |
|
1123 | if not renamed: | |
1193 | return renamed |
|
1124 | return renamed | |
1194 |
|
1125 | |||
1195 | if self.rev() == self.linkrev(): |
|
1126 | if self.rev() == self.linkrev(): | |
1196 | return renamed |
|
1127 | return renamed | |
1197 |
|
1128 | |||
1198 | name = self.path() |
|
1129 | name = self.path() | |
1199 | fnode = self._filenode |
|
1130 | fnode = self._filenode | |
1200 | for p in self._changectx.parents(): |
|
1131 | for p in self._changectx.parents(): | |
1201 | try: |
|
1132 | try: | |
1202 | if fnode == p.filenode(name): |
|
1133 | if fnode == p.filenode(name): | |
1203 | return None |
|
1134 | return None | |
1204 | except error.LookupError: |
|
1135 | except error.LookupError: | |
1205 | pass |
|
1136 | pass | |
1206 | return renamed |
|
1137 | return renamed | |
1207 |
|
1138 | |||
1208 | def children(self): |
|
1139 | def children(self): | |
1209 | # hard for renames |
|
1140 | # hard for renames | |
1210 | c = self._filelog.children(self._filenode) |
|
1141 | c = self._filelog.children(self._filenode) | |
1211 | return [filectx(self._repo, self._path, fileid=x, |
|
1142 | return [filectx(self._repo, self._path, fileid=x, | |
1212 | filelog=self._filelog) for x in c] |
|
1143 | filelog=self._filelog) for x in c] | |
1213 |
|
1144 | |||
1214 | class committablectx(basectx): |
|
1145 | class committablectx(basectx): | |
1215 | """A committablectx object provides common functionality for a context that |
|
1146 | """A committablectx object provides common functionality for a context that | |
1216 | wants the ability to commit, e.g. workingctx or memctx.""" |
|
1147 | wants the ability to commit, e.g. workingctx or memctx.""" | |
1217 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1148 | def __init__(self, repo, text="", user=None, date=None, extra=None, | |
1218 | changes=None): |
|
1149 | changes=None): | |
1219 | self._repo = repo |
|
1150 | self._repo = repo | |
1220 | self._rev = None |
|
1151 | self._rev = None | |
1221 | self._node = None |
|
1152 | self._node = None | |
1222 | self._text = text |
|
1153 | self._text = text | |
1223 | if date: |
|
1154 | if date: | |
1224 | self._date = dateutil.parsedate(date) |
|
1155 | self._date = dateutil.parsedate(date) | |
1225 | if user: |
|
1156 | if user: | |
1226 | self._user = user |
|
1157 | self._user = user | |
1227 | if changes: |
|
1158 | if changes: | |
1228 | self._status = changes |
|
1159 | self._status = changes | |
1229 |
|
1160 | |||
1230 | self._extra = {} |
|
1161 | self._extra = {} | |
1231 | if extra: |
|
1162 | if extra: | |
1232 | self._extra = extra.copy() |
|
1163 | self._extra = extra.copy() | |
1233 | if 'branch' not in self._extra: |
|
1164 | if 'branch' not in self._extra: | |
1234 | try: |
|
1165 | try: | |
1235 | branch = encoding.fromlocal(self._repo.dirstate.branch()) |
|
1166 | branch = encoding.fromlocal(self._repo.dirstate.branch()) | |
1236 | except UnicodeDecodeError: |
|
1167 | except UnicodeDecodeError: | |
1237 | raise error.Abort(_('branch name not in UTF-8!')) |
|
1168 | raise error.Abort(_('branch name not in UTF-8!')) | |
1238 | self._extra['branch'] = branch |
|
1169 | self._extra['branch'] = branch | |
1239 | if self._extra['branch'] == '': |
|
1170 | if self._extra['branch'] == '': | |
1240 | self._extra['branch'] = 'default' |
|
1171 | self._extra['branch'] = 'default' | |
1241 |
|
1172 | |||
1242 | def __bytes__(self): |
|
1173 | def __bytes__(self): | |
1243 | return bytes(self._parents[0]) + "+" |
|
1174 | return bytes(self._parents[0]) + "+" | |
1244 |
|
1175 | |||
1245 | __str__ = encoding.strmethod(__bytes__) |
|
1176 | __str__ = encoding.strmethod(__bytes__) | |
1246 |
|
1177 | |||
1247 | def __nonzero__(self): |
|
1178 | def __nonzero__(self): | |
1248 | return True |
|
1179 | return True | |
1249 |
|
1180 | |||
1250 | __bool__ = __nonzero__ |
|
1181 | __bool__ = __nonzero__ | |
1251 |
|
1182 | |||
1252 | def _buildflagfunc(self): |
|
1183 | def _buildflagfunc(self): | |
1253 | # Create a fallback function for getting file flags when the |
|
1184 | # Create a fallback function for getting file flags when the | |
1254 | # filesystem doesn't support them |
|
1185 | # filesystem doesn't support them | |
1255 |
|
1186 | |||
1256 | copiesget = self._repo.dirstate.copies().get |
|
1187 | copiesget = self._repo.dirstate.copies().get | |
1257 | parents = self.parents() |
|
1188 | parents = self.parents() | |
1258 | if len(parents) < 2: |
|
1189 | if len(parents) < 2: | |
1259 | # when we have one parent, it's easy: copy from parent |
|
1190 | # when we have one parent, it's easy: copy from parent | |
1260 | man = parents[0].manifest() |
|
1191 | man = parents[0].manifest() | |
1261 | def func(f): |
|
1192 | def func(f): | |
1262 | f = copiesget(f, f) |
|
1193 | f = copiesget(f, f) | |
1263 | return man.flags(f) |
|
1194 | return man.flags(f) | |
1264 | else: |
|
1195 | else: | |
1265 | # merges are tricky: we try to reconstruct the unstored |
|
1196 | # merges are tricky: we try to reconstruct the unstored | |
1266 | # result from the merge (issue1802) |
|
1197 | # result from the merge (issue1802) | |
1267 | p1, p2 = parents |
|
1198 | p1, p2 = parents | |
1268 | pa = p1.ancestor(p2) |
|
1199 | pa = p1.ancestor(p2) | |
1269 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() |
|
1200 | m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest() | |
1270 |
|
1201 | |||
1271 | def func(f): |
|
1202 | def func(f): | |
1272 | f = copiesget(f, f) # may be wrong for merges with copies |
|
1203 | f = copiesget(f, f) # may be wrong for merges with copies | |
1273 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) |
|
1204 | fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f) | |
1274 | if fl1 == fl2: |
|
1205 | if fl1 == fl2: | |
1275 | return fl1 |
|
1206 | return fl1 | |
1276 | if fl1 == fla: |
|
1207 | if fl1 == fla: | |
1277 | return fl2 |
|
1208 | return fl2 | |
1278 | if fl2 == fla: |
|
1209 | if fl2 == fla: | |
1279 | return fl1 |
|
1210 | return fl1 | |
1280 | return '' # punt for conflicts |
|
1211 | return '' # punt for conflicts | |
1281 |
|
1212 | |||
1282 | return func |
|
1213 | return func | |
1283 |
|
1214 | |||
1284 | @propertycache |
|
1215 | @propertycache | |
1285 | def _flagfunc(self): |
|
1216 | def _flagfunc(self): | |
1286 | return self._repo.dirstate.flagfunc(self._buildflagfunc) |
|
1217 | return self._repo.dirstate.flagfunc(self._buildflagfunc) | |
1287 |
|
1218 | |||
1288 | @propertycache |
|
1219 | @propertycache | |
1289 | def _status(self): |
|
1220 | def _status(self): | |
1290 | return self._repo.status() |
|
1221 | return self._repo.status() | |
1291 |
|
1222 | |||
1292 | @propertycache |
|
1223 | @propertycache | |
1293 | def _user(self): |
|
1224 | def _user(self): | |
1294 | return self._repo.ui.username() |
|
1225 | return self._repo.ui.username() | |
1295 |
|
1226 | |||
1296 | @propertycache |
|
1227 | @propertycache | |
1297 | def _date(self): |
|
1228 | def _date(self): | |
1298 | ui = self._repo.ui |
|
1229 | ui = self._repo.ui | |
1299 | date = ui.configdate('devel', 'default-date') |
|
1230 | date = ui.configdate('devel', 'default-date') | |
1300 | if date is None: |
|
1231 | if date is None: | |
1301 | date = dateutil.makedate() |
|
1232 | date = dateutil.makedate() | |
1302 | return date |
|
1233 | return date | |
1303 |
|
1234 | |||
1304 | def subrev(self, subpath): |
|
1235 | def subrev(self, subpath): | |
1305 | return None |
|
1236 | return None | |
1306 |
|
1237 | |||
1307 | def manifestnode(self): |
|
1238 | def manifestnode(self): | |
1308 | return None |
|
1239 | return None | |
1309 | def user(self): |
|
1240 | def user(self): | |
1310 | return self._user or self._repo.ui.username() |
|
1241 | return self._user or self._repo.ui.username() | |
1311 | def date(self): |
|
1242 | def date(self): | |
1312 | return self._date |
|
1243 | return self._date | |
1313 | def description(self): |
|
1244 | def description(self): | |
1314 | return self._text |
|
1245 | return self._text | |
1315 | def files(self): |
|
1246 | def files(self): | |
1316 | return sorted(self._status.modified + self._status.added + |
|
1247 | return sorted(self._status.modified + self._status.added + | |
1317 | self._status.removed) |
|
1248 | self._status.removed) | |
1318 |
|
1249 | |||
1319 | def modified(self): |
|
1250 | def modified(self): | |
1320 | return self._status.modified |
|
1251 | return self._status.modified | |
1321 | def added(self): |
|
1252 | def added(self): | |
1322 | return self._status.added |
|
1253 | return self._status.added | |
1323 | def removed(self): |
|
1254 | def removed(self): | |
1324 | return self._status.removed |
|
1255 | return self._status.removed | |
1325 | def deleted(self): |
|
1256 | def deleted(self): | |
1326 | return self._status.deleted |
|
1257 | return self._status.deleted | |
1327 | def branch(self): |
|
1258 | def branch(self): | |
1328 | return encoding.tolocal(self._extra['branch']) |
|
1259 | return encoding.tolocal(self._extra['branch']) | |
1329 | def closesbranch(self): |
|
1260 | def closesbranch(self): | |
1330 | return 'close' in self._extra |
|
1261 | return 'close' in self._extra | |
1331 | def extra(self): |
|
1262 | def extra(self): | |
1332 | return self._extra |
|
1263 | return self._extra | |
1333 |
|
1264 | |||
1334 | def isinmemory(self): |
|
1265 | def isinmemory(self): | |
1335 | return False |
|
1266 | return False | |
1336 |
|
1267 | |||
1337 | def tags(self): |
|
1268 | def tags(self): | |
1338 | return [] |
|
1269 | return [] | |
1339 |
|
1270 | |||
1340 | def bookmarks(self): |
|
1271 | def bookmarks(self): | |
1341 | b = [] |
|
1272 | b = [] | |
1342 | for p in self.parents(): |
|
1273 | for p in self.parents(): | |
1343 | b.extend(p.bookmarks()) |
|
1274 | b.extend(p.bookmarks()) | |
1344 | return b |
|
1275 | return b | |
1345 |
|
1276 | |||
1346 | def phase(self): |
|
1277 | def phase(self): | |
1347 | phase = phases.draft # default phase to draft |
|
1278 | phase = phases.draft # default phase to draft | |
1348 | for p in self.parents(): |
|
1279 | for p in self.parents(): | |
1349 | phase = max(phase, p.phase()) |
|
1280 | phase = max(phase, p.phase()) | |
1350 | return phase |
|
1281 | return phase | |
1351 |
|
1282 | |||
1352 | def hidden(self): |
|
1283 | def hidden(self): | |
1353 | return False |
|
1284 | return False | |
1354 |
|
1285 | |||
1355 | def children(self): |
|
1286 | def children(self): | |
1356 | return [] |
|
1287 | return [] | |
1357 |
|
1288 | |||
1358 | def flags(self, path): |
|
1289 | def flags(self, path): | |
1359 | if r'_manifest' in self.__dict__: |
|
1290 | if r'_manifest' in self.__dict__: | |
1360 | try: |
|
1291 | try: | |
1361 | return self._manifest.flags(path) |
|
1292 | return self._manifest.flags(path) | |
1362 | except KeyError: |
|
1293 | except KeyError: | |
1363 | return '' |
|
1294 | return '' | |
1364 |
|
1295 | |||
1365 | try: |
|
1296 | try: | |
1366 | return self._flagfunc(path) |
|
1297 | return self._flagfunc(path) | |
1367 | except OSError: |
|
1298 | except OSError: | |
1368 | return '' |
|
1299 | return '' | |
1369 |
|
1300 | |||
1370 | def ancestor(self, c2): |
|
1301 | def ancestor(self, c2): | |
1371 | """return the "best" ancestor context of self and c2""" |
|
1302 | """return the "best" ancestor context of self and c2""" | |
1372 | return self._parents[0].ancestor(c2) # punt on two parents for now |
|
1303 | return self._parents[0].ancestor(c2) # punt on two parents for now | |
1373 |
|
1304 | |||
1374 | def walk(self, match): |
|
1305 | def walk(self, match): | |
1375 | '''Generates matching file names.''' |
|
1306 | '''Generates matching file names.''' | |
1376 | return sorted(self._repo.dirstate.walk(match, |
|
1307 | return sorted(self._repo.dirstate.walk(match, | |
1377 | subrepos=sorted(self.substate), |
|
1308 | subrepos=sorted(self.substate), | |
1378 | unknown=True, ignored=False)) |
|
1309 | unknown=True, ignored=False)) | |
1379 |
|
1310 | |||
1380 | def matches(self, match): |
|
1311 | def matches(self, match): | |
1381 | return sorted(self._repo.dirstate.matches(match)) |
|
1312 | return sorted(self._repo.dirstate.matches(match)) | |
1382 |
|
1313 | |||
1383 | def ancestors(self): |
|
1314 | def ancestors(self): | |
1384 | for p in self._parents: |
|
1315 | for p in self._parents: | |
1385 | yield p |
|
1316 | yield p | |
1386 | for a in self._repo.changelog.ancestors( |
|
1317 | for a in self._repo.changelog.ancestors( | |
1387 | [p.rev() for p in self._parents]): |
|
1318 | [p.rev() for p in self._parents]): | |
1388 | yield changectx(self._repo, a) |
|
1319 | yield changectx(self._repo, a) | |
1389 |
|
1320 | |||
1390 | def markcommitted(self, node): |
|
1321 | def markcommitted(self, node): | |
1391 | """Perform post-commit cleanup necessary after committing this ctx |
|
1322 | """Perform post-commit cleanup necessary after committing this ctx | |
1392 |
|
1323 | |||
1393 | Specifically, this updates the backing stores this working context |
|
1324 | Specifically, this updates the backing stores this working context | |
1394 | wraps to reflect the fact that the changes reflected by this |
|
1325 | wraps to reflect the fact that the changes reflected by this | |
1395 | workingctx have been committed. For example, it marks |
|
1326 | workingctx have been committed. For example, it marks | |
1396 | modified and added files as normal in the dirstate. |
|
1327 | modified and added files as normal in the dirstate. | |
1397 |
|
1328 | |||
1398 | """ |
|
1329 | """ | |
1399 |
|
1330 | |||
1400 | with self._repo.dirstate.parentchange(): |
|
1331 | with self._repo.dirstate.parentchange(): | |
1401 | for f in self.modified() + self.added(): |
|
1332 | for f in self.modified() + self.added(): | |
1402 | self._repo.dirstate.normal(f) |
|
1333 | self._repo.dirstate.normal(f) | |
1403 | for f in self.removed(): |
|
1334 | for f in self.removed(): | |
1404 | self._repo.dirstate.drop(f) |
|
1335 | self._repo.dirstate.drop(f) | |
1405 | self._repo.dirstate.setparents(node) |
|
1336 | self._repo.dirstate.setparents(node) | |
1406 |
|
1337 | |||
1407 | # write changes out explicitly, because nesting wlock at |
|
1338 | # write changes out explicitly, because nesting wlock at | |
1408 | # runtime may prevent 'wlock.release()' in 'repo.commit()' |
|
1339 | # runtime may prevent 'wlock.release()' in 'repo.commit()' | |
1409 | # from immediately doing so for subsequent changing files |
|
1340 | # from immediately doing so for subsequent changing files | |
1410 | self._repo.dirstate.write(self._repo.currenttransaction()) |
|
1341 | self._repo.dirstate.write(self._repo.currenttransaction()) | |
1411 |
|
1342 | |||
1412 | def dirty(self, missing=False, merge=True, branch=True): |
|
1343 | def dirty(self, missing=False, merge=True, branch=True): | |
1413 | return False |
|
1344 | return False | |
1414 |
|
1345 | |||
1415 | class workingctx(committablectx): |
|
1346 | class workingctx(committablectx): | |
1416 | """A workingctx object makes access to data related to |
|
1347 | """A workingctx object makes access to data related to | |
1417 | the current working directory convenient. |
|
1348 | the current working directory convenient. | |
1418 | date - any valid date string or (unixtime, offset), or None. |
|
1349 | date - any valid date string or (unixtime, offset), or None. | |
1419 | user - username string, or None. |
|
1350 | user - username string, or None. | |
1420 | extra - a dictionary of extra values, or None. |
|
1351 | extra - a dictionary of extra values, or None. | |
1421 | changes - a list of file lists as returned by localrepo.status() |
|
1352 | changes - a list of file lists as returned by localrepo.status() | |
1422 | or None to use the repository status. |
|
1353 | or None to use the repository status. | |
1423 | """ |
|
1354 | """ | |
1424 | def __init__(self, repo, text="", user=None, date=None, extra=None, |
|
1355 | def __init__(self, repo, text="", user=None, date=None, extra=None, | |
1425 | changes=None): |
|
1356 | changes=None): | |
1426 | super(workingctx, self).__init__(repo, text, user, date, extra, changes) |
|
1357 | super(workingctx, self).__init__(repo, text, user, date, extra, changes) | |
1427 |
|
1358 | |||
1428 | def __iter__(self): |
|
1359 | def __iter__(self): | |
1429 | d = self._repo.dirstate |
|
1360 | d = self._repo.dirstate | |
1430 | for f in d: |
|
1361 | for f in d: | |
1431 | if d[f] != 'r': |
|
1362 | if d[f] != 'r': | |
1432 | yield f |
|
1363 | yield f | |
1433 |
|
1364 | |||
1434 | def __contains__(self, key): |
|
1365 | def __contains__(self, key): | |
1435 | return self._repo.dirstate[key] not in "?r" |
|
1366 | return self._repo.dirstate[key] not in "?r" | |
1436 |
|
1367 | |||
1437 | def hex(self): |
|
1368 | def hex(self): | |
1438 | return hex(wdirid) |
|
1369 | return hex(wdirid) | |
1439 |
|
1370 | |||
1440 | @propertycache |
|
1371 | @propertycache | |
1441 | def _parents(self): |
|
1372 | def _parents(self): | |
1442 | p = self._repo.dirstate.parents() |
|
1373 | p = self._repo.dirstate.parents() | |
1443 | if p[1] == nullid: |
|
1374 | if p[1] == nullid: | |
1444 | p = p[:-1] |
|
1375 | p = p[:-1] | |
1445 | return [changectx(self._repo, x) for x in p] |
|
1376 | return [changectx(self._repo, x) for x in p] | |
1446 |
|
1377 | |||
1447 | def filectx(self, path, filelog=None): |
|
1378 | def filectx(self, path, filelog=None): | |
1448 | """get a file context from the working directory""" |
|
1379 | """get a file context from the working directory""" | |
1449 | return workingfilectx(self._repo, path, workingctx=self, |
|
1380 | return workingfilectx(self._repo, path, workingctx=self, | |
1450 | filelog=filelog) |
|
1381 | filelog=filelog) | |
1451 |
|
1382 | |||
1452 | def dirty(self, missing=False, merge=True, branch=True): |
|
1383 | def dirty(self, missing=False, merge=True, branch=True): | |
1453 | "check whether a working directory is modified" |
|
1384 | "check whether a working directory is modified" | |
1454 | # check subrepos first |
|
1385 | # check subrepos first | |
1455 | for s in sorted(self.substate): |
|
1386 | for s in sorted(self.substate): | |
1456 | if self.sub(s).dirty(missing=missing): |
|
1387 | if self.sub(s).dirty(missing=missing): | |
1457 | return True |
|
1388 | return True | |
1458 | # check current working dir |
|
1389 | # check current working dir | |
1459 | return ((merge and self.p2()) or |
|
1390 | return ((merge and self.p2()) or | |
1460 | (branch and self.branch() != self.p1().branch()) or |
|
1391 | (branch and self.branch() != self.p1().branch()) or | |
1461 | self.modified() or self.added() or self.removed() or |
|
1392 | self.modified() or self.added() or self.removed() or | |
1462 | (missing and self.deleted())) |
|
1393 | (missing and self.deleted())) | |
1463 |
|
1394 | |||
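An illustrative caller-side check built on the dirty() logic above (the repository path is an assumption): dirty subrepos, a merge in progress, a branch change relative to p1, or any modified/added/removed files make the working directory dirty, and missing=True also counts files deleted without 'hg remove'.

    from mercurial import error, hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    wctx = repo[None]
    if wctx.dirty(missing=True):
        raise error.Abort('uncommitted changes in the working directory')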
1464 | def add(self, list, prefix=""): |
|
1395 | def add(self, list, prefix=""): | |
1465 | with self._repo.wlock(): |
|
1396 | with self._repo.wlock(): | |
1466 | ui, ds = self._repo.ui, self._repo.dirstate |
|
1397 | ui, ds = self._repo.ui, self._repo.dirstate | |
1467 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) |
|
1398 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) | |
1468 | rejected = [] |
|
1399 | rejected = [] | |
1469 | lstat = self._repo.wvfs.lstat |
|
1400 | lstat = self._repo.wvfs.lstat | |
1470 | for f in list: |
|
1401 | for f in list: | |
1471 | # ds.pathto() returns an absolute file when this is invoked from |
|
1402 | # ds.pathto() returns an absolute file when this is invoked from | |
1472 | # the keyword extension. That gets flagged as non-portable on |
|
1403 | # the keyword extension. That gets flagged as non-portable on | |
1473 | # Windows, since it contains the drive letter and colon. |
|
1404 | # Windows, since it contains the drive letter and colon. | |
1474 | scmutil.checkportable(ui, os.path.join(prefix, f)) |
|
1405 | scmutil.checkportable(ui, os.path.join(prefix, f)) | |
1475 | try: |
|
1406 | try: | |
1476 | st = lstat(f) |
|
1407 | st = lstat(f) | |
1477 | except OSError: |
|
1408 | except OSError: | |
1478 | ui.warn(_("%s does not exist!\n") % uipath(f)) |
|
1409 | ui.warn(_("%s does not exist!\n") % uipath(f)) | |
1479 | rejected.append(f) |
|
1410 | rejected.append(f) | |
1480 | continue |
|
1411 | continue | |
1481 | if st.st_size > 10000000: |
|
1412 | if st.st_size > 10000000: | |
1482 | ui.warn(_("%s: up to %d MB of RAM may be required " |
|
1413 | ui.warn(_("%s: up to %d MB of RAM may be required " | |
1483 | "to manage this file\n" |
|
1414 | "to manage this file\n" | |
1484 | "(use 'hg revert %s' to cancel the " |
|
1415 | "(use 'hg revert %s' to cancel the " | |
1485 | "pending addition)\n") |
|
1416 | "pending addition)\n") | |
1486 | % (f, 3 * st.st_size // 1000000, uipath(f))) |
|
1417 | % (f, 3 * st.st_size // 1000000, uipath(f))) | |
1487 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1418 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): | |
1488 | ui.warn(_("%s not added: only files and symlinks " |
|
1419 | ui.warn(_("%s not added: only files and symlinks " | |
1489 | "supported currently\n") % uipath(f)) |
|
1420 | "supported currently\n") % uipath(f)) | |
1490 | rejected.append(f) |
|
1421 | rejected.append(f) | |
1491 | elif ds[f] in 'amn': |
|
1422 | elif ds[f] in 'amn': | |
1492 | ui.warn(_("%s already tracked!\n") % uipath(f)) |
|
1423 | ui.warn(_("%s already tracked!\n") % uipath(f)) | |
1493 | elif ds[f] == 'r': |
|
1424 | elif ds[f] == 'r': | |
1494 | ds.normallookup(f) |
|
1425 | ds.normallookup(f) | |
1495 | else: |
|
1426 | else: | |
1496 | ds.add(f) |
|
1427 | ds.add(f) | |
1497 | return rejected |
|
1428 | return rejected | |
1498 |
|
1429 | |||
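A hedged usage sketch for the add() path above ('newfile.txt' is an assumed name, and the repo handle repeats the setup from the previous sketch): files are scheduled for addition in the dirstate, and any paths that could not be added (missing, or not a regular file or symlink) come back in the rejected list.

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    wctx = repo[None]
    rejected = wctx.add(['newfile.txt'])
    for f in rejected:
        print('could not add %s' % f)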
1499 | def forget(self, files, prefix=""): |
|
1430 | def forget(self, files, prefix=""): | |
1500 | with self._repo.wlock(): |
|
1431 | with self._repo.wlock(): | |
1501 | ds = self._repo.dirstate |
|
1432 | ds = self._repo.dirstate | |
1502 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) |
|
1433 | uipath = lambda f: ds.pathto(pathutil.join(prefix, f)) | |
1503 | rejected = [] |
|
1434 | rejected = [] | |
1504 | for f in files: |
|
1435 | for f in files: | |
1505 | if f not in self._repo.dirstate: |
|
1436 | if f not in self._repo.dirstate: | |
1506 | self._repo.ui.warn(_("%s not tracked!\n") % uipath(f)) |
|
1437 | self._repo.ui.warn(_("%s not tracked!\n") % uipath(f)) | |
1507 | rejected.append(f) |
|
1438 | rejected.append(f) | |
1508 | elif self._repo.dirstate[f] != 'a': |
|
1439 | elif self._repo.dirstate[f] != 'a': | |
1509 | self._repo.dirstate.remove(f) |
|
1440 | self._repo.dirstate.remove(f) | |
1510 | else: |
|
1441 | else: | |
1511 | self._repo.dirstate.drop(f) |
|
1442 | self._repo.dirstate.drop(f) | |
1512 | return rejected |
|
1443 | return rejected | |
1513 |
|
1444 | |||
1514 | def undelete(self, list): |
|
1445 | def undelete(self, list): | |
1515 | pctxs = self.parents() |
|
1446 | pctxs = self.parents() | |
1516 | with self._repo.wlock(): |
|
1447 | with self._repo.wlock(): | |
1517 | ds = self._repo.dirstate |
|
1448 | ds = self._repo.dirstate | |
1518 | for f in list: |
|
1449 | for f in list: | |
1519 | if self._repo.dirstate[f] != 'r': |
|
1450 | if self._repo.dirstate[f] != 'r': | |
1520 | self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f)) |
|
1451 | self._repo.ui.warn(_("%s not removed!\n") % ds.pathto(f)) | |
1521 | else: |
|
1452 | else: | |
1522 | fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f] |
|
1453 | fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f] | |
1523 | t = fctx.data() |
|
1454 | t = fctx.data() | |
1524 | self._repo.wwrite(f, t, fctx.flags()) |
|
1455 | self._repo.wwrite(f, t, fctx.flags()) | |
1525 | self._repo.dirstate.normal(f) |
|
1456 | self._repo.dirstate.normal(f) | |
1526 |
|
1457 | |||
1527 | def copy(self, source, dest): |
|
1458 | def copy(self, source, dest): | |
1528 | try: |
|
1459 | try: | |
1529 | st = self._repo.wvfs.lstat(dest) |
|
1460 | st = self._repo.wvfs.lstat(dest) | |
1530 | except OSError as err: |
|
1461 | except OSError as err: | |
1531 | if err.errno != errno.ENOENT: |
|
1462 | if err.errno != errno.ENOENT: | |
1532 | raise |
|
1463 | raise | |
1533 | self._repo.ui.warn(_("%s does not exist!\n") |
|
1464 | self._repo.ui.warn(_("%s does not exist!\n") | |
1534 | % self._repo.dirstate.pathto(dest)) |
|
1465 | % self._repo.dirstate.pathto(dest)) | |
1535 | return |
|
1466 | return | |
1536 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): |
|
1467 | if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)): | |
1537 | self._repo.ui.warn(_("copy failed: %s is not a file or a " |
|
1468 | self._repo.ui.warn(_("copy failed: %s is not a file or a " | |
1538 | "symbolic link\n") |
|
1469 | "symbolic link\n") | |
1539 | % self._repo.dirstate.pathto(dest)) |
|
1470 | % self._repo.dirstate.pathto(dest)) | |
1540 | else: |
|
1471 | else: | |
1541 | with self._repo.wlock(): |
|
1472 | with self._repo.wlock(): | |
1542 | if self._repo.dirstate[dest] in '?': |
|
1473 | if self._repo.dirstate[dest] in '?': | |
1543 | self._repo.dirstate.add(dest) |
|
1474 | self._repo.dirstate.add(dest) | |
1544 | elif self._repo.dirstate[dest] in 'r': |
|
1475 | elif self._repo.dirstate[dest] in 'r': | |
1545 | self._repo.dirstate.normallookup(dest) |
|
1476 | self._repo.dirstate.normallookup(dest) | |
1546 | self._repo.dirstate.copy(source, dest) |
|
1477 | self._repo.dirstate.copy(source, dest) | |
1547 |
|
1478 | |||
1548 | def match(self, pats=None, include=None, exclude=None, default='glob', |
|
1479 | def match(self, pats=None, include=None, exclude=None, default='glob', | |
1549 | listsubrepos=False, badfn=None): |
|
1480 | listsubrepos=False, badfn=None): | |
1550 | r = self._repo |
|
1481 | r = self._repo | |
1551 |
|
1482 | |||
1552 | # Only a case insensitive filesystem needs magic to translate user input |
|
1483 | # Only a case insensitive filesystem needs magic to translate user input | |
1553 | # to actual case in the filesystem. |
|
1484 | # to actual case in the filesystem. | |
1554 | icasefs = not util.fscasesensitive(r.root) |
|
1485 | icasefs = not util.fscasesensitive(r.root) | |
1555 | return matchmod.match(r.root, r.getcwd(), pats, include, exclude, |
|
1486 | return matchmod.match(r.root, r.getcwd(), pats, include, exclude, | |
1556 | default, auditor=r.auditor, ctx=self, |
|
1487 | default, auditor=r.auditor, ctx=self, | |
1557 | listsubrepos=listsubrepos, badfn=badfn, |
|
1488 | listsubrepos=listsubrepos, badfn=badfn, | |
1558 | icasefs=icasefs) |
|
1489 | icasefs=icasefs) | |
1559 |
|
1490 | |||
1560 | def _filtersuspectsymlink(self, files): |
|
1491 | def _filtersuspectsymlink(self, files): | |
1561 | if not files or self._repo.dirstate._checklink: |
|
1492 | if not files or self._repo.dirstate._checklink: | |
1562 | return files |
|
1493 | return files | |
1563 |
|
1494 | |||
1564 | # Symlink placeholders may get non-symlink-like contents |
|
1495 | # Symlink placeholders may get non-symlink-like contents | |
1565 | # via user error or dereferencing by NFS or Samba servers, |
|
1496 | # via user error or dereferencing by NFS or Samba servers, | |
1566 | # so we filter out any placeholders that don't look like a |
|
1497 | # so we filter out any placeholders that don't look like a | |
1567 | # symlink |
|
1498 | # symlink | |
1568 | sane = [] |
|
1499 | sane = [] | |
1569 | for f in files: |
|
1500 | for f in files: | |
1570 | if self.flags(f) == 'l': |
|
1501 | if self.flags(f) == 'l': | |
1571 | d = self[f].data() |
|
1502 | d = self[f].data() | |
1572 | if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d): |
|
1503 | if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d): | |
1573 | self._repo.ui.debug('ignoring suspect symlink placeholder' |
|
1504 | self._repo.ui.debug('ignoring suspect symlink placeholder' | |
1574 | ' "%s"\n' % f) |
|
1505 | ' "%s"\n' % f) | |
1575 | continue |
|
1506 | continue | |
1576 | sane.append(f) |
|
1507 | sane.append(f) | |
1577 | return sane |
|
1508 | return sane | |
1578 |
|
1509 | |||
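The filter above can be read on its own: a plausible symlink placeholder is short, single-line, non-empty text. A minimal standalone sketch of the same heuristic (plain Python; the NUL-byte test stands in for util.binary):

def looks_like_symlink_target(data):
    # Mirrors _filtersuspectsymlink: reject empty data, 1024 bytes or more,
    # multi-line content, and binary-looking content.
    return (bool(data) and len(data) < 1024
            and b'\n' not in data and b'\0' not in data)

assert looks_like_symlink_target(b'some/target')
assert not looks_like_symlink_target(b'#!/bin/sh\necho not a link\n')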
1579 | def _checklookup(self, files): |
|
1510 | def _checklookup(self, files): | |
1580 | # check for any possibly clean files |
|
1511 | # check for any possibly clean files | |
1581 | if not files: |
|
1512 | if not files: | |
1582 | return [], [], [] |
|
1513 | return [], [], [] | |
1583 |
|
1514 | |||
1584 | modified = [] |
|
1515 | modified = [] | |
1585 | deleted = [] |
|
1516 | deleted = [] | |
1586 | fixup = [] |
|
1517 | fixup = [] | |
1587 | pctx = self._parents[0] |
|
1518 | pctx = self._parents[0] | |
1588 | # do a full compare of any files that might have changed |
|
1519 | # do a full compare of any files that might have changed | |
1589 | for f in sorted(files): |
|
1520 | for f in sorted(files): | |
1590 | try: |
|
1521 | try: | |
1591 | # This will return True for a file that got replaced by a |
|
1522 | # This will return True for a file that got replaced by a | |
1592 | # directory in the interim, but fixing that is pretty hard. |
|
1523 | # directory in the interim, but fixing that is pretty hard. | |
1593 | if (f not in pctx or self.flags(f) != pctx.flags(f) |
|
1524 | if (f not in pctx or self.flags(f) != pctx.flags(f) | |
1594 | or pctx[f].cmp(self[f])): |
|
1525 | or pctx[f].cmp(self[f])): | |
1595 | modified.append(f) |
|
1526 | modified.append(f) | |
1596 | else: |
|
1527 | else: | |
1597 | fixup.append(f) |
|
1528 | fixup.append(f) | |
1598 | except (IOError, OSError): |
|
1529 | except (IOError, OSError): | |
1599 | # A file became inaccessible in the meantime? Mark it as deleted, |
|
1530 | # A file became inaccessible in the meantime? Mark it as deleted, | |
1600 | # matching dirstate behavior (issue5584). |
|
1531 | # matching dirstate behavior (issue5584). | |
1601 | # The dirstate has more complex behavior around whether a |
|
1532 | # The dirstate has more complex behavior around whether a | |
1602 | # missing file matches a directory, etc, but we don't need to |
|
1533 | # missing file matches a directory, etc, but we don't need to | |
1603 | # bother with that: if f has made it to this point, we're sure |
|
1534 | # bother with that: if f has made it to this point, we're sure | |
1604 | # it's in the dirstate. |
|
1535 | # it's in the dirstate. | |
1605 | deleted.append(f) |
|
1536 | deleted.append(f) | |
1606 |
|
1537 | |||
1607 | return modified, deleted, fixup |
|
1538 | return modified, deleted, fixup | |
1608 |
|
1539 | |||
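The shape of the three-way split performed by _checklookup can be sketched without any Mercurial objects; read_file and parent_data below are stand-ins for the working copy and the contents recorded in the parent:

def classify(candidates, parent_data, read_file):
    # Full-compare each possibly-clean file against its parent; files that
    # disappeared in the meantime are reported as deleted (issue5584).
    modified, deleted, fixup = [], [], []
    for f in sorted(candidates):
        try:
            if read_file(f) != parent_data.get(f):
                modified.append(f)
            else:
                fixup.append(f)
        except (IOError, OSError):
            deleted.append(f)
    return modified, deleted, fixup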
1609 | def _poststatusfixup(self, status, fixup): |
|
1540 | def _poststatusfixup(self, status, fixup): | |
1610 | """update dirstate for files that are actually clean""" |
|
1541 | """update dirstate for files that are actually clean""" | |
1611 | poststatus = self._repo.postdsstatus() |
|
1542 | poststatus = self._repo.postdsstatus() | |
1612 | if fixup or poststatus: |
|
1543 | if fixup or poststatus: | |
1613 | try: |
|
1544 | try: | |
1614 | oldid = self._repo.dirstate.identity() |
|
1545 | oldid = self._repo.dirstate.identity() | |
1615 |
|
1546 | |||
1616 | # updating the dirstate is optional |
|
1547 | # updating the dirstate is optional | |
1617 | # so we don't wait on the lock |
|
1548 | # so we don't wait on the lock | |
1618 | # wlock can invalidate the dirstate, so cache normal _after_ |
|
1549 | # wlock can invalidate the dirstate, so cache normal _after_ | |
1619 | # taking the lock |
|
1550 | # taking the lock | |
1620 | with self._repo.wlock(False): |
|
1551 | with self._repo.wlock(False): | |
1621 | if self._repo.dirstate.identity() == oldid: |
|
1552 | if self._repo.dirstate.identity() == oldid: | |
1622 | if fixup: |
|
1553 | if fixup: | |
1623 | normal = self._repo.dirstate.normal |
|
1554 | normal = self._repo.dirstate.normal | |
1624 | for f in fixup: |
|
1555 | for f in fixup: | |
1625 | normal(f) |
|
1556 | normal(f) | |
1626 | # write changes out explicitly, because nesting |
|
1557 | # write changes out explicitly, because nesting | |
1627 | # wlock at runtime may prevent 'wlock.release()' |
|
1558 | # wlock at runtime may prevent 'wlock.release()' | |
1628 | # after this block from doing so for subsequent |
|
1559 | # after this block from doing so for subsequent | |
1629 | # changing files |
|
1560 | # changing files | |
1630 | tr = self._repo.currenttransaction() |
|
1561 | tr = self._repo.currenttransaction() | |
1631 | self._repo.dirstate.write(tr) |
|
1562 | self._repo.dirstate.write(tr) | |
1632 |
|
1563 | |||
1633 | if poststatus: |
|
1564 | if poststatus: | |
1634 | for ps in poststatus: |
|
1565 | for ps in poststatus: | |
1635 | ps(self, status) |
|
1566 | ps(self, status) | |
1636 | else: |
|
1567 | else: | |
1637 | # in this case, writing changes out breaks |
|
1568 | # in this case, writing changes out breaks | |
1638 | # consistency, because .hg/dirstate was |
|
1569 | # consistency, because .hg/dirstate was | |
1639 | # already changed simultaneously after last |
|
1570 | # already changed simultaneously after last | |
1640 | # caching (see also issue5584 for detail) |
|
1571 | # caching (see also issue5584 for detail) | |
1641 | self._repo.ui.debug('skip updating dirstate: ' |
|
1572 | self._repo.ui.debug('skip updating dirstate: ' | |
1642 | 'identity mismatch\n') |
|
1573 | 'identity mismatch\n') | |
1643 | except error.LockError: |
|
1574 | except error.LockError: | |
1644 | pass |
|
1575 | pass | |
1645 | finally: |
|
1576 | finally: | |
1646 | # Even if the wlock couldn't be grabbed, clear out the list. |
|
1577 | # Even if the wlock couldn't be grabbed, clear out the list. | |
1647 | self._repo.clearpostdsstatus() |
|
1578 | self._repo.clearpostdsstatus() | |
1648 |
|
1579 | |||
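The core of the race avoidance above is "record an identity, take the lock, and only write if the identity is unchanged". A hedged sketch of that pattern with generic stand-ins (lock, identity and write are placeholders, not Mercurial APIs):

def write_if_unchanged(lock, identity, expected, write):
    # Re-check the identity once the lock is held; if it moved, somebody
    # else already rewrote the state and our cached data may be stale.
    with lock:
        if identity() == expected:
            write()
            return True
        return False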
1649 | def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): |
|
1580 | def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): | |
1650 | '''Gets the status from the dirstate -- internal use only.''' |
|
1581 | '''Gets the status from the dirstate -- internal use only.''' | |
1651 | subrepos = [] |
|
1582 | subrepos = [] | |
1652 | if '.hgsub' in self: |
|
1583 | if '.hgsub' in self: | |
1653 | subrepos = sorted(self.substate) |
|
1584 | subrepos = sorted(self.substate) | |
1654 | cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored, |
|
1585 | cmp, s = self._repo.dirstate.status(match, subrepos, ignored=ignored, | |
1655 | clean=clean, unknown=unknown) |
|
1586 | clean=clean, unknown=unknown) | |
1656 |
|
1587 | |||
1657 | # check for any possibly clean files |
|
1588 | # check for any possibly clean files | |
1658 | fixup = [] |
|
1589 | fixup = [] | |
1659 | if cmp: |
|
1590 | if cmp: | |
1660 | modified2, deleted2, fixup = self._checklookup(cmp) |
|
1591 | modified2, deleted2, fixup = self._checklookup(cmp) | |
1661 | s.modified.extend(modified2) |
|
1592 | s.modified.extend(modified2) | |
1662 | s.deleted.extend(deleted2) |
|
1593 | s.deleted.extend(deleted2) | |
1663 |
|
1594 | |||
1664 | if fixup and clean: |
|
1595 | if fixup and clean: | |
1665 | s.clean.extend(fixup) |
|
1596 | s.clean.extend(fixup) | |
1666 |
|
1597 | |||
1667 | self._poststatusfixup(s, fixup) |
|
1598 | self._poststatusfixup(s, fixup) | |
1668 |
|
1599 | |||
1669 | if match.always(): |
|
1600 | if match.always(): | |
1670 | # cache for performance |
|
1601 | # cache for performance | |
1671 | if s.unknown or s.ignored or s.clean: |
|
1602 | if s.unknown or s.ignored or s.clean: | |
1672 | # "_status" is cached with list*=False in the normal route |
|
1603 | # "_status" is cached with list*=False in the normal route | |
1673 | self._status = scmutil.status(s.modified, s.added, s.removed, |
|
1604 | self._status = scmutil.status(s.modified, s.added, s.removed, | |
1674 | s.deleted, [], [], []) |
|
1605 | s.deleted, [], [], []) | |
1675 | else: |
|
1606 | else: | |
1676 | self._status = s |
|
1607 | self._status = s | |
1677 |
|
1608 | |||
1678 | return s |
|
1609 | return s | |
1679 |
|
1610 | |||
1680 | @propertycache |
|
1611 | @propertycache | |
1681 | def _manifest(self): |
|
1612 | def _manifest(self): | |
1682 | """generate a manifest corresponding to the values in self._status |
|
1613 | """generate a manifest corresponding to the values in self._status | |
1683 |
|
1614 | |||
1684 | This reuses the file nodeid from the parent, but we use special node |
|
1615 | This reuses the file nodeid from the parent, but we use special node | |
1685 | identifiers for added and modified files. This is used by manifest |
|
1616 | identifiers for added and modified files. This is used by manifest | |
1686 | merge to see that files are different and by update logic to avoid |
|
1617 | merge to see that files are different and by update logic to avoid | |
1687 | deleting newly added files. |
|
1618 | deleting newly added files. | |
1688 | """ |
|
1619 | """ | |
1689 | return self._buildstatusmanifest(self._status) |
|
1620 | return self._buildstatusmanifest(self._status) | |
1690 |
|
1621 | |||
1691 | def _buildstatusmanifest(self, status): |
|
1622 | def _buildstatusmanifest(self, status): | |
1692 | """Builds a manifest that includes the given status results.""" |
|
1623 | """Builds a manifest that includes the given status results.""" | |
1693 | parents = self.parents() |
|
1624 | parents = self.parents() | |
1694 |
|
1625 | |||
1695 | man = parents[0].manifest().copy() |
|
1626 | man = parents[0].manifest().copy() | |
1696 |
|
1627 | |||
1697 | ff = self._flagfunc |
|
1628 | ff = self._flagfunc | |
1698 | for i, l in ((addednodeid, status.added), |
|
1629 | for i, l in ((addednodeid, status.added), | |
1699 | (modifiednodeid, status.modified)): |
|
1630 | (modifiednodeid, status.modified)): | |
1700 | for f in l: |
|
1631 | for f in l: | |
1701 | man[f] = i |
|
1632 | man[f] = i | |
1702 | try: |
|
1633 | try: | |
1703 | man.setflag(f, ff(f)) |
|
1634 | man.setflag(f, ff(f)) | |
1704 | except OSError: |
|
1635 | except OSError: | |
1705 | pass |
|
1636 | pass | |
1706 |
|
1637 | |||
1707 | for f in status.deleted + status.removed: |
|
1638 | for f in status.deleted + status.removed: | |
1708 | if f in man: |
|
1639 | if f in man: | |
1709 | del man[f] |
|
1640 | del man[f] | |
1710 |
|
1641 | |||
1711 | return man |
|
1642 | return man | |
1712 |
|
1643 | |||
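The manifest construction above boils down to "copy the parent, patch in sentinels, drop what is gone". A toy version using plain dicts (the sentinel strings are invented; Mercurial uses addednodeid and modifiednodeid):

def overlay_status(parent_manifest, added, modified, removed, deleted):
    man = dict(parent_manifest)          # start from the first parent
    for f in added:
        man[f] = 'added-sentinel'
    for f in modified:
        man[f] = 'modified-sentinel'
    for f in removed + deleted:
        man.pop(f, None)                 # gone files leave the manifest
    return man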
1713 | def _buildstatus(self, other, s, match, listignored, listclean, |
|
1644 | def _buildstatus(self, other, s, match, listignored, listclean, | |
1714 | listunknown): |
|
1645 | listunknown): | |
1715 | """build a status with respect to another context |
|
1646 | """build a status with respect to another context | |
1716 |
|
1647 | |||
1717 | This includes logic for maintaining the fast path of status when |
|
1648 | This includes logic for maintaining the fast path of status when | |
1718 | comparing the working directory against its parent, which is to skip |
|
1649 | comparing the working directory against its parent, which is to skip | |
1719 | building a new manifest if self (working directory) is not comparing |
|
1650 | building a new manifest if self (working directory) is not comparing | |
1720 | against its parent (repo['.']). |
|
1651 | against its parent (repo['.']). | |
1721 | """ |
|
1652 | """ | |
1722 | s = self._dirstatestatus(match, listignored, listclean, listunknown) |
|
1653 | s = self._dirstatestatus(match, listignored, listclean, listunknown) | |
1723 | # Filter out symlinks that, in the case of FAT32 and NTFS filesystems, |
|
1654 | # Filter out symlinks that, in the case of FAT32 and NTFS filesystems, | |
1724 | # might have accidentally ended up with the entire contents of the file |
|
1655 | # might have accidentally ended up with the entire contents of the file | |
1725 | # they are supposed to be linking to. |
|
1656 | # they are supposed to be linking to. | |
1726 | s.modified[:] = self._filtersuspectsymlink(s.modified) |
|
1657 | s.modified[:] = self._filtersuspectsymlink(s.modified) | |
1727 | if other != self._repo['.']: |
|
1658 | if other != self._repo['.']: | |
1728 | s = super(workingctx, self)._buildstatus(other, s, match, |
|
1659 | s = super(workingctx, self)._buildstatus(other, s, match, | |
1729 | listignored, listclean, |
|
1660 | listignored, listclean, | |
1730 | listunknown) |
|
1661 | listunknown) | |
1731 | return s |
|
1662 | return s | |
1732 |
|
1663 | |||
1733 | def _matchstatus(self, other, match): |
|
1664 | def _matchstatus(self, other, match): | |
1734 | """override the match method with a filter for directory patterns |
|
1665 | """override the match method with a filter for directory patterns | |
1735 |
|
1666 | |||
1736 | We use inheritance to customize the match.bad method only in cases of |
|
1667 | We use inheritance to customize the match.bad method only in cases of | |
1737 | workingctx since it belongs only to the working directory when |
|
1668 | workingctx since it belongs only to the working directory when | |
1738 | comparing against the parent changeset. |
|
1669 | comparing against the parent changeset. | |
1739 |
|
1670 | |||
1740 | If we aren't comparing against the working directory's parent, then we |
|
1671 | If we aren't comparing against the working directory's parent, then we | |
1741 | just use the default match object sent to us. |
|
1672 | just use the default match object sent to us. | |
1742 | """ |
|
1673 | """ | |
1743 | if other != self._repo['.']: |
|
1674 | if other != self._repo['.']: | |
1744 | def bad(f, msg): |
|
1675 | def bad(f, msg): | |
1745 | # 'f' may be a directory pattern from 'match.files()', |
|
1676 | # 'f' may be a directory pattern from 'match.files()', | |
1746 | # so 'f not in ctx1' is not enough |
|
1677 | # so 'f not in ctx1' is not enough | |
1747 | if f not in other and not other.hasdir(f): |
|
1678 | if f not in other and not other.hasdir(f): | |
1748 | self._repo.ui.warn('%s: %s\n' % |
|
1679 | self._repo.ui.warn('%s: %s\n' % | |
1749 | (self._repo.dirstate.pathto(f), msg)) |
|
1680 | (self._repo.dirstate.pathto(f), msg)) | |
1750 | match.bad = bad |
|
1681 | match.bad = bad | |
1751 | return match |
|
1682 | return match | |
1752 |
|
1683 | |||
1753 | def markcommitted(self, node): |
|
1684 | def markcommitted(self, node): | |
1754 | super(workingctx, self).markcommitted(node) |
|
1685 | super(workingctx, self).markcommitted(node) | |
1755 |
|
1686 | |||
1756 | sparse.aftercommit(self._repo, node) |
|
1687 | sparse.aftercommit(self._repo, node) | |
1757 |
|
1688 | |||
1758 | class committablefilectx(basefilectx): |
|
1689 | class committablefilectx(basefilectx): | |
1759 | """A committablefilectx provides common functionality for a file context |
|
1690 | """A committablefilectx provides common functionality for a file context | |
1760 | that wants the ability to commit, e.g. workingfilectx or memfilectx.""" |
|
1691 | that wants the ability to commit, e.g. workingfilectx or memfilectx.""" | |
1761 | def __init__(self, repo, path, filelog=None, ctx=None): |
|
1692 | def __init__(self, repo, path, filelog=None, ctx=None): | |
1762 | self._repo = repo |
|
1693 | self._repo = repo | |
1763 | self._path = path |
|
1694 | self._path = path | |
1764 | self._changeid = None |
|
1695 | self._changeid = None | |
1765 | self._filerev = self._filenode = None |
|
1696 | self._filerev = self._filenode = None | |
1766 |
|
1697 | |||
1767 | if filelog is not None: |
|
1698 | if filelog is not None: | |
1768 | self._filelog = filelog |
|
1699 | self._filelog = filelog | |
1769 | if ctx: |
|
1700 | if ctx: | |
1770 | self._changectx = ctx |
|
1701 | self._changectx = ctx | |
1771 |
|
1702 | |||
1772 | def __nonzero__(self): |
|
1703 | def __nonzero__(self): | |
1773 | return True |
|
1704 | return True | |
1774 |
|
1705 | |||
1775 | __bool__ = __nonzero__ |
|
1706 | __bool__ = __nonzero__ | |
1776 |
|
1707 | |||
1777 | def linkrev(self): |
|
1708 | def linkrev(self): | |
1778 | # linked to self._changectx no matter if file is modified or not |
|
1709 | # linked to self._changectx no matter if file is modified or not | |
1779 | return self.rev() |
|
1710 | return self.rev() | |
1780 |
|
1711 | |||
1781 | def parents(self): |
|
1712 | def parents(self): | |
1782 | '''return parent filectxs, following copies if necessary''' |
|
1713 | '''return parent filectxs, following copies if necessary''' | |
1783 | def filenode(ctx, path): |
|
1714 | def filenode(ctx, path): | |
1784 | return ctx._manifest.get(path, nullid) |
|
1715 | return ctx._manifest.get(path, nullid) | |
1785 |
|
1716 | |||
1786 | path = self._path |
|
1717 | path = self._path | |
1787 | fl = self._filelog |
|
1718 | fl = self._filelog | |
1788 | pcl = self._changectx._parents |
|
1719 | pcl = self._changectx._parents | |
1789 | renamed = self.renamed() |
|
1720 | renamed = self.renamed() | |
1790 |
|
1721 | |||
1791 | if renamed: |
|
1722 | if renamed: | |
1792 | pl = [renamed + (None,)] |
|
1723 | pl = [renamed + (None,)] | |
1793 | else: |
|
1724 | else: | |
1794 | pl = [(path, filenode(pcl[0], path), fl)] |
|
1725 | pl = [(path, filenode(pcl[0], path), fl)] | |
1795 |
|
1726 | |||
1796 | for pc in pcl[1:]: |
|
1727 | for pc in pcl[1:]: | |
1797 | pl.append((path, filenode(pc, path), fl)) |
|
1728 | pl.append((path, filenode(pc, path), fl)) | |
1798 |
|
1729 | |||
1799 | return [self._parentfilectx(p, fileid=n, filelog=l) |
|
1730 | return [self._parentfilectx(p, fileid=n, filelog=l) | |
1800 | for p, n, l in pl if n != nullid] |
|
1731 | for p, n, l in pl if n != nullid] | |
1801 |
|
1732 | |||
1802 | def children(self): |
|
1733 | def children(self): | |
1803 | return [] |
|
1734 | return [] | |
1804 |
|
1735 | |||
1805 | class workingfilectx(committablefilectx): |
|
1736 | class workingfilectx(committablefilectx): | |
1806 | """A workingfilectx object makes access to data related to a particular |
|
1737 | """A workingfilectx object makes access to data related to a particular | |
1807 | file in the working directory convenient.""" |
|
1738 | file in the working directory convenient.""" | |
1808 | def __init__(self, repo, path, filelog=None, workingctx=None): |
|
1739 | def __init__(self, repo, path, filelog=None, workingctx=None): | |
1809 | super(workingfilectx, self).__init__(repo, path, filelog, workingctx) |
|
1740 | super(workingfilectx, self).__init__(repo, path, filelog, workingctx) | |
1810 |
|
1741 | |||
1811 | @propertycache |
|
1742 | @propertycache | |
1812 | def _changectx(self): |
|
1743 | def _changectx(self): | |
1813 | return workingctx(self._repo) |
|
1744 | return workingctx(self._repo) | |
1814 |
|
1745 | |||
1815 | def data(self): |
|
1746 | def data(self): | |
1816 | return self._repo.wread(self._path) |
|
1747 | return self._repo.wread(self._path) | |
1817 | def renamed(self): |
|
1748 | def renamed(self): | |
1818 | rp = self._repo.dirstate.copied(self._path) |
|
1749 | rp = self._repo.dirstate.copied(self._path) | |
1819 | if not rp: |
|
1750 | if not rp: | |
1820 | return None |
|
1751 | return None | |
1821 | return rp, self._changectx._parents[0]._manifest.get(rp, nullid) |
|
1752 | return rp, self._changectx._parents[0]._manifest.get(rp, nullid) | |
1822 |
|
1753 | |||
1823 | def size(self): |
|
1754 | def size(self): | |
1824 | return self._repo.wvfs.lstat(self._path).st_size |
|
1755 | return self._repo.wvfs.lstat(self._path).st_size | |
1825 | def date(self): |
|
1756 | def date(self): | |
1826 | t, tz = self._changectx.date() |
|
1757 | t, tz = self._changectx.date() | |
1827 | try: |
|
1758 | try: | |
1828 | return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz) |
|
1759 | return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz) | |
1829 | except OSError as err: |
|
1760 | except OSError as err: | |
1830 | if err.errno != errno.ENOENT: |
|
1761 | if err.errno != errno.ENOENT: | |
1831 | raise |
|
1762 | raise | |
1832 | return (t, tz) |
|
1763 | return (t, tz) | |
1833 |
|
1764 | |||
1834 | def exists(self): |
|
1765 | def exists(self): | |
1835 | return self._repo.wvfs.exists(self._path) |
|
1766 | return self._repo.wvfs.exists(self._path) | |
1836 |
|
1767 | |||
1837 | def lexists(self): |
|
1768 | def lexists(self): | |
1838 | return self._repo.wvfs.lexists(self._path) |
|
1769 | return self._repo.wvfs.lexists(self._path) | |
1839 |
|
1770 | |||
1840 | def audit(self): |
|
1771 | def audit(self): | |
1841 | return self._repo.wvfs.audit(self._path) |
|
1772 | return self._repo.wvfs.audit(self._path) | |
1842 |
|
1773 | |||
1843 | def cmp(self, fctx): |
|
1774 | def cmp(self, fctx): | |
1844 | """compare with other file context |
|
1775 | """compare with other file context | |
1845 |
|
1776 | |||
1846 | returns True if different than fctx. |
|
1777 | returns True if different than fctx. | |
1847 | """ |
|
1778 | """ | |
1848 | # fctx should be a filectx (not a workingfilectx) |
|
1779 | # fctx should be a filectx (not a workingfilectx) | |
1849 | # invert comparison to reuse the same code path |
|
1780 | # invert comparison to reuse the same code path | |
1850 | return fctx.cmp(self) |
|
1781 | return fctx.cmp(self) | |
1851 |
|
1782 | |||
1852 | def remove(self, ignoremissing=False): |
|
1783 | def remove(self, ignoremissing=False): | |
1853 | """wraps unlink for a repo's working directory""" |
|
1784 | """wraps unlink for a repo's working directory""" | |
1854 | self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing) |
|
1785 | self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing) | |
1855 |
|
1786 | |||
1856 | def write(self, data, flags, backgroundclose=False, **kwargs): |
|
1787 | def write(self, data, flags, backgroundclose=False, **kwargs): | |
1857 | """wraps repo.wwrite""" |
|
1788 | """wraps repo.wwrite""" | |
1858 | self._repo.wwrite(self._path, data, flags, |
|
1789 | self._repo.wwrite(self._path, data, flags, | |
1859 | backgroundclose=backgroundclose, |
|
1790 | backgroundclose=backgroundclose, | |
1860 | **kwargs) |
|
1791 | **kwargs) | |
1861 |
|
1792 | |||
1862 | def markcopied(self, src): |
|
1793 | def markcopied(self, src): | |
1863 | """marks this file a copy of `src`""" |
|
1794 | """marks this file a copy of `src`""" | |
1864 | if self._repo.dirstate[self._path] in "nma": |
|
1795 | if self._repo.dirstate[self._path] in "nma": | |
1865 | self._repo.dirstate.copy(src, self._path) |
|
1796 | self._repo.dirstate.copy(src, self._path) | |
1866 |
|
1797 | |||
1867 | def clearunknown(self): |
|
1798 | def clearunknown(self): | |
1868 | """Removes conflicting items in the working directory so that |
|
1799 | """Removes conflicting items in the working directory so that | |
1869 | ``write()`` can be called successfully. |
|
1800 | ``write()`` can be called successfully. | |
1870 | """ |
|
1801 | """ | |
1871 | wvfs = self._repo.wvfs |
|
1802 | wvfs = self._repo.wvfs | |
1872 | f = self._path |
|
1803 | f = self._path | |
1873 | wvfs.audit(f) |
|
1804 | wvfs.audit(f) | |
1874 | if wvfs.isdir(f) and not wvfs.islink(f): |
|
1805 | if wvfs.isdir(f) and not wvfs.islink(f): | |
1875 | wvfs.rmtree(f, forcibly=True) |
|
1806 | wvfs.rmtree(f, forcibly=True) | |
1876 | for p in reversed(list(util.finddirs(f))): |
|
1807 | for p in reversed(list(util.finddirs(f))): | |
1877 | if wvfs.isfileorlink(p): |
|
1808 | if wvfs.isfileorlink(p): | |
1878 | wvfs.unlink(p) |
|
1809 | wvfs.unlink(p) | |
1879 | break |
|
1810 | break | |
1880 |
|
1811 | |||
1881 | def setflags(self, l, x): |
|
1812 | def setflags(self, l, x): | |
1882 | self._repo.wvfs.setflags(self._path, l, x) |
|
1813 | self._repo.wvfs.setflags(self._path, l, x) | |
1883 |
|
1814 | |||
1884 | class overlayworkingctx(committablectx): |
|
1815 | class overlayworkingctx(committablectx): | |
1885 | """Wraps another mutable context with a write-back cache that can be |
|
1816 | """Wraps another mutable context with a write-back cache that can be | |
1886 | converted into a commit context. |
|
1817 | converted into a commit context. | |
1887 |
|
1818 | |||
1888 | self._cache[path] maps to a dict with keys: { |
|
1819 | self._cache[path] maps to a dict with keys: { | |
1889 | 'exists': bool? |
|
1820 | 'exists': bool? | |
1890 | 'date': date? |
|
1821 | 'date': date? | |
1891 | 'data': str? |
|
1822 | 'data': str? | |
1892 | 'flags': str? |
|
1823 | 'flags': str? | |
1893 | 'copied': str? (path or None) |
|
1824 | 'copied': str? (path or None) | |
1894 | } |
|
1825 | } | |
1895 | If `exists` is True, `flags` must be non-None and `date` is non-None. If |
|
1826 | If `exists` is True, `flags` must be non-None and `date` is non-None. If | |
1896 | `exists` is False, the file was deleted. |
|
1827 | `exists` is False, the file was deleted. | |
1897 | """ |
|
1828 | """ | |
1898 |
|
1829 | |||
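As a concrete illustration of the cache shape described in the docstring (paths, contents and dates below are invented; 'date' follows the (unixtime, tzoffset) convention used elsewhere in this file):

_cache = {
    'a/foo.txt': {                 # written in this overlay
        'exists': True,
        'data': 'new contents\n',
        'date': (1500000000, 0),
        'flags': '',
        'copied': None,
    },
    'b/gone.txt': {                # removed in this overlay
        'exists': False,
        'data': None,
        'date': None,
        'flags': '',
        'copied': None,
    },
}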
1899 | def __init__(self, repo): |
|
1830 | def __init__(self, repo): | |
1900 | super(overlayworkingctx, self).__init__(repo) |
|
1831 | super(overlayworkingctx, self).__init__(repo) | |
1901 | self._repo = repo |
|
1832 | self._repo = repo | |
1902 | self.clean() |
|
1833 | self.clean() | |
1903 |
|
1834 | |||
1904 | def setbase(self, wrappedctx): |
|
1835 | def setbase(self, wrappedctx): | |
1905 | self._wrappedctx = wrappedctx |
|
1836 | self._wrappedctx = wrappedctx | |
1906 | self._parents = [wrappedctx] |
|
1837 | self._parents = [wrappedctx] | |
1907 | # Drop old manifest cache as it is now out of date. |
|
1838 | # Drop old manifest cache as it is now out of date. | |
1908 | # This is necessary when, e.g., rebasing several nodes with one |
|
1839 | # This is necessary when, e.g., rebasing several nodes with one | |
1909 | # ``overlayworkingctx`` (e.g. with --collapse). |
|
1840 | # ``overlayworkingctx`` (e.g. with --collapse). | |
1910 | util.clearcachedproperty(self, '_manifest') |
|
1841 | util.clearcachedproperty(self, '_manifest') | |
1911 |
|
1842 | |||
1912 | def data(self, path): |
|
1843 | def data(self, path): | |
1913 | if self.isdirty(path): |
|
1844 | if self.isdirty(path): | |
1914 | if self._cache[path]['exists']: |
|
1845 | if self._cache[path]['exists']: | |
1915 | if self._cache[path]['data']: |
|
1846 | if self._cache[path]['data']: | |
1916 | return self._cache[path]['data'] |
|
1847 | return self._cache[path]['data'] | |
1917 | else: |
|
1848 | else: | |
1918 | # Must fallback here, too, because we only set flags. |
|
1849 | # Must fallback here, too, because we only set flags. | |
1919 | return self._wrappedctx[path].data() |
|
1850 | return self._wrappedctx[path].data() | |
1920 | else: |
|
1851 | else: | |
1921 | raise error.ProgrammingError("No such file or directory: %s" % |
|
1852 | raise error.ProgrammingError("No such file or directory: %s" % | |
1922 | path) |
|
1853 | path) | |
1923 | else: |
|
1854 | else: | |
1924 | return self._wrappedctx[path].data() |
|
1855 | return self._wrappedctx[path].data() | |
1925 |
|
1856 | |||
1926 | @propertycache |
|
1857 | @propertycache | |
1927 | def _manifest(self): |
|
1858 | def _manifest(self): | |
1928 | parents = self.parents() |
|
1859 | parents = self.parents() | |
1929 | man = parents[0].manifest().copy() |
|
1860 | man = parents[0].manifest().copy() | |
1930 |
|
1861 | |||
1931 | flag = self._flagfunc |
|
1862 | flag = self._flagfunc | |
1932 | for path in self.added(): |
|
1863 | for path in self.added(): | |
1933 | man[path] = addednodeid |
|
1864 | man[path] = addednodeid | |
1934 | man.setflag(path, flag(path)) |
|
1865 | man.setflag(path, flag(path)) | |
1935 | for path in self.modified(): |
|
1866 | for path in self.modified(): | |
1936 | man[path] = modifiednodeid |
|
1867 | man[path] = modifiednodeid | |
1937 | man.setflag(path, flag(path)) |
|
1868 | man.setflag(path, flag(path)) | |
1938 | for path in self.removed(): |
|
1869 | for path in self.removed(): | |
1939 | del man[path] |
|
1870 | del man[path] | |
1940 | return man |
|
1871 | return man | |
1941 |
|
1872 | |||
1942 | @propertycache |
|
1873 | @propertycache | |
1943 | def _flagfunc(self): |
|
1874 | def _flagfunc(self): | |
1944 | def f(path): |
|
1875 | def f(path): | |
1945 | return self._cache[path]['flags'] |
|
1876 | return self._cache[path]['flags'] | |
1946 | return f |
|
1877 | return f | |
1947 |
|
1878 | |||
1948 | def files(self): |
|
1879 | def files(self): | |
1949 | return sorted(self.added() + self.modified() + self.removed()) |
|
1880 | return sorted(self.added() + self.modified() + self.removed()) | |
1950 |
|
1881 | |||
1951 | def modified(self): |
|
1882 | def modified(self): | |
1952 | return [f for f in self._cache.keys() if self._cache[f]['exists'] and |
|
1883 | return [f for f in self._cache.keys() if self._cache[f]['exists'] and | |
1953 | self._existsinparent(f)] |
|
1884 | self._existsinparent(f)] | |
1954 |
|
1885 | |||
1955 | def added(self): |
|
1886 | def added(self): | |
1956 | return [f for f in self._cache.keys() if self._cache[f]['exists'] and |
|
1887 | return [f for f in self._cache.keys() if self._cache[f]['exists'] and | |
1957 | not self._existsinparent(f)] |
|
1888 | not self._existsinparent(f)] | |
1958 |
|
1889 | |||
1959 | def removed(self): |
|
1890 | def removed(self): | |
1960 | return [f for f in self._cache.keys() if |
|
1891 | return [f for f in self._cache.keys() if | |
1961 | not self._cache[f]['exists'] and self._existsinparent(f)] |
|
1892 | not self._cache[f]['exists'] and self._existsinparent(f)] | |
1962 |
|
1893 | |||
1963 | def isinmemory(self): |
|
1894 | def isinmemory(self): | |
1964 | return True |
|
1895 | return True | |
1965 |
|
1896 | |||
1966 | def filedate(self, path): |
|
1897 | def filedate(self, path): | |
1967 | if self.isdirty(path): |
|
1898 | if self.isdirty(path): | |
1968 | return self._cache[path]['date'] |
|
1899 | return self._cache[path]['date'] | |
1969 | else: |
|
1900 | else: | |
1970 | return self._wrappedctx[path].date() |
|
1901 | return self._wrappedctx[path].date() | |
1971 |
|
1902 | |||
1972 | def markcopied(self, path, origin): |
|
1903 | def markcopied(self, path, origin): | |
1973 | if self.isdirty(path): |
|
1904 | if self.isdirty(path): | |
1974 | self._cache[path]['copied'] = origin |
|
1905 | self._cache[path]['copied'] = origin | |
1975 | else: |
|
1906 | else: | |
1976 | raise error.ProgrammingError('markcopied() called on clean context') |
|
1907 | raise error.ProgrammingError('markcopied() called on clean context') | |
1977 |
|
1908 | |||
1978 | def copydata(self, path): |
|
1909 | def copydata(self, path): | |
1979 | if self.isdirty(path): |
|
1910 | if self.isdirty(path): | |
1980 | return self._cache[path]['copied'] |
|
1911 | return self._cache[path]['copied'] | |
1981 | else: |
|
1912 | else: | |
1982 | raise error.ProgrammingError('copydata() called on clean context') |
|
1913 | raise error.ProgrammingError('copydata() called on clean context') | |
1983 |
|
1914 | |||
1984 | def flags(self, path): |
|
1915 | def flags(self, path): | |
1985 | if self.isdirty(path): |
|
1916 | if self.isdirty(path): | |
1986 | if self._cache[path]['exists']: |
|
1917 | if self._cache[path]['exists']: | |
1987 | return self._cache[path]['flags'] |
|
1918 | return self._cache[path]['flags'] | |
1988 | else: |
|
1919 | else: | |
1989 | raise error.ProgrammingError("No such file or directory: %s" % |
|
1920 | raise error.ProgrammingError("No such file or directory: %s" % | |
1990 | path) |
|
1921 | path) | |
1991 | else: |
|
1922 | else: | |
1992 | return self._wrappedctx[path].flags() |
|
1923 | return self._wrappedctx[path].flags() | |
1993 |
|
1924 | |||
1994 | def _existsinparent(self, path): |
|
1925 | def _existsinparent(self, path): | |
1995 | try: |
|
1926 | try: | |
1996 | # ``commitctx` raises a ``ManifestLookupError`` if a path does not |
|
1927 | # ``commitctx` raises a ``ManifestLookupError`` if a path does not | |
1997 | # exist, unlike ``workingctx``, which returns a ``workingfilectx`` |
|
1928 | # exist, unlike ``workingctx``, which returns a ``workingfilectx`` | |
1998 | # with an ``exists()`` function. |
|
1929 | # with an ``exists()`` function. | |
1999 | self._wrappedctx[path] |
|
1930 | self._wrappedctx[path] | |
2000 | return True |
|
1931 | return True | |
2001 | except error.ManifestLookupError: |
|
1932 | except error.ManifestLookupError: | |
2002 | return False |
|
1933 | return False | |
2003 |
|
1934 | |||
2004 | def _auditconflicts(self, path): |
|
1935 | def _auditconflicts(self, path): | |
2005 | """Replicates conflict checks done by wvfs.write(). |
|
1936 | """Replicates conflict checks done by wvfs.write(). | |
2006 |
|
1937 | |||
2007 | Since we never write to the filesystem and never call `applyupdates` in |
|
1938 | Since we never write to the filesystem and never call `applyupdates` in | |
2008 | IMM, we'll never check that a path is actually writable -- e.g., because |
|
1939 | IMM, we'll never check that a path is actually writable -- e.g., because | |
2009 | it adds `a/foo`, but `a` is actually a file in the other commit. |
|
1940 | it adds `a/foo`, but `a` is actually a file in the other commit. | |
2010 | """ |
|
1941 | """ | |
2011 | def fail(path, component): |
|
1942 | def fail(path, component): | |
2012 | # p1() is the base and we're receiving "writes" for p2()'s |
|
1943 | # p1() is the base and we're receiving "writes" for p2()'s | |
2013 | # files. |
|
1944 | # files. | |
2014 | if 'l' in self.p1()[component].flags(): |
|
1945 | if 'l' in self.p1()[component].flags(): | |
2015 | raise error.Abort("error: %s conflicts with symlink %s " |
|
1946 | raise error.Abort("error: %s conflicts with symlink %s " | |
2016 | "in %s." % (path, component, |
|
1947 | "in %s." % (path, component, | |
2017 | self.p1().rev())) |
|
1948 | self.p1().rev())) | |
2018 | else: |
|
1949 | else: | |
2019 | raise error.Abort("error: '%s' conflicts with file '%s' in " |
|
1950 | raise error.Abort("error: '%s' conflicts with file '%s' in " | |
2020 | "%s." % (path, component, |
|
1951 | "%s." % (path, component, | |
2021 | self.p1().rev())) |
|
1952 | self.p1().rev())) | |
2022 |
|
1953 | |||
2023 | # Test that each new directory to be created to write this path from p2 |
|
1954 | # Test that each new directory to be created to write this path from p2 | |
2024 | # is not a file in p1. |
|
1955 | # is not a file in p1. | |
2025 | components = path.split('/') |
|
1956 | components = path.split('/') | |
2026 | for i in xrange(len(components)): |
|
1957 | for i in xrange(len(components)): | |
2027 | component = "/".join(components[0:i]) |
|
1958 | component = "/".join(components[0:i]) | |
2028 | if component in self.p1(): |
|
1959 | if component in self.p1(): | |
2029 | fail(path, component) |
|
1960 | fail(path, component) | |
2030 |
|
1961 | |||
2031 | # Test the other direction -- that this path from p2 isn't a directory |
|
1962 | # Test the other direction -- that this path from p2 isn't a directory | |
2032 | # in p1 (test that p1 doesn't have any paths matching `path/*`). |
|
1963 | # in p1 (test that p1 doesn't have any paths matching `path/*`). | |
2033 | match = matchmod.match('/', '', [path + '/'], default=b'relpath') |
|
1964 | match = matchmod.match('/', '', [path + '/'], default=b'relpath') | |
2034 | matches = self.p1().manifest().matches(match) |
|
1965 | matches = self.p1().manifest().matches(match) | |
2035 | if len(matches) > 0: |
|
1966 | if len(matches) > 0: | |
2036 | if len(matches) == 1 and matches.keys()[0] == path: |
|
1967 | if len(matches) == 1 and matches.keys()[0] == path: | |
2037 | return |
|
1968 | return | |
2038 | raise error.Abort("error: file '%s' cannot be written because " |
|
1969 | raise error.Abort("error: file '%s' cannot be written because " | |
2039 | " '%s/' is a folder in %s (containing %d " |
|
1970 | " '%s/' is a folder in %s (containing %d " | |
2040 | "entries: %s)" |
|
1971 | "entries: %s)" | |
2041 | % (path, path, self.p1(), len(matches), |
|
1972 | % (path, path, self.p1(), len(matches), | |
2042 | ', '.join(matches.keys()))) |
|
1973 | ', '.join(matches.keys()))) | |
2043 |
|
1974 | |||
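The component loop above walks every proper prefix of the path being written; each prefix must not already be a file in p1. A standalone helper showing exactly which prefixes get checked:

def proper_prefixes(path):
    # For 'a/b/foo' the directories that must not be files are 'a' and 'a/b'.
    parts = path.split('/')
    return ['/'.join(parts[:i]) for i in range(1, len(parts))]

assert proper_prefixes('a/b/foo') == ['a', 'a/b']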
2044 | def write(self, path, data, flags='', **kwargs): |
|
1975 | def write(self, path, data, flags='', **kwargs): | |
2045 | if data is None: |
|
1976 | if data is None: | |
2046 | raise error.ProgrammingError("data must be non-None") |
|
1977 | raise error.ProgrammingError("data must be non-None") | |
2047 | self._auditconflicts(path) |
|
1978 | self._auditconflicts(path) | |
2048 | self._markdirty(path, exists=True, data=data, date=dateutil.makedate(), |
|
1979 | self._markdirty(path, exists=True, data=data, date=dateutil.makedate(), | |
2049 | flags=flags) |
|
1980 | flags=flags) | |
2050 |
|
1981 | |||
2051 | def setflags(self, path, l, x): |
|
1982 | def setflags(self, path, l, x): | |
2052 | self._markdirty(path, exists=True, date=dateutil.makedate(), |
|
1983 | self._markdirty(path, exists=True, date=dateutil.makedate(), | |
2053 | flags=(l and 'l' or '') + (x and 'x' or '')) |
|
1984 | flags=(l and 'l' or '') + (x and 'x' or '')) | |
2054 |
|
1985 | |||
2055 | def remove(self, path): |
|
1986 | def remove(self, path): | |
2056 | self._markdirty(path, exists=False) |
|
1987 | self._markdirty(path, exists=False) | |
2057 |
|
1988 | |||
2058 | def exists(self, path): |
|
1989 | def exists(self, path): | |
2059 | """exists behaves like `lexists`, but needs to follow symlinks and |
|
1990 | """exists behaves like `lexists`, but needs to follow symlinks and | |
2060 | return False if they are broken. |
|
1991 | return False if they are broken. | |
2061 | """ |
|
1992 | """ | |
2062 | if self.isdirty(path): |
|
1993 | if self.isdirty(path): | |
2063 | # If this path exists and is a symlink, "follow" it by calling |
|
1994 | # If this path exists and is a symlink, "follow" it by calling | |
2064 | # exists on the destination path. |
|
1995 | # exists on the destination path. | |
2065 | if (self._cache[path]['exists'] and |
|
1996 | if (self._cache[path]['exists'] and | |
2066 | 'l' in self._cache[path]['flags']): |
|
1997 | 'l' in self._cache[path]['flags']): | |
2067 | return self.exists(self._cache[path]['data'].strip()) |
|
1998 | return self.exists(self._cache[path]['data'].strip()) | |
2068 | else: |
|
1999 | else: | |
2069 | return self._cache[path]['exists'] |
|
2000 | return self._cache[path]['exists'] | |
2070 |
|
2001 | |||
2071 | return self._existsinparent(path) |
|
2002 | return self._existsinparent(path) | |
2072 |
|
2003 | |||
2073 | def lexists(self, path): |
|
2004 | def lexists(self, path): | |
2074 | """lexists returns True if the path exists""" |
|
2005 | """lexists returns True if the path exists""" | |
2075 | if self.isdirty(path): |
|
2006 | if self.isdirty(path): | |
2076 | return self._cache[path]['exists'] |
|
2007 | return self._cache[path]['exists'] | |
2077 |
|
2008 | |||
2078 | return self._existsinparent(path) |
|
2009 | return self._existsinparent(path) | |
2079 |
|
2010 | |||
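A toy model of the exists()/lexists() distinction documented above: lexists() only consults the entry itself, while exists() follows a symlink's recorded target (the entries dict is invented cache data):

entries = {'link':   {'exists': True,  'flags': 'l', 'data': 'target'},
           'target': {'exists': False, 'flags': '',  'data': ''}}

def lexists(path):
    return path in entries and entries[path]['exists']

def exists(path):
    e = entries.get(path)
    if e and e['exists'] and 'l' in e['flags']:
        return exists(e['data'].strip())   # follow the link target
    return bool(e) and e['exists']

assert lexists('link') and not exists('link')   # a broken symlink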
2080 | def size(self, path): |
|
2011 | def size(self, path): | |
2081 | if self.isdirty(path): |
|
2012 | if self.isdirty(path): | |
2082 | if self._cache[path]['exists']: |
|
2013 | if self._cache[path]['exists']: | |
2083 | return len(self._cache[path]['data']) |
|
2014 | return len(self._cache[path]['data']) | |
2084 | else: |
|
2015 | else: | |
2085 | raise error.ProgrammingError("No such file or directory: %s" % |
|
2016 | raise error.ProgrammingError("No such file or directory: %s" % | |
2086 | path) |
|
2017 | path) | |
2087 | return self._wrappedctx[path].size() |
|
2018 | return self._wrappedctx[path].size() | |
2088 |
|
2019 | |||
2089 | def tomemctx(self, text, branch=None, extra=None, date=None, parents=None, |
|
2020 | def tomemctx(self, text, branch=None, extra=None, date=None, parents=None, | |
2090 | user=None, editor=None): |
|
2021 | user=None, editor=None): | |
2091 | """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be |
|
2022 | """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be | |
2092 | committed. |
|
2023 | committed. | |
2093 |
|
2024 | |||
2094 | ``text`` is the commit message. |
|
2025 | ``text`` is the commit message. | |
2095 | ``parents`` (optional) are rev numbers. |
|
2026 | ``parents`` (optional) are rev numbers. | |
2096 | """ |
|
2027 | """ | |
2097 | # Default parents to the wrapped contexts' if not passed. |
|
2028 | # Default parents to the wrapped contexts' if not passed. | |
2098 | if parents is None: |
|
2029 | if parents is None: | |
2099 | parents = self._wrappedctx.parents() |
|
2030 | parents = self._wrappedctx.parents() | |
2100 | if len(parents) == 1: |
|
2031 | if len(parents) == 1: | |
2101 | parents = (parents[0], None) |
|
2032 | parents = (parents[0], None) | |
2102 |
|
2033 | |||
2103 | # ``parents`` is passed as rev numbers; convert to ``commitctxs``. |
|
2034 | # ``parents`` is passed as rev numbers; convert to ``commitctxs``. | |
2104 | if parents[1] is None: |
|
2035 | if parents[1] is None: | |
2105 | parents = (self._repo[parents[0]], None) |
|
2036 | parents = (self._repo[parents[0]], None) | |
2106 | else: |
|
2037 | else: | |
2107 | parents = (self._repo[parents[0]], self._repo[parents[1]]) |
|
2038 | parents = (self._repo[parents[0]], self._repo[parents[1]]) | |
2108 |
|
2039 | |||
2109 | files = self._cache.keys() |
|
2040 | files = self._cache.keys() | |
2110 | def getfile(repo, memctx, path): |
|
2041 | def getfile(repo, memctx, path): | |
2111 | if self._cache[path]['exists']: |
|
2042 | if self._cache[path]['exists']: | |
2112 | return memfilectx(repo, memctx, path, |
|
2043 | return memfilectx(repo, memctx, path, | |
2113 | self._cache[path]['data'], |
|
2044 | self._cache[path]['data'], | |
2114 | 'l' in self._cache[path]['flags'], |
|
2045 | 'l' in self._cache[path]['flags'], | |
2115 | 'x' in self._cache[path]['flags'], |
|
2046 | 'x' in self._cache[path]['flags'], | |
2116 | self._cache[path]['copied']) |
|
2047 | self._cache[path]['copied']) | |
2117 | else: |
|
2048 | else: | |
2118 | # Returning None, but including the path in `files`, is |
|
2049 | # Returning None, but including the path in `files`, is | |
2119 | # necessary for memctx to register a deletion. |
|
2050 | # necessary for memctx to register a deletion. | |
2120 | return None |
|
2051 | return None | |
2121 | return memctx(self._repo, parents, text, files, getfile, date=date, |
|
2052 | return memctx(self._repo, parents, text, files, getfile, date=date, | |
2122 | extra=extra, user=user, branch=branch, editor=editor) |
|
2053 | extra=extra, user=user, branch=branch, editor=editor) | |
2123 |
|
2054 | |||
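A hypothetical use of tomemctx(), following the docstring above (owctx, baserev and the message are made-up names; committing the result via localrepo.commitctx() is how the memctx docstring later in this file says in-memory commits are performed):

mctx = owctx.tomemctx('collapse several rebased revisions',
                      parents=(baserev, None),
                      user='someone@example.com')
newnode = repo.commitctx(mctx)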
2124 | def isdirty(self, path): |
|
2055 | def isdirty(self, path): | |
2125 | return path in self._cache |
|
2056 | return path in self._cache | |
2126 |
|
2057 | |||
2127 | def isempty(self): |
|
2058 | def isempty(self): | |
2128 | # We need to discard any keys that are actually clean before the empty |
|
2059 | # We need to discard any keys that are actually clean before the empty | |
2129 | # commit check. |
|
2060 | # commit check. | |
2130 | self._compact() |
|
2061 | self._compact() | |
2131 | return len(self._cache) == 0 |
|
2062 | return len(self._cache) == 0 | |
2132 |
|
2063 | |||
2133 | def clean(self): |
|
2064 | def clean(self): | |
2134 | self._cache = {} |
|
2065 | self._cache = {} | |
2135 |
|
2066 | |||
2136 | def _compact(self): |
|
2067 | def _compact(self): | |
2137 | """Removes keys from the cache that are actually clean, by comparing |
|
2068 | """Removes keys from the cache that are actually clean, by comparing | |
2138 | them with the underlying context. |
|
2069 | them with the underlying context. | |
2139 |
|
2070 | |||
2140 | This can occur during the merge process, e.g. by passing --tool :local |
|
2071 | This can occur during the merge process, e.g. by passing --tool :local | |
2141 | to resolve a conflict. |
|
2072 | to resolve a conflict. | |
2142 | """ |
|
2073 | """ | |
2143 | keys = [] |
|
2074 | keys = [] | |
2144 | for path in self._cache.keys(): |
|
2075 | for path in self._cache.keys(): | |
2145 | cache = self._cache[path] |
|
2076 | cache = self._cache[path] | |
2146 | try: |
|
2077 | try: | |
2147 | underlying = self._wrappedctx[path] |
|
2078 | underlying = self._wrappedctx[path] | |
2148 | if (underlying.data() == cache['data'] and |
|
2079 | if (underlying.data() == cache['data'] and | |
2149 | underlying.flags() == cache['flags']): |
|
2080 | underlying.flags() == cache['flags']): | |
2150 | keys.append(path) |
|
2081 | keys.append(path) | |
2151 | except error.ManifestLookupError: |
|
2082 | except error.ManifestLookupError: | |
2152 | # Path not in the underlying manifest (created). |
|
2083 | # Path not in the underlying manifest (created). | |
2153 | continue |
|
2084 | continue | |
2154 |
|
2085 | |||
2155 | for path in keys: |
|
2086 | for path in keys: | |
2156 | del self._cache[path] |
|
2087 | del self._cache[path] | |
2157 | return keys |
|
2088 | return keys | |
2158 |
|
2089 | |||
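The clean-key removal in _compact() amounts to comparing each cached entry with the wrapped context and dropping exact matches. A dict-based sketch (missing paths simply aren't in underlying, so there is no manifest lookup error to handle):

def compact(cache, underlying):
    clean = [p for p, e in cache.items()
             if p in underlying
             and underlying[p]['data'] == e['data']
             and underlying[p]['flags'] == e['flags']]
    for p in clean:
        del cache[p]       # entry is identical to the underlying context
    return clean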
2159 | def _markdirty(self, path, exists, data=None, date=None, flags=''): |
|
2090 | def _markdirty(self, path, exists, data=None, date=None, flags=''): | |
2160 | self._cache[path] = { |
|
2091 | self._cache[path] = { | |
2161 | 'exists': exists, |
|
2092 | 'exists': exists, | |
2162 | 'data': data, |
|
2093 | 'data': data, | |
2163 | 'date': date, |
|
2094 | 'date': date, | |
2164 | 'flags': flags, |
|
2095 | 'flags': flags, | |
2165 | 'copied': None, |
|
2096 | 'copied': None, | |
2166 | } |
|
2097 | } | |
2167 |
|
2098 | |||
2168 | def filectx(self, path, filelog=None): |
|
2099 | def filectx(self, path, filelog=None): | |
2169 | return overlayworkingfilectx(self._repo, path, parent=self, |
|
2100 | return overlayworkingfilectx(self._repo, path, parent=self, | |
2170 | filelog=filelog) |
|
2101 | filelog=filelog) | |
2171 |
|
2102 | |||
2172 | class overlayworkingfilectx(committablefilectx): |
|
2103 | class overlayworkingfilectx(committablefilectx): | |
2173 | """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory |
|
2104 | """Wrap a ``workingfilectx`` but intercepts all writes into an in-memory | |
2174 | cache, which can be flushed through later by calling ``flush()``.""" |
|
2105 | cache, which can be flushed through later by calling ``flush()``.""" | |
2175 |
|
2106 | |||
2176 | def __init__(self, repo, path, filelog=None, parent=None): |
|
2107 | def __init__(self, repo, path, filelog=None, parent=None): | |
2177 | super(overlayworkingfilectx, self).__init__(repo, path, filelog, |
|
2108 | super(overlayworkingfilectx, self).__init__(repo, path, filelog, | |
2178 | parent) |
|
2109 | parent) | |
2179 | self._repo = repo |
|
2110 | self._repo = repo | |
2180 | self._parent = parent |
|
2111 | self._parent = parent | |
2181 | self._path = path |
|
2112 | self._path = path | |
2182 |
|
2113 | |||
2183 | def cmp(self, fctx): |
|
2114 | def cmp(self, fctx): | |
2184 | return self.data() != fctx.data() |
|
2115 | return self.data() != fctx.data() | |
2185 |
|
2116 | |||
2186 | def changectx(self): |
|
2117 | def changectx(self): | |
2187 | return self._parent |
|
2118 | return self._parent | |
2188 |
|
2119 | |||
2189 | def data(self): |
|
2120 | def data(self): | |
2190 | return self._parent.data(self._path) |
|
2121 | return self._parent.data(self._path) | |
2191 |
|
2122 | |||
2192 | def date(self): |
|
2123 | def date(self): | |
2193 | return self._parent.filedate(self._path) |
|
2124 | return self._parent.filedate(self._path) | |
2194 |
|
2125 | |||
2195 | def exists(self): |
|
2126 | def exists(self): | |
2196 | return self.lexists() |
|
2127 | return self.lexists() | |
2197 |
|
2128 | |||
2198 | def lexists(self): |
|
2129 | def lexists(self): | |
2199 | return self._parent.exists(self._path) |
|
2130 | return self._parent.exists(self._path) | |
2200 |
|
2131 | |||
2201 | def renamed(self): |
|
2132 | def renamed(self): | |
2202 | path = self._parent.copydata(self._path) |
|
2133 | path = self._parent.copydata(self._path) | |
2203 | if not path: |
|
2134 | if not path: | |
2204 | return None |
|
2135 | return None | |
2205 | return path, self._changectx._parents[0]._manifest.get(path, nullid) |
|
2136 | return path, self._changectx._parents[0]._manifest.get(path, nullid) | |
2206 |
|
2137 | |||
2207 | def size(self): |
|
2138 | def size(self): | |
2208 | return self._parent.size(self._path) |
|
2139 | return self._parent.size(self._path) | |
2209 |
|
2140 | |||
2210 | def markcopied(self, origin): |
|
2141 | def markcopied(self, origin): | |
2211 | self._parent.markcopied(self._path, origin) |
|
2142 | self._parent.markcopied(self._path, origin) | |
2212 |
|
2143 | |||
2213 | def audit(self): |
|
2144 | def audit(self): | |
2214 | pass |
|
2145 | pass | |
2215 |
|
2146 | |||
2216 | def flags(self): |
|
2147 | def flags(self): | |
2217 | return self._parent.flags(self._path) |
|
2148 | return self._parent.flags(self._path) | |
2218 |
|
2149 | |||
2219 | def setflags(self, islink, isexec): |
|
2150 | def setflags(self, islink, isexec): | |
2220 | return self._parent.setflags(self._path, islink, isexec) |
|
2151 | return self._parent.setflags(self._path, islink, isexec) | |
2221 |
|
2152 | |||
2222 | def write(self, data, flags, backgroundclose=False, **kwargs): |
|
2153 | def write(self, data, flags, backgroundclose=False, **kwargs): | |
2223 | return self._parent.write(self._path, data, flags, **kwargs) |
|
2154 | return self._parent.write(self._path, data, flags, **kwargs) | |
2224 |
|
2155 | |||
2225 | def remove(self, ignoremissing=False): |
|
2156 | def remove(self, ignoremissing=False): | |
2226 | return self._parent.remove(self._path) |
|
2157 | return self._parent.remove(self._path) | |
2227 |
|
2158 | |||
2228 | def clearunknown(self): |
|
2159 | def clearunknown(self): | |
2229 | pass |
|
2160 | pass | |
2230 |
|
2161 | |||
2231 | class workingcommitctx(workingctx): |
|
2162 | class workingcommitctx(workingctx): | |
2232 | """A workingcommitctx object makes access to data related to |
|
2163 | """A workingcommitctx object makes access to data related to | |
2233 | the revision being committed convenient. |
|
2164 | the revision being committed convenient. | |
2234 |
|
2165 | |||
2235 | This hides changes in the working directory, if they aren't |
|
2166 | This hides changes in the working directory, if they aren't | |
2236 | committed in this context. |
|
2167 | committed in this context. | |
2237 | """ |
|
2168 | """ | |
2238 | def __init__(self, repo, changes, |
|
2169 | def __init__(self, repo, changes, | |
2239 | text="", user=None, date=None, extra=None): |
|
2170 | text="", user=None, date=None, extra=None): | |
2240 | super(workingctx, self).__init__(repo, text, user, date, extra, |
|
2171 | super(workingctx, self).__init__(repo, text, user, date, extra, | |
2241 | changes) |
|
2172 | changes) | |
2242 |
|
2173 | |||
2243 | def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): |
|
2174 | def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False): | |
2244 | """Return matched files only in ``self._status`` |
|
2175 | """Return matched files only in ``self._status`` | |
2245 |
|
2176 | |||
2246 | Uncommitted files appear "clean" via this context, even if |
|
2177 | Uncommitted files appear "clean" via this context, even if | |
2247 | they aren't actually so in the working directory. |
|
2178 | they aren't actually so in the working directory. | |
2248 | """ |
|
2179 | """ | |
2249 | if clean: |
|
2180 | if clean: | |
2250 | clean = [f for f in self._manifest if f not in self._changedset] |
|
2181 | clean = [f for f in self._manifest if f not in self._changedset] | |
2251 | else: |
|
2182 | else: | |
2252 | clean = [] |
|
2183 | clean = [] | |
2253 | return scmutil.status([f for f in self._status.modified if match(f)], |
|
2184 | return scmutil.status([f for f in self._status.modified if match(f)], | |
2254 | [f for f in self._status.added if match(f)], |
|
2185 | [f for f in self._status.added if match(f)], | |
2255 | [f for f in self._status.removed if match(f)], |
|
2186 | [f for f in self._status.removed if match(f)], | |
2256 | [], [], [], clean) |
|
2187 | [], [], [], clean) | |
2257 |
|
2188 | |||
2258 | @propertycache |
|
2189 | @propertycache | |
2259 | def _changedset(self): |
|
2190 | def _changedset(self): | |
2260 | """Return the set of files changed in this context |
|
2191 | """Return the set of files changed in this context | |
2261 | """ |
|
2192 | """ | |
2262 | changed = set(self._status.modified) |
|
2193 | changed = set(self._status.modified) | |
2263 | changed.update(self._status.added) |
|
2194 | changed.update(self._status.added) | |
2264 | changed.update(self._status.removed) |
|
2195 | changed.update(self._status.removed) | |
2265 | return changed |
|
2196 | return changed | |
2266 |
|
2197 | |||
2267 | def makecachingfilectxfn(func): |
|
2198 | def makecachingfilectxfn(func): | |
2268 | """Create a filectxfn that caches based on the path. |
|
2199 | """Create a filectxfn that caches based on the path. | |
2269 |
|
2200 | |||
2270 | We can't use util.cachefunc because it uses all arguments as the cache |
|
2201 | We can't use util.cachefunc because it uses all arguments as the cache | |
2271 | key and this creates a cycle since the arguments include the repo and |
|
2202 | key and this creates a cycle since the arguments include the repo and | |
2272 | memctx. |
|
2203 | memctx. | |
2273 | """ |
|
2204 | """ | |
2274 | cache = {} |
|
2205 | cache = {} | |
2275 |
|
2206 | |||
2276 | def getfilectx(repo, memctx, path): |
|
2207 | def getfilectx(repo, memctx, path): | |
2277 | if path not in cache: |
|
2208 | if path not in cache: | |
2278 | cache[path] = func(repo, memctx, path) |
|
2209 | cache[path] = func(repo, memctx, path) | |
2279 | return cache[path] |
|
2210 | return cache[path] | |
2280 |
|
2211 | |||
2281 | return getfilectx |
|
2212 | return getfilectx | |
2282 |
|
2213 | |||
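Keying the cache on the path alone, as makecachingfilectxfn does above, is what keeps the memo table from holding a reference back to the memctx (the cycle util.cachefunc would create). The essence of that choice, restated as a standalone decorator:

def cache_on_path_only(func):
    cache = {}
    def getfilectx(repo, memctx, path):
        if path not in cache:            # path is the only cache key
            cache[path] = func(repo, memctx, path)
        return cache[path]
    return getfilectx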
2283 | def memfilefromctx(ctx): |
|
2214 | def memfilefromctx(ctx): | |
2284 | """Given a context return a memfilectx for ctx[path] |
|
2215 | """Given a context return a memfilectx for ctx[path] | |
2285 |
|
2216 | |||
2286 | This is a convenience method for building a memctx based on another |
|
2217 | This is a convenience method for building a memctx based on another | |
2287 | context. |
|
2218 | context. | |
2288 | """ |
|
2219 | """ | |
2289 | def getfilectx(repo, memctx, path): |
|
2220 | def getfilectx(repo, memctx, path): | |
2290 | fctx = ctx[path] |
|
2221 | fctx = ctx[path] | |
2291 | # this is weird but apparently we only keep track of one parent |
|
2222 | # this is weird but apparently we only keep track of one parent | |
2292 | # (why not only store that instead of a tuple?) |
|
2223 | # (why not only store that instead of a tuple?) | |
2293 | copied = fctx.renamed() |
|
2224 | copied = fctx.renamed() | |
2294 | if copied: |
|
2225 | if copied: | |
2295 | copied = copied[0] |
|
2226 | copied = copied[0] | |
2296 | return memfilectx(repo, memctx, path, fctx.data(), |
|
2227 | return memfilectx(repo, memctx, path, fctx.data(), | |
2297 | islink=fctx.islink(), isexec=fctx.isexec(), |
|
2228 | islink=fctx.islink(), isexec=fctx.isexec(), | |
2298 | copied=copied) |
|
2229 | copied=copied) | |
2299 |
|
2230 | |||
2300 | return getfilectx |
|
2231 | return getfilectx | |
2301 |
|
2232 | |||
2302 | def memfilefrompatch(patchstore): |
|
2233 | def memfilefrompatch(patchstore): | |
2303 | """Given a patch (e.g. patchstore object) return a memfilectx |
|
2234 | """Given a patch (e.g. patchstore object) return a memfilectx | |
2304 |
|
2235 | |||
2305 | This is a convenience method for building a memctx based on a patchstore. |
|
2236 | This is a convenience method for building a memctx based on a patchstore. | |
2306 | """ |
|
2237 | """ | |
2307 | def getfilectx(repo, memctx, path): |
|
2238 | def getfilectx(repo, memctx, path): | |
2308 | data, mode, copied = patchstore.getfile(path) |
|
2239 | data, mode, copied = patchstore.getfile(path) | |
2309 | if data is None: |
|
2240 | if data is None: | |
2310 | return None |
|
2241 | return None | |
2311 | islink, isexec = mode |
|
2242 | islink, isexec = mode | |
2312 | return memfilectx(repo, memctx, path, data, islink=islink, |
|
2243 | return memfilectx(repo, memctx, path, data, islink=islink, | |
2313 | isexec=isexec, copied=copied) |
|
2244 | isexec=isexec, copied=copied) | |
2314 |
|
2245 | |||
2315 | return getfilectx |
|
2246 | return getfilectx | |
2316 |
|
2247 | |||
2317 | class memctx(committablectx): |
|
2248 | class memctx(committablectx): | |
2318 | """Use memctx to perform in-memory commits via localrepo.commitctx(). |
|
2249 | """Use memctx to perform in-memory commits via localrepo.commitctx(). | |
2319 |
|
2250 | |||
2320 | Revision information is supplied at initialization time, while |
|
2251 | Revision information is supplied at initialization time, while | |
2321 | related file data is made available through a callback |
|
2252 | related file data is made available through a callback | |
2322 | mechanism. 'repo' is the current localrepo, 'parents' is a |
|
2253 | mechanism. 'repo' is the current localrepo, 'parents' is a | |
2323 | sequence of two parent revision identifiers (pass None for every |
|
2254 | sequence of two parent revision identifiers (pass None for every | |
2324 | missing parent), 'text' is the commit message and 'files' lists |
|
2255 | missing parent), 'text' is the commit message and 'files' lists | |
2325 | names of files touched by the revision (normalized and relative to |
|
2256 | names of files touched by the revision (normalized and relative to | |
2326 | repository root). |
|
2257 | repository root). | |
2327 |
|
2258 | |||
2328 | filectxfn(repo, memctx, path) is a callable receiving the |
|
2259 | filectxfn(repo, memctx, path) is a callable receiving the | |
2329 | repository, the current memctx object and the normalized path of |
|
2260 | repository, the current memctx object and the normalized path of | |
2330 | requested file, relative to repository root. It is fired by the |
|
2261 | requested file, relative to repository root. It is fired by the | |
2331 | commit function for every file in 'files', but the call order is |
|
2262 | commit function for every file in 'files', but the call order is | |
2332 | undefined. If the file is available in the revision being |
|
2263 | undefined. If the file is available in the revision being | |
2333 | committed (updated or added), filectxfn returns a memfilectx |
|
2264 | committed (updated or added), filectxfn returns a memfilectx | |
2334 | object. If the file was removed, filectxfn returns None for recent |
|
2265 | object. If the file was removed, filectxfn returns None for recent | |
2335 | Mercurial. Moved files are represented by marking the source file |
|
2266 | Mercurial. Moved files are represented by marking the source file | |
2336 | removed and the new file added with copy information (see |
|
2267 | removed and the new file added with copy information (see | |
2337 | memfilectx). |
|
2268 | memfilectx). | |
2338 |
|
2269 | |||
2339 | user receives the committer name and defaults to current |
|
2270 | user receives the committer name and defaults to current | |
2340 | repository username, date is the commit date in any format |
|
2271 | repository username, date is the commit date in any format | |
2341 | supported by dateutil.parsedate() and defaults to current date, extra |
|
2272 | supported by dateutil.parsedate() and defaults to current date, extra | |
2342 | is a dictionary of metadata or is left empty. |
|
2273 | is a dictionary of metadata or is left empty. | |
2343 | """ |
|
2274 | """ | |
2344 |
|
2275 | |||
2345 | # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files. |
|
2276 | # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files. | |
2346 | # Extensions that need to retain compatibility across Mercurial 3.1 can use |
|
2277 | # Extensions that need to retain compatibility across Mercurial 3.1 can use | |
2347 | # this field to determine what to do in filectxfn. |
|
2278 | # this field to determine what to do in filectxfn. | |
2348 | _returnnoneformissingfiles = True |
|
2279 | _returnnoneformissingfiles = True | |
2349 |
|
2280 | |||
2350 | def __init__(self, repo, parents, text, files, filectxfn, user=None, |
|
2281 | def __init__(self, repo, parents, text, files, filectxfn, user=None, | |
2351 | date=None, extra=None, branch=None, editor=False): |
|
2282 | date=None, extra=None, branch=None, editor=False): | |
2352 | super(memctx, self).__init__(repo, text, user, date, extra) |
|
2283 | super(memctx, self).__init__(repo, text, user, date, extra) | |
2353 | self._rev = None |
|
2284 | self._rev = None | |
2354 | self._node = None |
|
2285 | self._node = None | |
2355 | parents = [(p or nullid) for p in parents] |
|
2286 | parents = [(p or nullid) for p in parents] | |
2356 | p1, p2 = parents |
|
2287 | p1, p2 = parents | |
2357 | self._parents = [changectx(self._repo, p) for p in (p1, p2)] |
|
2288 | self._parents = [changectx(self._repo, p) for p in (p1, p2)] | |
2358 | files = sorted(set(files)) |
|
2289 | files = sorted(set(files)) | |
2359 | self._files = files |
|
2290 | self._files = files | |
2360 | if branch is not None: |
|
2291 | if branch is not None: | |
2361 | self._extra['branch'] = encoding.fromlocal(branch) |
|
2292 | self._extra['branch'] = encoding.fromlocal(branch) | |
2362 | self.substate = {} |
|
2293 | self.substate = {} | |
2363 |
|
2294 | |||
2364 | if isinstance(filectxfn, patch.filestore): |
|
2295 | if isinstance(filectxfn, patch.filestore): | |
2365 | filectxfn = memfilefrompatch(filectxfn) |
|
2296 | filectxfn = memfilefrompatch(filectxfn) | |
2366 | elif not callable(filectxfn): |
|
2297 | elif not callable(filectxfn): | |
2367 | # if store is not callable, wrap it in a function |
|
2298 | # if store is not callable, wrap it in a function | |
2368 | filectxfn = memfilefromctx(filectxfn) |
|
2299 | filectxfn = memfilefromctx(filectxfn) | |
2369 |
|
2300 | |||
2370 | # memoizing increases performance for e.g. vcs convert scenarios. |
|
2301 | # memoizing increases performance for e.g. vcs convert scenarios. | |
2371 | self._filectxfn = makecachingfilectxfn(filectxfn) |
|
2302 | self._filectxfn = makecachingfilectxfn(filectxfn) | |
2372 |
|
2303 | |||
2373 | if editor: |
|
2304 | if editor: | |
2374 | self._text = editor(self._repo, self, []) |
|
2305 | self._text = editor(self._repo, self, []) | |
2375 | self._repo.savecommitmessage(self._text) |
|
2306 | self._repo.savecommitmessage(self._text) | |
2376 |
|
2307 | |||
2377 | def filectx(self, path, filelog=None): |
|
2308 | def filectx(self, path, filelog=None): | |
2378 | """get a file context from the working directory |
|
2309 | """get a file context from the working directory | |
2379 |
|
2310 | |||
2380 | Returns None if file doesn't exist and should be removed.""" |
|
2311 | Returns None if file doesn't exist and should be removed.""" | |
2381 | return self._filectxfn(self._repo, self, path) |
|
2312 | return self._filectxfn(self._repo, self, path) | |
2382 |
|
2313 | |||
2383 | def commit(self): |
|
2314 | def commit(self): | |
2384 | """commit context to the repo""" |
|
2315 | """commit context to the repo""" | |
2385 | return self._repo.commitctx(self) |
|
2316 | return self._repo.commitctx(self) | |
2386 |
|
2317 | |||
2387 | @propertycache |
|
2318 | @propertycache | |
2388 | def _manifest(self): |
|
2319 | def _manifest(self): | |
2389 | """generate a manifest based on the return values of filectxfn""" |
|
2320 | """generate a manifest based on the return values of filectxfn""" | |
2390 |
|
2321 | |||
2391 | # keep this simple for now; just worry about p1 |
|
2322 | # keep this simple for now; just worry about p1 | |
2392 | pctx = self._parents[0] |
|
2323 | pctx = self._parents[0] | |
2393 | man = pctx.manifest().copy() |
|
2324 | man = pctx.manifest().copy() | |
2394 |
|
2325 | |||
2395 | for f in self._status.modified: |
|
2326 | for f in self._status.modified: | |
2396 | p1node = nullid |
|
2327 | p1node = nullid | |
2397 | p2node = nullid |
|
2328 | p2node = nullid | |
2398 | p = pctx[f].parents() # if file isn't in pctx, check p2? |
|
2329 | p = pctx[f].parents() # if file isn't in pctx, check p2? | |
2399 | if len(p) > 0: |
|
2330 | if len(p) > 0: | |
2400 | p1node = p[0].filenode() |
|
2331 | p1node = p[0].filenode() | |
2401 | if len(p) > 1: |
|
2332 | if len(p) > 1: | |
2402 | p2node = p[1].filenode() |
|
2333 | p2node = p[1].filenode() | |
2403 | man[f] = revlog.hash(self[f].data(), p1node, p2node) |
|
2334 | man[f] = revlog.hash(self[f].data(), p1node, p2node) | |
2404 |
|
2335 | |||
2405 | for f in self._status.added: |
|
2336 | for f in self._status.added: | |
2406 | man[f] = revlog.hash(self[f].data(), nullid, nullid) |
|
2337 | man[f] = revlog.hash(self[f].data(), nullid, nullid) | |
2407 |
|
2338 | |||
2408 | for f in self._status.removed: |
|
2339 | for f in self._status.removed: | |
2409 | if f in man: |
|
2340 | if f in man: | |
2410 | del man[f] |
|
2341 | del man[f] | |
2411 |
|
2342 | |||
2412 | return man |
|
2343 | return man | |
2413 |
|
2344 | |||
2414 | @propertycache |
|
2345 | @propertycache | |
2415 | def _status(self): |
|
2346 | def _status(self): | |
2416 | """Calculate exact status from ``files`` specified at construction |
|
2347 | """Calculate exact status from ``files`` specified at construction | |
2417 | """ |
|
2348 | """ | |
2418 | man1 = self.p1().manifest() |
|
2349 | man1 = self.p1().manifest() | |
2419 | p2 = self._parents[1] |
|
2350 | p2 = self._parents[1] | |
2420 | # "1 < len(self._parents)" can't be used for checking |
|
2351 | # "1 < len(self._parents)" can't be used for checking | |
2421 | # existence of the 2nd parent, because "memctx._parents" is |
|
2352 | # existence of the 2nd parent, because "memctx._parents" is | |
2422 | # explicitly initialized as a list whose length is 2. |
|
2353 | # explicitly initialized as a list whose length is 2. | |
2423 | if p2.node() != nullid: |
|
2354 | if p2.node() != nullid: | |
2424 | man2 = p2.manifest() |
|
2355 | man2 = p2.manifest() | |
2425 | managing = lambda f: f in man1 or f in man2 |
|
2356 | managing = lambda f: f in man1 or f in man2 | |
2426 | else: |
|
2357 | else: | |
2427 | managing = lambda f: f in man1 |
|
2358 | managing = lambda f: f in man1 | |
2428 |
|
2359 | |||
2429 | modified, added, removed = [], [], [] |
|
2360 | modified, added, removed = [], [], [] | |
2430 | for f in self._files: |
|
2361 | for f in self._files: | |
2431 | if not managing(f): |
|
2362 | if not managing(f): | |
2432 | added.append(f) |
|
2363 | added.append(f) | |
2433 | elif self[f]: |
|
2364 | elif self[f]: | |
2434 | modified.append(f) |
|
2365 | modified.append(f) | |
2435 | else: |
|
2366 | else: | |
2436 | removed.append(f) |
|
2367 | removed.append(f) | |
2437 |
|
2368 | |||
2438 | return scmutil.status(modified, added, removed, [], [], [], []) |
|
2369 | return scmutil.status(modified, added, removed, [], [], [], []) | |
2439 |
|
2370 | |||
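
A hedged end-to-end sketch of the workflow the memctx docstring describes: a callback builds memfilectx objects (defined just below) and memctx commits them without touching the working directory. The file name, message, user and date are illustrative, and repo is assumed to be a localrepository obtained in an extension or debug shell:

def filectxfn(repo, memctx, path):
    if path == 'hello.txt':
        return memfilectx(repo, memctx, path, 'hello\n',
                          islink=False, isexec=False, copied=None)
    return None                     # any other requested path: removed

p1 = repo['.'].node()
mctx = memctx(repo, parents=(p1, None), text='in-memory commit',
              files=['hello.txt'], filectxfn=filectxfn,
              user='example <user@example.com>', date='0 0')
# newnode = mctx.commit()          # would run repo.commitctx(mctx)
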
2440 | class memfilectx(committablefilectx): |
|
2371 | class memfilectx(committablefilectx): | |
2441 | """memfilectx represents an in-memory file to commit. |
|
2372 | """memfilectx represents an in-memory file to commit. | |
2442 |
|
2373 | |||
2443 | See memctx and committablefilectx for more details. |
|
2374 | See memctx and committablefilectx for more details. | |
2444 | """ |
|
2375 | """ | |
2445 | def __init__(self, repo, changectx, path, data, islink=False, |
|
2376 | def __init__(self, repo, changectx, path, data, islink=False, | |
2446 | isexec=False, copied=None): |
|
2377 | isexec=False, copied=None): | |
2447 | """ |
|
2378 | """ | |
2448 | path is the normalized file path relative to repository root. |
|
2379 | path is the normalized file path relative to repository root. | |
2449 | data is the file content as a string. |
|
2380 | data is the file content as a string. | |
2450 | islink is True if the file is a symbolic link. |
|
2381 | islink is True if the file is a symbolic link. | |
2451 | isexec is True if the file is executable. |
|
2382 | isexec is True if the file is executable. | |
2452 | copied is the source file path if current file was copied in the |
|
2383 | copied is the source file path if current file was copied in the | |
2453 | revision being committed, or None.""" |
|
2384 | revision being committed, or None.""" | |
2454 | super(memfilectx, self).__init__(repo, path, None, changectx) |
|
2385 | super(memfilectx, self).__init__(repo, path, None, changectx) | |
2455 | self._data = data |
|
2386 | self._data = data | |
2456 | self._flags = (islink and 'l' or '') + (isexec and 'x' or '') |
|
2387 | self._flags = (islink and 'l' or '') + (isexec and 'x' or '') | |
2457 | self._copied = None |
|
2388 | self._copied = None | |
2458 | if copied: |
|
2389 | if copied: | |
2459 | self._copied = (copied, nullid) |
|
2390 | self._copied = (copied, nullid) | |
2460 |
|
2391 | |||
2461 | def data(self): |
|
2392 | def data(self): | |
2462 | return self._data |
|
2393 | return self._data | |
2463 |
|
2394 | |||
2464 | def remove(self, ignoremissing=False): |
|
2395 | def remove(self, ignoremissing=False): | |
2465 | """wraps unlink for a repo's working directory""" |
|
2396 | """wraps unlink for a repo's working directory""" | |
2466 | # need to figure out what to do here |
|
2397 | # need to figure out what to do here | |
2467 | del self._changectx[self._path] |
|
2398 | del self._changectx[self._path] | |
2468 |
|
2399 | |||
2469 | def write(self, data, flags, **kwargs): |
|
2400 | def write(self, data, flags, **kwargs): | |
2470 | """wraps repo.wwrite""" |
|
2401 | """wraps repo.wwrite""" | |
2471 | self._data = data |
|
2402 | self._data = data | |
2472 |
|
2403 | |||
2473 | class overlayfilectx(committablefilectx): |
|
2404 | class overlayfilectx(committablefilectx): | |
2474 | """Like memfilectx but takes an original filectx and optional parameters to |
|
2405 | """Like memfilectx but takes an original filectx and optional parameters to | |
2475 | override parts of it. This is useful when fctx.data() is expensive (i.e. |
|
2406 | override parts of it. This is useful when fctx.data() is expensive (i.e. | |
2476 | flag processor is expensive) and raw data, flags, and filenode could be |
|
2407 | flag processor is expensive) and raw data, flags, and filenode could be | |
2477 | reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file). |
|
2408 | reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file). | |
2478 | """ |
|
2409 | """ | |
2479 |
|
2410 | |||
2480 | def __init__(self, originalfctx, datafunc=None, path=None, flags=None, |
|
2411 | def __init__(self, originalfctx, datafunc=None, path=None, flags=None, | |
2481 | copied=None, ctx=None): |
|
2412 | copied=None, ctx=None): | |
2482 | """originalfctx: filecontext to duplicate |
|
2413 | """originalfctx: filecontext to duplicate | |
2483 |
|
2414 | |||
2484 | datafunc: None or a function to override data (file content). It is a |
|
2415 | datafunc: None or a function to override data (file content). It is a | |
2485 | function so that the data can be computed lazily. path, flags, copied, ctx: None or an overridden value |
|
2416 | function so that the data can be computed lazily. path, flags, copied, ctx: None or an overridden value | |
2486 |
|
2417 | |||
2487 | copied could be (path, rev), or False. copied could also be just path, |
|
2418 | copied could be (path, rev), or False. copied could also be just path, | |
2488 | and will be converted to (path, nullid). This simplifies some callers. |
|
2419 | and will be converted to (path, nullid). This simplifies some callers. | |
2489 | """ |
|
2420 | """ | |
2490 |
|
2421 | |||
2491 | if path is None: |
|
2422 | if path is None: | |
2492 | path = originalfctx.path() |
|
2423 | path = originalfctx.path() | |
2493 | if ctx is None: |
|
2424 | if ctx is None: | |
2494 | ctx = originalfctx.changectx() |
|
2425 | ctx = originalfctx.changectx() | |
2495 | ctxmatch = lambda: True |
|
2426 | ctxmatch = lambda: True | |
2496 | else: |
|
2427 | else: | |
2497 | ctxmatch = lambda: ctx == originalfctx.changectx() |
|
2428 | ctxmatch = lambda: ctx == originalfctx.changectx() | |
2498 |
|
2429 | |||
2499 | repo = originalfctx.repo() |
|
2430 | repo = originalfctx.repo() | |
2500 | flog = originalfctx.filelog() |
|
2431 | flog = originalfctx.filelog() | |
2501 | super(overlayfilectx, self).__init__(repo, path, flog, ctx) |
|
2432 | super(overlayfilectx, self).__init__(repo, path, flog, ctx) | |
2502 |
|
2433 | |||
2503 | if copied is None: |
|
2434 | if copied is None: | |
2504 | copied = originalfctx.renamed() |
|
2435 | copied = originalfctx.renamed() | |
2505 | copiedmatch = lambda: True |
|
2436 | copiedmatch = lambda: True | |
2506 | else: |
|
2437 | else: | |
2507 | if copied and not isinstance(copied, tuple): |
|
2438 | if copied and not isinstance(copied, tuple): | |
2508 | # repo._filecommit will recalculate copyrev so nullid is okay |
|
2439 | # repo._filecommit will recalculate copyrev so nullid is okay | |
2509 | copied = (copied, nullid) |
|
2440 | copied = (copied, nullid) | |
2510 | copiedmatch = lambda: copied == originalfctx.renamed() |
|
2441 | copiedmatch = lambda: copied == originalfctx.renamed() | |
2511 |
|
2442 | |||
2512 | # When data, copied (could affect data), ctx (could affect filelog |
|
2443 | # When data, copied (could affect data), ctx (could affect filelog | |
2513 | # parents) are not overridden, rawdata, rawflags, and filenode may be |
|
2444 | # parents) are not overridden, rawdata, rawflags, and filenode may be | |
2514 | # reused (repo._filecommit should double check filelog parents). |
|
2445 | # reused (repo._filecommit should double check filelog parents). | |
2515 | # |
|
2446 | # | |
2516 | # path, flags are not hashed in filelog (but in manifestlog) so they do |
|
2447 | # path, flags are not hashed in filelog (but in manifestlog) so they do | |
2517 | # not affect reusability here. |
|
2448 | # not affect reusability here. | |
2518 | # |
|
2449 | # | |
2519 | # If ctx or copied is overridden to the same value as originalfctx, |
|
2450 | # If ctx or copied is overridden to the same value as originalfctx, | |
2520 | # it is still considered reusable. originalfctx.renamed() may be a bit |
|
2451 | # it is still considered reusable. originalfctx.renamed() may be a bit | |
2521 | # expensive so it's not called unless necessary. Assuming datafunc is |
|
2452 | # expensive so it's not called unless necessary. Assuming datafunc is | |
2522 | # always expensive, do not call it for this "reusable" test. |
|
2453 | # always expensive, do not call it for this "reusable" test. | |
2523 | reusable = datafunc is None and ctxmatch() and copiedmatch() |
|
2454 | reusable = datafunc is None and ctxmatch() and copiedmatch() | |
2524 |
|
2455 | |||
2525 | if datafunc is None: |
|
2456 | if datafunc is None: | |
2526 | datafunc = originalfctx.data |
|
2457 | datafunc = originalfctx.data | |
2527 | if flags is None: |
|
2458 | if flags is None: | |
2528 | flags = originalfctx.flags() |
|
2459 | flags = originalfctx.flags() | |
2529 |
|
2460 | |||
2530 | self._datafunc = datafunc |
|
2461 | self._datafunc = datafunc | |
2531 | self._flags = flags |
|
2462 | self._flags = flags | |
2532 | self._copied = copied |
|
2463 | self._copied = copied | |
2533 |
|
2464 | |||
2534 | if reusable: |
|
2465 | if reusable: | |
2535 | # copy extra fields from originalfctx |
|
2466 | # copy extra fields from originalfctx | |
2536 | attrs = ['rawdata', 'rawflags', '_filenode', '_filerev'] |
|
2467 | attrs = ['rawdata', 'rawflags', '_filenode', '_filerev'] | |
2537 | for attr_ in attrs: |
|
2468 | for attr_ in attrs: | |
2538 | if util.safehasattr(originalfctx, attr_): |
|
2469 | if util.safehasattr(originalfctx, attr_): | |
2539 | setattr(self, attr_, getattr(originalfctx, attr_)) |
|
2470 | setattr(self, attr_, getattr(originalfctx, attr_)) | |
2540 |
|
2471 | |||
2541 | def data(self): |
|
2472 | def data(self): | |
2542 | return self._datafunc() |
|
2473 | return self._datafunc() | |
2543 |
|
2474 | |||
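
A small sketch of the intended use: keep an existing filectx but override only what changes (here the exec flag), so its raw data and filenode can be reused on commit. The file path is illustrative:

fctx = repo['.']['script.sh']                 # assumed existing filectx
newfctx = overlayfilectx(fctx, flags='x')     # same content, now executable
# newfctx.data() delegates to fctx.data(); because datafunc/ctx/copied are
# untouched, the "reusable" fast path above copies rawdata, rawflags and
# _filenode over from fctx.
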
2544 | class metadataonlyctx(committablectx): |
|
2475 | class metadataonlyctx(committablectx): | |
2545 | """Like memctx but it reuses the manifest of a different commit. |
|
2476 | """Like memctx but it reuses the manifest of a different commit. | |
2546 | Intended to be used by lightweight operations that are creating |
|
2477 | Intended to be used by lightweight operations that are creating | |
2547 | metadata-only changes. |
|
2478 | metadata-only changes. | |
2548 |
|
2479 | |||
2549 | Revision information is supplied at initialization time. 'repo' is the |
|
2480 | Revision information is supplied at initialization time. 'repo' is the | |
2550 | current localrepo, 'ctx' is the original revision whose manifest we're reusing, |
|
2481 | current localrepo, 'ctx' is the original revision whose manifest we're reusing, | |
2551 | 'parents' is a sequence of two parent revision identifiers (pass None for |
|
2482 | 'parents' is a sequence of two parent revision identifiers (pass None for | |
2552 | every missing parent), 'text' is the commit message. |
|
2483 | every missing parent), 'text' is the commit message. | |
2553 |
|
2484 | |||
2554 | user receives the committer name and defaults to current repository |
|
2485 | user receives the committer name and defaults to current repository | |
2555 | username, date is the commit date in any format supported by |
|
2486 | username, date is the commit date in any format supported by | |
2556 | dateutil.parsedate() and defaults to current date, extra is a dictionary of |
|
2487 | dateutil.parsedate() and defaults to current date, extra is a dictionary of | |
2557 | metadata or is left empty. |
|
2488 | metadata or is left empty. | |
2558 | """ |
|
2489 | """ | |
2559 | def __new__(cls, repo, originalctx, *args, **kwargs): |
|
2490 | def __new__(cls, repo, originalctx, *args, **kwargs): | |
2560 | return super(metadataonlyctx, cls).__new__(cls, repo) |
|
2491 | return super(metadataonlyctx, cls).__new__(cls, repo) | |
2561 |
|
2492 | |||
2562 | def __init__(self, repo, originalctx, parents=None, text=None, user=None, |
|
2493 | def __init__(self, repo, originalctx, parents=None, text=None, user=None, | |
2563 | date=None, extra=None, editor=False): |
|
2494 | date=None, extra=None, editor=False): | |
2564 | if text is None: |
|
2495 | if text is None: | |
2565 | text = originalctx.description() |
|
2496 | text = originalctx.description() | |
2566 | super(metadataonlyctx, self).__init__(repo, text, user, date, extra) |
|
2497 | super(metadataonlyctx, self).__init__(repo, text, user, date, extra) | |
2567 | self._rev = None |
|
2498 | self._rev = None | |
2568 | self._node = None |
|
2499 | self._node = None | |
2569 | self._originalctx = originalctx |
|
2500 | self._originalctx = originalctx | |
2570 | self._manifestnode = originalctx.manifestnode() |
|
2501 | self._manifestnode = originalctx.manifestnode() | |
2571 | if parents is None: |
|
2502 | if parents is None: | |
2572 | parents = originalctx.parents() |
|
2503 | parents = originalctx.parents() | |
2573 | else: |
|
2504 | else: | |
2574 | parents = [repo[p] for p in parents if p is not None] |
|
2505 | parents = [repo[p] for p in parents if p is not None] | |
2575 | parents = parents[:] |
|
2506 | parents = parents[:] | |
2576 | while len(parents) < 2: |
|
2507 | while len(parents) < 2: | |
2577 | parents.append(repo[nullid]) |
|
2508 | parents.append(repo[nullid]) | |
2578 | p1, p2 = self._parents = parents |
|
2509 | p1, p2 = self._parents = parents | |
2579 |
|
2510 | |||
2580 | # sanity check to ensure that the reused manifest parents are |
|
2511 | # sanity check to ensure that the reused manifest parents are | |
2581 | # manifests of our commit parents |
|
2512 | # manifests of our commit parents | |
2582 | mp1, mp2 = self.manifestctx().parents |
|
2513 | mp1, mp2 = self.manifestctx().parents | |
2583 | if p1 != nullid and p1.manifestnode() != mp1: |
|
2514 | if p1 != nullid and p1.manifestnode() != mp1: | |
2584 | raise RuntimeError('can\'t reuse the manifest: ' |
|
2515 | raise RuntimeError('can\'t reuse the manifest: ' | |
2585 | 'its p1 doesn\'t match the new ctx p1') |
|
2516 | 'its p1 doesn\'t match the new ctx p1') | |
2586 | if p2 != nullid and p2.manifestnode() != mp2: |
|
2517 | if p2 != nullid and p2.manifestnode() != mp2: | |
2587 | raise RuntimeError('can\'t reuse the manifest: ' |
|
2518 | raise RuntimeError('can\'t reuse the manifest: ' | |
2588 | 'its p2 doesn\'t match the new ctx p2') |
|
2519 | 'its p2 doesn\'t match the new ctx p2') | |
2589 |
|
2520 | |||
2590 | self._files = originalctx.files() |
|
2521 | self._files = originalctx.files() | |
2591 | self.substate = {} |
|
2522 | self.substate = {} | |
2592 |
|
2523 | |||
2593 | if editor: |
|
2524 | if editor: | |
2594 | self._text = editor(self._repo, self, []) |
|
2525 | self._text = editor(self._repo, self, []) | |
2595 | self._repo.savecommitmessage(self._text) |
|
2526 | self._repo.savecommitmessage(self._text) | |
2596 |
|
2527 | |||
2597 | def manifestnode(self): |
|
2528 | def manifestnode(self): | |
2598 | return self._manifestnode |
|
2529 | return self._manifestnode | |
2599 |
|
2530 | |||
2600 | @property |
|
2531 | @property | |
2601 | def _manifestctx(self): |
|
2532 | def _manifestctx(self): | |
2602 | return self._repo.manifestlog[self._manifestnode] |
|
2533 | return self._repo.manifestlog[self._manifestnode] | |
2603 |
|
2534 | |||
2604 | def filectx(self, path, filelog=None): |
|
2535 | def filectx(self, path, filelog=None): | |
2605 | return self._originalctx.filectx(path, filelog=filelog) |
|
2536 | return self._originalctx.filectx(path, filelog=filelog) | |
2606 |
|
2537 | |||
2607 | def commit(self): |
|
2538 | def commit(self): | |
2608 | """commit context to the repo""" |
|
2539 | """commit context to the repo""" | |
2609 | return self._repo.commitctx(self) |
|
2540 | return self._repo.commitctx(self) | |
2610 |
|
2541 | |||
2611 | @property |
|
2542 | @property | |
2612 | def _manifest(self): |
|
2543 | def _manifest(self): | |
2613 | return self._originalctx.manifest() |
|
2544 | return self._originalctx.manifest() | |
2614 |
|
2545 | |||
2615 | @propertycache |
|
2546 | @propertycache | |
2616 | def _status(self): |
|
2547 | def _status(self): | |
2617 | """Calculate exact status from ``files`` specified in the ``origctx`` |
|
2548 | """Calculate exact status from ``files`` specified in the ``origctx`` | |
2618 | and parents manifests. |
|
2549 | and parents manifests. | |
2619 | """ |
|
2550 | """ | |
2620 | man1 = self.p1().manifest() |
|
2551 | man1 = self.p1().manifest() | |
2621 | p2 = self._parents[1] |
|
2552 | p2 = self._parents[1] | |
2622 | # "1 < len(self._parents)" can't be used for checking |
|
2553 | # "1 < len(self._parents)" can't be used for checking | |
2623 | # existence of the 2nd parent, because "metadataonlyctx._parents" is |
|
2554 | # existence of the 2nd parent, because "metadataonlyctx._parents" is | |
2624 | # explicitly initialized as a list whose length is 2. |
|
2555 | # explicitly initialized as a list whose length is 2. | |
2625 | if p2.node() != nullid: |
|
2556 | if p2.node() != nullid: | |
2626 | man2 = p2.manifest() |
|
2557 | man2 = p2.manifest() | |
2627 | managing = lambda f: f in man1 or f in man2 |
|
2558 | managing = lambda f: f in man1 or f in man2 | |
2628 | else: |
|
2559 | else: | |
2629 | managing = lambda f: f in man1 |
|
2560 | managing = lambda f: f in man1 | |
2630 |
|
2561 | |||
2631 | modified, added, removed = [], [], [] |
|
2562 | modified, added, removed = [], [], [] | |
2632 | for f in self._files: |
|
2563 | for f in self._files: | |
2633 | if not managing(f): |
|
2564 | if not managing(f): | |
2634 | added.append(f) |
|
2565 | added.append(f) | |
2635 | elif f in self: |
|
2566 | elif f in self: | |
2636 | modified.append(f) |
|
2567 | modified.append(f) | |
2637 | else: |
|
2568 | else: | |
2638 | removed.append(f) |
|
2569 | removed.append(f) | |
2639 |
|
2570 | |||
2640 | return scmutil.status(modified, added, removed, [], [], [], []) |
|
2571 | return scmutil.status(modified, added, removed, [], [], [], []) | |
2641 |
|
2572 | |||
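
A hedged sketch of the kind of lightweight rewrite this class targets: same manifest and files, new metadata (here only the commit message). The call to commit() is left commented out since it would actually create a changeset:

ctx = repo['.']                               # assumed: revision to rewrite
new = metadataonlyctx(repo, ctx, text='reworded message',
                      user=ctx.user(), date=ctx.date(), extra=ctx.extra())
# newnode = new.commit()           # no file data is re-read or re-hashed
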
2642 | class arbitraryfilectx(object): |
|
2573 | class arbitraryfilectx(object): | |
2643 | """Allows you to use filectx-like functions on a file in an arbitrary |
|
2574 | """Allows you to use filectx-like functions on a file in an arbitrary | |
2644 | location on disk, possibly not in the working directory. |
|
2575 | location on disk, possibly not in the working directory. | |
2645 | """ |
|
2576 | """ | |
2646 | def __init__(self, path, repo=None): |
|
2577 | def __init__(self, path, repo=None): | |
2647 | # Repo is optional because contrib/simplemerge uses this class. |
|
2578 | # Repo is optional because contrib/simplemerge uses this class. | |
2648 | self._repo = repo |
|
2579 | self._repo = repo | |
2649 | self._path = path |
|
2580 | self._path = path | |
2650 |
|
2581 | |||
2651 | def cmp(self, fctx): |
|
2582 | def cmp(self, fctx): | |
2652 | # filecmp follows symlinks whereas `cmp` should not, so skip the fast |
|
2583 | # filecmp follows symlinks whereas `cmp` should not, so skip the fast | |
2653 | # path if either side is a symlink. |
|
2584 | # path if either side is a symlink. | |
2654 | symlinks = ('l' in self.flags() or 'l' in fctx.flags()) |
|
2585 | symlinks = ('l' in self.flags() or 'l' in fctx.flags()) | |
2655 | if not symlinks and isinstance(fctx, workingfilectx) and self._repo: |
|
2586 | if not symlinks and isinstance(fctx, workingfilectx) and self._repo: | |
2656 | # Add a fast-path for merge if both sides are disk-backed. |
|
2587 | # Add a fast-path for merge if both sides are disk-backed. | |
2657 | # Note that filecmp uses the opposite return values (True if same) |
|
2588 | # Note that filecmp uses the opposite return values (True if same) | |
2658 | # from our cmp functions (True if different). |
|
2589 | # from our cmp functions (True if different). | |
2659 | return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path())) |
|
2590 | return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path())) | |
2660 | return self.data() != fctx.data() |
|
2591 | return self.data() != fctx.data() | |
2661 |
|
2592 | |||
2662 | def path(self): |
|
2593 | def path(self): | |
2663 | return self._path |
|
2594 | return self._path | |
2664 |
|
2595 | |||
2665 | def flags(self): |
|
2596 | def flags(self): | |
2666 | return '' |
|
2597 | return '' | |
2667 |
|
2598 | |||
2668 | def data(self): |
|
2599 | def data(self): | |
2669 | return util.readfile(self._path) |
|
2600 | return util.readfile(self._path) | |
2670 |
|
2601 | |||
2671 | def decodeddata(self): |
|
2602 | def decodeddata(self): | |
2672 | with open(self._path, "rb") as f: |
|
2603 | with open(self._path, "rb") as f: | |
2673 | return f.read() |
|
2604 | return f.read() | |
2674 |
|
2605 | |||
2675 | def remove(self): |
|
2606 | def remove(self): | |
2676 | util.unlink(self._path) |
|
2607 | util.unlink(self._path) | |
2677 |
|
2608 | |||
2678 | def write(self, data, flags, **kwargs): |
|
2609 | def write(self, data, flags, **kwargs): | |
2679 | assert not flags |
|
2610 | assert not flags | |
2680 | with open(self._path, "w") as f: |
|
2611 | with open(self._path, "w") as f: | |
2681 | f.write(data) |
|
2612 | f.write(data) |
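
To round off context.py, a sketch of how arbitraryfilectx is used (contrib/simplemerge being the in-tree caller): wrap a plain on-disk path so merge code can treat it like a filectx. The temporary path and working-copy file are assumptions for illustration:

afctx = arbitraryfilectx('/tmp/candidate.txt', repo=repo)   # assumed path
data = afctx.data()                     # raw bytes via util.readfile
wfctx = repo[None]['candidate.txt']     # assumed working-directory filectx
if afctx.cmp(wfctx):                    # True means the contents differ
    pass                                # e.g. report or resolve a conflict
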
@@ -1,628 +1,703 b'' | |||||
1 | # dagop.py - graph ancestry and topology algorithm for revset |
|
1 | # dagop.py - graph ancestry and topology algorithm for revset | |
2 | # |
|
2 | # | |
3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2010 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import heapq |
|
10 | import heapq | |
11 |
|
11 | |||
12 | from .thirdparty import ( |
|
12 | from .thirdparty import ( | |
13 | attr, |
|
13 | attr, | |
14 | ) |
|
14 | ) | |
15 | from . import ( |
|
15 | from . import ( | |
16 | error, |
|
16 | error, | |
17 | mdiff, |
|
17 | mdiff, | |
18 | node, |
|
18 | node, | |
19 | patch, |
|
19 | patch, | |
|
20 | pycompat, | |||
20 | smartset, |
|
21 | smartset, | |
21 | ) |
|
22 | ) | |
22 |
|
23 | |||
23 | baseset = smartset.baseset |
|
24 | baseset = smartset.baseset | |
24 | generatorset = smartset.generatorset |
|
25 | generatorset = smartset.generatorset | |
25 |
|
26 | |||
26 | # possible maximum depth between null and wdir() |
|
27 | # possible maximum depth between null and wdir() | |
27 | _maxlogdepth = 0x80000000 |
|
28 | _maxlogdepth = 0x80000000 | |
28 |
|
29 | |||
29 | def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse): |
|
30 | def _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse): | |
30 | """Walk DAG using 'pfunc' from the given 'revs' nodes |
|
31 | """Walk DAG using 'pfunc' from the given 'revs' nodes | |
31 |
|
32 | |||
32 | 'pfunc(rev)' should return the parent/child revisions of the given 'rev' |
|
33 | 'pfunc(rev)' should return the parent/child revisions of the given 'rev' | |
33 | if 'reverse' is True/False respectively. |
|
34 | if 'reverse' is True/False respectively. | |
34 |
|
35 | |||
35 | Scan ends at the stopdepth (exclusive) if specified. Revisions found |
|
36 | Scan ends at the stopdepth (exclusive) if specified. Revisions found | |
36 | earlier than the startdepth are omitted. |
|
37 | earlier than the startdepth are omitted. | |
37 | """ |
|
38 | """ | |
38 | if startdepth is None: |
|
39 | if startdepth is None: | |
39 | startdepth = 0 |
|
40 | startdepth = 0 | |
40 | if stopdepth is None: |
|
41 | if stopdepth is None: | |
41 | stopdepth = _maxlogdepth |
|
42 | stopdepth = _maxlogdepth | |
42 | if stopdepth == 0: |
|
43 | if stopdepth == 0: | |
43 | return |
|
44 | return | |
44 | if stopdepth < 0: |
|
45 | if stopdepth < 0: | |
45 | raise error.ProgrammingError('negative stopdepth') |
|
46 | raise error.ProgrammingError('negative stopdepth') | |
46 | if reverse: |
|
47 | if reverse: | |
47 | heapsign = -1 # max heap |
|
48 | heapsign = -1 # max heap | |
48 | else: |
|
49 | else: | |
49 | heapsign = +1 # min heap |
|
50 | heapsign = +1 # min heap | |
50 |
|
51 | |||
51 | # load input revs lazily to heap so earlier revisions can be yielded |
|
52 | # load input revs lazily to heap so earlier revisions can be yielded | |
52 | # without fully computing the input revs |
|
53 | # without fully computing the input revs | |
53 | revs.sort(reverse) |
|
54 | revs.sort(reverse) | |
54 | irevs = iter(revs) |
|
55 | irevs = iter(revs) | |
55 | pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first) |
|
56 | pendingheap = [] # [(heapsign * rev, depth), ...] (i.e. lower depth first) | |
56 |
|
57 | |||
57 | inputrev = next(irevs, None) |
|
58 | inputrev = next(irevs, None) | |
58 | if inputrev is not None: |
|
59 | if inputrev is not None: | |
59 | heapq.heappush(pendingheap, (heapsign * inputrev, 0)) |
|
60 | heapq.heappush(pendingheap, (heapsign * inputrev, 0)) | |
60 |
|
61 | |||
61 | lastrev = None |
|
62 | lastrev = None | |
62 | while pendingheap: |
|
63 | while pendingheap: | |
63 | currev, curdepth = heapq.heappop(pendingheap) |
|
64 | currev, curdepth = heapq.heappop(pendingheap) | |
64 | currev = heapsign * currev |
|
65 | currev = heapsign * currev | |
65 | if currev == inputrev: |
|
66 | if currev == inputrev: | |
66 | inputrev = next(irevs, None) |
|
67 | inputrev = next(irevs, None) | |
67 | if inputrev is not None: |
|
68 | if inputrev is not None: | |
68 | heapq.heappush(pendingheap, (heapsign * inputrev, 0)) |
|
69 | heapq.heappush(pendingheap, (heapsign * inputrev, 0)) | |
69 | # rescan parents until curdepth >= startdepth because queued entries |
|
70 | # rescan parents until curdepth >= startdepth because queued entries | |
70 | # of the same revision are iterated from the lowest depth |
|
71 | # of the same revision are iterated from the lowest depth | |
71 | foundnew = (currev != lastrev) |
|
72 | foundnew = (currev != lastrev) | |
72 | if foundnew and curdepth >= startdepth: |
|
73 | if foundnew and curdepth >= startdepth: | |
73 | lastrev = currev |
|
74 | lastrev = currev | |
74 | yield currev |
|
75 | yield currev | |
75 | pdepth = curdepth + 1 |
|
76 | pdepth = curdepth + 1 | |
76 | if foundnew and pdepth < stopdepth: |
|
77 | if foundnew and pdepth < stopdepth: | |
77 | for prev in pfunc(currev): |
|
78 | for prev in pfunc(currev): | |
78 | if prev != node.nullrev: |
|
79 | if prev != node.nullrev: | |
79 | heapq.heappush(pendingheap, (heapsign * prev, pdepth)) |
|
80 | heapq.heappush(pendingheap, (heapsign * prev, pdepth)) | |
80 |
|
81 | |||
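
To make the depth window concrete, a toy sketch that drives _walkrevtree directly with a dict-based DAG instead of a changelog; this is illustration only, since the function is private and normally reached through revancestors/revdescendants:

from mercurial import smartset

toyparents = {0: [], 1: [0], 2: [1], 3: [1], 4: [2, 3]}   # rev -> parents
pfunc = lambda rev: toyparents[rev]
revs = smartset.baseset([4])
# ancestors of rev 4 at depth 1 or 2 (stopdepth is exclusive), newest first
print(list(_walkrevtree(pfunc, revs, startdepth=1, stopdepth=3,
                        reverse=True)))
# -> [3, 2, 1]
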
81 | def filectxancestors(fctxs, followfirst=False): |
|
82 | def filectxancestors(fctxs, followfirst=False): | |
82 | """Like filectx.ancestors(), but can walk from multiple files/revisions, |
|
83 | """Like filectx.ancestors(), but can walk from multiple files/revisions, | |
83 | and includes the given fctxs themselves |
|
84 | and includes the given fctxs themselves | |
84 |
|
85 | |||
85 | Yields (rev, {fctx, ...}) pairs in descending order. |
|
86 | Yields (rev, {fctx, ...}) pairs in descending order. | |
86 | """ |
|
87 | """ | |
87 | visit = {} |
|
88 | visit = {} | |
88 | visitheap = [] |
|
89 | visitheap = [] | |
89 | def addvisit(fctx): |
|
90 | def addvisit(fctx): | |
90 | rev = fctx.rev() |
|
91 | rev = fctx.rev() | |
91 | if rev not in visit: |
|
92 | if rev not in visit: | |
92 | visit[rev] = set() |
|
93 | visit[rev] = set() | |
93 | heapq.heappush(visitheap, -rev) # max heap |
|
94 | heapq.heappush(visitheap, -rev) # max heap | |
94 | visit[rev].add(fctx) |
|
95 | visit[rev].add(fctx) | |
95 |
|
96 | |||
96 | if followfirst: |
|
97 | if followfirst: | |
97 | cut = 1 |
|
98 | cut = 1 | |
98 | else: |
|
99 | else: | |
99 | cut = None |
|
100 | cut = None | |
100 |
|
101 | |||
101 | for c in fctxs: |
|
102 | for c in fctxs: | |
102 | addvisit(c) |
|
103 | addvisit(c) | |
103 | while visit: |
|
104 | while visit: | |
104 | currev = -heapq.heappop(visitheap) |
|
105 | currev = -heapq.heappop(visitheap) | |
105 | curfctxs = visit.pop(currev) |
|
106 | curfctxs = visit.pop(currev) | |
106 | yield currev, curfctxs |
|
107 | yield currev, curfctxs | |
107 | for c in curfctxs: |
|
108 | for c in curfctxs: | |
108 | for parent in c.parents()[:cut]: |
|
109 | for parent in c.parents()[:cut]: | |
109 | addvisit(parent) |
|
110 | addvisit(parent) | |
110 | assert not visitheap |
|
111 | assert not visitheap | |
111 |
|
112 | |||
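
A consumption sketch: the generator merges the ancestry of several file contexts and groups the filectxs by revision, newest first. repo and the file names are assumed:

fctxs = [repo['.']['a.txt'], repo['.']['b.txt']]    # assumed files
for rev, ctxs in filectxancestors(fctxs):
    paths = sorted(c.path() for c in ctxs)
    repo.ui.write('%d: %s\n' % (rev, ', '.join(paths)))
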
112 | def filerevancestors(fctxs, followfirst=False): |
|
113 | def filerevancestors(fctxs, followfirst=False): | |
113 | """Like filectx.ancestors(), but can walk from multiple files/revisions, |
|
114 | """Like filectx.ancestors(), but can walk from multiple files/revisions, | |
114 | and includes the given fctxs themselves |
|
115 | and includes the given fctxs themselves | |
115 |
|
116 | |||
116 | Returns a smartset. |
|
117 | Returns a smartset. | |
117 | """ |
|
118 | """ | |
118 | gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst)) |
|
119 | gen = (rev for rev, _cs in filectxancestors(fctxs, followfirst)) | |
119 | return generatorset(gen, iterasc=False) |
|
120 | return generatorset(gen, iterasc=False) | |
120 |
|
121 | |||
121 | def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc): |
|
122 | def _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, cutfunc): | |
122 | if followfirst: |
|
123 | if followfirst: | |
123 | cut = 1 |
|
124 | cut = 1 | |
124 | else: |
|
125 | else: | |
125 | cut = None |
|
126 | cut = None | |
126 | cl = repo.changelog |
|
127 | cl = repo.changelog | |
127 | def plainpfunc(rev): |
|
128 | def plainpfunc(rev): | |
128 | try: |
|
129 | try: | |
129 | return cl.parentrevs(rev)[:cut] |
|
130 | return cl.parentrevs(rev)[:cut] | |
130 | except error.WdirUnsupported: |
|
131 | except error.WdirUnsupported: | |
131 | return (pctx.rev() for pctx in repo[rev].parents()[:cut]) |
|
132 | return (pctx.rev() for pctx in repo[rev].parents()[:cut]) | |
132 | if cutfunc is None: |
|
133 | if cutfunc is None: | |
133 | pfunc = plainpfunc |
|
134 | pfunc = plainpfunc | |
134 | else: |
|
135 | else: | |
135 | pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)] |
|
136 | pfunc = lambda rev: [r for r in plainpfunc(rev) if not cutfunc(r)] | |
136 | revs = revs.filter(lambda rev: not cutfunc(rev)) |
|
137 | revs = revs.filter(lambda rev: not cutfunc(rev)) | |
137 | return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True) |
|
138 | return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=True) | |
138 |
|
139 | |||
139 | def revancestors(repo, revs, followfirst=False, startdepth=None, |
|
140 | def revancestors(repo, revs, followfirst=False, startdepth=None, | |
140 | stopdepth=None, cutfunc=None): |
|
141 | stopdepth=None, cutfunc=None): | |
141 | """Like revlog.ancestors(), but supports additional options, includes |
|
142 | """Like revlog.ancestors(), but supports additional options, includes | |
142 | the given revs themselves, and returns a smartset |
|
143 | the given revs themselves, and returns a smartset | |
143 |
|
144 | |||
144 | Scan ends at the stopdepth (exclusive) if specified. Revisions found |
|
145 | Scan ends at the stopdepth (exclusive) if specified. Revisions found | |
145 | earlier than the startdepth are omitted. |
|
146 | earlier than the startdepth are omitted. | |
146 |
|
147 | |||
147 | If cutfunc is provided, it will be used to cut the traversal of the DAG. |
|
148 | If cutfunc is provided, it will be used to cut the traversal of the DAG. | |
148 | When cutfunc(X) returns True, the DAG traversal stops - revision X and |
|
149 | When cutfunc(X) returns True, the DAG traversal stops - revision X and | |
149 | X's ancestors in the traversal path will be skipped. This could be an |
|
150 | X's ancestors in the traversal path will be skipped. This could be an | |
150 | optimization sometimes. |
|
151 | optimization sometimes. | |
151 |
|
152 | |||
152 | Note: if Y is an ancestor of X, cutfunc(X) returning True does not |
|
153 | Note: if Y is an ancestor of X, cutfunc(X) returning True does not | |
153 | necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to |
|
154 | necessarily mean Y will also be cut. Usually cutfunc(Y) also wants to | |
154 | return True in this case. For example, |
|
155 | return True in this case. For example, | |
155 |
|
156 | |||
156 | D # revancestors(repo, D, cutfunc=lambda rev: rev == B) |
|
157 | D # revancestors(repo, D, cutfunc=lambda rev: rev == B) | |
157 | |\ # will include "A", because the path D -> C -> A was not cut. |
|
158 | |\ # will include "A", because the path D -> C -> A was not cut. | |
158 | B C # If "B" gets cut, "A" might want to be cut too. |
|
159 | B C # If "B" gets cut, "A" might want to be cut too. | |
159 | |/ |
|
160 | |/ | |
160 | A |
|
161 | A | |
161 | """ |
|
162 | """ | |
162 | gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, |
|
163 | gen = _genrevancestors(repo, revs, followfirst, startdepth, stopdepth, | |
163 | cutfunc) |
|
164 | cutfunc) | |
164 | return generatorset(gen, iterasc=False) |
|
165 | return generatorset(gen, iterasc=False) | |
165 |
|
166 | |||
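
A hedged sketch matching the diagram above: walk ancestors of the working-directory parent but cut each branch of the walk at the first public revision, the sort of pruning cutfunc enables. The phase lookup uses the internal phase cache and is shown for illustration only:

from mercurial import phases

ispublic = lambda rev: repo._phasecache.phase(repo, rev) == phases.public
draftish = revancestors(repo, repo.revs('.'), cutfunc=ispublic)
# draftish is a lazy smartset; iterating it walks ancestors of '.' but
# stops each branch of the walk at the first public revision it meets.
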
166 | def _genrevdescendants(repo, revs, followfirst): |
|
167 | def _genrevdescendants(repo, revs, followfirst): | |
167 | if followfirst: |
|
168 | if followfirst: | |
168 | cut = 1 |
|
169 | cut = 1 | |
169 | else: |
|
170 | else: | |
170 | cut = None |
|
171 | cut = None | |
171 |
|
172 | |||
172 | cl = repo.changelog |
|
173 | cl = repo.changelog | |
173 | first = revs.min() |
|
174 | first = revs.min() | |
174 | nullrev = node.nullrev |
|
175 | nullrev = node.nullrev | |
175 | if first == nullrev: |
|
176 | if first == nullrev: | |
176 | # Are there nodes with a null first parent and a non-null |
|
177 | # Are there nodes with a null first parent and a non-null | |
177 | # second one? Maybe. Do we care? Probably not. |
|
178 | # second one? Maybe. Do we care? Probably not. | |
178 | yield first |
|
179 | yield first | |
179 | for i in cl: |
|
180 | for i in cl: | |
180 | yield i |
|
181 | yield i | |
181 | else: |
|
182 | else: | |
182 | seen = set(revs) |
|
183 | seen = set(revs) | |
183 | for i in cl.revs(first): |
|
184 | for i in cl.revs(first): | |
184 | if i in seen: |
|
185 | if i in seen: | |
185 | yield i |
|
186 | yield i | |
186 | continue |
|
187 | continue | |
187 | for x in cl.parentrevs(i)[:cut]: |
|
188 | for x in cl.parentrevs(i)[:cut]: | |
188 | if x != nullrev and x in seen: |
|
189 | if x != nullrev and x in seen: | |
189 | seen.add(i) |
|
190 | seen.add(i) | |
190 | yield i |
|
191 | yield i | |
191 | break |
|
192 | break | |
192 |
|
193 | |||
193 | def _builddescendantsmap(repo, startrev, followfirst): |
|
194 | def _builddescendantsmap(repo, startrev, followfirst): | |
194 | """Build map of 'rev -> child revs', offset from startrev""" |
|
195 | """Build map of 'rev -> child revs', offset from startrev""" | |
195 | cl = repo.changelog |
|
196 | cl = repo.changelog | |
196 | nullrev = node.nullrev |
|
197 | nullrev = node.nullrev | |
197 | descmap = [[] for _rev in xrange(startrev, len(cl))] |
|
198 | descmap = [[] for _rev in xrange(startrev, len(cl))] | |
198 | for currev in cl.revs(startrev + 1): |
|
199 | for currev in cl.revs(startrev + 1): | |
199 | p1rev, p2rev = cl.parentrevs(currev) |
|
200 | p1rev, p2rev = cl.parentrevs(currev) | |
200 | if p1rev >= startrev: |
|
201 | if p1rev >= startrev: | |
201 | descmap[p1rev - startrev].append(currev) |
|
202 | descmap[p1rev - startrev].append(currev) | |
202 | if not followfirst and p2rev != nullrev and p2rev >= startrev: |
|
203 | if not followfirst and p2rev != nullrev and p2rev >= startrev: | |
203 | descmap[p2rev - startrev].append(currev) |
|
204 | descmap[p2rev - startrev].append(currev) | |
204 | return descmap |
|
205 | return descmap | |
205 |
|
206 | |||
206 | def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth): |
|
207 | def _genrevdescendantsofdepth(repo, revs, followfirst, startdepth, stopdepth): | |
207 | startrev = revs.min() |
|
208 | startrev = revs.min() | |
208 | descmap = _builddescendantsmap(repo, startrev, followfirst) |
|
209 | descmap = _builddescendantsmap(repo, startrev, followfirst) | |
209 | def pfunc(rev): |
|
210 | def pfunc(rev): | |
210 | return descmap[rev - startrev] |
|
211 | return descmap[rev - startrev] | |
211 | return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False) |
|
212 | return _walkrevtree(pfunc, revs, startdepth, stopdepth, reverse=False) | |
212 |
|
213 | |||
213 | def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None): |
|
214 | def revdescendants(repo, revs, followfirst, startdepth=None, stopdepth=None): | |
214 | """Like revlog.descendants() but supports additional options, includes |
|
215 | """Like revlog.descendants() but supports additional options, includes | |
215 | the given revs themselves, and returns a smartset |
|
216 | the given revs themselves, and returns a smartset | |
216 |
|
217 | |||
217 | Scan ends at the stopdepth (exclusive) if specified. Revisions found |
|
218 | Scan ends at the stopdepth (exclusive) if specified. Revisions found | |
218 | earlier than the startdepth are omitted. |
|
219 | earlier than the startdepth are omitted. | |
219 | """ |
|
220 | """ | |
220 | if startdepth is None and stopdepth is None: |
|
221 | if startdepth is None and stopdepth is None: | |
221 | gen = _genrevdescendants(repo, revs, followfirst) |
|
222 | gen = _genrevdescendants(repo, revs, followfirst) | |
222 | else: |
|
223 | else: | |
223 | gen = _genrevdescendantsofdepth(repo, revs, followfirst, |
|
224 | gen = _genrevdescendantsofdepth(repo, revs, followfirst, | |
224 | startdepth, stopdepth) |
|
225 | startdepth, stopdepth) | |
225 | return generatorset(gen, iterasc=True) |
|
226 | return generatorset(gen, iterasc=True) | |
226 |
|
227 | |||
227 | def _reachablerootspure(repo, minroot, roots, heads, includepath): |
|
228 | def _reachablerootspure(repo, minroot, roots, heads, includepath): | |
228 | """return (heads(::<roots> and ::<heads>)) |
|
229 | """return (heads(::<roots> and ::<heads>)) | |
229 |
|
230 | |||
230 | If includepath is True, return (<roots>::<heads>).""" |
|
231 | If includepath is True, return (<roots>::<heads>).""" | |
231 | if not roots: |
|
232 | if not roots: | |
232 | return [] |
|
233 | return [] | |
233 | parentrevs = repo.changelog.parentrevs |
|
234 | parentrevs = repo.changelog.parentrevs | |
234 | roots = set(roots) |
|
235 | roots = set(roots) | |
235 | visit = list(heads) |
|
236 | visit = list(heads) | |
236 | reachable = set() |
|
237 | reachable = set() | |
237 | seen = {} |
|
238 | seen = {} | |
238 | # prefetch all the things! (because python is slow) |
|
239 | # prefetch all the things! (because python is slow) | |
239 | reached = reachable.add |
|
240 | reached = reachable.add | |
240 | dovisit = visit.append |
|
241 | dovisit = visit.append | |
241 | nextvisit = visit.pop |
|
242 | nextvisit = visit.pop | |
242 | # open-code the post-order traversal due to the tiny size of |
|
243 | # open-code the post-order traversal due to the tiny size of | |
243 | # sys.getrecursionlimit() |
|
244 | # sys.getrecursionlimit() | |
244 | while visit: |
|
245 | while visit: | |
245 | rev = nextvisit() |
|
246 | rev = nextvisit() | |
246 | if rev in roots: |
|
247 | if rev in roots: | |
247 | reached(rev) |
|
248 | reached(rev) | |
248 | if not includepath: |
|
249 | if not includepath: | |
249 | continue |
|
250 | continue | |
250 | parents = parentrevs(rev) |
|
251 | parents = parentrevs(rev) | |
251 | seen[rev] = parents |
|
252 | seen[rev] = parents | |
252 | for parent in parents: |
|
253 | for parent in parents: | |
253 | if parent >= minroot and parent not in seen: |
|
254 | if parent >= minroot and parent not in seen: | |
254 | dovisit(parent) |
|
255 | dovisit(parent) | |
255 | if not reachable: |
|
256 | if not reachable: | |
256 | return baseset() |
|
257 | return baseset() | |
257 | if not includepath: |
|
258 | if not includepath: | |
258 | return reachable |
|
259 | return reachable | |
259 | for rev in sorted(seen): |
|
260 | for rev in sorted(seen): | |
260 | for parent in seen[rev]: |
|
261 | for parent in seen[rev]: | |
261 | if parent in reachable: |
|
262 | if parent in reachable: | |
262 | reached(rev) |
|
263 | reached(rev) | |
263 | return reachable |
|
264 | return reachable | |
264 |
|
265 | |||
265 | def reachableroots(repo, roots, heads, includepath=False): |
|
266 | def reachableroots(repo, roots, heads, includepath=False): | |
266 | """return (heads(::<roots> and ::<heads>)) |
|
267 | """return (heads(::<roots> and ::<heads>)) | |
267 |
|
268 | |||
268 | If includepath is True, return (<roots>::<heads>).""" |
|
269 | If includepath is True, return (<roots>::<heads>).""" | |
269 | if not roots: |
|
270 | if not roots: | |
270 | return baseset() |
|
271 | return baseset() | |
271 | minroot = roots.min() |
|
272 | minroot = roots.min() | |
272 | roots = list(roots) |
|
273 | roots = list(roots) | |
273 | heads = list(heads) |
|
274 | heads = list(heads) | |
274 | try: |
|
275 | try: | |
275 | revs = repo.changelog.reachableroots(minroot, heads, roots, includepath) |
|
276 | revs = repo.changelog.reachableroots(minroot, heads, roots, includepath) | |
276 | except AttributeError: |
|
277 | except AttributeError: | |
277 | revs = _reachablerootspure(repo, minroot, roots, heads, includepath) |
|
278 | revs = _reachablerootspure(repo, minroot, roots, heads, includepath) | |
278 | revs = baseset(revs) |
|
279 | revs = baseset(revs) | |
279 | revs.sort() |
|
280 | revs.sort() | |
280 | return revs |
|
281 | return revs | |
281 |
|
282 | |||
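
A sketch of what reachableroots computes when includepath is True: the full <roots>::<heads> DAG range. The revision numbers are illustrative and the inputs must be smartsets (roots.min() is called on them):

roots = repo.revs('2')                    # illustrative revision
heads = repo.revs('head()')
dagrange = reachableroots(repo, roots, heads, includepath=True)
# roughly the same set as repo.revs('2::head()')
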
282 | def _changesrange(fctx1, fctx2, linerange2, diffopts): |
|
283 | def _changesrange(fctx1, fctx2, linerange2, diffopts): | |
283 | """Return `(diffinrange, linerange1)` where `diffinrange` is True |
|
284 | """Return `(diffinrange, linerange1)` where `diffinrange` is True | |
284 | if diff from fctx2 to fctx1 has changes in linerange2 and |
|
285 | if diff from fctx2 to fctx1 has changes in linerange2 and | |
285 | `linerange1` is the new line range for fctx1. |
|
286 | `linerange1` is the new line range for fctx1. | |
286 | """ |
|
287 | """ | |
287 | blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts) |
|
288 | blocks = mdiff.allblocks(fctx1.data(), fctx2.data(), diffopts) | |
288 | filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2) |
|
289 | filteredblocks, linerange1 = mdiff.blocksinrange(blocks, linerange2) | |
289 | diffinrange = any(stype == '!' for _, stype in filteredblocks) |
|
290 | diffinrange = any(stype == '!' for _, stype in filteredblocks) | |
290 | return diffinrange, linerange1 |
|
291 | return diffinrange, linerange1 | |
291 |
|
292 | |||
292 | def blockancestors(fctx, fromline, toline, followfirst=False): |
|
293 | def blockancestors(fctx, fromline, toline, followfirst=False): | |
293 | """Yield ancestors of `fctx` with respect to the block of lines within |
|
294 | """Yield ancestors of `fctx` with respect to the block of lines within | |
294 | `fromline`-`toline` range. |
|
295 | `fromline`-`toline` range. | |
295 | """ |
|
296 | """ | |
296 | diffopts = patch.diffopts(fctx._repo.ui) |
|
297 | diffopts = patch.diffopts(fctx._repo.ui) | |
297 | fctx = fctx.introfilectx() |
|
298 | fctx = fctx.introfilectx() | |
298 | visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))} |
|
299 | visit = {(fctx.linkrev(), fctx.filenode()): (fctx, (fromline, toline))} | |
299 | while visit: |
|
300 | while visit: | |
300 | c, linerange2 = visit.pop(max(visit)) |
|
301 | c, linerange2 = visit.pop(max(visit)) | |
301 | pl = c.parents() |
|
302 | pl = c.parents() | |
302 | if followfirst: |
|
303 | if followfirst: | |
303 | pl = pl[:1] |
|
304 | pl = pl[:1] | |
304 | if not pl: |
|
305 | if not pl: | |
305 | # The block originates from the initial revision. |
|
306 | # The block originates from the initial revision. | |
306 | yield c, linerange2 |
|
307 | yield c, linerange2 | |
307 | continue |
|
308 | continue | |
308 | inrange = False |
|
309 | inrange = False | |
309 | for p in pl: |
|
310 | for p in pl: | |
310 | inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts) |
|
311 | inrangep, linerange1 = _changesrange(p, c, linerange2, diffopts) | |
311 | inrange = inrange or inrangep |
|
312 | inrange = inrange or inrangep | |
312 | if linerange1[0] == linerange1[1]: |
|
313 | if linerange1[0] == linerange1[1]: | |
313 | # Parent's linerange is empty, meaning that the block got |
|
314 | # Parent's linerange is empty, meaning that the block got | |
314 | # introduced in this revision; no need to go further in this |
|
315 | # introduced in this revision; no need to go further in this | |
315 | # branch. |
|
316 | # branch. | |
316 | continue |
|
317 | continue | |
317 | # Set _descendantrev with 'c' (a known descendant) so that, when |
|
318 | # Set _descendantrev with 'c' (a known descendant) so that, when | |
318 | # _adjustlinkrev is called for 'p', it receives this descendant |
|
319 | # _adjustlinkrev is called for 'p', it receives this descendant | |
319 | # (as srcrev) instead of the possibly topmost introrev. |
|
320 | # (as srcrev) instead of the possibly topmost introrev. | |
320 | p._descendantrev = c.rev() |
|
321 | p._descendantrev = c.rev() | |
321 | visit[p.linkrev(), p.filenode()] = p, linerange1 |
|
322 | visit[p.linkrev(), p.filenode()] = p, linerange1 | |
322 | if inrange: |
|
323 | if inrange: | |
323 | yield c, linerange2 |
|
324 | yield c, linerange2 | |
324 |
|
325 | |||
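
A followlines-style usage sketch: walk the history of a block of lines in one file, newest first. The line range follows the internal convention of the callers (as produced for mdiff.blocksinrange), and the file name is assumed:

fctx = repo['.']['foo.py']                # assumed file
for c, (fromline, toline) in blockancestors(fctx, 10, 20):
    repo.ui.write('%s: lines %d-%d\n' % (c.rev(), fromline, toline))
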
325 | def blockdescendants(fctx, fromline, toline): |
|
326 | def blockdescendants(fctx, fromline, toline): | |
326 | """Yield descendants of `fctx` with respect to the block of lines within |
|
327 | """Yield descendants of `fctx` with respect to the block of lines within | |
327 | `fromline`-`toline` range. |
|
328 | `fromline`-`toline` range. | |
328 | """ |
|
329 | """ | |
329 | # First possibly yield 'fctx' if it has changes in range with respect to |
|
330 | # First possibly yield 'fctx' if it has changes in range with respect to | |
330 | # its parents. |
|
331 | # its parents. | |
331 | try: |
|
332 | try: | |
332 | c, linerange1 = next(blockancestors(fctx, fromline, toline)) |
|
333 | c, linerange1 = next(blockancestors(fctx, fromline, toline)) | |
333 | except StopIteration: |
|
334 | except StopIteration: | |
334 | pass |
|
335 | pass | |
335 | else: |
|
336 | else: | |
336 | if c == fctx: |
|
337 | if c == fctx: | |
337 | yield c, linerange1 |
|
338 | yield c, linerange1 | |
338 |
|
339 | |||
339 | diffopts = patch.diffopts(fctx._repo.ui) |
|
340 | diffopts = patch.diffopts(fctx._repo.ui) | |
340 | fl = fctx.filelog() |
|
341 | fl = fctx.filelog() | |
341 | seen = {fctx.filerev(): (fctx, (fromline, toline))} |
|
342 | seen = {fctx.filerev(): (fctx, (fromline, toline))} | |
342 | for i in fl.descendants([fctx.filerev()]): |
|
343 | for i in fl.descendants([fctx.filerev()]): | |
343 | c = fctx.filectx(i) |
|
344 | c = fctx.filectx(i) | |
344 | inrange = False |
|
345 | inrange = False | |
345 | for x in fl.parentrevs(i): |
|
346 | for x in fl.parentrevs(i): | |
346 | try: |
|
347 | try: | |
347 | p, linerange2 = seen[x] |
|
348 | p, linerange2 = seen[x] | |
348 | except KeyError: |
|
349 | except KeyError: | |
349 | # nullrev or other branch |
|
350 | # nullrev or other branch | |
350 | continue |
|
351 | continue | |
351 | inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts) |
|
352 | inrangep, linerange1 = _changesrange(c, p, linerange2, diffopts) | |
352 | inrange = inrange or inrangep |
|
353 | inrange = inrange or inrangep | |
353 | # If revision 'i' has been seen (it's a merge) and the line range |
|
354 | # If revision 'i' has been seen (it's a merge) and the line range | |
354 | # previously computed differs from the one we just got, we take the |
|
355 | # previously computed differs from the one we just got, we take the | |
355 | # surrounding interval. This is conservative but avoids losing |
|
356 | # surrounding interval. This is conservative but avoids losing | |
356 | # information. |
|
357 | # information. | |
357 | if i in seen and seen[i][1] != linerange1: |
|
358 | if i in seen and seen[i][1] != linerange1: | |
358 | lbs, ubs = zip(linerange1, seen[i][1]) |
|
359 | lbs, ubs = zip(linerange1, seen[i][1]) | |
359 | linerange1 = min(lbs), max(ubs) |
|
360 | linerange1 = min(lbs), max(ubs) | |
360 | seen[i] = c, linerange1 |
|
361 | seen[i] = c, linerange1 | |
361 | if inrange: |
|
362 | if inrange: | |
362 | yield c, linerange1 |
|
363 | yield c, linerange1 | |
363 |
|
364 | |||
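When blockdescendants reaches a merge through both parents with different line ranges, it keeps the surrounding interval instead of choosing one side. A toy illustration of that merge step, using plain tuples rather than Mercurial objects:

```python
# Two candidate ranges computed for the same merge revision...
linerange_a = (3, 7)
linerange_b = (5, 10)
# ...are widened to the surrounding interval so no lines are dropped.
lowers, uppers = zip(linerange_a, linerange_b)   # (3, 5), (7, 10)
merged = (min(lowers), max(uppers))              # (3, 10)
print(merged)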
364 | @attr.s(slots=True, frozen=True) |
|
365 | @attr.s(slots=True, frozen=True) | |
365 | class annotateline(object): |
|
366 | class annotateline(object): | |
366 | fctx = attr.ib() |
|
367 | fctx = attr.ib() | |
367 | lineno = attr.ib(default=False) |
|
368 | lineno = attr.ib(default=False) | |
368 | # Whether this annotation was the result of a skip-annotate. |
|
369 | # Whether this annotation was the result of a skip-annotate. | |
369 | skip = attr.ib(default=False) |
|
370 | skip = attr.ib(default=False) | |
370 |
|
371 | |||
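annotateline instances are frozen, so the skip-annotate code below derives modified copies with attr.evolve. A small, hedged sketch (the fctx value is a placeholder, not a real filectx):

```python
import attr

# `fctx` would normally be a real filectx; any placeholder works here.
line = annotateline(fctx='fctx-placeholder', lineno=7)
skipped = attr.evolve(line, skip=True)   # same fctx and lineno, skip flag set
print(skipped.skip, skipped.lineno)
```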
371 | def _annotatepair(parents, childfctx, child, skipchild, diffopts): |
|
372 | def _annotatepair(parents, childfctx, child, skipchild, diffopts): | |
372 | r''' |
|
373 | r''' | |
373 | Given parent and child fctxes and annotate data for parents, for all lines |
|
374 | Given parent and child fctxes and annotate data for parents, for all lines | |
374 | in either parent that match the child, annotate the child with the parent's |
|
375 | in either parent that match the child, annotate the child with the parent's | |
375 | data. |
|
376 | data. | |
376 |
|
377 | |||
377 | Additionally, if `skipchild` is True, replace all other lines with parent |
|
378 | Additionally, if `skipchild` is True, replace all other lines with parent | |
378 | annotate data as well such that child is never blamed for any lines. |
|
379 | annotate data as well such that child is never blamed for any lines. | |
379 |
|
380 | |||
380 | See test-annotate.py for unit tests. |
|
381 | See test-annotate.py for unit tests. | |
381 | ''' |
|
382 | ''' | |
382 | pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts)) |
|
383 | pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts)) | |
383 | for parent in parents] |
|
384 | for parent in parents] | |
384 |
|
385 | |||
385 | if skipchild: |
|
386 | if skipchild: | |
386 | # Need to iterate over the blocks twice -- make it a list |
|
387 | # Need to iterate over the blocks twice -- make it a list | |
387 | pblocks = [(p, list(blocks)) for (p, blocks) in pblocks] |
|
388 | pblocks = [(p, list(blocks)) for (p, blocks) in pblocks] | |
388 | # Mercurial currently prefers p2 over p1 for annotate. |
|
389 | # Mercurial currently prefers p2 over p1 for annotate. | |
389 | # TODO: change this? |
|
390 | # TODO: change this? | |
390 | for parent, blocks in pblocks: |
|
391 | for parent, blocks in pblocks: | |
391 | for (a1, a2, b1, b2), t in blocks: |
|
392 | for (a1, a2, b1, b2), t in blocks: | |
392 | # Changed blocks ('!') or blocks made only of blank lines ('~') |
|
393 | # Changed blocks ('!') or blocks made only of blank lines ('~') | |
393 | # belong to the child. |
|
394 | # belong to the child. | |
394 | if t == '=': |
|
395 | if t == '=': | |
395 | child[0][b1:b2] = parent[0][a1:a2] |
|
396 | child[0][b1:b2] = parent[0][a1:a2] | |
396 |
|
397 | |||
397 | if skipchild: |
|
398 | if skipchild: | |
398 | # Now try and match up anything that couldn't be matched. |
|
399 | # Now try and match up anything that couldn't be matched. | |
399 | # Reversing pblocks maintains bias towards p2, matching the |
|
400 | # Reversing pblocks maintains bias towards p2, matching the | |
400 | # behavior above. |
|
401 | # behavior above. | |
401 | pblocks.reverse() |
|
402 | pblocks.reverse() | |
402 |
|
403 | |||
403 | # The heuristics are: |
|
404 | # The heuristics are: | |
404 | # * Work on blocks of changed lines (effectively diff hunks with -U0). |
|
405 | # * Work on blocks of changed lines (effectively diff hunks with -U0). | |
405 | # This could potentially be smarter but works well enough. |
|
406 | # This could potentially be smarter but works well enough. | |
406 | # * For a non-matching section, do a best-effort fit. Match lines in |
|
407 | # * For a non-matching section, do a best-effort fit. Match lines in | |
407 | # diff hunks 1:1, dropping lines as necessary. |
|
408 | # diff hunks 1:1, dropping lines as necessary. | |
408 | # * Repeat the last line as a last resort. |
|
409 | # * Repeat the last line as a last resort. | |
409 |
|
410 | |||
410 | # First, replace as much as possible without repeating the last line. |
|
411 | # First, replace as much as possible without repeating the last line. | |
411 | remaining = [(parent, []) for parent, _blocks in pblocks] |
|
412 | remaining = [(parent, []) for parent, _blocks in pblocks] | |
412 | for idx, (parent, blocks) in enumerate(pblocks): |
|
413 | for idx, (parent, blocks) in enumerate(pblocks): | |
413 | for (a1, a2, b1, b2), _t in blocks: |
|
414 | for (a1, a2, b1, b2), _t in blocks: | |
414 | if a2 - a1 >= b2 - b1: |
|
415 | if a2 - a1 >= b2 - b1: | |
415 | for bk in xrange(b1, b2): |
|
416 | for bk in xrange(b1, b2): | |
416 | if child[0][bk].fctx == childfctx: |
|
417 | if child[0][bk].fctx == childfctx: | |
417 | ak = min(a1 + (bk - b1), a2 - 1) |
|
418 | ak = min(a1 + (bk - b1), a2 - 1) | |
418 | child[0][bk] = attr.evolve(parent[0][ak], skip=True) |
|
419 | child[0][bk] = attr.evolve(parent[0][ak], skip=True) | |
419 | else: |
|
420 | else: | |
420 | remaining[idx][1].append((a1, a2, b1, b2)) |
|
421 | remaining[idx][1].append((a1, a2, b1, b2)) | |
421 |
|
422 | |||
422 | # Then, look at anything left, which might involve repeating the last |
|
423 | # Then, look at anything left, which might involve repeating the last | |
423 | # line. |
|
424 | # line. | |
424 | for parent, blocks in remaining: |
|
425 | for parent, blocks in remaining: | |
425 | for a1, a2, b1, b2 in blocks: |
|
426 | for a1, a2, b1, b2 in blocks: | |
426 | for bk in xrange(b1, b2): |
|
427 | for bk in xrange(b1, b2): | |
427 | if child[0][bk].fctx == childfctx: |
|
428 | if child[0][bk].fctx == childfctx: | |
428 | ak = min(a1 + (bk - b1), a2 - 1) |
|
429 | ak = min(a1 + (bk - b1), a2 - 1) | |
429 | child[0][bk] = attr.evolve(parent[0][ak], skip=True) |
|
430 | child[0][bk] = attr.evolve(parent[0][ak], skip=True) | |
430 | return child |
|
431 | return child | |
431 |
|
432 | |||
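The '=' branch above is the core of the pairing: unchanged blocks inherit the parent's annotation entries, while everything else stays blamed on the child. A toy illustration with plain lists standing in for annotateline entries and hand-written blocks in place of mdiff.allblocks output (illustrative assumptions only):

```python
parent_ann = ['p0', 'p1', 'p2', 'p3']   # stand-ins for parent annotations
child_ann = ['c0', 'c1', 'c2', 'c3']    # freshly decorated child lines
blocks = [((0, 2, 0, 2), '='),          # lines 0-1 unchanged: copy parent data
          ((2, 3, 2, 3), '!')]          # line 2 changed: stays with the child
for (a1, a2, b1, b2), t in blocks:
    if t == '=':
        child_ann[b1:b2] = parent_ann[a1:a2]
print(child_ann)   # ['p0', 'p1', 'c2', 'c3']
```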
|
433 | def annotate(base, parents, linenumber=False, skiprevs=None, diffopts=None): | |||
|
434 | """Core algorithm for filectx.annotate() | |||
|
435 | ||||
|
436 | `parents(fctx)` is a function returning a list of parent filectxs. | |||
|
437 | """ | |||
|
438 | ||||
|
439 | def lines(text): | |||
|
440 | if text.endswith("\n"): | |||
|
441 | return text.count("\n") | |||
|
442 | return text.count("\n") + int(bool(text)) | |||
|
443 | ||||
|
444 | if linenumber: | |||
|
445 | def decorate(text, rev): | |||
|
446 | return ([annotateline(fctx=rev, lineno=i) | |||
|
447 | for i in xrange(1, lines(text) + 1)], text) | |||
|
448 | else: | |||
|
449 | def decorate(text, rev): | |||
|
450 | return ([annotateline(fctx=rev)] * lines(text), text) | |||
|
451 | ||||
|
452 | # This algorithm would prefer to be recursive, but Python is a | |||
|
453 | # bit recursion-hostile. Instead we do an iterative | |||
|
454 | # depth-first search. | |||
|
455 | ||||
|
456 | # 1st DFS pre-calculates pcache and needed | |||
|
457 | visit = [base] | |||
|
458 | pcache = {} | |||
|
459 | needed = {base: 1} | |||
|
460 | while visit: | |||
|
461 | f = visit.pop() | |||
|
462 | if f in pcache: | |||
|
463 | continue | |||
|
464 | pl = parents(f) | |||
|
465 | pcache[f] = pl | |||
|
466 | for p in pl: | |||
|
467 | needed[p] = needed.get(p, 0) + 1 | |||
|
468 | if p not in pcache: | |||
|
469 | visit.append(p) | |||
|
470 | ||||
|
471 | # 2nd DFS does the actual annotate | |||
|
472 | visit[:] = [base] | |||
|
473 | hist = {} | |||
|
474 | while visit: | |||
|
475 | f = visit[-1] | |||
|
476 | if f in hist: | |||
|
477 | visit.pop() | |||
|
478 | continue | |||
|
479 | ||||
|
480 | ready = True | |||
|
481 | pl = pcache[f] | |||
|
482 | for p in pl: | |||
|
483 | if p not in hist: | |||
|
484 | ready = False | |||
|
485 | visit.append(p) | |||
|
486 | if ready: | |||
|
487 | visit.pop() | |||
|
488 | curr = decorate(f.data(), f) | |||
|
489 | skipchild = False | |||
|
490 | if skiprevs is not None: | |||
|
491 | skipchild = f._changeid in skiprevs | |||
|
492 | curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild, | |||
|
493 | diffopts) | |||
|
494 | for p in pl: | |||
|
495 | if needed[p] == 1: | |||
|
496 | del hist[p] | |||
|
497 | del needed[p] | |||
|
498 | else: | |||
|
499 | needed[p] -= 1 | |||
|
500 | ||||
|
501 | hist[f] = curr | |||
|
502 | del pcache[f] | |||
|
503 | ||||
|
504 | lineattrs, text = hist[base] | |||
|
505 | return pycompat.ziplist(lineattrs, mdiff.splitnewlines(text)) | |||
|
506 | ||||
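A minimal, hedged usage sketch of the annotate() helper added above. The lambda passed as `parents` simply follows filelog parents and ignores rename/copy tracing, which the real caller handles; `fctx` is assumed to be an existing filectx.

```python
diffopts = patch.diffopts(fctx._repo.ui)
result = annotate(fctx,
                  parents=lambda f: list(f.parents()),
                  linenumber=True,
                  diffopts=diffopts)
for ann, text in result:
    # ann.fctx is the revision that introduced the line; ann.lineno is the
    # line's number in that revision (because linenumber=True was requested)
    print(ann.fctx.rev(), ann.lineno, text)
```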
432 | def toposort(revs, parentsfunc, firstbranch=()): |
|
507 | def toposort(revs, parentsfunc, firstbranch=()): | |
433 | """Yield revisions from heads to roots one (topo) branch at a time. |
|
508 | """Yield revisions from heads to roots one (topo) branch at a time. | |
434 |
|
509 | |||
435 | This function aims to be used by a graph generator that wishes to minimize |
|
510 | This function aims to be used by a graph generator that wishes to minimize | |
436 | the number of parallel branches and their interleaving. |
|
511 | the number of parallel branches and their interleaving. | |
437 |
|
512 | |||
438 | Example iteration order (numbers show the "true" order in a changelog): |
|
513 | Example iteration order (numbers show the "true" order in a changelog): | |
439 |
|
514 | |||
440 | o 4 |
|
515 | o 4 | |
441 | | |
|
516 | | | |
442 | o 1 |
|
517 | o 1 | |
443 | | |
|
518 | | | |
444 | | o 3 |
|
519 | | o 3 | |
445 | | | |
|
520 | | | | |
446 | | o 2 |
|
521 | | o 2 | |
447 | |/ |
|
522 | |/ | |
448 | o 0 |
|
523 | o 0 | |
449 |
|
524 | |||
450 | Note that the ancestors of merges are understood by the current |
|
525 | Note that the ancestors of merges are understood by the current | |
451 | algorithm to be on the same branch. This means no reordering will |
|
526 | algorithm to be on the same branch. This means no reordering will | |
452 | occur behind a merge. |
|
527 | occur behind a merge. | |
453 | """ |
|
528 | """ | |
454 |
|
529 | |||
455 | ### Quick summary of the algorithm |
|
530 | ### Quick summary of the algorithm | |
456 | # |
|
531 | # | |
457 | # This function is based around a "retention" principle. We keep revisions |
|
532 | # This function is based around a "retention" principle. We keep revisions | |
458 | # in memory until we are ready to emit a whole branch that immediately |
|
533 | # in memory until we are ready to emit a whole branch that immediately | |
459 | # "merges" into an existing one. This reduces the number of parallel |
|
534 | # "merges" into an existing one. This reduces the number of parallel | |
460 | # branches with interleaved revisions. |
|
535 | # branches with interleaved revisions. | |
461 | # |
|
536 | # | |
462 | # During iteration revs are split into two groups: |
|
537 | # During iteration revs are split into two groups: | |
463 | # A) revisions already emitted |
|
464 | # A) revisions already emitted | |
464 | # B) revisions in "retention". They are stored as different subgroups. |
|
465 | # B) revisions in "retention". They are stored as different subgroups. | |
465 | # |
|
540 | # | |
466 | # for each REV, we do the following logic: |
|
541 | # for each REV, we do the following logic: | |
467 | # |
|
542 | # | |
468 | # 1) if REV is a parent of (A), we will emit it. If there is a |
|
543 | # 1) if REV is a parent of (A), we will emit it. If there is a | |
469 | # retention group ((B) above) that is blocked on REV being |
|
544 | # retention group ((B) above) that is blocked on REV being | |
470 | # available, we emit all the revisions out of that retention |
|
545 | # available, we emit all the revisions out of that retention | |
471 | # group first. |
|
546 | # group first. | |
472 | # |
|
547 | # | |
473 | # 2) else, we'll search for a subgroup in (B) waiting for REV to be |
|
548 | # 2) else, we'll search for a subgroup in (B) waiting for REV to be | |
474 | # available; if such a subgroup exists, we add REV to it and the subgroup is |
|
549 | # available; if such a subgroup exists, we add REV to it and the subgroup is | |
475 | # now waiting for REV.parents() to be available. |
|
550 | # now waiting for REV.parents() to be available. | |
476 | # |
|
551 | # | |
477 | # 3) finally if no such group existed in (B), we create a new subgroup. |
|
552 | # 3) finally if no such group existed in (B), we create a new subgroup. | |
478 | # |
|
553 | # | |
479 | # |
|
554 | # | |
480 | # To bootstrap the algorithm, we emit the tipmost revision (which |
|
555 | # To bootstrap the algorithm, we emit the tipmost revision (which | |
481 | # puts it in group (A) from above). |
|
556 | # puts it in group (A) from above). | |
482 |
|
557 | |||
483 | revs.sort(reverse=True) |
|
558 | revs.sort(reverse=True) | |
484 |
|
559 | |||
485 | # Set of parents of revisions that have been emitted. They can be considered |
|
560 | # Set of parents of revisions that have been emitted. They can be considered | |
486 | # unblocked as the graph generator is already aware of them so there is no |
|
561 | # unblocked as the graph generator is already aware of them so there is no | |
487 | # need to delay the revisions that reference them. |
|
562 | # need to delay the revisions that reference them. | |
488 | # |
|
563 | # | |
489 | # If someone wants to prioritize a branch over the others, pre-filling this |
|
564 | # If someone wants to prioritize a branch over the others, pre-filling this | |
490 | # set will force all other branches to wait until this branch is ready to be |
|
565 | # set will force all other branches to wait until this branch is ready to be | |
491 | # emitted. |
|
566 | # emitted. | |
492 | unblocked = set(firstbranch) |
|
567 | unblocked = set(firstbranch) | |
493 |
|
568 | |||
494 | # list of groups waiting to be displayed, each group is defined by: |
|
569 | # list of groups waiting to be displayed, each group is defined by: | |
495 | # |
|
570 | # | |
496 | # (revs: lists of revs waiting to be displayed, |
|
571 | # (revs: lists of revs waiting to be displayed, | |
497 | # blocked: set of revs that cannot be displayed before those in 'revs') |
|
572 | # blocked: set of revs that cannot be displayed before those in 'revs') | |
498 | # |
|
573 | # | |
499 | # The second value ('blocked') corresponds to parents of any revision in the |
|
574 | # The second value ('blocked') corresponds to parents of any revision in the | |
500 | # group ('revs') that is not itself contained in the group. The main idea |
|
575 | # group ('revs') that is not itself contained in the group. The main idea | |
501 | # of this algorithm is to delay as much as possible the emission of any |
|
576 | # of this algorithm is to delay as much as possible the emission of any | |
502 | # revision. This means waiting for the moment we are about to display |
|
577 | # revision. This means waiting for the moment we are about to display | |
503 | # these parents to display the revs in a group. |
|
578 | # these parents to display the revs in a group. | |
504 | # |
|
579 | # | |
505 | # This first implementation is smart until it encounters a merge: it will |
|
580 | # This first implementation is smart until it encounters a merge: it will | |
506 | # emit revs as soon as any parent is about to be emitted and can grow an |
|
581 | # emit revs as soon as any parent is about to be emitted and can grow an | |
507 | # arbitrary number of revs in 'blocked'. In practice this means we properly |
|
582 | # arbitrary number of revs in 'blocked'. In practice this means we properly | |
508 | # retain new branches but give up on any special ordering for ancestors |
|
583 | # retain new branches but give up on any special ordering for ancestors | |
509 | # of merges. The implementation can be improved to handle this better. |
|
584 | # of merges. The implementation can be improved to handle this better. | |
510 | # |
|
585 | # | |
511 | # The first subgroup is special. It corresponds to all the revisions that |
|
586 | # The first subgroup is special. It corresponds to all the revisions that | |
512 | # were already emitted. The 'revs' list is expected to be empty and the |
|
587 | # were already emitted. The 'revs' list is expected to be empty and the | |
513 | # 'blocked' set contains the parent revisions of already emitted revisions. |
|
588 | # 'blocked' set contains the parent revisions of already emitted revisions. | |
514 | # |
|
589 | # | |
515 | # You could pre-seed the <parents> set of groups[0] with specific |
|
590 | # You could pre-seed the <parents> set of groups[0] with specific | |
516 | # changesets to select what the first emitted branch should be. |
|
591 | # changesets to select what the first emitted branch should be. | |
517 | groups = [([], unblocked)] |
|
592 | groups = [([], unblocked)] | |
518 | pendingheap = [] |
|
593 | pendingheap = [] | |
519 | pendingset = set() |
|
594 | pendingset = set() | |
520 |
|
595 | |||
521 | heapq.heapify(pendingheap) |
|
596 | heapq.heapify(pendingheap) | |
522 | heappop = heapq.heappop |
|
597 | heappop = heapq.heappop | |
523 | heappush = heapq.heappush |
|
598 | heappush = heapq.heappush | |
524 | for currentrev in revs: |
|
599 | for currentrev in revs: | |
525 | # Heap works with smallest element, we want highest so we invert |
|
600 | # Heap works with smallest element, we want highest so we invert | |
526 | if currentrev not in pendingset: |
|
601 | if currentrev not in pendingset: | |
527 | heappush(pendingheap, -currentrev) |
|
602 | heappush(pendingheap, -currentrev) | |
528 | pendingset.add(currentrev) |
|
603 | pendingset.add(currentrev) | |
529 | # iterate over pending revs until the current rev has been |
|
604 | # iterate over pending revs until the current rev has been | |
530 | # processed. |
|
605 | # processed. | |
531 | rev = None |
|
606 | rev = None | |
532 | while rev != currentrev: |
|
607 | while rev != currentrev: | |
533 | rev = -heappop(pendingheap) |
|
608 | rev = -heappop(pendingheap) | |
534 | pendingset.remove(rev) |
|
609 | pendingset.remove(rev) | |
535 |
|
610 | |||
536 | # Look for a subgroup blocked waiting for the current revision. |
|
611 | # Look for a subgroup blocked waiting for the current revision. | |
537 | matching = [i for i, g in enumerate(groups) if rev in g[1]] |
|
612 | matching = [i for i, g in enumerate(groups) if rev in g[1]] | |
538 |
|
613 | |||
539 | if matching: |
|
614 | if matching: | |
540 | # The main idea is to gather together all sets that are blocked |
|
615 | # The main idea is to gather together all sets that are blocked | |
541 | # on the same revision. |
|
616 | # on the same revision. | |
542 | # |
|
617 | # | |
543 | # Groups are merged when a common blocking ancestor is |
|
618 | # Groups are merged when a common blocking ancestor is | |
544 | # observed. For example, given two groups: |
|
619 | # observed. For example, given two groups: | |
545 | # |
|
620 | # | |
546 | # revs [5, 4] waiting for 1 |
|
621 | # revs [5, 4] waiting for 1 | |
547 | # revs [3, 2] waiting for 1 |
|
622 | # revs [3, 2] waiting for 1 | |
548 | # |
|
623 | # | |
549 | # These two groups will be merged when we process |
|
624 | # These two groups will be merged when we process | |
550 | # 1. In theory, we could have merged the groups when |
|
625 | # 1. In theory, we could have merged the groups when | |
551 | # we added 2 to the group it is now in (we could have |
|
626 | # we added 2 to the group it is now in (we could have | |
552 | # noticed the groups were both blocked on 1 then), but |
|
627 | # noticed the groups were both blocked on 1 then), but | |
553 | # the way it works now makes the algorithm simpler. |
|
628 | # the way it works now makes the algorithm simpler. | |
554 | # |
|
629 | # | |
555 | # We also always keep the oldest subgroup first. We can |
|
630 | # We also always keep the oldest subgroup first. We can | |
556 | # probably improve the behavior by having the longest set |
|
631 | # probably improve the behavior by having the longest set | |
557 | # first. That way, graph algorithms could minimise the length |
|
632 | # first. That way, graph algorithms could minimise the length | |
558 | # of parallel lines in their drawing. This is currently not done. |
|
633 | # of parallel lines in their drawing. This is currently not done. | |
559 | targetidx = matching.pop(0) |
|
634 | targetidx = matching.pop(0) | |
560 | trevs, tparents = groups[targetidx] |
|
635 | trevs, tparents = groups[targetidx] | |
561 | for i in matching: |
|
636 | for i in matching: | |
562 | gr = groups[i] |
|
637 | gr = groups[i] | |
563 | trevs.extend(gr[0]) |
|
638 | trevs.extend(gr[0]) | |
564 | tparents |= gr[1] |
|
639 | tparents |= gr[1] | |
565 | # delete all merged subgroups (except the one we kept) |
|
640 | # delete all merged subgroups (except the one we kept) | |
566 | # (starting from the last subgroup for performance and |
|
641 | # (starting from the last subgroup for performance and | |
567 | # sanity reasons) |
|
642 | # sanity reasons) | |
568 | for i in reversed(matching): |
|
643 | for i in reversed(matching): | |
569 | del groups[i] |
|
644 | del groups[i] | |
570 | else: |
|
645 | else: | |
571 | # This is a new head. We create a new subgroup for it. |
|
646 | # This is a new head. We create a new subgroup for it. | |
572 | targetidx = len(groups) |
|
647 | targetidx = len(groups) | |
573 | groups.append(([], {rev})) |
|
648 | groups.append(([], {rev})) | |
574 |
|
649 | |||
575 | gr = groups[targetidx] |
|
650 | gr = groups[targetidx] | |
576 |
|
651 | |||
577 | # We now add the current nodes to this subgroup. This is done |
|
652 | # We now add the current nodes to this subgroup. This is done | |
578 | # after the subgroup merging because all elements from a subgroup |
|
653 | # after the subgroup merging because all elements from a subgroup | |
579 | # that relied on this rev must precede it. |
|
654 | # that relied on this rev must precede it. | |
580 | # |
|
655 | # | |
581 | # we also update the <parents> set to include the parents of the |
|
656 | # we also update the <parents> set to include the parents of the | |
582 | # new nodes. |
|
657 | # new nodes. | |
583 | if rev == currentrev: # only display stuff in rev |
|
658 | if rev == currentrev: # only display stuff in rev | |
584 | gr[0].append(rev) |
|
659 | gr[0].append(rev) | |
585 | gr[1].remove(rev) |
|
660 | gr[1].remove(rev) | |
586 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] |
|
661 | parents = [p for p in parentsfunc(rev) if p > node.nullrev] | |
587 | gr[1].update(parents) |
|
662 | gr[1].update(parents) | |
588 | for p in parents: |
|
663 | for p in parents: | |
589 | if p not in pendingset: |
|
664 | if p not in pendingset: | |
590 | pendingset.add(p) |
|
665 | pendingset.add(p) | |
591 | heappush(pendingheap, -p) |
|
666 | heappush(pendingheap, -p) | |
592 |
|
667 | |||
593 | # Look for a subgroup to display |
|
668 | # Look for a subgroup to display | |
594 | # |
|
669 | # | |
595 | # When unblocked is empty (if clause), we were not waiting for any |
|
670 | # When unblocked is empty (if clause), we were not waiting for any | |
596 | # revisions during the first iteration (if no priority was given) or |
|
671 | # revisions during the first iteration (if no priority was given) or | |
597 | # if we emitted a whole disconnected set of the graph (reached a |
|
672 | # if we emitted a whole disconnected set of the graph (reached a | |
598 | # root). In that case we arbitrarily take the oldest known |
|
673 | # root). In that case we arbitrarily take the oldest known | |
599 | # subgroup. The heuristic could probably be better. |
|
674 | # subgroup. The heuristic could probably be better. | |
600 | # |
|
675 | # | |
601 | # Otherwise (elif clause) if the subgroup is blocked on |
|
676 | # Otherwise (elif clause) if the subgroup is blocked on | |
602 | # a revision we just emitted, we can safely emit it as |
|
677 | # a revision we just emitted, we can safely emit it as | |
603 | # well. |
|
678 | # well. | |
604 | if not unblocked: |
|
679 | if not unblocked: | |
605 | if len(groups) > 1: # display other subset |
|
680 | if len(groups) > 1: # display other subset | |
606 | targetidx = 1 |
|
681 | targetidx = 1 | |
607 | gr = groups[1] |
|
682 | gr = groups[1] | |
608 | elif not gr[1] & unblocked: |
|
683 | elif not gr[1] & unblocked: | |
609 | gr = None |
|
684 | gr = None | |
610 |
|
685 | |||
611 | if gr is not None: |
|
686 | if gr is not None: | |
612 | # update the set of awaited revisions with the one from the |
|
687 | # update the set of awaited revisions with the one from the | |
613 | # subgroup |
|
688 | # subgroup | |
614 | unblocked |= gr[1] |
|
689 | unblocked |= gr[1] | |
615 | # output all revisions in the subgroup |
|
690 | # output all revisions in the subgroup | |
616 | for r in gr[0]: |
|
691 | for r in gr[0]: | |
617 | yield r |
|
692 | yield r | |
618 | # delete the subgroup that you just output |
|
693 | # delete the subgroup that you just output | |
619 | # unless it is groups[0] in which case you just empty it. |
|
694 | # unless it is groups[0] in which case you just empty it. | |
620 | if targetidx: |
|
695 | if targetidx: | |
621 | del groups[targetidx] |
|
696 | del groups[targetidx] | |
622 | else: |
|
697 | else: | |
623 | gr[0][:] = [] |
|
698 | gr[0][:] = [] | |
624 | # Check if we have some subgroup waiting for revisions we are not going to |
|
699 | # Check if we have some subgroup waiting for revisions we are not going to | |
625 | # iterate over |
|
700 | # iterate over | |
626 | for g in groups: |
|
701 | for g in groups: | |
627 | for r in g[0]: |
|
702 | for r in g[0]: | |
628 | yield r |
|
703 | yield r |
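The graph in the docstring doubles as a small smoke test. A hedged sketch where a plain dict stands in for parentsfunc (in real use this would be something like the changelog's parentrevs):

```python
parentmap = {0: [], 1: [0], 2: [0], 3: [2], 4: [1]}
revs = [0, 1, 2, 3, 4]            # toposort sorts this list in place
order = list(toposort(revs, lambda r: parentmap[r]))
print(order)   # [4, 1, 3, 2, 0]: heads first, one topo branch at a time
```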